author    Michaël Zasso <targos@protonmail.com>    2018-12-04 08:20:37 +0100
committer Michaël Zasso <targos@protonmail.com>    2018-12-06 15:23:33 +0100
commit 9b4bf7de6c9a7c25f116c7a502384c20b5cfaea3 (patch)
tree   2b0c843168dafb939d8df8a15b2aa72b76dee51d /deps/v8
parent b8fbe69db1292307adb2c2b2e0d5ef48c4ab2faf (diff)
deps: update V8 to 7.1.302.28
PR-URL: https://github.com/nodejs/node/pull/23423
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Gus Caplan <me@gus.host>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
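The version bump itself lands in deps/v8/include/v8-version.h (a 6-line change in the stat below). As a sketch, assuming the header keeps V8's usual four-macro layout, 7.1.302.28 decomposes as major.minor.build.patch:

    // deps/v8/include/v8-version.h (sketch, not the verbatim diff):
    // V8 encodes its release number in four macros; the values below
    // follow from the commit subject "update V8 to 7.1.302.28".
    #define V8_MAJOR_VERSION 7
    #define V8_MINOR_VERSION 1
    #define V8_BUILD_NUMBER 302
    #define V8_PATCH_LEVEL 28
    // Set to 1 only for candidate (pre-release) builds.
    #define V8_IS_CANDIDATE_VERSION 0

On a Node.js build carrying this commit, node -p process.versions.v8 should report a string starting with 7.1.302.28 (Node typically appends a -node.<n> suffix for locally floated patches).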
Diffstat (limited to 'deps/v8')
-rw-r--r-- deps/v8/.clang-tidy | 20
-rw-r--r-- deps/v8/.gitattributes | 2
-rw-r--r-- deps/v8/.gitignore | 1
-rw-r--r-- deps/v8/AUTHORS | 2
-rw-r--r-- deps/v8/BUILD.gn | 198
-rw-r--r-- deps/v8/ChangeLog | 1640
-rw-r--r-- deps/v8/DEPS | 62
-rw-r--r-- deps/v8/PRESUBMIT.py | 16
-rw-r--r-- deps/v8/gni/v8.gni | 10
-rw-r--r-- deps/v8/include/libplatform/v8-tracing.h | 10
-rw-r--r-- deps/v8/include/v8-inspector.h | 14
-rw-r--r-- deps/v8/include/v8-internal.h | 316
-rw-r--r-- deps/v8/include/v8-platform.h | 15
-rw-r--r-- deps/v8/include/v8-profiler.h | 24
-rw-r--r-- deps/v8/include/v8-util.h | 8
-rw-r--r-- deps/v8/include/v8-version.h | 6
-rw-r--r-- deps/v8/include/v8.h | 764
-rw-r--r-- deps/v8/include/v8config.h | 30
-rw-r--r-- deps/v8/infra/config/cq.cfg | 17
-rw-r--r-- deps/v8/infra/mb/mb_config.pyl | 121
-rw-r--r-- deps/v8/src/DEPS | 2
-rw-r--r-- deps/v8/src/accessors.cc | 5
-rw-r--r-- deps/v8/src/accessors.h | 57
-rw-r--r-- deps/v8/src/address-map.cc | 9
-rw-r--r-- deps/v8/src/address-map.h | 11
-rw-r--r-- deps/v8/src/allocation-site-scopes-inl.h | 52
-rw-r--r-- deps/v8/src/allocation-site-scopes.h | 34
-rw-r--r-- deps/v8/src/allocation.cc | 169
-rw-r--r-- deps/v8/src/allocation.h | 78
-rw-r--r-- deps/v8/src/api-arguments-inl.h | 85
-rw-r--r-- deps/v8/src/api-arguments.h | 7
-rw-r--r-- deps/v8/src/api-inl.h | 1
-rw-r--r-- deps/v8/src/api-natives.cc | 54
-rw-r--r-- deps/v8/src/api-natives.h | 9
-rw-r--r-- deps/v8/src/api.cc | 456
-rw-r--r-- deps/v8/src/api.h | 1
-rw-r--r-- deps/v8/src/arguments.h | 2
-rw-r--r-- deps/v8/src/arm/assembler-arm.cc | 27
-rw-r--r-- deps/v8/src/arm/assembler-arm.h | 11
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.cc | 10
-rw-r--r-- deps/v8/src/arm/codegen-arm.cc | 46
-rw-r--r-- deps/v8/src/arm/interface-descriptors-arm.cc | 14
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.cc | 17
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.h | 21
-rw-r--r-- deps/v8/src/arm/simulator-arm.cc | 10
-rw-r--r-- deps/v8/src/arm64/assembler-arm64-inl.h | 4
-rw-r--r-- deps/v8/src/arm64/assembler-arm64.cc | 43
-rw-r--r-- deps/v8/src/arm64/assembler-arm64.h | 4
-rw-r--r-- deps/v8/src/arm64/code-stubs-arm64.cc | 9
-rw-r--r-- deps/v8/src/arm64/codegen-arm64.cc | 5
-rw-r--r-- deps/v8/src/arm64/constants-arm64.h | 2
-rw-r--r-- deps/v8/src/arm64/interface-descriptors-arm64.cc | 14
-rw-r--r-- deps/v8/src/arm64/macro-assembler-arm64.cc | 33
-rw-r--r-- deps/v8/src/arm64/macro-assembler-arm64.h | 23
-rw-r--r-- deps/v8/src/asmjs/asm-js.cc | 2
-rw-r--r-- deps/v8/src/assembler.cc | 32
-rw-r--r-- deps/v8/src/assembler.h | 56
-rw-r--r-- deps/v8/src/assert-scope.h | 4
-rw-r--r-- deps/v8/src/ast/ast-function-literal-id-reindexer.cc | 2
-rw-r--r-- deps/v8/src/ast/ast-source-ranges.h | 25
-rw-r--r-- deps/v8/src/ast/ast-value-factory.cc | 11
-rw-r--r-- deps/v8/src/ast/ast-value-factory.h | 96
-rw-r--r-- deps/v8/src/ast/ast.cc | 6
-rw-r--r-- deps/v8/src/ast/ast.h | 36
-rw-r--r-- deps/v8/src/ast/prettyprinter.cc | 4
-rw-r--r-- deps/v8/src/ast/scopes-inl.h | 66
-rw-r--r-- deps/v8/src/ast/scopes.cc | 231
-rw-r--r-- deps/v8/src/ast/scopes.h | 73
-rw-r--r-- deps/v8/src/ast/variables.h | 3
-rw-r--r-- deps/v8/src/base/address-region.h | 70
-rw-r--r-- deps/v8/src/base/atomic-utils.h | 16
-rw-r--r-- deps/v8/src/base/bits.h | 8
-rw-r--r-- deps/v8/src/base/bounded-page-allocator.cc | 101
-rw-r--r-- deps/v8/src/base/bounded-page-allocator.h | 79
-rw-r--r-- deps/v8/src/base/build_config.h | 4
-rw-r--r-- deps/v8/src/base/debug/stack_trace.cc | 2
-rw-r--r-- deps/v8/src/base/debug/stack_trace_posix.cc | 8
-rw-r--r-- deps/v8/src/base/debug/stack_trace_win.cc | 7
-rw-r--r-- deps/v8/src/base/ieee754.cc | 20
-rw-r--r-- deps/v8/src/base/logging.h | 10
-rw-r--r-- deps/v8/src/base/lsan-page-allocator.cc | 59
-rw-r--r-- deps/v8/src/base/lsan-page-allocator.h | 56
-rw-r--r-- deps/v8/src/base/macros.h | 48
-rw-r--r-- deps/v8/src/base/optional.h | 2
-rw-r--r-- deps/v8/src/base/page-allocator.cc | 8
-rw-r--r-- deps/v8/src/base/page-allocator.h | 11
-rw-r--r-- deps/v8/src/base/platform/OWNERS | 2
-rw-r--r-- deps/v8/src/base/platform/platform-fuchsia.cc | 12
-rw-r--r-- deps/v8/src/base/platform/platform-linux.cc | 8
-rw-r--r-- deps/v8/src/base/platform/platform-posix-time.h | 2
-rw-r--r-- deps/v8/src/base/platform/platform-posix.cc | 13
-rw-r--r-- deps/v8/src/base/platform/platform-posix.h | 2
-rw-r--r-- deps/v8/src/base/platform/platform-win32.cc | 6
-rw-r--r-- deps/v8/src/base/platform/platform.h | 2
-rw-r--r-- deps/v8/src/base/platform/semaphore.cc | 4
-rw-r--r-- deps/v8/src/base/platform/time.h | 5
-rw-r--r-- deps/v8/src/base/region-allocator.cc | 291
-rw-r--r-- deps/v8/src/base/region-allocator.h | 164
-rw-r--r-- deps/v8/src/base/safe_math.h | 2
-rw-r--r-- deps/v8/src/base/threaded-list.h | 267
-rw-r--r-- deps/v8/src/base/timezone-cache.h | 2
-rw-r--r-- deps/v8/src/base/utils/random-number-generator.cc | 2
-rw-r--r-- deps/v8/src/base/utils/random-number-generator.h | 9
-rw-r--r-- deps/v8/src/basic-block-profiler.cc | 7
-rw-r--r-- deps/v8/src/basic-block-profiler.h | 4
-rw-r--r-- deps/v8/src/bit-vector.h | 9
-rw-r--r-- deps/v8/src/bootstrapper.cc | 566
-rw-r--r-- deps/v8/src/bootstrapper.h | 5
-rw-r--r-- deps/v8/src/builtins/arm/builtins-arm.cc | 158
-rw-r--r-- deps/v8/src/builtins/arm64/builtins-arm64.cc | 157
-rw-r--r-- deps/v8/src/builtins/array-copywithin.tq | 26
-rw-r--r-- deps/v8/src/builtins/array-foreach.tq | 69
-rw-r--r-- deps/v8/src/builtins/array-lastindexof.tq | 159
-rw-r--r-- deps/v8/src/builtins/array-reverse.tq | 77
-rw-r--r-- deps/v8/src/builtins/array-splice.tq | 395
-rw-r--r-- deps/v8/src/builtins/array-unshift.tq | 107
-rw-r--r-- deps/v8/src/builtins/array.tq | 321
-rw-r--r-- deps/v8/src/builtins/base.tq | 653
-rw-r--r-- deps/v8/src/builtins/builtins-api.cc | 2
-rw-r--r-- deps/v8/src/builtins/builtins-arguments-gen.cc | 6
-rw-r--r-- deps/v8/src/builtins/builtins-array-gen.cc | 65
-rw-r--r-- deps/v8/src/builtins/builtins-array.cc | 318
-rw-r--r-- deps/v8/src/builtins/builtins-arraybuffer.cc | 20
-rw-r--r-- deps/v8/src/builtins/builtins-async-function-gen.cc | 92
-rw-r--r-- deps/v8/src/builtins/builtins-async-gen.cc | 93
-rw-r--r-- deps/v8/src/builtins/builtins-async-gen.h | 53
-rw-r--r-- deps/v8/src/builtins/builtins-async-generator-gen.cc | 55
-rw-r--r-- deps/v8/src/builtins/builtins-async-iterator-gen.cc | 54
-rw-r--r-- deps/v8/src/builtins/builtins-boolean.cc | 5
-rw-r--r-- deps/v8/src/builtins/builtins-call-gen.cc | 61
-rw-r--r-- deps/v8/src/builtins/builtins-callsite.cc | 8
-rw-r--r-- deps/v8/src/builtins/builtins-collections-gen.cc | 25
-rw-r--r-- deps/v8/src/builtins/builtins-console.cc | 3
-rw-r--r-- deps/v8/src/builtins/builtins-constructor-gen.cc | 25
-rw-r--r-- deps/v8/src/builtins/builtins-conversion-gen.cc | 72
-rw-r--r-- deps/v8/src/builtins/builtins-data-view-gen.h | 14
-rw-r--r-- deps/v8/src/builtins/builtins-dataview.cc | 29
-rw-r--r-- deps/v8/src/builtins/builtins-date-gen.cc | 6
-rw-r--r-- deps/v8/src/builtins/builtins-date.cc | 63
-rw-r--r-- deps/v8/src/builtins/builtins-definitions.h | 199
-rw-r--r-- deps/v8/src/builtins/builtins-descriptors.h | 3
-rw-r--r-- deps/v8/src/builtins/builtins-function-gen.cc | 5
-rw-r--r-- deps/v8/src/builtins/builtins-function.cc | 33
-rw-r--r-- deps/v8/src/builtins/builtins-handler-gen.cc | 2
-rw-r--r-- deps/v8/src/builtins/builtins-ic-gen.cc | 8
-rw-r--r-- deps/v8/src/builtins/builtins-internal-gen.cc | 210
-rw-r--r-- deps/v8/src/builtins/builtins-interpreter-gen.cc | 18
-rw-r--r-- deps/v8/src/builtins/builtins-intl-gen.cc | 15
-rw-r--r-- deps/v8/src/builtins/builtins-intl.cc | 1296
-rw-r--r-- deps/v8/src/builtins/builtins-intl.h | 30
-rw-r--r-- deps/v8/src/builtins/builtins-iterator-gen.cc | 141
-rw-r--r-- deps/v8/src/builtins/builtins-iterator-gen.h | 5
-rw-r--r-- deps/v8/src/builtins/builtins-lazy-gen.cc | 2
-rw-r--r-- deps/v8/src/builtins/builtins-math-gen.cc | 11
-rw-r--r-- deps/v8/src/builtins/builtins-number-gen.cc | 10
-rw-r--r-- deps/v8/src/builtins/builtins-number.cc | 3
-rw-r--r-- deps/v8/src/builtins/builtins-object-gen.cc | 204
-rw-r--r-- deps/v8/src/builtins/builtins-promise-gen.cc | 36
-rw-r--r-- deps/v8/src/builtins/builtins-promise-gen.h | 5
-rw-r--r-- deps/v8/src/builtins/builtins-proxy-gen.cc | 8
-rw-r--r-- deps/v8/src/builtins/builtins-reflect.cc | 2
-rw-r--r-- deps/v8/src/builtins/builtins-regexp-gen.cc | 41
-rw-r--r-- deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc | 298
-rw-r--r-- deps/v8/src/builtins/builtins-sharedarraybuffer.cc | 55
-rw-r--r-- deps/v8/src/builtins/builtins-string-gen.cc | 273
-rw-r--r-- deps/v8/src/builtins/builtins-string-gen.h | 14
-rw-r--r-- deps/v8/src/builtins/builtins-string.cc | 3
-rw-r--r-- deps/v8/src/builtins/builtins-symbol.cc | 2
-rw-r--r-- deps/v8/src/builtins/builtins-typed-array-gen.cc | 184
-rw-r--r-- deps/v8/src/builtins/builtins-typed-array-gen.h | 6
-rw-r--r-- deps/v8/src/builtins/builtins-wasm-gen.cc | 52
-rw-r--r-- deps/v8/src/builtins/builtins.cc | 43
-rw-r--r-- deps/v8/src/builtins/builtins.h | 52
-rw-r--r-- deps/v8/src/builtins/constants-table-builder.cc | 6
-rw-r--r-- deps/v8/src/builtins/data-view.tq | 951
-rw-r--r-- deps/v8/src/builtins/generate-bytecodes-builtins-list.cc | 97
-rw-r--r-- deps/v8/src/builtins/ia32/builtins-ia32.cc | 973
-rw-r--r-- deps/v8/src/builtins/mips/builtins-mips.cc | 122
-rw-r--r-- deps/v8/src/builtins/mips64/builtins-mips64.cc | 120
-rw-r--r-- deps/v8/src/builtins/ppc/builtins-ppc.cc | 121
-rw-r--r-- deps/v8/src/builtins/s390/builtins-s390.cc | 125
-rw-r--r-- deps/v8/src/builtins/setup-builtins-internal.cc | 64
-rw-r--r-- deps/v8/src/builtins/typed-array.tq | 162
-rw-r--r-- deps/v8/src/builtins/x64/builtins-x64.cc | 173
-rw-r--r-- deps/v8/src/callable.h | 2
-rw-r--r-- deps/v8/src/cancelable-task.h | 54
-rw-r--r-- deps/v8/src/char-predicates-inl.h | 11
-rw-r--r-- deps/v8/src/checks.h | 2
-rw-r--r-- deps/v8/src/code-events.h | 4
-rw-r--r-- deps/v8/src/code-factory.cc | 30
-rw-r--r-- deps/v8/src/code-factory.h | 3
-rw-r--r-- deps/v8/src/code-stub-assembler.cc | 1556
-rw-r--r-- deps/v8/src/code-stub-assembler.h | 326
-rw-r--r-- deps/v8/src/code-stubs.h | 7
-rw-r--r-- deps/v8/src/codegen.cc | 24
-rw-r--r-- deps/v8/src/codegen.h | 10
-rw-r--r-- deps/v8/src/collector.h | 6
-rw-r--r-- deps/v8/src/compilation-cache.cc | 2
-rw-r--r-- deps/v8/src/compilation-cache.h | 2
-rw-r--r-- deps/v8/src/compilation-statistics.h | 2
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h | 28
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc | 2
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc | 450
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher.h | 37
-rw-r--r-- deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc | 18
-rw-r--r-- deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc | 342
-rw-r--r-- deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h | 49
-rw-r--r-- deps/v8/src/compiler.cc | 418
-rw-r--r-- deps/v8/src/compiler.h | 93
-rw-r--r-- deps/v8/src/compiler/access-builder.cc | 52
-rw-r--r-- deps/v8/src/compiler/access-builder.h | 1
-rw-r--r-- deps/v8/src/compiler/access-info.cc | 49
-rw-r--r-- deps/v8/src/compiler/access-info.h | 7
-rw-r--r-- deps/v8/src/compiler/allocation-builder.h | 2
-rw-r--r-- deps/v8/src/compiler/arm/code-generator-arm.cc | 140
-rw-r--r-- deps/v8/src/compiler/arm/instruction-codes-arm.h | 557
-rw-r--r-- deps/v8/src/compiler/arm/instruction-scheduler-arm.cc | 21
-rw-r--r-- deps/v8/src/compiler/arm/instruction-selector-arm.cc | 235
-rw-r--r-- deps/v8/src/compiler/arm64/code-generator-arm64.cc | 5
-rw-r--r-- deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 2
-rw-r--r-- deps/v8/src/compiler/branch-elimination.cc | 2
-rw-r--r-- deps/v8/src/compiler/bytecode-analysis.cc | 6
-rw-r--r-- deps/v8/src/compiler/bytecode-analysis.h | 2
-rw-r--r-- deps/v8/src/compiler/bytecode-graph-builder.cc | 77
-rw-r--r-- deps/v8/src/compiler/c-linkage.cc | 17
-rw-r--r-- deps/v8/src/compiler/checkpoint-elimination.h | 2
-rw-r--r-- deps/v8/src/compiler/code-assembler.cc | 104
-rw-r--r-- deps/v8/src/compiler/code-assembler.h | 134
-rw-r--r-- deps/v8/src/compiler/code-generator.cc | 44
-rw-r--r-- deps/v8/src/compiler/code-generator.h | 28
-rw-r--r-- deps/v8/src/compiler/common-node-cache.h | 2
-rw-r--r-- deps/v8/src/compiler/common-operator-reducer.h | 2
-rw-r--r-- deps/v8/src/compiler/common-operator.cc | 21
-rw-r--r-- deps/v8/src/compiler/common-operator.h | 11
-rw-r--r-- deps/v8/src/compiler/compilation-dependencies.cc | 23
-rw-r--r-- deps/v8/src/compiler/constant-folding-reducer.cc | 2
-rw-r--r-- deps/v8/src/compiler/dead-code-elimination.h | 2
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.cc | 549
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.h | 13
-rw-r--r-- deps/v8/src/compiler/escape-analysis.cc | 78
-rw-r--r-- deps/v8/src/compiler/escape-analysis.h | 8
-rw-r--r-- deps/v8/src/compiler/frame-states.cc | 6
-rw-r--r-- deps/v8/src/compiler/gap-resolver.cc | 13
-rw-r--r-- deps/v8/src/compiler/gap-resolver.h | 2
-rw-r--r-- deps/v8/src/compiler/graph-assembler.cc | 30
-rw-r--r-- deps/v8/src/compiler/graph-assembler.h | 19
-rw-r--r-- deps/v8/src/compiler/graph-reducer.cc | 2
-rw-r--r-- deps/v8/src/compiler/graph-reducer.h | 6
-rw-r--r-- deps/v8/src/compiler/graph-trimmer.cc | 2
-rw-r--r-- deps/v8/src/compiler/graph-visualizer.cc | 307
-rw-r--r-- deps/v8/src/compiler/graph-visualizer.h | 41
-rw-r--r-- deps/v8/src/compiler/graph.h | 2
-rw-r--r-- deps/v8/src/compiler/ia32/code-generator-ia32.cc | 215
-rw-r--r-- deps/v8/src/compiler/ia32/instruction-codes-ia32.h | 721
-rw-r--r-- deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc | 21
-rw-r--r-- deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 235
-rw-r--r-- deps/v8/src/compiler/instruction-selector-impl.h | 9
-rw-r--r-- deps/v8/src/compiler/instruction-selector.cc | 153
-rw-r--r-- deps/v8/src/compiler/instruction.cc | 12
-rw-r--r-- deps/v8/src/compiler/instruction.h | 24
-rw-r--r-- deps/v8/src/compiler/int64-lowering.cc | 19
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.cc | 470
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.h | 11
-rw-r--r-- deps/v8/src/compiler/js-context-specialization.cc | 8
-rw-r--r-- deps/v8/src/compiler/js-create-lowering.cc | 262
-rw-r--r-- deps/v8/src/compiler/js-create-lowering.h | 20
-rw-r--r-- deps/v8/src/compiler/js-generic-lowering.cc | 52
-rw-r--r-- deps/v8/src/compiler/js-graph.cc | 16
-rw-r--r-- deps/v8/src/compiler/js-graph.h | 6
-rw-r--r-- deps/v8/src/compiler/js-heap-broker.cc | 2280
-rw-r--r-- deps/v8/src/compiler/js-heap-broker.h | 251
-rw-r--r-- deps/v8/src/compiler/js-heap-copy-reducer.cc | 43
-rw-r--r-- deps/v8/src/compiler/js-heap-copy-reducer.h | 2
-rw-r--r-- deps/v8/src/compiler/js-inlining.cc | 23
-rw-r--r-- deps/v8/src/compiler/js-inlining.h | 3
-rw-r--r-- deps/v8/src/compiler/js-intrinsic-lowering.cc | 46
-rw-r--r-- deps/v8/src/compiler/js-intrinsic-lowering.h | 7
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.cc | 269
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.h | 19
-rw-r--r-- deps/v8/src/compiler/js-operator.cc | 11
-rw-r--r-- deps/v8/src/compiler/js-operator.h | 4
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.cc | 370
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.h | 12
-rw-r--r-- deps/v8/src/compiler/linkage.cc | 14
-rw-r--r-- deps/v8/src/compiler/load-elimination.cc | 117
-rw-r--r-- deps/v8/src/compiler/load-elimination.h | 44
-rw-r--r-- deps/v8/src/compiler/loop-peeling.h | 2
-rw-r--r-- deps/v8/src/compiler/loop-variable-optimizer.cc | 4
-rw-r--r-- deps/v8/src/compiler/machine-graph-verifier.cc | 57
-rw-r--r-- deps/v8/src/compiler/machine-operator-reducer.cc | 55
-rw-r--r-- deps/v8/src/compiler/machine-operator-reducer.h | 2
-rw-r--r-- deps/v8/src/compiler/machine-operator.cc | 118
-rw-r--r-- deps/v8/src/compiler/machine-operator.h | 16
-rw-r--r-- deps/v8/src/compiler/memory-optimizer.cc | 56
-rw-r--r-- deps/v8/src/compiler/memory-optimizer.h | 12
-rw-r--r-- deps/v8/src/compiler/mips/code-generator-mips.cc | 98
-rw-r--r-- deps/v8/src/compiler/mips/instruction-codes-mips.h | 11
-rw-r--r-- deps/v8/src/compiler/mips/instruction-scheduler-mips.cc | 9
-rw-r--r-- deps/v8/src/compiler/mips/instruction-selector-mips.cc | 136
-rw-r--r-- deps/v8/src/compiler/mips64/code-generator-mips64.cc | 354
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-codes-mips64.h | 628
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc | 37
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 324
-rw-r--r-- deps/v8/src/compiler/move-optimizer.cc | 8
-rw-r--r-- deps/v8/src/compiler/node-cache.cc | 4
-rw-r--r-- deps/v8/src/compiler/node-cache.h | 4
-rw-r--r-- deps/v8/src/compiler/node-properties.cc | 1
-rw-r--r-- deps/v8/src/compiler/node.h | 17
-rw-r--r-- deps/v8/src/compiler/opcodes.h | 35
-rw-r--r-- deps/v8/src/compiler/operation-typer.cc | 130
-rw-r--r-- deps/v8/src/compiler/operation-typer.h | 6
-rw-r--r-- deps/v8/src/compiler/operator-properties.cc | 106
-rw-r--r-- deps/v8/src/compiler/operator-properties.h | 2
-rw-r--r-- deps/v8/src/compiler/operator.h | 4
-rw-r--r-- deps/v8/src/compiler/per-isolate-compiler-cache.h | 64
-rw-r--r-- deps/v8/src/compiler/pipeline.cc | 275
-rw-r--r-- deps/v8/src/compiler/pipeline.h | 10
-rw-r--r-- deps/v8/src/compiler/ppc/code-generator-ppc.cc | 112
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-codes-ppc.h | 268
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc | 40
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 299
-rw-r--r-- deps/v8/src/compiler/property-access-builder.cc | 8
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.h | 70
-rw-r--r-- deps/v8/src/compiler/redundancy-elimination.cc | 178
-rw-r--r-- deps/v8/src/compiler/redundancy-elimination.h | 6
-rw-r--r-- deps/v8/src/compiler/refs-map.cc | 35
-rw-r--r-- deps/v8/src/compiler/refs-map.h | 54
-rw-r--r-- deps/v8/src/compiler/register-allocator.cc | 14
-rw-r--r-- deps/v8/src/compiler/representation-change.cc | 260
-rw-r--r-- deps/v8/src/compiler/representation-change.h | 59
-rw-r--r-- deps/v8/src/compiler/s390/code-generator-s390.cc | 617
-rw-r--r-- deps/v8/src/compiler/s390/instruction-codes-s390.h | 338
-rw-r--r-- deps/v8/src/compiler/s390/instruction-scheduler-s390.cc | 40
-rw-r--r-- deps/v8/src/compiler/s390/instruction-selector-s390.cc | 319
-rw-r--r-- deps/v8/src/compiler/schedule.cc | 80
-rw-r--r-- deps/v8/src/compiler/schedule.h | 28
-rw-r--r-- deps/v8/src/compiler/select-lowering.cc | 2
-rw-r--r-- deps/v8/src/compiler/select-lowering.h | 2
-rw-r--r-- deps/v8/src/compiler/simd-scalar-lowering.cc | 3
-rw-r--r-- deps/v8/src/compiler/simplified-lowering.cc | 665
-rw-r--r-- deps/v8/src/compiler/simplified-lowering.h | 2
-rw-r--r-- deps/v8/src/compiler/simplified-operator-reducer.cc | 2
-rw-r--r-- deps/v8/src/compiler/simplified-operator.cc | 23
-rw-r--r-- deps/v8/src/compiler/simplified-operator.h | 14
-rw-r--r-- deps/v8/src/compiler/store-store-elimination.cc | 14
-rw-r--r-- deps/v8/src/compiler/type-cache.h | 17
-rw-r--r-- deps/v8/src/compiler/type-narrowing-reducer.cc | 4
-rw-r--r-- deps/v8/src/compiler/typed-optimization.cc | 41
-rw-r--r-- deps/v8/src/compiler/typed-optimization.h | 4
-rw-r--r-- deps/v8/src/compiler/typer.cc | 45
-rw-r--r-- deps/v8/src/compiler/typer.h | 3
-rw-r--r-- deps/v8/src/compiler/types.cc | 57
-rw-r--r-- deps/v8/src/compiler/types.h | 22
-rw-r--r-- deps/v8/src/compiler/value-numbering-reducer.cc | 2
-rw-r--r-- deps/v8/src/compiler/value-numbering-reducer.h | 2
-rw-r--r-- deps/v8/src/compiler/verifier.cc | 42
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.cc | 671
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.h | 73
-rw-r--r-- deps/v8/src/compiler/x64/code-generator-x64.cc | 213
-rw-r--r-- deps/v8/src/compiler/x64/instruction-codes-x64.h | 22
-rw-r--r-- deps/v8/src/compiler/x64/instruction-scheduler-x64.cc | 22
-rw-r--r-- deps/v8/src/compiler/x64/instruction-selector-x64.cc | 110
-rw-r--r-- deps/v8/src/contexts-inl.h | 4
-rw-r--r-- deps/v8/src/contexts.h | 114
-rw-r--r-- deps/v8/src/conversions.cc | 16
-rw-r--r-- deps/v8/src/counters.cc | 62
-rw-r--r-- deps/v8/src/counters.h | 73
-rw-r--r-- deps/v8/src/d8-console.cc | 13
-rw-r--r-- deps/v8/src/d8-posix.cc | 2
-rw-r--r-- deps/v8/src/d8.cc | 159
-rw-r--r-- deps/v8/src/d8.h | 7
-rw-r--r-- deps/v8/src/d8.js | 2
-rw-r--r-- deps/v8/src/date.cc | 2
-rw-r--r-- deps/v8/src/dateparser.h | 8
-rw-r--r-- deps/v8/src/debug/debug-coverage.cc | 44
-rw-r--r-- deps/v8/src/debug/debug-coverage.h | 2
-rw-r--r-- deps/v8/src/debug/debug-evaluate.cc | 102
-rw-r--r-- deps/v8/src/debug/debug-evaluate.h | 1
-rw-r--r-- deps/v8/src/debug/debug-frames.cc | 1
-rw-r--r-- deps/v8/src/debug/debug-frames.h | 2
-rw-r--r-- deps/v8/src/debug/debug-interface.h | 24
-rw-r--r-- deps/v8/src/debug/debug-stack-trace-iterator.cc | 6
-rw-r--r-- deps/v8/src/debug/debug-type-profile.h | 2
-rw-r--r-- deps/v8/src/debug/debug.cc | 94
-rw-r--r-- deps/v8/src/debug/debug.h | 18
-rw-r--r-- deps/v8/src/debug/ia32/debug-ia32.cc | 20
-rw-r--r-- deps/v8/src/debug/interface-types.h | 4
-rw-r--r-- deps/v8/src/deoptimizer.cc | 108
-rw-r--r-- deps/v8/src/deoptimizer.h | 27
-rw-r--r-- deps/v8/src/disasm.h | 2
-rw-r--r-- deps/v8/src/disassembler.cc | 75
-rw-r--r-- deps/v8/src/elements-kind.h | 2
-rw-r--r-- deps/v8/src/elements.cc | 140
-rw-r--r-- deps/v8/src/elements.h | 6
-rw-r--r-- deps/v8/src/extensions/externalize-string-extension.cc | 11
-rw-r--r-- deps/v8/src/extensions/externalize-string-extension.h | 4
-rw-r--r-- deps/v8/src/extensions/free-buffer-extension.h | 4
-rw-r--r-- deps/v8/src/extensions/gc-extension.cc | 4
-rw-r--r-- deps/v8/src/extensions/gc-extension.h | 4
-rw-r--r-- deps/v8/src/extensions/statistics-extension.cc | 5
-rw-r--r-- deps/v8/src/extensions/statistics-extension.h | 4
-rw-r--r-- deps/v8/src/extensions/trigger-failure-extension.h | 4
-rw-r--r-- deps/v8/src/external-reference-table.cc | 6
-rw-r--r-- deps/v8/src/external-reference-table.h | 5
-rw-r--r-- deps/v8/src/external-reference.cc | 64
-rw-r--r-- deps/v8/src/external-reference.h | 17
-rw-r--r-- deps/v8/src/feedback-vector-inl.h | 18
-rw-r--r-- deps/v8/src/feedback-vector.cc | 130
-rw-r--r-- deps/v8/src/feedback-vector.h | 53
-rw-r--r-- deps/v8/src/flag-definitions.h | 57
-rw-r--r-- deps/v8/src/frames.cc | 2
-rw-r--r-- deps/v8/src/frames.h | 21
-rw-r--r-- deps/v8/src/futex-emulation.cc | 8
-rw-r--r-- deps/v8/src/gdb-jit.cc | 42
-rw-r--r-- deps/v8/src/global-handles.cc | 24
-rw-r--r-- deps/v8/src/global-handles.h | 1
-rw-r--r-- deps/v8/src/globals.h | 116
-rw-r--r-- deps/v8/src/handles.cc | 16
-rw-r--r-- deps/v8/src/handles.h | 2
-rw-r--r-- deps/v8/src/heap-symbols.h | 609
-rw-r--r-- deps/v8/src/heap/array-buffer-collector.cc | 64
-rw-r--r-- deps/v8/src/heap/array-buffer-collector.h | 23
-rw-r--r-- deps/v8/src/heap/array-buffer-tracker-inl.h | 9
-rw-r--r-- deps/v8/src/heap/array-buffer-tracker.cc | 28
-rw-r--r-- deps/v8/src/heap/array-buffer-tracker.h | 4
-rw-r--r-- deps/v8/src/heap/concurrent-marking.cc | 121
-rw-r--r-- deps/v8/src/heap/concurrent-marking.h | 5
-rw-r--r-- deps/v8/src/heap/embedder-tracing.cc | 7
-rw-r--r-- deps/v8/src/heap/embedder-tracing.h | 3
-rw-r--r-- deps/v8/src/heap/factory-inl.h | 67
-rw-r--r-- deps/v8/src/heap/factory.cc | 182
-rw-r--r-- deps/v8/src/heap/factory.h | 58
-rw-r--r-- deps/v8/src/heap/gc-tracer.cc | 7
-rw-r--r-- deps/v8/src/heap/gc-tracer.h | 3
-rw-r--r-- deps/v8/src/heap/heap-controller.cc | 82
-rw-r--r-- deps/v8/src/heap/heap-controller.h | 47
-rw-r--r-- deps/v8/src/heap/heap-inl.h | 60
-rw-r--r-- deps/v8/src/heap/heap-write-barrier-inl.h | 4
-rw-r--r-- deps/v8/src/heap/heap.cc | 703
-rw-r--r-- deps/v8/src/heap/heap.h | 416
-rw-r--r-- deps/v8/src/heap/incremental-marking-inl.h | 21
-rw-r--r-- deps/v8/src/heap/incremental-marking-job.cc | 5
-rw-r--r-- deps/v8/src/heap/incremental-marking.cc | 47
-rw-r--r-- deps/v8/src/heap/incremental-marking.h | 21
-rw-r--r-- deps/v8/src/heap/item-parallel-job.cc | 2
-rw-r--r-- deps/v8/src/heap/item-parallel-job.h | 4
-rw-r--r-- deps/v8/src/heap/mark-compact-inl.h | 98
-rw-r--r-- deps/v8/src/heap/mark-compact.cc | 136
-rw-r--r-- deps/v8/src/heap/mark-compact.h | 62
-rw-r--r-- deps/v8/src/heap/marking.cc | 52
-rw-r--r-- deps/v8/src/heap/object-stats.cc | 22
-rw-r--r-- deps/v8/src/heap/objects-visiting-inl.h | 9
-rw-r--r-- deps/v8/src/heap/objects-visiting.h | 6
-rw-r--r-- deps/v8/src/heap/scavenge-job.cc | 5
-rw-r--r-- deps/v8/src/heap/scavenger-inl.h | 279
-rw-r--r-- deps/v8/src/heap/scavenger.cc | 286
-rw-r--r-- deps/v8/src/heap/scavenger.h | 142
-rw-r--r-- deps/v8/src/heap/setup-heap-internal.cc | 98
-rw-r--r-- deps/v8/src/heap/spaces-inl.h | 45
-rw-r--r-- deps/v8/src/heap/spaces.cc | 516
-rw-r--r-- deps/v8/src/heap/spaces.h | 295
-rw-r--r-- deps/v8/src/heap/store-buffer.cc | 8
-rw-r--r-- deps/v8/src/heap/store-buffer.h | 2
-rw-r--r-- deps/v8/src/heap/sweeper.cc | 13
-rw-r--r-- deps/v8/src/ia32/assembler-ia32-inl.h | 4
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.cc | 128
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.h | 153
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc | 38
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.cc | 32
-rw-r--r-- deps/v8/src/ia32/deoptimizer-ia32.cc | 42
-rw-r--r-- deps/v8/src/ia32/interface-descriptors-ia32.cc | 65
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.cc | 175
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.h | 67
-rw-r--r-- deps/v8/src/ic/accessor-assembler.cc | 383
-rw-r--r-- deps/v8/src/ic/accessor-assembler.h | 14
-rw-r--r-- deps/v8/src/ic/binary-op-assembler.cc | 5
-rw-r--r-- deps/v8/src/ic/call-optimization.h | 2
-rw-r--r-- deps/v8/src/ic/handler-configuration.cc | 4
-rw-r--r-- deps/v8/src/ic/handler-configuration.h | 2
-rw-r--r-- deps/v8/src/ic/ic-inl.h | 7
-rw-r--r-- deps/v8/src/ic/ic-stats.cc | 2
-rw-r--r-- deps/v8/src/ic/ic.cc | 60
-rw-r--r-- deps/v8/src/ic/ic.h | 11
-rw-r--r-- deps/v8/src/ic/keyed-store-generic.cc | 306
-rw-r--r-- deps/v8/src/ic/keyed-store-generic.h | 5
-rw-r--r-- deps/v8/src/identity-map.h | 2
-rw-r--r-- deps/v8/src/inspector/injected-script-source.js | 17
-rw-r--r-- deps/v8/src/inspector/injected-script.cc | 6
-rw-r--r-- deps/v8/src/inspector/injected-script.h | 8
-rw-r--r-- deps/v8/src/inspector/remote-object-id.h | 6
-rw-r--r-- deps/v8/src/inspector/string-16.cc | 23
-rw-r--r-- deps/v8/src/inspector/v8-console-agent-impl.cc | 2
-rw-r--r-- deps/v8/src/inspector/v8-console-message.cc | 32
-rw-r--r-- deps/v8/src/inspector/v8-console-message.h | 1
-rw-r--r-- deps/v8/src/inspector/v8-console.cc | 40
-rw-r--r-- deps/v8/src/inspector/v8-console.h | 2
-rw-r--r-- deps/v8/src/inspector/v8-debugger-agent-impl.cc | 31
-rw-r--r-- deps/v8/src/inspector/v8-debugger-agent-impl.h | 4
-rw-r--r-- deps/v8/src/inspector/v8-debugger-script.cc | 56
-rw-r--r-- deps/v8/src/inspector/v8-debugger-script.h | 3
-rw-r--r-- deps/v8/src/inspector/v8-debugger.cc | 43
-rw-r--r-- deps/v8/src/inspector/v8-debugger.h | 14
-rw-r--r-- deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc | 2
-rw-r--r-- deps/v8/src/inspector/v8-inspector-impl.cc | 10
-rw-r--r-- deps/v8/src/inspector/v8-inspector-impl.h | 7
-rw-r--r-- deps/v8/src/inspector/v8-inspector-session-impl.h | 2
-rw-r--r-- deps/v8/src/inspector/v8-regex.h | 3
-rw-r--r-- deps/v8/src/inspector/v8-runtime-agent-impl.cc | 2
-rw-r--r-- deps/v8/src/inspector/v8-schema-agent-impl.cc | 2
-rw-r--r-- deps/v8/src/inspector/v8-stack-trace-impl.cc | 6
-rw-r--r-- deps/v8/src/inspector/wasm-translation.cc | 6
-rw-r--r-- deps/v8/src/instruction-stream.cc | 16
-rw-r--r-- deps/v8/src/interface-descriptors.cc | 18
-rw-r--r-- deps/v8/src/interface-descriptors.h | 96
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-builder.cc | 28
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-builder.h | 16
-rw-r--r-- deps/v8/src/interpreter/bytecode-decoder.cc | 19
-rw-r--r-- deps/v8/src/interpreter/bytecode-generator.cc | 338
-rw-r--r-- deps/v8/src/interpreter/bytecode-generator.h | 13
-rw-r--r-- deps/v8/src/interpreter/bytecode-operands.h | 13
-rw-r--r-- deps/v8/src/interpreter/bytecode-register-allocator.h | 4
-rw-r--r-- deps/v8/src/interpreter/bytecode-register-optimizer.h | 6
-rw-r--r-- deps/v8/src/interpreter/bytecodes.cc | 8
-rw-r--r-- deps/v8/src/interpreter/bytecodes.h | 21
-rw-r--r-- deps/v8/src/interpreter/constant-array-builder.h | 2
-rw-r--r-- deps/v8/src/interpreter/control-flow-builders.h | 16
-rw-r--r-- deps/v8/src/interpreter/handler-table-builder.h | 2
-rw-r--r-- deps/v8/src/interpreter/interpreter-assembler.cc | 36
-rw-r--r-- deps/v8/src/interpreter/interpreter-generator.cc | 77
-rw-r--r-- deps/v8/src/interpreter/interpreter-generator.h | 11
-rw-r--r-- deps/v8/src/interpreter/interpreter-intrinsics-generator.cc | 33
-rw-r--r-- deps/v8/src/interpreter/interpreter-intrinsics.h | 5
-rw-r--r-- deps/v8/src/interpreter/interpreter.cc | 98
-rw-r--r-- deps/v8/src/interpreter/interpreter.h | 9
-rw-r--r-- deps/v8/src/interpreter/setup-interpreter-internal.cc | 102
-rw-r--r-- deps/v8/src/interpreter/setup-interpreter.h | 33
-rw-r--r-- deps/v8/src/intl.h | 6
-rw-r--r-- deps/v8/src/isolate-inl.h | 22
-rw-r--r-- deps/v8/src/isolate.cc | 535
-rw-r--r-- deps/v8/src/isolate.h | 117
-rw-r--r-- deps/v8/src/js/array.js | 321
-rw-r--r-- deps/v8/src/js/intl.js | 1082
-rw-r--r-- deps/v8/src/js/macros.py | 2
-rw-r--r-- deps/v8/src/json-parser.h | 4
-rw-r--r-- deps/v8/src/json-stringifier.cc | 88
-rw-r--r-- deps/v8/src/keys.cc | 7
-rw-r--r-- deps/v8/src/keys.h | 4
-rw-r--r-- deps/v8/src/libplatform/default-platform.cc | 8
-rw-r--r-- deps/v8/src/libplatform/default-platform.h | 2
-rw-r--r-- deps/v8/src/libplatform/default-worker-threads-task-runner.cc | 1
-rw-r--r-- deps/v8/src/libplatform/default-worker-threads-task-runner.h | 2
-rw-r--r-- deps/v8/src/libplatform/tracing/trace-buffer.cc | 2
-rw-r--r-- deps/v8/src/libplatform/tracing/trace-buffer.h | 2
-rw-r--r-- deps/v8/src/libplatform/tracing/trace-writer.h | 2
-rw-r--r-- deps/v8/src/libplatform/tracing/tracing-controller.cc | 2
-rw-r--r-- deps/v8/src/libplatform/worker-thread.h | 2
-rw-r--r-- deps/v8/src/libsampler/sampler.cc | 12
-rw-r--r-- deps/v8/src/locked-queue.h | 2
-rw-r--r-- deps/v8/src/log-utils.h | 4
-rw-r--r-- deps/v8/src/log.cc | 64
-rw-r--r-- deps/v8/src/log.h | 50
-rw-r--r-- deps/v8/src/lookup.cc | 27
-rw-r--r-- deps/v8/src/lookup.h | 4
-rw-r--r-- deps/v8/src/machine-type.h | 6
-rw-r--r-- deps/v8/src/macro-assembler.h | 2
-rw-r--r-- deps/v8/src/math-random.cc | 70
-rw-r--r-- deps/v8/src/math-random.h | 33
-rw-r--r-- deps/v8/src/maybe-handles-inl.h | 6
-rw-r--r-- deps/v8/src/maybe-handles.h | 2
-rw-r--r-- deps/v8/src/messages.cc | 96
-rw-r--r-- deps/v8/src/messages.h | 25
-rw-r--r-- deps/v8/src/mips/assembler-mips.cc | 36
-rw-r--r-- deps/v8/src/mips/assembler-mips.h | 9
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.cc | 10
-rw-r--r-- deps/v8/src/mips/codegen-mips.cc | 32
-rw-r--r-- deps/v8/src/mips/disasm-mips.cc | 10
-rw-r--r-- deps/v8/src/mips/interface-descriptors-mips.cc | 16
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.cc | 42
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.h | 31
-rw-r--r-- deps/v8/src/mips/simulator-mips.cc | 15
-rw-r--r-- deps/v8/src/mips64/assembler-mips64.cc | 66
-rw-r--r-- deps/v8/src/mips64/assembler-mips64.h | 18
-rw-r--r-- deps/v8/src/mips64/code-stubs-mips64.cc | 10
-rw-r--r-- deps/v8/src/mips64/codegen-mips64.cc | 33
-rw-r--r-- deps/v8/src/mips64/interface-descriptors-mips64.cc | 16
-rw-r--r-- deps/v8/src/mips64/macro-assembler-mips64.cc | 22
-rw-r--r-- deps/v8/src/mips64/macro-assembler-mips64.h | 25
-rw-r--r-- deps/v8/src/objects-body-descriptors-inl.h | 119
-rw-r--r-- deps/v8/src/objects-body-descriptors.h | 11
-rw-r--r-- deps/v8/src/objects-debug.cc | 147
-rw-r--r-- deps/v8/src/objects-definitions.h | 568
-rw-r--r-- deps/v8/src/objects-inl.h | 1236
-rw-r--r-- deps/v8/src/objects-printer.cc | 152
-rw-r--r-- deps/v8/src/objects.cc | 814
-rw-r--r-- deps/v8/src/objects.h | 2110
-rw-r--r-- deps/v8/src/objects/allocation-site-inl.h | 197
-rw-r--r-- deps/v8/src/objects/allocation-site.h | 186
-rw-r--r-- deps/v8/src/objects/api-callbacks-inl.h | 18
-rw-r--r-- deps/v8/src/objects/api-callbacks.h | 23
-rw-r--r-- deps/v8/src/objects/arguments-inl.h | 3
-rw-r--r-- deps/v8/src/objects/arguments.h | 28
-rw-r--r-- deps/v8/src/objects/bigint.cc | 42
-rw-r--r-- deps/v8/src/objects/bigint.h | 3
-rw-r--r-- deps/v8/src/objects/builtin-function-id.h | 217
-rw-r--r-- deps/v8/src/objects/code-inl.h | 10
-rw-r--r-- deps/v8/src/objects/code.h | 48
-rw-r--r-- deps/v8/src/objects/compilation-cache.h | 11
-rw-r--r-- deps/v8/src/objects/debug-objects.cc | 2
-rw-r--r-- deps/v8/src/objects/debug-objects.h | 3
-rw-r--r-- deps/v8/src/objects/dictionary.h | 12
-rw-r--r-- deps/v8/src/objects/fixed-array-inl.h | 4
-rw-r--r-- deps/v8/src/objects/fixed-array.h | 15
-rw-r--r-- deps/v8/src/objects/frame-array.h | 3
-rw-r--r-- deps/v8/src/objects/hash-table-inl.h | 8
-rw-r--r-- deps/v8/src/objects/hash-table.h | 6
-rw-r--r-- deps/v8/src/objects/intl-objects.cc | 1543
-rw-r--r-- deps/v8/src/objects/intl-objects.h | 276
-rw-r--r-- deps/v8/src/objects/js-array-buffer-inl.h | 88
-rw-r--r-- deps/v8/src/objects/js-array-buffer.cc | 45
-rw-r--r-- deps/v8/src/objects/js-array-buffer.h | 106
-rw-r--r-- deps/v8/src/objects/js-array.h | 3
-rw-r--r-- deps/v8/src/objects/js-break-iterator-inl.h | 49
-rw-r--r-- deps/v8/src/objects/js-break-iterator.cc | 170
-rw-r--r-- deps/v8/src/objects/js-break-iterator.h | 87
-rw-r--r-- deps/v8/src/objects/js-collator-inl.h | 12
-rw-r--r-- deps/v8/src/objects/js-collator.cc | 212
-rw-r--r-- deps/v8/src/objects/js-collator.h | 37
-rw-r--r-- deps/v8/src/objects/js-collection.h | 3
-rw-r--r-- deps/v8/src/objects/js-date-time-format-inl.h | 33
-rw-r--r-- deps/v8/src/objects/js-date-time-format.cc | 980
-rw-r--r-- deps/v8/src/objects/js-date-time-format.h | 100
-rw-r--r-- deps/v8/src/objects/js-generator.h | 2
-rw-r--r-- deps/v8/src/objects/js-list-format-inl.h | 3
-rw-r--r-- deps/v8/src/objects/js-list-format.cc | 12
-rw-r--r-- deps/v8/src/objects/js-list-format.h | 13
-rw-r--r-- deps/v8/src/objects/js-locale-inl.h | 37
-rw-r--r-- deps/v8/src/objects/js-locale.cc | 170
-rw-r--r-- deps/v8/src/objects/js-locale.h | 80
-rw-r--r-- deps/v8/src/objects/js-number-format-inl.h | 58
-rw-r--r-- deps/v8/src/objects/js-number-format.cc | 709
-rw-r--r-- deps/v8/src/objects/js-number-format.h | 135
-rw-r--r-- deps/v8/src/objects/js-objects-inl.h | 904
-rw-r--r-- deps/v8/src/objects/js-objects.h | 1408
-rw-r--r-- deps/v8/src/objects/js-plural-rules.cc | 7
-rw-r--r-- deps/v8/src/objects/js-plural-rules.h | 9
-rw-r--r-- deps/v8/src/objects/js-promise.h | 2
-rw-r--r-- deps/v8/src/objects/js-proxy.h | 4
-rw-r--r-- deps/v8/src/objects/js-regexp-string-iterator.h | 2
-rw-r--r-- deps/v8/src/objects/js-relative-time-format-inl.h | 3
-rw-r--r-- deps/v8/src/objects/js-relative-time-format.cc | 220
-rw-r--r-- deps/v8/src/objects/js-relative-time-format.h | 21
-rw-r--r-- deps/v8/src/objects/js-segmenter-inl.h | 56
-rw-r--r-- deps/v8/src/objects/js-segmenter.cc | 214
-rw-r--r-- deps/v8/src/objects/js-segmenter.h | 118
-rw-r--r-- deps/v8/src/objects/map-inl.h | 51
-rw-r--r-- deps/v8/src/objects/map.h | 36
-rw-r--r-- deps/v8/src/objects/maybe-object-inl.h | 54
-rw-r--r-- deps/v8/src/objects/maybe-object.h | 56
-rw-r--r-- deps/v8/src/objects/microtask-queue-inl.h | 28
-rw-r--r-- deps/v8/src/objects/microtask-queue.cc | 40
-rw-r--r-- deps/v8/src/objects/microtask-queue.h | 55
-rw-r--r-- deps/v8/src/objects/module.cc | 12
-rw-r--r-- deps/v8/src/objects/module.h | 7
-rw-r--r-- deps/v8/src/objects/name-inl.h | 26
-rw-r--r-- deps/v8/src/objects/name.h | 39
-rw-r--r-- deps/v8/src/objects/object-macros-undef.h | 2
-rw-r--r-- deps/v8/src/objects/object-macros.h | 6
-rw-r--r-- deps/v8/src/objects/ordered-hash-table-inl.h | 16
-rw-r--r-- deps/v8/src/objects/ordered-hash-table.cc | 2
-rw-r--r-- deps/v8/src/objects/ordered-hash-table.h | 12
-rw-r--r-- deps/v8/src/objects/promise.h | 2
-rw-r--r-- deps/v8/src/objects/property-array-inl.h | 83
-rw-r--r-- deps/v8/src/objects/property-array.h | 73
-rw-r--r-- deps/v8/src/objects/prototype-info-inl.h | 6
-rw-r--r-- deps/v8/src/objects/scope-info.cc | 1
-rw-r--r-- deps/v8/src/objects/scope-info.h | 3
-rw-r--r-- deps/v8/src/objects/script.h | 3
-rw-r--r-- deps/v8/src/objects/shared-function-info-inl.h | 69
-rw-r--r-- deps/v8/src/objects/shared-function-info.h | 20
-rw-r--r-- deps/v8/src/objects/stack-frame-info-inl.h | 38
-rw-r--r-- deps/v8/src/objects/stack-frame-info.h | 62
-rw-r--r-- deps/v8/src/objects/string-inl.h | 26
-rw-r--r-- deps/v8/src/objects/string-table.h | 2
-rw-r--r-- deps/v8/src/objects/string.h | 55
-rw-r--r-- deps/v8/src/objects/templates.h | 3
-rw-r--r-- deps/v8/src/optimized-compilation-info.cc | 86
-rw-r--r-- deps/v8/src/optimized-compilation-info.h | 37
-rw-r--r-- deps/v8/src/ostreams.cc | 7
-rw-r--r-- deps/v8/src/ostreams.h | 6
-rw-r--r-- deps/v8/src/parsing/OWNERS | 1
-rw-r--r-- deps/v8/src/parsing/duplicate-finder.h | 2
-rw-r--r-- deps/v8/src/parsing/expression-classifier.h | 528
-rw-r--r-- deps/v8/src/parsing/func-name-inferrer.h | 8
-rw-r--r-- deps/v8/src/parsing/parse-info.cc | 96
-rw-r--r-- deps/v8/src/parsing/parse-info.h | 30
-rw-r--r-- deps/v8/src/parsing/parser-base.h | 1324
-rw-r--r-- deps/v8/src/parsing/parser.cc | 343
-rw-r--r-- deps/v8/src/parsing/parser.h | 109
-rw-r--r-- deps/v8/src/parsing/pattern-rewriter.cc | 242
-rw-r--r-- deps/v8/src/parsing/preparsed-scope-data-impl.h | 259
-rw-r--r-- deps/v8/src/parsing/preparsed-scope-data.cc | 433
-rw-r--r-- deps/v8/src/parsing/preparsed-scope-data.h | 163
-rw-r--r-- deps/v8/src/parsing/preparser.cc | 92
-rw-r--r-- deps/v8/src/parsing/preparser.h | 232
-rw-r--r-- deps/v8/src/parsing/scanner-character-streams.cc | 276
-rw-r--r-- deps/v8/src/parsing/scanner-character-streams.h | 3
-rw-r--r-- deps/v8/src/parsing/scanner-inl.h | 530
-rw-r--r-- deps/v8/src/parsing/scanner.cc | 620
-rw-r--r-- deps/v8/src/parsing/scanner.h | 151
-rw-r--r-- deps/v8/src/parsing/token.cc | 1
-rw-r--r-- deps/v8/src/parsing/token.h | 205
-rw-r--r-- deps/v8/src/pending-compilation-error-handler.h | 7
-rw-r--r-- deps/v8/src/perf-jit.h | 2
-rw-r--r-- deps/v8/src/ppc/assembler-ppc.cc | 32
-rw-r--r-- deps/v8/src/ppc/assembler-ppc.h | 24
-rw-r--r-- deps/v8/src/ppc/code-stubs-ppc.cc | 10
-rw-r--r-- deps/v8/src/ppc/codegen-ppc.cc | 17
-rw-r--r-- deps/v8/src/ppc/constants-ppc.h | 24
-rw-r--r-- deps/v8/src/ppc/disasm-ppc.cc | 8
-rw-r--r-- deps/v8/src/ppc/interface-descriptors-ppc.cc | 14
-rw-r--r-- deps/v8/src/ppc/macro-assembler-ppc.cc | 16
-rw-r--r-- deps/v8/src/ppc/macro-assembler-ppc.h | 21
-rw-r--r-- deps/v8/src/ppc/simulator-ppc.cc | 44
-rw-r--r-- deps/v8/src/ppc/simulator-ppc.h | 2
-rw-r--r-- deps/v8/src/profiler/allocation-tracker.cc | 5
-rw-r--r-- deps/v8/src/profiler/allocation-tracker.h | 2
-rw-r--r-- deps/v8/src/profiler/circular-queue-inl.h | 7
-rw-r--r-- deps/v8/src/profiler/cpu-profiler.h | 6
-rw-r--r-- deps/v8/src/profiler/heap-profiler.cc | 14
-rw-r--r-- deps/v8/src/profiler/heap-profiler.h | 4
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator-inl.h | 40
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator.cc | 1130
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator.h | 314
-rw-r--r-- deps/v8/src/profiler/sampling-heap-profiler.cc | 46
-rw-r--r-- deps/v8/src/profiler/sampling-heap-profiler.h | 18
-rw-r--r-- deps/v8/src/profiler/tick-sample.cc | 2
-rw-r--r-- deps/v8/src/profiler/tracing-cpu-profiler.h | 2
-rw-r--r-- deps/v8/src/profiler/unbound-queue.h | 4
-rw-r--r-- deps/v8/src/property-details.h | 2
-rw-r--r-- deps/v8/src/property.cc | 8
-rw-r--r-- deps/v8/src/property.h | 14
-rw-r--r-- deps/v8/src/prototype.h | 2
-rw-r--r-- deps/v8/src/regexp/jsregexp.cc | 5
-rw-r--r-- deps/v8/src/regexp/jsregexp.h | 179
-rw-r--r-- deps/v8/src/regexp/property-sequences.cc | 1115
-rw-r--r-- deps/v8/src/regexp/property-sequences.h | 28
-rw-r--r-- deps/v8/src/regexp/regexp-ast.h | 15
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler-tracer.cc | 5
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler-tracer.h | 121
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler.cc | 10
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler.h | 4
-rw-r--r-- deps/v8/src/regexp/regexp-parser.cc | 149
-rw-r--r-- deps/v8/src/regexp/regexp-parser.h | 12
-rw-r--r-- deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h | 117
-rw-r--r-- deps/v8/src/register-configuration.h | 2
-rw-r--r-- deps/v8/src/reloc-info.h | 3
-rw-r--r-- deps/v8/src/roots-inl.h | 125
-rw-r--r-- deps/v8/src/roots.cc | 54
-rw-r--r-- deps/v8/src/roots.h | 241
-rw-r--r-- deps/v8/src/runtime/runtime-array.cc | 12
-rw-r--r-- deps/v8/src/runtime/runtime-atomics.cc | 401
-rw-r--r-- deps/v8/src/runtime/runtime-classes.cc | 8
-rw-r--r-- deps/v8/src/runtime/runtime-collections.cc | 38
-rw-r--r-- deps/v8/src/runtime/runtime-date.cc | 7
-rw-r--r-- deps/v8/src/runtime/runtime-debug.cc | 23
-rw-r--r-- deps/v8/src/runtime/runtime-function.cc | 80
-rw-r--r-- deps/v8/src/runtime/runtime-futex.cc | 3
-rw-r--r-- deps/v8/src/runtime/runtime-generator.cc | 6
-rw-r--r-- deps/v8/src/runtime/runtime-internal.cc | 55
-rw-r--r-- deps/v8/src/runtime/runtime-interpreter.cc | 1
-rw-r--r-- deps/v8/src/runtime/runtime-intl.cc | 399
-rw-r--r-- deps/v8/src/runtime/runtime-literals.cc | 8
-rw-r--r-- deps/v8/src/runtime/runtime-maths.cc | 72
-rw-r--r-- deps/v8/src/runtime/runtime-numbers.cc | 73
-rw-r--r-- deps/v8/src/runtime/runtime-object.cc | 298
-rw-r--r-- deps/v8/src/runtime/runtime-proxy.cc | 4
-rw-r--r-- deps/v8/src/runtime/runtime-regexp.cc | 4
-rw-r--r-- deps/v8/src/runtime/runtime-scopes.cc | 12
-rw-r--r-- deps/v8/src/runtime/runtime-strings.cc | 22
-rw-r--r-- deps/v8/src/runtime/runtime-test.cc | 70
-rw-r--r-- deps/v8/src/runtime/runtime-typedarray.cc | 4
-rw-r--r-- deps/v8/src/runtime/runtime-wasm.cc | 155
-rw-r--r-- deps/v8/src/runtime/runtime.cc | 43
-rw-r--r-- deps/v8/src/runtime/runtime.h | 581
-rw-r--r-- deps/v8/src/s390/assembler-s390.cc | 38
-rw-r--r-- deps/v8/src/s390/assembler-s390.h | 7
-rw-r--r-- deps/v8/src/s390/code-stubs-s390.cc | 10
-rw-r--r-- deps/v8/src/s390/codegen-s390.cc | 17
-rw-r--r-- deps/v8/src/s390/interface-descriptors-s390.cc | 14
-rw-r--r-- deps/v8/src/s390/macro-assembler-s390.cc | 27
-rw-r--r-- deps/v8/src/s390/macro-assembler-s390.h | 28
-rw-r--r-- deps/v8/src/s390/simulator-s390.cc | 60
-rw-r--r-- deps/v8/src/safepoint-table.h | 11
-rw-r--r-- deps/v8/src/setup-isolate-deserialize.cc | 13
-rw-r--r-- deps/v8/src/setup-isolate-full.cc | 10
-rw-r--r-- deps/v8/src/setup-isolate.h | 4
-rw-r--r-- deps/v8/src/snapshot/builtin-deserializer-allocator.cc | 137
-rw-r--r-- deps/v8/src/snapshot/builtin-deserializer-allocator.h | 33
-rw-r--r-- deps/v8/src/snapshot/builtin-deserializer.cc | 117
-rw-r--r-- deps/v8/src/snapshot/builtin-deserializer.h | 11
-rw-r--r-- deps/v8/src/snapshot/builtin-serializer.cc | 62
-rw-r--r-- deps/v8/src/snapshot/builtin-serializer.h | 16
-rw-r--r-- deps/v8/src/snapshot/builtin-snapshot-utils.cc | 67
-rw-r--r-- deps/v8/src/snapshot/builtin-snapshot-utils.h | 56
-rw-r--r-- deps/v8/src/snapshot/code-serializer.cc | 63
-rw-r--r-- deps/v8/src/snapshot/code-serializer.h | 15
-rw-r--r-- deps/v8/src/snapshot/default-deserializer-allocator.cc | 5
-rw-r--r-- deps/v8/src/snapshot/default-deserializer-allocator.h | 2
-rw-r--r-- deps/v8/src/snapshot/deserializer.cc | 36
-rw-r--r-- deps/v8/src/snapshot/deserializer.h | 5
-rw-r--r-- deps/v8/src/snapshot/mksnapshot.cc | 97
-rw-r--r-- deps/v8/src/snapshot/object-deserializer.cc | 15
-rw-r--r-- deps/v8/src/snapshot/partial-serializer.cc | 8
-rw-r--r-- deps/v8/src/snapshot/serializer-common.h | 42
-rw-r--r-- deps/v8/src/snapshot/serializer.cc | 47
-rw-r--r-- deps/v8/src/snapshot/serializer.h | 8
-rw-r--r-- deps/v8/src/snapshot/snapshot-common.cc | 122
-rw-r--r-- deps/v8/src/snapshot/snapshot-source-sink.h | 6
-rw-r--r-- deps/v8/src/snapshot/snapshot.h | 34
-rw-r--r-- deps/v8/src/snapshot/startup-deserializer.cc | 6
-rw-r--r-- deps/v8/src/snapshot/startup-serializer.cc | 12
-rw-r--r-- deps/v8/src/snapshot/startup-serializer.h | 10
-rw-r--r-- deps/v8/src/splay-tree.h | 10
-rw-r--r-- deps/v8/src/string-constants.cc | 186
-rw-r--r-- deps/v8/src/string-constants.h | 115
-rw-r--r-- deps/v8/src/string-stream.h | 7
-rw-r--r-- deps/v8/src/torque-assembler.h | 58
-rw-r--r-- deps/v8/src/torque/ast.h | 74
-rw-r--r-- deps/v8/src/torque/cfg.cc | 134
-rw-r--r-- deps/v8/src/torque/cfg.h | 149
-rw-r--r-- deps/v8/src/torque/csa-generator.cc | 487
-rw-r--r-- deps/v8/src/torque/csa-generator.h | 53
-rw-r--r-- deps/v8/src/torque/declarable.cc | 12
-rw-r--r-- deps/v8/src/torque/declarable.h | 84
-rw-r--r-- deps/v8/src/torque/declaration-visitor.cc | 136
-rw-r--r-- deps/v8/src/torque/declaration-visitor.h | 29
-rw-r--r-- deps/v8/src/torque/declarations.cc | 37
-rw-r--r-- deps/v8/src/torque/declarations.h | 16
-rw-r--r-- deps/v8/src/torque/file-visitor.cc | 2
-rw-r--r-- deps/v8/src/torque/file-visitor.h | 4
-rw-r--r-- deps/v8/src/torque/global-context.h | 36
-rw-r--r-- deps/v8/src/torque/implementation-visitor.cc | 1589
-rw-r--r-- deps/v8/src/torque/implementation-visitor.h | 258
-rw-r--r-- deps/v8/src/torque/instructions.cc | 204
-rw-r--r-- deps/v8/src/torque/instructions.h | 354
-rw-r--r-- deps/v8/src/torque/source-positions.h | 2
-rw-r--r-- deps/v8/src/torque/torque-parser.cc | 191
-rw-r--r-- deps/v8/src/torque/torque.cc | 5
-rw-r--r-- deps/v8/src/torque/types.cc | 74
-rw-r--r-- deps/v8/src/torque/types.h | 39
-rw-r--r-- deps/v8/src/torque/utils.cc | 85
-rw-r--r-- deps/v8/src/torque/utils.h | 176
-rw-r--r-- deps/v8/src/tracing/trace-event.h | 13
-rw-r--r-- deps/v8/src/transitions-inl.h | 22
-rw-r--r-- deps/v8/src/transitions.cc | 50
-rw-r--r-- deps/v8/src/trap-handler/trap-handler.h | 10
-rw-r--r-- deps/v8/src/turbo-assembler.cc | 11
-rw-r--r-- deps/v8/src/turbo-assembler.h | 18
-rw-r--r-- deps/v8/src/unicode-cache.h | 2
-rw-r--r-- deps/v8/src/unicode-decoder.h | 2
-rw-r--r-- deps/v8/src/unicode.h | 4
-rw-r--r-- deps/v8/src/utils.cc | 26
-rw-r--r-- deps/v8/src/utils.h | 125
-rw-r--r-- deps/v8/src/v8.cc | 8
-rw-r--r-- deps/v8/src/v8threads.h | 2
-rw-r--r-- deps/v8/src/value-serializer.cc | 55
-rw-r--r-- deps/v8/src/vector-slot-pair.cc | 14
-rw-r--r-- deps/v8/src/vector-slot-pair.h | 10
-rw-r--r-- deps/v8/src/visitors.h | 20
-rw-r--r-- deps/v8/src/vm-state.h | 5
-rw-r--r-- deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 34
-rw-r--r-- deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 52
-rw-r--r-- deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 87
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-assembler.cc | 2
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-assembler.h | 31
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-compiler.cc | 117
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-register.h | 17
-rw-r--r-- deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 132
-rw-r--r-- deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 79
-rw-r--r-- deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 34
-rw-r--r-- deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 34
-rw-r--r-- deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 67
-rw-r--r-- deps/v8/src/wasm/decoder.h | 23
-rw-r--r-- deps/v8/src/wasm/function-body-decoder-impl.h | 53
-rw-r--r-- deps/v8/src/wasm/function-body-decoder.cc | 93
-rw-r--r-- deps/v8/src/wasm/function-compiler.cc | 9
-rw-r--r-- deps/v8/src/wasm/function-compiler.h | 3
-rw-r--r-- deps/v8/src/wasm/module-compiler.cc | 348
-rw-r--r-- deps/v8/src/wasm/module-compiler.h | 3
-rw-r--r-- deps/v8/src/wasm/module-decoder.cc | 147
-rw-r--r-- deps/v8/src/wasm/object-access.h | 48
-rw-r--r-- deps/v8/src/wasm/value-type.h | 3
-rw-r--r-- deps/v8/src/wasm/wasm-code-manager.cc | 451
-rw-r--r-- deps/v8/src/wasm/wasm-code-manager.h | 126
-rw-r--r-- deps/v8/src/wasm/wasm-constants.h | 12
-rw-r--r-- deps/v8/src/wasm/wasm-debug.cc | 27
-rw-r--r-- deps/v8/src/wasm/wasm-engine.cc | 25
-rw-r--r-- deps/v8/src/wasm/wasm-engine.h | 12
-rw-r--r-- deps/v8/src/wasm/wasm-interpreter.cc | 95
-rw-r--r-- deps/v8/src/wasm/wasm-interpreter.h | 2
-rw-r--r-- deps/v8/src/wasm/wasm-js.cc | 43
-rw-r--r-- deps/v8/src/wasm/wasm-linkage.h | 41
-rw-r--r-- deps/v8/src/wasm/wasm-memory.cc | 136
-rw-r--r-- deps/v8/src/wasm/wasm-memory.h | 7
-rw-r--r-- deps/v8/src/wasm/wasm-module.cc | 31
-rw-r--r-- deps/v8/src/wasm/wasm-module.h | 44
-rw-r--r-- deps/v8/src/wasm/wasm-objects-inl.h | 15
-rw-r--r-- deps/v8/src/wasm/wasm-objects.cc | 126
-rw-r--r-- deps/v8/src/wasm/wasm-objects.h | 41
-rw-r--r-- deps/v8/src/wasm/wasm-opcodes.h | 3
-rw-r--r-- deps/v8/src/wasm/wasm-result.cc | 2
-rw-r--r-- deps/v8/src/wasm/wasm-serialization.cc | 35
-rw-r--r-- deps/v8/src/wasm/wasm-serialization.h | 12
-rw-r--r-- deps/v8/src/x64/assembler-x64.cc | 53
-rw-r--r-- deps/v8/src/x64/assembler-x64.h | 13
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc | 20
-rw-r--r-- deps/v8/src/x64/codegen-x64.cc | 17
-rw-r--r-- deps/v8/src/x64/disasm-x64.cc | 21
-rw-r--r-- deps/v8/src/x64/interface-descriptors-x64.cc | 14
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.cc | 25
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.h | 35
-rw-r--r-- deps/v8/src/zone/zone-allocator.h | 4
-rw-r--r-- deps/v8/src/zone/zone-chunk-list.h | 4
-rw-r--r-- deps/v8/src/zone/zone-containers.h | 4
-rw-r--r-- deps/v8/src/zone/zone.cc | 7
-rw-r--r-- deps/v8/src/zone/zone.h | 17
-rw-r--r-- deps/v8/test/benchmarks/testcfg.py | 2
-rw-r--r-- deps/v8/test/cctest/BUILD.gn | 3
-rw-r--r-- deps/v8/test/cctest/cctest.cc | 21
-rw-r--r-- deps/v8/test/cctest/cctest.h | 45
-rw-r--r-- deps/v8/test/cctest/cctest.status | 36
-rw-r--r-- deps/v8/test/cctest/compiler/call-tester.h | 4
-rw-r--r-- deps/v8/test/cctest/compiler/codegen-tester.h | 28
-rw-r--r-- deps/v8/test/cctest/compiler/graph-builder-tester.h | 2
-rw-r--r-- deps/v8/test/cctest/compiler/test-branch-combine.cc | 8
-rw-r--r-- deps/v8/test/cctest/compiler/test-code-assembler.cc | 2
-rw-r--r-- deps/v8/test/cctest/compiler/test-code-generator.cc | 44
-rw-r--r-- deps/v8/test/cctest/compiler/test-js-typed-lowering.cc | 2
-rw-r--r-- deps/v8/test/cctest/compiler/test-multiple-return.cc | 15
-rw-r--r-- deps/v8/test/cctest/compiler/test-representation-change.cc | 160
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc | 4
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-intrinsics.cc | 54
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-machops.cc | 24
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-native-calls.cc | 92
-rw-r--r-- deps/v8/test/cctest/compiler/value-helper.h | 20
-rw-r--r-- deps/v8/test/cctest/heap/heap-utils.cc | 7
-rw-r--r-- deps/v8/test/cctest/heap/test-alloc.cc | 76
-rw-r--r-- deps/v8/test/cctest/heap/test-array-buffer-tracker.cc | 6
-rw-r--r-- deps/v8/test/cctest/heap/test-concurrent-marking.cc | 15
-rw-r--r-- deps/v8/test/cctest/heap/test-external-string-tracker.cc | 6
-rw-r--r-- deps/v8/test/cctest/heap/test-heap.cc | 89
-rw-r--r-- deps/v8/test/cctest/heap/test-incremental-marking.cc | 51
-rw-r--r-- deps/v8/test/cctest/heap/test-lab.cc | 2
-rw-r--r-- deps/v8/test/cctest/heap/test-spaces.cc | 100
-rw-r--r-- deps/v8/test/cctest/heap/test-unmapper.cc | 2
-rw-r--r-- deps/v8/test/cctest/heap/test-weak-references.cc | 90
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden | 150
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden | 40
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden | 34
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden | 8
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden | 458
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden | 8
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden | 220
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden | 423
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden | 183
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden | 34
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden | 86
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden | 10
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden | 116
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden | 20
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden | 50
-rw-r--r-- deps/v8/test/cctest/interpreter/interpreter-tester.cc | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/interpreter-tester.h | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/test-bytecode-generator.cc | 97
-rw-r--r-- deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc | 34
-rw-r--r-- deps/v8/test/cctest/interpreter/test-interpreter.cc | 66
-rw-r--r-- deps/v8/test/cctest/interpreter/test-source-positions.cc | 2
-rw-r--r-- deps/v8/test/cctest/libsampler/test-sampler.cc | 2
-rw-r--r-- deps/v8/test/cctest/parsing/test-preparser.cc | 90
-rw-r--r-- deps/v8/test/cctest/parsing/test-scanner-streams.cc | 227
-rw-r--r-- deps/v8/test/cctest/print-extension.h | 4
-rw-r--r-- deps/v8/test/cctest/profiler-extension.h | 4
-rw-r--r-- deps/v8/test/cctest/scope-test-helper.h | 2
-rw-r--r-- deps/v8/test/cctest/setup-isolate-for-tests.cc | 9
-rw-r--r-- deps/v8/test/cctest/setup-isolate-for-tests.h | 4
-rw-r--r-- deps/v8/test/cctest/test-accessors.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-allocation.cc | 20
-rw-r--r-- deps/v8/test/cctest/test-api-accessors.cc | 79
-rw-r--r-- deps/v8/test/cctest/test-api-interceptors.cc | 152
-rw-r--r-- deps/v8/test/cctest/test-api.cc | 949
-rw-r--r-- deps/v8/test/cctest/test-assembler-arm64.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-assembler-mips64.cc | 20
-rw-r--r-- deps/v8/test/cctest/test-assembler-x64.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-circular-queue.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-code-stub-assembler.cc | 40
-rw-r--r-- deps/v8/test/cctest/test-code-stubs-ia32.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-compiler.cc | 13
-rw-r--r-- deps/v8/test/cctest/test-cpu-profiler.cc | 56
-rw-r--r-- deps/v8/test/cctest/test-date.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-debug.cc | 45
-rw-r--r-- deps/v8/test/cctest/test-decls.cc | 26
-rw-r--r-- deps/v8/test/cctest/test-deoptimization.cc | 42
-rw-r--r-- deps/v8/test/cctest/test-disasm-mips64.cc | 13
-rw-r--r-- deps/v8/test/cctest/test-disasm-x64.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-feedback-vector.cc | 70
-rw-r--r-- deps/v8/test/cctest/test-field-type-tracking.cc | 31
-rw-r--r-- deps/v8/test/cctest/test-heap-profiler.cc | 109
-rw-r--r-- deps/v8/test/cctest/test-inobject-slack-tracking.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-inspector.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-intl.cc | 43
-rw-r--r-- deps/v8/test/cctest/test-javascript-arm64.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-lockers.cc | 28
-rw-r--r-- deps/v8/test/cctest/test-log.cc | 25
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-mips.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-mips64.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-mementos.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-parsing.cc | 872
-rw-r--r-- deps/v8/test/cctest/test-platform.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-poison-disasm-arm.cc | 123
-rw-r--r-- deps/v8/test/cctest/test-regexp.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-roots.cc | 65
-rw-r--r-- deps/v8/test/cctest/test-sampler-api.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-serialize.cc | 34
-rw-r--r-- deps/v8/test/cctest/test-smi-lexicographic-compare.cc | 79
-rw-r--r-- deps/v8/test/cctest/test-strings.cc | 118
-rw-r--r-- deps/v8/test/cctest/test-thread-termination.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-threads.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-trace-event.cc | 17
-rw-r--r-- deps/v8/test/cctest/test-typedarrays.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-usecounters.cc | 21
-rw-r--r-- deps/v8/test/cctest/test-weakmaps.cc | 6
-rw-r--r-- deps/v8/test/cctest/test-weaksets.cc | 6
-rw-r--r-- deps/v8/test/cctest/testcfg.py | 12
-rw-r--r-- deps/v8/test/cctest/torque/test-torque.cc | 27
-rw-r--r-- deps/v8/test/cctest/trace-extension.h | 4
-rw-r--r-- deps/v8/test/cctest/unicode-helpers.cc | 31
-rw-r--r-- deps/v8/test/cctest/unicode-helpers.h | 27
-rw-r--r-- deps/v8/test/cctest/wasm/test-c-wasm-entry.cc | 9
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc | 72
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc | 262
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-module.cc | 10
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-simd.cc | 16
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc | 4
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc | 12
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-stack.cc | 5
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-trap-position.cc | 6
-rw-r--r-- deps/v8/test/cctest/wasm/wasm-atomics-utils.h | 8
-rw-r--r-- deps/v8/test/cctest/wasm/wasm-run-utils.cc | 28
-rw-r--r-- deps/v8/test/cctest/wasm/wasm-run-utils.h | 4
-rw-r--r-- deps/v8/test/common/assembler-tester.h | 21
-rw-r--r-- deps/v8/test/common/wasm/wasm-macro-gen.h | 1
-rw-r--r-- deps/v8/test/debugger/debug/debug-bigint.js | 2
-rw-r--r-- deps/v8/test/debugger/debug/debug-break-class-fields.js | 139
-rw-r--r-- deps/v8/test/debugger/debug/debug-liveedit-recursion.js (renamed from deps/v8/test/debugger/debug/debug-live-edit-recursion.js) | 0
-rw-r--r-- deps/v8/test/debugger/debug/es6/generators-debug-scopes.js | 3
-rw-r--r-- deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js | 2
-rw-r--r-- deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-runtime-check.js | 3
-rw-r--r-- deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect.js | 22
-rw-r--r-- deps/v8/test/debugger/debugger.status | 19
-rw-r--r-- deps/v8/test/debugger/testcfg.py | 2
-rw-r--r-- deps/v8/test/fuzzer/multi-return.cc | 19
-rw-r--r-- deps/v8/test/fuzzer/wasm-fuzzer-common.h | 2
-rw-r--r-- deps/v8/test/inspector/debugger/break-on-exception-compiler-errors-expected.txt | 4
-rw-r--r-- deps/v8/test/inspector/debugger/es6-module-liveedit-expected.txt | 17
-rw-r--r-- deps/v8/test/inspector/debugger/es6-module-liveedit.js | 50
-rw-r--r-- deps/v8/test/inspector/debugger/es6-module-set-script-source-expected.txt | 8
-rw-r--r-- deps/v8/test/inspector/debugger/es6-module-set-script-source.js | 33
-rw-r--r-- deps/v8/test/inspector/debugger/eval-scopes-expected.txt | 6
-rw-r--r-- deps/v8/test/inspector/debugger/evaluate-at-first-module-line-expected.txt | 11
-rw-r--r-- deps/v8/test/inspector/debugger/evaluate-at-first-module-line.js | 31
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-class-fields-expected.txt | 206
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-class-fields.js | 37
-rw-r--r-- deps/v8/test/inspector/debugger/object-preview-internal-properties.js | 2
-rw-r--r-- deps/v8/test/inspector/debugger/pause-on-promise-rejections-expected.txt | 22
-rw-r--r-- deps/v8/test/inspector/debugger/pause-on-promise-rejections.js | 68
-rw-r--r-- deps/v8/test/inspector/debugger/resources/break-locations-class-fields.js | 204
-rw-r--r-- deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt | 6
-rw-r--r-- deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt | 18
-rw-r--r-- deps/v8/test/inspector/debugger/script-on-after-compile.js | 2
-rw-r--r-- deps/v8/test/inspector/debugger/this-in-arrow-function-expected.txt | 6
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt | 308
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js | 123
-rw-r--r-- deps/v8/test/inspector/inspector-test.cc | 13
-rw-r--r-- deps/v8/test/inspector/inspector.status | 21
-rw-r--r-- deps/v8/test/inspector/isolate-data.cc | 2
-rw-r--r-- deps/v8/test/inspector/protocol-test.js | 16
-rw-r--r-- deps/v8/test/inspector/runtime/call-function-on-async.js | 2
-rw-r--r-- deps/v8/test/inspector/runtime/compile-script-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/runtime/console-context-expected.txt | 7
-rw-r--r-- deps/v8/test/inspector/runtime/console-time-log-expected.txt | 37
-rw-r--r-- deps/v8/test/inspector/runtime/console-time-log.js | 29
-rw-r--r-- deps/v8/test/inspector/runtime/error-preview-expected.txt | 8
-rw-r--r-- deps/v8/test/inspector/runtime/error-preview.js | 15
-rw-r--r-- deps/v8/test/inspector/runtime/es6-module-expected.txt | 6
-rw-r--r-- deps/v8/test/inspector/runtime/evaluate-unserializable.js | 2
-rw-r--r-- deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js | 2
-rw-r--r-- deps/v8/test/inspector/runtime/get-properties-expected.txt | 20
-rw-r--r-- deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt | 12
-rw-r--r-- deps/v8/test/inspector/runtime/get-properties.js | 5
-rw-r--r-- deps/v8/test/inspector/runtime/internal-properties-entries-expected.txt | 108
-rw-r--r-- deps/v8/test/inspector/runtime/internal-properties-expected.txt | 82
-rw-r--r-- deps/v8/test/inspector/runtime/internal-properties.js | 2
-rw-r--r-- deps/v8/test/inspector/runtime/stable-object-id-expected.txt | 15
-rw-r--r-- deps/v8/test/inspector/runtime/stable-object-id.js | 87
-rw-r--r-- deps/v8/test/inspector/sessions/runtime-remote-object-expected.txt | 18
-rw-r--r-- deps/v8/test/inspector/task-runner.h | 4
-rw-r--r-- deps/v8/test/inspector/testcfg.py | 7
-rw-r--r-- deps/v8/test/intl/break-iterator/options.js | 13
-rw-r--r-- deps/v8/test/intl/break-iterator/subclass.js | 29
-rw-r--r-- deps/v8/test/intl/break-iterator/supported-locales-is-method.js (renamed from deps/v8/test/mjsunit/regress/regress-splice-large-index.js) | 19
-rw-r--r-- deps/v8/test/intl/collator/de-sort.js | 14
-rw-r--r-- deps/v8/test/intl/collator/options.js | 121
-rw-r--r-- deps/v8/test/intl/date-format/constructor-order.js | 100
-rw-r--r-- deps/v8/test/intl/date-format/date-format-to-parts.js | 10
-rw-r--r-- deps/v8/test/intl/date-format/format-is-bound.js | 7
-rw-r--r-- deps/v8/test/intl/date-format/resolved-options-unwrap.js | 11
-rw-r--r-- deps/v8/test/intl/date-format/timezone-conversion.js | 17
-rw-r--r-- deps/v8/test/intl/general/getCanonicalLocales.js | 5
-rw-r--r-- deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js | 2
-rw-r--r-- deps/v8/test/intl/general/language_tags_with_preferred_values.js | 4
-rw-r--r-- deps/v8/test/intl/general/supported-locales-of.js | 90
-rw-r--r-- deps/v8/test/intl/intl.status | 15
-rw-r--r-- deps/v8/test/intl/list-format/supported-locale.js | 19
-rw-r--r-- deps/v8/test/intl/locale/locale-properties.js | 2
-rw-r--r-- deps/v8/test/intl/number-format/format-is-bound.js | 6
-rw-r--r-- deps/v8/test/intl/number-format/resolved-options-unwrap.js | 11
-rw-r--r-- deps/v8/test/intl/number-format/wont-crash-by-1-or-false.js | 12
-rw-r--r-- deps/v8/test/intl/plural-rules/check-to-number.js | 21
-rw-r--r-- deps/v8/test/intl/regress-7982.js | 36
-rw-r--r-- deps/v8/test/intl/regress-888299.js | 7
-rw-r--r-- deps/v8/test/intl/relative-time-format/format-en.js | 23
-rw-r--r-- deps/v8/test/intl/relative-time-format/format-to-parts-plural.js | 28
-rw-r--r-- deps/v8/test/intl/relative-time-format/supported-locale.js | 19
-rw-r--r-- deps/v8/test/intl/segmenter/constructor.js | 216
-rw-r--r-- deps/v8/test/intl/segmenter/resolved-options.js | 299
-rw-r--r-- deps/v8/test/intl/segmenter/segment-iterator-following.js | 17
-rw-r--r-- deps/v8/test/intl/segmenter/segment-iterator-next.js | 11
-rw-r--r-- deps/v8/test/intl/segmenter/segment-iterator-preceding.js | 21
-rw-r--r-- deps/v8/test/intl/segmenter/segment-iterator.js | 13
-rw-r--r-- deps/v8/test/intl/segmenter/segment.js | 7
-rw-r--r-- deps/v8/test/intl/segmenter/supported-locale.js | 22
-rw-r--r-- deps/v8/test/intl/testcfg.py | 2
-rw-r--r-- deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeHoley/run.js | 11
-rw-r--r-- deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargePacked/run.js (renamed from deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLarge/run.js) | 13
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallPacked/run.js (renamed from deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmall/run.js)2
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json101
-rw-r--r--deps/v8/test/js-perf-test/Parsing/arrowfunctions.js12
-rw-r--r--deps/v8/test/js-perf-test/SpreadCallsGeneral/run.js2
-rw-r--r--deps/v8/test/js-perf-test/TurboFan/typedLowering.js4
-rw-r--r--deps/v8/test/message/README.md12
-rw-r--r--deps/v8/test/message/fail/class-fields-computed.js9
-rw-r--r--deps/v8/test/message/fail/class-fields-computed.out5
-rw-r--r--deps/v8/test/message/fail/class-fields-static-throw.js11
-rw-r--r--deps/v8/test/message/fail/class-fields-static-throw.out6
-rw-r--r--deps/v8/test/message/fail/class-fields-throw.js11
-rw-r--r--deps/v8/test/message/fail/class-fields-throw.out7
-rw-r--r--deps/v8/test/message/fail/map-arg-non-iterable.out4
-rw-r--r--deps/v8/test/message/fail/undefined-keyed-property.out4
-rw-r--r--deps/v8/test/message/message.status6
-rw-r--r--deps/v8/test/message/testcfg.py7
-rw-r--r--deps/v8/test/message/wasm-trace-memory-interpreted.js2
-rw-r--r--deps/v8/test/message/wasm-trace-memory-liftoff.js2
-rw-r--r--deps/v8/test/message/wasm-trace-memory.js2
-rw-r--r--deps/v8/test/mjsunit/array-functions-prototype-misc.js40
-rw-r--r--deps/v8/test/mjsunit/array-splice.js18
-rw-r--r--deps/v8/test/mjsunit/array-unshift.js11
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-1.js31
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-2.js31
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-3.js31
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces.js270
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block.js44
-rw-r--r--deps/v8/test/mjsunit/code-coverage-class-fields.js199
-rw-r--r--deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js135
-rw-r--r--deps/v8/test/mjsunit/compiler/array-buffer-is-view.js64
-rw-r--r--deps/v8/test/mjsunit/compiler/array-is-array.js105
-rw-r--r--deps/v8/test/mjsunit/compiler/context-sensitivity.js550
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-constant.js173
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-neutered.js376
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-nonconstant.js173
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js53
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-array.js32
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-rest-parameters.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof2.js233
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof3.js233
-rw-r--r--deps/v8/test/mjsunit/compiler/int64.js91
-rw-r--r--deps/v8/test/mjsunit/compiler/math-imul.js76
-rw-r--r--deps/v8/test/mjsunit/compiler/math-max.js38
-rw-r--r--deps/v8/test/mjsunit/compiler/math-min.js38
-rw-r--r--deps/v8/test/mjsunit/compiler/number-abs.js76
-rw-r--r--deps/v8/test/mjsunit/compiler/number-add.js62
-rw-r--r--deps/v8/test/mjsunit/compiler/number-ceil.js22
-rw-r--r--deps/v8/test/mjsunit/compiler/number-comparison-truncations.js152
-rw-r--r--deps/v8/test/mjsunit/compiler/number-divide.js207
-rw-r--r--deps/v8/test/mjsunit/compiler/number-floor.js22
-rw-r--r--deps/v8/test/mjsunit/compiler/number-issafeinteger.js22
-rw-r--r--deps/v8/test/mjsunit/compiler/number-max.js23
-rw-r--r--deps/v8/test/mjsunit/compiler/number-min.js23
-rw-r--r--deps/v8/test/mjsunit/compiler/number-modulus.js256
-rw-r--r--deps/v8/test/mjsunit/compiler/number-round.js22
-rw-r--r--deps/v8/test/mjsunit/compiler/number-subtract.js34
-rw-r--r--deps/v8/test/mjsunit/compiler/number-toboolean.js45
-rw-r--r--deps/v8/test/mjsunit/compiler/number-trunc.js22
-rw-r--r--deps/v8/test/mjsunit/compiler/redundancy-elimination.js194
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-7121.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-884052.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-890057.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-890620.js25
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-895799.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/strict-equal-symbol.js50
-rw-r--r--deps/v8/test/mjsunit/compiler/string-add-try-catch.js57
-rw-r--r--deps/v8/test/mjsunit/compiler/string-from-code-point.js32
-rw-r--r--deps/v8/test/mjsunit/compiler/typed-array-constructor.js2
-rw-r--r--deps/v8/test/mjsunit/d8/d8-worker-script.js39
-rw-r--r--deps/v8/test/mjsunit/d8/d8-worker-script.txt8
-rw-r--r--deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js4
-rw-r--r--deps/v8/test/mjsunit/d8/d8-worker-spawn-worker.js4
-rw-r--r--deps/v8/test/mjsunit/d8/d8-worker.js24
-rw-r--r--deps/v8/test/mjsunit/es6/array-spread-holey.js52
-rw-r--r--deps/v8/test/mjsunit/es6/proxy-function-tostring.js5
-rw-r--r--deps/v8/test/mjsunit/es6/string-iterator2.js26
-rw-r--r--deps/v8/test/mjsunit/es6/string-iterator3.js20
-rw-r--r--deps/v8/test/mjsunit/es6/string-iterator4.js30
-rw-r--r--deps/v8/test/mjsunit/es6/string-iterator5.js15
-rw-r--r--deps/v8/test/mjsunit/es6/string-iterator6.js11
-rw-r--r--deps/v8/test/mjsunit/es6/string-iterator7.js13
-rw-r--r--deps/v8/test/mjsunit/es6/string-iterator8.js14
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-construct-offset-not-smi.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-set-bytelength-not-smi.js4
-rw-r--r--deps/v8/test/mjsunit/es9/object-spread-ic-dontenum-transition.js26
-rw-r--r--deps/v8/test/mjsunit/es9/object-spread-ic-multiple-transitions.js16
-rw-r--r--deps/v8/test/mjsunit/external-backing-store-gc.js13
-rw-r--r--deps/v8/test/mjsunit/for-in-special-cases.js72
-rw-r--r--deps/v8/test/mjsunit/harmony/async-await-optimization.js124
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-notify.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-value-check.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/add.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/and.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/as-int-n.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/basics.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/comparisons.js26
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/dataview.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/dec.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/div.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/exp.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/inc.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/json.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/mod.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/mul.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/neg.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/not.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/or.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/regressions.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/sar.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/shl.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/sub.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/tonumber.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/turbo.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/typedarray.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/xor.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigintarray-keyedstore-tobigint.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/function-tostring.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/futex.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/global.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-13.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-emoji-flag-sequence-generated.js266
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-emoji-keycap-sequence-generated.js20
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-emoji-modifier-sequence-generated.js538
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-emoji-tag-sequence-generated.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-emoji-zwj-sequence-generated.js782
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-invalid.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-sequence.js88
-rw-r--r--deps/v8/test/mjsunit/harmony/to-number.js15
-rw-r--r--deps/v8/test/mjsunit/harmony/to-primitive.js54
-rw-r--r--deps/v8/test/mjsunit/harmony/well-formed-json-stringify-checked.js2575
-rw-r--r--deps/v8/test/mjsunit/harmony/well-formed-json-stringify-unchecked.js2575
-rw-r--r--deps/v8/test/mjsunit/ignition/regress-616064.js2
-rw-r--r--deps/v8/test/mjsunit/json.js18
-rw-r--r--deps/v8/test/mjsunit/lexicographic-compare.js62
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js10
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status54
-rw-r--r--deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-319722-TypedArrays.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3255.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4271.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4279.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-707066.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8133-1.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8133-2.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-821368.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8237.js57
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8265.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8449.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-883059.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-889722.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-890553.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-892858.js (renamed from deps/v8/test/mjsunit/regress/regress-2185.js)14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-arrow-single-expression-eval.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-380671.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-503578.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-503698.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-503968.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-503991.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-504136.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-504727.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-504729.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-505778.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-506549.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-511880.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-514081.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-518747.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-522496.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-687063.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-722871.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-876443.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-878845.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-879560.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-879898.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-880207.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-882233-1.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-882233-2.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-884933.js85
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-885404.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-887891.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-888825.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-890243.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-891627.js43
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-892472-1.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-892472-2.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-897514.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-899524.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-90771.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-7682.js8
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-801850.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-803427.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8059.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808012.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808848.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8094.js30
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8095.js25
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-810973.js5
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-810973b.js1209
-rw-r--r--deps/v8/test/mjsunit/samevalue.js5
-rw-r--r--deps/v8/test/mjsunit/stack-traces-class-fields.js246
-rw-r--r--deps/v8/test/mjsunit/string-trim.js2
-rw-r--r--deps/v8/test/mjsunit/test-async.js2
-rw-r--r--deps/v8/test/mjsunit/testcfg.py66
-rw-r--r--deps/v8/test/mjsunit/typeof.js65
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref.js13
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/bounds-check-64bit.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/compare-exchange-stress.js207
-rw-r--r--deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js213
-rw-r--r--deps/v8/test/mjsunit/wasm/data-segments.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-export.js81
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-import.js96
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-shared.js158
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js930
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-detaching.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory.js34
-rw-r--r--deps/v8/test/mjsunit/wasm/import-memory.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/import-mutable-global.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-handler-fallback.js21
-rw-r--r--deps/v8/test/mjsunit/wasm/unicode.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js65
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js66
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-interpreter.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-memory.js9
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-module.js2
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc54
-rw-r--r--deps/v8/test/mozilla/mozilla.status15
-rw-r--r--deps/v8/test/mozilla/testcfg.py2
-rw-r--r--deps/v8/test/preparser/testcfg.py2
-rw-r--r--deps/v8/test/test262/harness-agent.js2
-rw-r--r--deps/v8/test/test262/test262.status322
-rw-r--r--deps/v8/test/test262/testcfg.py15
-rw-r--r--deps/v8/test/torque/test-torque.tq274
-rw-r--r--deps/v8/test/unittests/BUILD.gn6
-rw-r--r--deps/v8/test/unittests/allocation-unittest.cc41
-rw-r--r--deps/v8/test/unittests/api/interceptor-unittest.cc2
-rw-r--r--deps/v8/test/unittests/api/isolate-unittest.cc69
-rw-r--r--deps/v8/test/unittests/asmjs/asm-types-unittest.cc4
-rw-r--r--deps/v8/test/unittests/base/address-region-unittest.cc66
-rw-r--r--deps/v8/test/unittests/base/functional-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/platform/condition-variable-unittest.cc4
-rw-r--r--deps/v8/test/unittests/base/platform/platform-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/region-allocator-unittest.cc356
-rw-r--r--deps/v8/test/unittests/base/threaded-list-unittest.cc309
-rw-r--r--deps/v8/test/unittests/cancelable-tasks-unittest.cc2
-rw-r--r--deps/v8/test/unittests/code-stub-assembler-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc590
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc227
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/code-assembler-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc19
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/instruction-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc19
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc11
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc50
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/load-elimination-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc76
-rw-r--r--deps/v8/test/unittests/compiler/node-cache-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/node-matchers-unittest.cc468
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc51
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h19
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc1170
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/counters-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/bitmap-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc19
-rw-r--r--deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc16
-rw-r--r--deps/v8/test/unittests/heap/heap-controller-unittest.cc60
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc12
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc16
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc11
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h4
-rw-r--r--deps/v8/test/unittests/libplatform/default-platform-unittest.cc10
-rw-r--r--deps/v8/test/unittests/libplatform/worker-thread-unittest.cc3
-rw-r--r--deps/v8/test/unittests/object-unittest.cc4
-rw-r--r--deps/v8/test/unittests/objects/microtask-queue-unittest.cc55
-rw-r--r--deps/v8/test/unittests/parser/preparser-unittest.cc2
-rw-r--r--deps/v8/test/unittests/register-configuration-unittest.cc4
-rw-r--r--deps/v8/test/unittests/run-all-unittests.cc6
-rw-r--r--deps/v8/test/unittests/source-position-table-unittest.cc4
-rw-r--r--deps/v8/test/unittests/test-helpers.cc32
-rw-r--r--deps/v8/test/unittests/test-helpers.h2
-rw-r--r--deps/v8/test/unittests/test-utils.cc41
-rw-r--r--deps/v8/test/unittests/test-utils.h24
-rw-r--r--deps/v8/test/unittests/unittests.status5
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc20
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc87
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc153
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-unittest.cc4
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc139
-rw-r--r--deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc2
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py2
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/webkit/array-splice.js5
-rw-r--r--deps/v8/test/webkit/fast/js/toString-overrides-expected.txt4
-rw-r--r--deps/v8/test/webkit/string-trim.js2
-rw-r--r--deps/v8/test/webkit/testcfg.py2
-rw-r--r--deps/v8/test/webkit/webkit.status4
-rw-r--r--deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha12
-rw-r--r--deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha12
-rw-r--r--deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h3
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq1119
-rw-r--r--deps/v8/tools/BUILD.gn4
-rwxr-xr-xdeps/v8/tools/bigint-tester.py4
-rw-r--r--deps/v8/tools/blink_tests/TestExpectations2
-rw-r--r--deps/v8/tools/callstats.html7
-rwxr-xr-xdeps/v8/tools/callstats.py37
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py9
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py1
-rwxr-xr-xdeps/v8/tools/grokdump.py6
-rw-r--r--deps/v8/tools/heap-stats/categories.js8
-rwxr-xr-xdeps/v8/tools/node/fetch_deps.py3
-rwxr-xr-xdeps/v8/tools/node/update_node.py13
-rwxr-xr-xdeps/v8/tools/perf-to-html.py381
-rw-r--r--deps/v8/tools/profile.js16
-rw-r--r--deps/v8/tools/profview/index.html6
-rw-r--r--deps/v8/tools/profview/profile-utils.js62
-rw-r--r--deps/v8/tools/profview/profview.css53
-rw-r--r--deps/v8/tools/profview/profview.js277
-rwxr-xr-xdeps/v8/tools/release/create_release.py2
-rwxr-xr-xdeps/v8/tools/release/filter_build_files.py2
-rw-r--r--deps/v8/tools/release/git_recipes.py8
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py2
-rwxr-xr-xdeps/v8/tools/run-clang-tidy.py420
-rwxr-xr-xdeps/v8/tools/run_perf.py4
-rw-r--r--deps/v8/tools/sanitizers/tsan_suppressions.txt4
-rw-r--r--deps/v8/tools/test262-results-parser.js41
-rw-r--r--deps/v8/tools/testrunner/base_runner.py15
-rw-r--r--deps/v8/tools/testrunner/local/android.py7
-rw-r--r--deps/v8/tools/testrunner/local/command.py2
-rw-r--r--deps/v8/tools/testrunner/local/pool.py3
-rwxr-xr-xdeps/v8/tools/testrunner/num_fuzzer.py2
-rw-r--r--deps/v8/tools/testrunner/objects/predictable.py3
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py90
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py7
-rwxr-xr-xdeps/v8/tools/torque/format-torque.py61
-rw-r--r--deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json4
-rwxr-xr-xdeps/v8/tools/try_perf.py2
-rw-r--r--deps/v8/tools/turbolizer/README.md4
-rw-r--r--deps/v8/tools/turbolizer/rollup.config.js5
-rw-r--r--deps/v8/tools/turbolizer/src/graphmultiview.ts5
-rw-r--r--deps/v8/tools/turbolizer/src/sequence-view.ts235
-rw-r--r--deps/v8/tools/turbolizer/src/source-resolver.ts13
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.css62
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py23
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results1.json24
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status1
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py8
-rwxr-xr-xdeps/v8/tools/v8_presubmit.py83
-rw-r--r--deps/v8/tools/v8heapconst.py463
-rw-r--r--deps/v8/tools/whitespace.txt4
1526 files changed, 78425 insertions, 41694 deletions
diff --git a/deps/v8/.clang-tidy b/deps/v8/.clang-tidy
new file mode 100644
index 0000000000..31d7ddc750
--- /dev/null
+++ b/deps/v8/.clang-tidy
@@ -0,0 +1,20 @@
+---
+Checks: '-*,
+  modernize-redundant-void-arg,
+  modernize-replace-random-shuffle,
+  modernize-shrink-to-fit,
+  modernize-use-auto,
+  modernize-use-bool-literals,
+  modernize-use-equals-default,
+  modernize-use-equals-delete,
+  modernize-use-nullptr,
+  modernize-use-override,
+  google-build-explicit-make-pair,
+  google-explicit-constructor,
+  google-readability-casting'
+WarningsAsErrors: ''
+HeaderFilterRegex: ''
+AnalyzeTemporaryDtors: false
+...
+
diff --git a/deps/v8/.gitattributes b/deps/v8/.gitattributes
index 7ef1e1b74b..b3e9762a93 100644
--- a/deps/v8/.gitattributes
+++ b/deps/v8/.gitattributes
@@ -3,3 +3,5 @@
# Do not modify line endings for binary files (which are sometimes auto
# detected as text files by git).
*.png binary
+# Don't include minified JS in git grep/diff output
+test/mjsunit/asm/sqlite3/*.js -diff
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 511e24d90c..7f09c89e36 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -73,7 +73,6 @@
/tools/clang
/tools/gcmole/gcmole-tools
/tools/gcmole/gcmole-tools.tar.gz
-/tools/gyp
/tools/jsfunfuzz/jsfunfuzz
/tools/jsfunfuzz/jsfunfuzz.tar.gz
/tools/luci-go
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 898bc8feae..b935565945 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -132,6 +132,7 @@ Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Peter Wong <peter.wm.wong@gmail.com>
Paul Lind <plind44@gmail.com>
+PhistucK <phistuck@gmail.com>
Qingyan Li <qingyan.liqy@alibaba-inc.com>
Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
Rafal Krypa <rafal@krypa.net>
@@ -162,6 +163,7 @@ Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
Xiaoyin Liu <xiaoyin.l@outlook.com>
+Yannic Bonenberger <contact@yannic-bonenberger.com>
Yong Wang <ccyongwang@tencent.com>
Yu Yin <xwafish@gmail.com>
Zac Hansen <xaxxon@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 7e3406fb67..83f1fdb0bf 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -76,9 +76,6 @@ declare_args() {
v8_enable_embedded_builtins = v8_use_snapshot && v8_current_cpu != "x86" &&
!is_aix && (!is_win || is_clang)
- # Enable embedded bytecode handlers.
- v8_enable_embedded_bytecode_handlers = false
-
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
@@ -164,6 +161,17 @@ declare_args() {
# setting the "check_v8_header_includes" gclient variable to run a
# specific hook).
v8_check_header_includes = false
+
+ # We reuse the snapshot toolchain for building torque and other generators to
+ # avoid building v8_libbase on the host more than once. On mips with big
+ # endian, the snapshot toolchain is the target toolchain and, hence, can't be
+ # used.
+}
+
+v8_generator_toolchain = v8_snapshot_toolchain
+if (host_cpu == "x64" &&
+ (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
+ v8_generator_toolchain = "//build/toolchain/linux:clang_x64"
}
# Derived defaults.
@@ -197,9 +205,6 @@ assert(
!v8_untrusted_code_mitigations,
"Embedded builtins on ia32 and untrusted code mitigations are incompatible")
-assert(!v8_enable_embedded_bytecode_handlers || v8_enable_embedded_builtins,
- "Embedded bytecode handlers only work with embedded builtins")
-
# Specifies if the target build is a simulator build. Comparing target cpu
# with v8 target cpu to not affect simulator builds for making cross-compile
# snapshots.
@@ -377,10 +382,10 @@ config("features") {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
if (v8_enable_embedded_builtins) {
- defines += [ "V8_EMBEDDED_BUILTINS" ]
- }
- if (v8_enable_embedded_bytecode_handlers) {
- defines += [ "V8_EMBEDDED_BYTECODE_HANDLERS" ]
+ defines += [
+ "V8_EMBEDDED_BUILTINS",
+ "V8_EMBEDDED_BYTECODE_HANDLERS",
+ ]
}
if (v8_use_multi_snapshots) {
defines += [ "V8_MULTI_SNAPSHOTS" ]
@@ -849,6 +854,8 @@ action("postmortem-metadata") {
sources = [
"src/objects.h",
"src/objects-inl.h",
+ "src/objects/allocation-site-inl.h",
+ "src/objects/allocation-site.h",
"src/objects/code-inl.h",
"src/objects/code.h",
"src/objects/data-handler.h",
@@ -859,6 +866,8 @@ action("postmortem-metadata") {
"src/objects/js-array.h",
"src/objects/js-array-buffer-inl.h",
"src/objects/js-array-buffer.h",
+ "src/objects/js-objects-inl.h",
+ "src/objects/js-objects.h",
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp.h",
"src/objects/js-regexp-string-iterator-inl.h",
@@ -889,7 +898,10 @@ torque_files = [
"src/builtins/array.tq",
"src/builtins/array-copywithin.tq",
"src/builtins/array-foreach.tq",
+ "src/builtins/array-lastindexof.tq",
"src/builtins/array-reverse.tq",
+ "src/builtins/array-splice.tq",
+ "src/builtins/array-unshift.tq",
"src/builtins/typed-array.tq",
"src/builtins/data-view.tq",
"test/torque/test-torque.tq",
@@ -911,17 +923,8 @@ action("run_torque") {
"test/cctest/:*",
]
- # We reuse the snapshot toolchain for building torque to not build v8_libbase
- # on the host more than once. On mips with big endian, the snapshot toolchain
- # is the target toolchain and, hence, can't be used.
- v8_torque_toolchain = v8_snapshot_toolchain
- if (host_cpu == "x64" &&
- (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
- v8_torque_toolchain = "//build/toolchain/linux:clang_x64"
- }
-
deps = [
- ":torque($v8_torque_toolchain)",
+ ":torque($v8_generator_toolchain)",
]
script = "tools/run.py"
@@ -939,7 +942,7 @@ action("run_torque") {
}
args = [
- "./" + rebase_path(get_label_info(":torque($v8_torque_toolchain)",
+ "./" + rebase_path(get_label_info(":torque($v8_generator_toolchain)",
"root_out_dir") + "/torque",
root_build_dir),
"-o",
@@ -969,6 +972,7 @@ v8_source_set("torque_generated_initializers") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
+ ":generate_bytecode_builtins_list",
":run_torque",
]
@@ -989,6 +993,24 @@ v8_source_set("torque_generated_initializers") {
configs = [ ":internal_config" ]
}
+action("generate_bytecode_builtins_list") {
+ script = "tools/run.py"
+ outputs = [
+ "$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
+ ]
+ deps = [
+ ":bytecode_builtins_list_generator($v8_generator_toolchain)",
+ ]
+ args = [
+ "./" + rebase_path(
+ get_label_info(
+ ":bytecode_builtins_list_generator($v8_generator_toolchain)",
+ "root_out_dir") + "/bytecode_builtins_list_generator",
+ root_build_dir),
+ rebase_path("$target_gen_dir/builtins-generated/bytecodes-builtins-list.h"),
+ ]
+}
+
# Template to generate different V8 snapshots based on different runtime flags.
# Can be invoked with run_mksnapshot(<name>). The target will resolve to
# run_mksnapshot_<name>. If <name> is "default", no file suffixes will be used.
@@ -1382,8 +1404,6 @@ v8_source_set("v8_initializers") {
"src/interpreter/interpreter-generator.h",
"src/interpreter/interpreter-intrinsics-generator.cc",
"src/interpreter/interpreter-intrinsics-generator.h",
- "src/interpreter/setup-interpreter-internal.cc",
- "src/interpreter/setup-interpreter.h",
]
if (use_jumbo_build == true) {
@@ -1485,6 +1505,7 @@ v8_header_set("v8_headers") {
configs = [ ":internal_config" ]
sources = [
+ "include/v8-internal.h",
"include/v8.h",
"include/v8config.h",
]
@@ -1504,8 +1525,10 @@ v8_source_set("v8_base") {
"//base/trace_event/common/trace_event_common.h",
### gcmole(all) ###
+ "$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
"include/v8-inspector-protocol.h",
"include/v8-inspector.h",
+ "include/v8-internal.h",
"include/v8-platform.h",
"include/v8-profiler.h",
"include/v8-testing.h",
@@ -1516,6 +1539,7 @@ v8_source_set("v8_base") {
"src/accessors.h",
"src/address-map.cc",
"src/address-map.h",
+ "src/allocation-site-scopes-inl.h",
"src/allocation-site-scopes.h",
"src/allocation.cc",
"src/allocation.h",
@@ -1562,6 +1586,7 @@ v8_source_set("v8_base") {
"src/ast/modules.h",
"src/ast/prettyprinter.cc",
"src/ast/prettyprinter.h",
+ "src/ast/scopes-inl.h",
"src/ast/scopes.cc",
"src/ast/scopes.h",
"src/ast/variables.cc",
@@ -1599,7 +1624,6 @@ v8_source_set("v8_base") {
"src/builtins/builtins-internal.cc",
"src/builtins/builtins-interpreter.cc",
"src/builtins/builtins-intl.cc",
- "src/builtins/builtins-intl.h",
"src/builtins/builtins-json.cc",
"src/builtins/builtins-math.cc",
"src/builtins/builtins-number.cc",
@@ -1813,6 +1837,7 @@ v8_source_set("v8_base") {
"src/compiler/operator.h",
"src/compiler/osr.cc",
"src/compiler/osr.h",
+ "src/compiler/per-isolate-compiler-cache.h",
"src/compiler/persistent-map.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
@@ -1824,6 +1849,8 @@ v8_source_set("v8_base") {
"src/compiler/raw-machine-assembler.h",
"src/compiler/redundancy-elimination.cc",
"src/compiler/redundancy-elimination.h",
+ "src/compiler/refs-map.cc",
+ "src/compiler/refs-map.h",
"src/compiler/register-allocator-verifier.cc",
"src/compiler/register-allocator-verifier.h",
"src/compiler/register-allocator.cc",
@@ -2140,6 +2167,8 @@ v8_source_set("v8_base") {
"src/macro-assembler.h",
"src/map-updater.cc",
"src/map-updater.h",
+ "src/math-random.cc",
+ "src/math-random.h",
"src/maybe-handles-inl.h",
"src/maybe-handles.h",
"src/messages.cc",
@@ -2158,6 +2187,7 @@ v8_source_set("v8_base") {
"src/objects/arguments.h",
"src/objects/bigint.cc",
"src/objects/bigint.h",
+ "src/objects/builtin-function-id.h",
"src/objects/code-inl.h",
"src/objects/code.h",
"src/objects/compilation-cache-inl.h",
@@ -2181,11 +2211,17 @@ v8_source_set("v8_base") {
"src/objects/js-array-buffer.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
+ "src/objects/js-break-iterator-inl.h",
+ "src/objects/js-break-iterator.cc",
+ "src/objects/js-break-iterator.h",
"src/objects/js-collator-inl.h",
"src/objects/js-collator.cc",
"src/objects/js-collator.h",
"src/objects/js-collection-inl.h",
"src/objects/js-collection.h",
+ "src/objects/js-date-time-format-inl.h",
+ "src/objects/js-date-time-format.cc",
+ "src/objects/js-date-time-format.h",
"src/objects/js-generator-inl.h",
"src/objects/js-generator.h",
"src/objects/js-list-format-inl.h",
@@ -2194,6 +2230,11 @@ v8_source_set("v8_base") {
"src/objects/js-locale-inl.h",
"src/objects/js-locale.cc",
"src/objects/js-locale.h",
+ "src/objects/js-number-format-inl.h",
+ "src/objects/js-number-format.cc",
+ "src/objects/js-number-format.h",
+ "src/objects/js-objects-inl.h",
+ "src/objects/js-objects.h",
"src/objects/js-plural-rules-inl.h",
"src/objects/js-plural-rules.cc",
"src/objects/js-plural-rules.h",
@@ -2208,6 +2249,9 @@ v8_source_set("v8_base") {
"src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
"src/objects/js-relative-time-format.h",
+ "src/objects/js-segmenter-inl.h",
+ "src/objects/js-segmenter.cc",
+ "src/objects/js-segmenter.h",
"src/objects/literal-objects-inl.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
@@ -2218,6 +2262,9 @@ v8_source_set("v8_base") {
"src/objects/maybe-object-inl.h",
"src/objects/maybe-object.h",
"src/objects/microtask-inl.h",
+ "src/objects/microtask-queue-inl.h",
+ "src/objects/microtask-queue.cc",
+ "src/objects/microtask-queue.h",
"src/objects/microtask.h",
"src/objects/module-inl.h",
"src/objects/module.cc",
@@ -2231,6 +2278,8 @@ v8_source_set("v8_base") {
"src/objects/ordered-hash-table.h",
"src/objects/promise-inl.h",
"src/objects/promise.h",
+ "src/objects/property-array-inl.h",
+ "src/objects/property-array.h",
"src/objects/property-descriptor-object-inl.h",
"src/objects/property-descriptor-object.h",
"src/objects/prototype-info-inl.h",
@@ -2242,6 +2291,8 @@ v8_source_set("v8_base") {
"src/objects/script.h",
"src/objects/shared-function-info-inl.h",
"src/objects/shared-function-info.h",
+ "src/objects/stack-frame-info-inl.h",
+ "src/objects/stack-frame-info.h",
"src/objects/string-inl.h",
"src/objects/string-table.h",
"src/objects/string.h",
@@ -2267,6 +2318,7 @@ v8_source_set("v8_base") {
"src/parsing/parsing.cc",
"src/parsing/parsing.h",
"src/parsing/pattern-rewriter.cc",
+ "src/parsing/preparsed-scope-data-impl.h",
"src/parsing/preparsed-scope-data.cc",
"src/parsing/preparsed-scope-data.h",
"src/parsing/preparser-logger.h",
@@ -2323,6 +2375,8 @@ v8_source_set("v8_base") {
"src/regexp/jsregexp-inl.h",
"src/regexp/jsregexp.cc",
"src/regexp/jsregexp.h",
+ "src/regexp/property-sequences.cc",
+ "src/regexp/property-sequences.h",
"src/regexp/regexp-ast.cc",
"src/regexp/regexp-ast.h",
"src/regexp/regexp-macro-assembler-irregexp-inl.h",
@@ -2344,6 +2398,7 @@ v8_source_set("v8_base") {
"src/reloc-info.cc",
"src/reloc-info.h",
"src/roots-inl.h",
+ "src/roots.cc",
"src/roots.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
@@ -2363,7 +2418,6 @@ v8_source_set("v8_base") {
"src/runtime/runtime-interpreter.cc",
"src/runtime/runtime-intl.cc",
"src/runtime/runtime-literals.cc",
- "src/runtime/runtime-maths.cc",
"src/runtime/runtime-module.cc",
"src/runtime/runtime-numbers.cc",
"src/runtime/runtime-object.cc",
@@ -2395,8 +2449,6 @@ v8_source_set("v8_base") {
"src/snapshot/builtin-serializer-allocator.h",
"src/snapshot/builtin-serializer.cc",
"src/snapshot/builtin-serializer.h",
- "src/snapshot/builtin-snapshot-utils.cc",
- "src/snapshot/builtin-snapshot-utils.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/default-deserializer-allocator.cc",
@@ -2439,6 +2491,8 @@ v8_source_set("v8_base") {
"src/string-builder.cc",
"src/string-case.cc",
"src/string-case.h",
+ "src/string-constants.cc",
+ "src/string-constants.h",
"src/string-hasher-inl.h",
"src/string-hasher.h",
"src/string-search.h",
@@ -2447,6 +2501,7 @@ v8_source_set("v8_base") {
"src/strtod.cc",
"src/strtod.h",
"src/third_party/utf8-decoder/utf8-decoder.h",
+ "src/torque-assembler.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
"src/tracing/traced-value.cc",
@@ -2518,6 +2573,7 @@ v8_source_set("v8_base") {
"src/wasm/module-compiler.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
+ "src/wasm/object-access.h",
"src/wasm/signature-map.cc",
"src/wasm/signature-map.h",
"src/wasm/streaming-decoder.cc",
@@ -2869,6 +2925,7 @@ v8_source_set("v8_base") {
defines = []
deps = [
+ ":generate_bytecode_builtins_list",
":torque_generated_core",
":v8_headers",
":v8_libbase",
@@ -2886,28 +2943,39 @@ v8_source_set("v8_base") {
} else {
sources -= [
"src/builtins/builtins-intl.cc",
- "src/builtins/builtins-intl.h",
"src/char-predicates.cc",
"src/intl.cc",
"src/intl.h",
"src/objects/intl-objects-inl.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
+ "src/objects/js-break-iterator-inl.h",
+ "src/objects/js-break-iterator.cc",
+ "src/objects/js-break-iterator.h",
"src/objects/js-collator-inl.h",
"src/objects/js-collator.cc",
"src/objects/js-collator.h",
+ "src/objects/js-date-time-format-inl.h",
+ "src/objects/js-date-time-format.cc",
+ "src/objects/js-date-time-format.h",
"src/objects/js-list-format-inl.h",
"src/objects/js-list-format.cc",
"src/objects/js-list-format.h",
"src/objects/js-locale-inl.h",
"src/objects/js-locale.cc",
"src/objects/js-locale.h",
+ "src/objects/js-number-format-inl.h",
+ "src/objects/js-number-format.cc",
+ "src/objects/js-number-format.h",
"src/objects/js-plural-rules-inl.h",
"src/objects/js-plural-rules.cc",
"src/objects/js-plural-rules.h",
"src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
"src/objects/js-relative-time-format.h",
+ "src/objects/js-segmenter-inl.h",
+ "src/objects/js-segmenter.cc",
+ "src/objects/js-segmenter.h",
"src/runtime/runtime-intl.cc",
]
}
@@ -2916,6 +2984,15 @@ v8_source_set("v8_base") {
sources += [ "$target_gen_dir/debug-support.cc" ]
deps += [ ":postmortem-metadata" ]
}
+
+ # Platforms that don't have CAS support need to link atomic library
+ # to implement atomic memory access
+ if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
+ v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
+ v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
+ v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+ libs = [ "atomic" ]
+ }
}
v8_source_set("torque_base") {
@@ -2923,7 +3000,11 @@ v8_source_set("torque_base") {
sources = [
"src/torque/ast.h",
+ "src/torque/cfg.cc",
+ "src/torque/cfg.h",
"src/torque/contextual.h",
+ "src/torque/csa-generator.cc",
+ "src/torque/csa-generator.h",
"src/torque/declarable.cc",
"src/torque/declarable.h",
"src/torque/declaration-visitor.cc",
@@ -2937,6 +3018,8 @@ v8_source_set("torque_base") {
"src/torque/global-context.h",
"src/torque/implementation-visitor.cc",
"src/torque/implementation-visitor.h",
+ "src/torque/instructions.cc",
+ "src/torque/instructions.h",
"src/torque/scope.cc",
"src/torque/scope.h",
"src/torque/source-positions.cc",
@@ -2956,11 +3039,15 @@ v8_source_set("torque_base") {
]
configs = [ ":internal_config" ]
+ if (is_win && is_asan) {
+ remove_configs = [ "//build/config/sanitizers:default_sanitizer_flags" ]
+ }
}
v8_component("v8_libbase") {
sources = [
"src/base/adapters.h",
+ "src/base/address-region.h",
"src/base/atomic-utils.h",
"src/base/atomicops.h",
"src/base/atomicops_internals_atomicword_compat.h",
@@ -2969,6 +3056,8 @@ v8_component("v8_libbase") {
"src/base/base-export.h",
"src/base/bits.cc",
"src/base/bits.h",
+ "src/base/bounded-page-allocator.cc",
+ "src/base/bounded-page-allocator.h",
"src/base/build_config.h",
"src/base/compiler-specific.h",
"src/base/cpu.cc",
@@ -2994,6 +3083,8 @@ v8_component("v8_libbase") {
"src/base/list.h",
"src/base/logging.cc",
"src/base/logging.h",
+ "src/base/lsan-page-allocator.cc",
+ "src/base/lsan-page-allocator.h",
"src/base/macros.h",
"src/base/once.cc",
"src/base/once.h",
@@ -3010,6 +3101,8 @@ v8_component("v8_libbase") {
"src/base/platform/semaphore.h",
"src/base/platform/time.cc",
"src/base/platform/time.h",
+ "src/base/region-allocator.cc",
+ "src/base/region-allocator.h",
"src/base/ring-buffer.h",
"src/base/safe_conversions.h",
"src/base/safe_conversions_impl.h",
@@ -3237,6 +3330,29 @@ if (v8_monolithic) {
# Executables
#
+if (current_toolchain == v8_generator_toolchain) {
+ v8_executable("bytecode_builtins_list_generator") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ include_dirs = [ "." ]
+
+ sources = [
+ "src/builtins/generate-bytecodes-builtins-list.cc",
+ "src/interpreter/bytecode-operands.cc",
+ "src/interpreter/bytecode-operands.h",
+ "src/interpreter/bytecodes.cc",
+ "src/interpreter/bytecodes.h",
+ ]
+
+ configs = [ ":internal_config" ]
+
+ deps = [
+ ":v8_libbase",
+ "//build/win:default_exe_manifest",
+ ]
+ }
+}
+
if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) {
v8_executable("mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -3272,6 +3388,9 @@ if (current_toolchain == v8_snapshot_toolchain) {
]
configs = [ ":internal_config" ]
+ if (is_win && is_asan) {
+ remove_configs = [ "//build/config/sanitizers:default_sanitizer_flags" ]
+ }
}
}
@@ -3325,11 +3444,32 @@ group("v8_clusterfuzz") {
}
group("v8_archive") {
+ testonly = true
+
deps = [
":d8",
+ "test/cctest:cctest",
]
}
+# TODO(dglazkov): Remove the "!build_with_chromium" condition once this clause
+# is removed from Chromium.
+if (is_fuchsia && !build_with_chromium) {
+ import("//build/config/fuchsia/rules.gni")
+
+ fuchsia_package("d8_fuchsia_pkg") {
+ testonly = true
+ binary = ":d8"
+ package_name_override = "d8"
+ }
+
+ fuchsia_package_runner("d8_fuchsia") {
+ testonly = true
+ package = ":d8_fuchsia_pkg"
+ package_name_override = "d8"
+ }
+}
+
group("v8_fuzzers") {
testonly = true
data_deps = [
@@ -3636,6 +3776,7 @@ v8_source_set("wasm_module_runner") {
]
deps = [
+ ":generate_bytecode_builtins_list",
":torque_generated_core",
]
@@ -3719,6 +3860,7 @@ v8_source_set("lib_wasm_fuzzer_common") {
]
deps = [
+ ":generate_bytecode_builtins_list",
":torque_generated_core",
]
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 428325ad58..a3377ab473 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1643 @@
+2018-10-09: Version 7.1.302
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-09: Version 7.1.301
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.300
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.299
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.298
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.297
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.296
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.295
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.294
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.293
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.292
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.291
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.290
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.289
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.288
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.287
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.286
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.285
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.284
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-08: Version 7.1.283
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-07: Version 7.1.282
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-07: Version 7.1.281
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-07: Version 7.1.280
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-06: Version 7.1.279
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-06: Version 7.1.278
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-06: Version 7.1.277
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-06: Version 7.1.276
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-06: Version 7.1.275
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.274
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.273
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.272
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.271
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.270
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.269
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.268
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.267
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.266
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.265
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.264
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.263
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.262
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.261
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-05: Version 7.1.260
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.259
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.258
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.257
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.256
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.255
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.254
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.253
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.252
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.251
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.250
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.249
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.248
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.247
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-04: Version 7.1.246
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.245
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.244
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.243
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.242
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.241
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.240
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.239
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.238
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.237
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.236
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-03: Version 7.1.235
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-02: Version 7.1.234
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-02: Version 7.1.233
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-02: Version 7.1.232
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-02: Version 7.1.231
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-02: Version 7.1.230
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-02: Version 7.1.229
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-02: Version 7.1.228
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-02: Version 7.1.227
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-01: Version 7.1.226
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-01: Version 7.1.225
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-01: Version 7.1.224
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-01: Version 7.1.223
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-01: Version 7.1.222
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-01: Version 7.1.221
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-01: Version 7.1.220
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-01: Version 7.1.219
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-30: Version 7.1.218
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-29: Version 7.1.217
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-29: Version 7.1.216
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-29: Version 7.1.215
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.214
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.213
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.212
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.211
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.210
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.209
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.208
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.207
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.206
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.205
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.204
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.203
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-28: Version 7.1.202
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.201
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.200
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.199
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.198
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.197
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.196
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.195
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.194
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.193
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.192
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.191
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.190
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.189
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.188
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.187
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-27: Version 7.1.186
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-26: Version 7.1.185
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-26: Version 7.1.184
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-26: Version 7.1.183
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-26: Version 7.1.182
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-26: Version 7.1.181
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-26: Version 7.1.180
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-26: Version 7.1.179
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-25: Version 7.1.178
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-25: Version 7.1.177
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-25: Version 7.1.176
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-25: Version 7.1.175
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-25: Version 7.1.174
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-25: Version 7.1.173
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-25: Version 7.1.172
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-24: Version 7.1.171
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-24: Version 7.1.170
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-24: Version 7.1.169
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-24: Version 7.1.168
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-24: Version 7.1.167
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-24: Version 7.1.166
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-24: Version 7.1.165
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-24: Version 7.1.164
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.163
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.162
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.161
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.160
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.159
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.158
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.157
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.156
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.155
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.154
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.153
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.152
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.151
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.150
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-21: Version 7.1.149
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-20: Version 7.1.148
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.147
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.146
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.145
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.144
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.143
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.142
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.141
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.140
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.139
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.138
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.137
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.136
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.135
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.134
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-19: Version 7.1.133
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.132
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.131
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.130
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.129
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.128
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.127
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.126
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.125
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.124
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.123
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.122
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-18: Version 7.1.121
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-17: Version 7.1.120
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-17: Version 7.1.119
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-17: Version 7.1.118
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-17: Version 7.1.117
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-17: Version 7.1.116
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-17: Version 7.1.115
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-17: Version 7.1.114
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.113
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.112
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.111
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.110
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.109
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.108
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.107
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.106
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.105
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.104
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.103
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.102
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.101
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.100
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-14: Version 7.1.99
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-13: Version 7.1.98
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-13: Version 7.1.97
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-13: Version 7.1.96
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-13: Version 7.1.95
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-13: Version 7.1.94
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-13: Version 7.1.93
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-13: Version 7.1.92
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-12: Version 7.1.91
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-12: Version 7.1.90
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-12: Version 7.1.89
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-12: Version 7.1.88
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-12: Version 7.1.87
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-12: Version 7.1.86
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-11: Version 7.1.85
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-11: Version 7.1.84
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-11: Version 7.1.83
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-11: Version 7.1.82
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-11: Version 7.1.81
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-11: Version 7.1.80
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-10: Version 7.1.79
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-10: Version 7.1.78
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-10: Version 7.1.77
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-10: Version 7.1.76
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-10: Version 7.1.75
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-10: Version 7.1.74
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-10: Version 7.1.73
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-10: Version 7.1.72
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-10: Version 7.1.71
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-09: Version 7.1.70
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-08: Version 7.1.69
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.68
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.67
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.66
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.65
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.64
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.63
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.62
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.61
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.60
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.59
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.58
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.57
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-07: Version 7.1.56
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.55
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.54
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.53
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.52
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.51
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.50
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.49
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.48
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.47
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.46
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.45
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.44
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.43
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.42
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-06: Version 7.1.41
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.40
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.39
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.38
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.37
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.36
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.35
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.34
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.33
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.32
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.31
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.30
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.29
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.28
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-05: Version 7.1.27
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.26
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.25
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.24
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.23
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.22
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.21
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.20
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.19
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.18
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.17
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-04: Version 7.1.16
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-03: Version 7.1.15
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-03: Version 7.1.14
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-03: Version 7.1.13
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-03: Version 7.1.12
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-03: Version 7.1.11
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-03: Version 7.1.10
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-02: Version 7.1.9
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-02: Version 7.1.8
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-02: Version 7.1.7
+
+ Performance and stability improvements on all platforms.
+
+
+2018-09-01: Version 7.1.6
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-31: Version 7.1.5
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-31: Version 7.1.4
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-31: Version 7.1.3
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-31: Version 7.1.2
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-31: Version 7.1.1
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.302
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.301
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.300
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.299
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.298
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.297
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.296
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.295
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.294
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.293
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.292
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.291
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.290
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.289
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-29: Version 7.0.288
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.287
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.286
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.285
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.284
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.283
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.282
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.281
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.280
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.279
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.278
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-28: Version 7.0.277
+
+ Performance and stability improvements on all platforms.
+
+
2018-08-27: Version 7.0.276
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 42e7a40baa..fdd96f9b82 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -13,15 +13,13 @@ vars = {
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + 'dd6b994b32b498e9e766ce60c44da0aec3a2a188',
- 'v8/tools/gyp':
- Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + 'a7674eacc34947257c78fe6ba5cf0da17f60696c',
'v8/third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'aaf2cc09c6874e394c6c1e4692360cc400d6b388',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '71e3be7a50c21faeee91ed99a8d5addfb7594e7c',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'a191af9d025859e8368b8b469120d78006e9f5f6',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'c52a2a250d6c5f5cbdd015dff36af7c5d0ae1150',
'v8/third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'd8cf40c4592dcec7fb01fcbdf1f6d4958b3fbf11',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'a90cbf3b4216430a437991fb53ede8e048dea454',
'v8/buildtools':
Var('chromium_url') + '/chromium/buildtools.git' + '@' + '2dff9c9c74e9d732e6fe57c84ef7fd044cc45d96',
'v8/base/trace_event/common':
@@ -35,7 +33,7 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'bc2c0a9307285fa36e03e7cdb6bf8623390ff855',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '9ec8468cfde0868ce5f3893e819087278c5af988',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -43,17 +41,15 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/fuchsia-sdk': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '3ec92c896bcbddc46e2a073ebfdd25aa1194656e',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '6e1868c9083769d489d3fc25657339d50c22b1d8',
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'd5266326752f0a1dadbd310932d8f4fd8c3c5e7d',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '2e68926a9d4929e9289373cd49e40ddcb9a628f7',
'v8/third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
- 'v8/third_party/proguard':
- Var('chromium_url') + '/chromium/src/third_party/proguard.git' + '@' + 'a3729bea473bb5ffc5eaf289f5733bc5e2861c07',
'v8/tools/swarming_client':
Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '486c9b53c4d54dd4b95bb6ce0e31160e600dfc11',
'v8/test/benchmarks/data':
@@ -61,25 +57,35 @@ deps = {
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a6c1d05ac4fed084fa047e4c52ab2a8c9c2a8aef',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '00cfe1628cc03164dcf03f01ba9c84376e9be735',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
- 'v8/third_party/qemu': {
+ 'v8/third_party/qemu-linux-x64': {
'packages': [
{
'package': 'fuchsia/qemu/linux-amd64',
'version': '9cc486c5b18a0be515c39a280ca9a309c54cf994'
},
],
- 'condition': 'checkout_fuchsia',
+ 'condition': 'host_os == "linux" and checkout_fuchsia',
+ 'dep_type': 'cipd',
+ },
+ 'v8/third_party/qemu-mac-x64': {
+ 'packages': [
+ {
+ 'package': 'fuchsia/qemu/mac-amd64',
+ 'version': '2d3358ae9a569b2d4a474f498b32b202a152134f'
+ },
+ ],
+ 'condition': 'host_os == "mac" and checkout_fuchsia',
'dep_type': 'cipd',
},
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'bb4146fb8a9dde405b71914657bb461dc93912ab',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '7792d28b069af6dd3a86d1ba83b7f5c4ede605dc',
'v8/tools/luci-go':
Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '445d7c4b6a4f10e188edb395b132e3996b127691',
'v8/test/wasm-js':
- Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '240ea673de6e75d78ae472f66127301ecab22a99',
+ Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'db9cd40808a90ecc5f4a23e88fb375c8f60b8d52',
}
recursedeps = [
@@ -344,13 +350,6 @@ hooks = [
'condition': 'checkout_win',
'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
},
- {
- # Update the Mac toolchain if necessary.
- 'name': 'mac_toolchain',
- 'pattern': '.',
- 'condition': 'checkout_mac',
- 'action': ['python', 'v8/build/mac_toolchain.py'],
- },
  # Pull binutils for linux, which enables debug fission for faster linking /
# debugging when used with clang on Ubuntu Precise.
# https://code.google.com/p/chromium/issues/detail?id=352046
@@ -388,6 +387,23 @@ hooks = [
],
},
{
+ # Mac doesn't use lld so it's not included in the default clang bundle
+    # there. However, lld is needed in Fuchsia cross builds, so
+ # download it there.
+ # Should run after the clang hook.
+ 'name': 'lld/mac',
+ 'pattern': '.',
+ 'condition': 'host_os == "mac" and checkout_fuchsia',
+ 'action': ['python', 'v8/tools/clang/scripts/download_lld_mac.py'],
+ },
+ {
+    # Mac does not have llvm-objdump; download it for cross builds in Fuchsia.
+ 'name': 'llvm-objdump',
+ 'pattern': '.',
+ 'condition': 'host_os == "mac" and checkout_fuchsia',
+ 'action': ['python', 'v8/tools/clang/scripts/download_objdump.py'],
+ },
+ {
'name': 'mips_toolchain',
'pattern': '.',
'condition': 'download_mips_toolchain',
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index bd780ce62f..88a64e727b 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -73,9 +73,10 @@ def _V8PresubmitChecks(input_api, output_api):
import sys
sys.path.append(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools'))
- from presubmit import CppLintProcessor
- from presubmit import SourceProcessor
- from presubmit import StatusFilesProcessor
+ from v8_presubmit import CppLintProcessor
+ from v8_presubmit import TorqueFormatProcessor
+ from v8_presubmit import SourceProcessor
+ from v8_presubmit import StatusFilesProcessor
def FilterFile(affected_file):
return input_api.FilterSourceFile(
@@ -83,10 +84,19 @@ def _V8PresubmitChecks(input_api, output_api):
white_list=None,
black_list=_NO_LINT_PATHS)
+ def FilterTorqueFile(affected_file):
+ return input_api.FilterSourceFile(
+ affected_file,
+ white_list=(r'.+\.tq'))
+
results = []
if not CppLintProcessor().RunOnFiles(
input_api.AffectedFiles(file_filter=FilterFile, include_deletes=False)):
results.append(output_api.PresubmitError("C++ lint check failed"))
+ if not TorqueFormatProcessor().RunOnFiles(
+ input_api.AffectedFiles(file_filter=FilterTorqueFile,
+ include_deletes=False)):
+ results.append(output_api.PresubmitError("Torque format check failed"))
if not SourceProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=False)):
results.append(output_api.PresubmitError(
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index a5312f76ac..6cb248f160 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -143,7 +143,15 @@ template("v8_source_set") {
}
}
target(link_target_type, target_name) {
- forward_variables_from(invoker, "*", [ "configs" ])
+ forward_variables_from(invoker,
+ "*",
+ [
+ "configs",
+ "remove_configs",
+ ])
+ if (defined(invoker.remove_configs)) {
+ configs -= invoker.remove_configs
+ }
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index e430e7c350..250d5fbdb9 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -35,7 +35,7 @@ class V8_PLATFORM_EXPORT TraceObject {
const char* as_string;
};
- TraceObject() {}
+ TraceObject() = default;
~TraceObject();
void Initialize(
char phase, const uint8_t* category_enabled_flag, const char* name,
@@ -106,8 +106,8 @@ class V8_PLATFORM_EXPORT TraceObject {
class V8_PLATFORM_EXPORT TraceWriter {
public:
- TraceWriter() {}
- virtual ~TraceWriter() {}
+ TraceWriter() = default;
+ virtual ~TraceWriter() = default;
virtual void AppendTraceEvent(TraceObject* trace_event) = 0;
virtual void Flush() = 0;
@@ -147,8 +147,8 @@ class V8_PLATFORM_EXPORT TraceBufferChunk {
class V8_PLATFORM_EXPORT TraceBuffer {
public:
- TraceBuffer() {}
- virtual ~TraceBuffer() {}
+ TraceBuffer() = default;
+ virtual ~TraceBuffer() = default;
virtual TraceObject* AddTraceEvent(uint64_t* handle) = 0;
virtual TraceObject* GetEventByHandle(uint64_t handle) = 0;
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index e06963949a..f0a8b5f163 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -62,7 +62,7 @@ class V8_EXPORT StringView {
class V8_EXPORT StringBuffer {
public:
- virtual ~StringBuffer() {}
+ virtual ~StringBuffer() = default;
virtual const StringView& string() = 0;
// This method copies contents.
static std::unique_ptr<StringBuffer> create(const StringView&);
@@ -107,7 +107,7 @@ class V8_EXPORT V8StackTrace {
virtual StringView topScriptId() const = 0;
virtual StringView topFunctionName() const = 0;
- virtual ~V8StackTrace() {}
+ virtual ~V8StackTrace() = default;
virtual std::unique_ptr<protocol::Runtime::API::StackTrace>
buildInspectorObject() const = 0;
virtual std::unique_ptr<StringBuffer> toString() const = 0;
@@ -118,13 +118,13 @@ class V8_EXPORT V8StackTrace {
class V8_EXPORT V8InspectorSession {
public:
- virtual ~V8InspectorSession() {}
+ virtual ~V8InspectorSession() = default;
// Cross-context inspectable values (DOM nodes in different worlds, etc.).
class V8_EXPORT Inspectable {
public:
virtual v8::Local<v8::Value> get(v8::Local<v8::Context>) = 0;
- virtual ~Inspectable() {}
+ virtual ~Inspectable() = default;
};
virtual void addInspectedObject(std::unique_ptr<Inspectable>) = 0;
@@ -162,7 +162,7 @@ class V8_EXPORT V8InspectorSession {
class V8_EXPORT V8InspectorClient {
public:
- virtual ~V8InspectorClient() {}
+ virtual ~V8InspectorClient() = default;
virtual void runMessageLoopOnPause(int contextGroupId) {}
virtual void quitMessageLoopOnPause() {}
@@ -239,7 +239,7 @@ struct V8_EXPORT V8StackTraceId {
class V8_EXPORT V8Inspector {
public:
static std::unique_ptr<V8Inspector> create(v8::Isolate*, V8InspectorClient*);
- virtual ~V8Inspector() {}
+ virtual ~V8Inspector() = default;
// Contexts instrumentation.
virtual void contextCreated(const V8ContextInfo&) = 0;
@@ -277,7 +277,7 @@ class V8_EXPORT V8Inspector {
// Connection.
class V8_EXPORT Channel {
public:
- virtual ~Channel() {}
+ virtual ~Channel() = default;
virtual void sendResponse(int callId,
std::unique_ptr<StringBuffer> message) = 0;
virtual void sendNotification(std::unique_ptr<StringBuffer> message) = 0;
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
new file mode 100644
index 0000000000..80f7367bfe
--- /dev/null
+++ b/deps/v8/include/v8-internal.h
@@ -0,0 +1,316 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_INTERNAL_H_
+#define INCLUDE_V8_INTERNAL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <type_traits>
+
+#include "v8-version.h" // NOLINT(build/include)
+#include "v8config.h" // NOLINT(build/include)
+
+namespace v8 {
+
+class Context;
+class Data;
+class Isolate;
+
+namespace internal {
+
+class Object;
+
+/**
+ * Configuration of tagging scheme.
+ */
+const int kApiPointerSize = sizeof(void*); // NOLINT
+const int kApiDoubleSize = sizeof(double); // NOLINT
+const int kApiIntSize = sizeof(int); // NOLINT
+const int kApiInt64Size = sizeof(int64_t); // NOLINT
+
+// Tag information for HeapObject.
+const int kHeapObjectTag = 1;
+const int kWeakHeapObjectTag = 3;
+const int kHeapObjectTagSize = 2;
+const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+
+// Tag information for Smi.
+const int kSmiTag = 0;
+const int kSmiTagSize = 1;
+const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
+
+template <size_t tagged_ptr_size>
+struct SmiTagging;
+
+template <int kSmiShiftSize>
+V8_INLINE internal::Object* IntToSmi(int value) {
+ int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
+ intptr_t tagged_value =
+ (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
+ return reinterpret_cast<internal::Object*>(tagged_value);
+}
+
+// Smi constants for systems where tagged pointer is a 32-bit value.
+template <>
+struct SmiTagging<4> {
+ enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
+ static int SmiShiftSize() { return kSmiShiftSize; }
+ static int SmiValueSize() { return kSmiValueSize; }
+ V8_INLINE static int SmiToInt(const internal::Object* value) {
+ int shift_bits = kSmiTagSize + kSmiShiftSize;
+ // Throw away top 32 bits and shift down (requires >> to be sign extending).
+ return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
+ }
+ V8_INLINE static internal::Object* IntToSmi(int value) {
+ return internal::IntToSmi<kSmiShiftSize>(value);
+ }
+ V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
+    // To be representable as a tagged small integer, the two
+ // most-significant bits of 'value' must be either 00 or 11 due to
+ // sign-extension. To check this we add 01 to the two
+ // most-significant bits, and check if the most-significant bit is 0
+ //
+ // CAUTION: The original code below:
+ // bool result = ((value + 0x40000000) & 0x80000000) == 0;
+ // may lead to incorrect results according to the C language spec, and
+ // in fact doesn't work correctly with gcc4.1.1 in some cases: The
+ // compiler may produce undefined results in case of signed integer
+ // overflow. The computation must be done w/ unsigned ints.
+ return static_cast<uintptr_t>(value) + 0x40000000U < 0x80000000U;
+ }
+};
+
+// Smi constants for systems where tagged pointer is a 64-bit value.
+template <>
+struct SmiTagging<8> {
+ enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
+ static int SmiShiftSize() { return kSmiShiftSize; }
+ static int SmiValueSize() { return kSmiValueSize; }
+ V8_INLINE static int SmiToInt(const internal::Object* value) {
+ int shift_bits = kSmiTagSize + kSmiShiftSize;
+ // Shift down and throw away top 32 bits.
+ return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
+ }
+ V8_INLINE static internal::Object* IntToSmi(int value) {
+ return internal::IntToSmi<kSmiShiftSize>(value);
+ }
+ V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
+ // To be representable as a long smi, the value must be a 32-bit integer.
+ return (value == static_cast<int32_t>(value));
+ }
+};
+
+#if V8_COMPRESS_POINTERS
+static_assert(
+ kApiPointerSize == kApiInt64Size,
+ "Pointer compression can be enabled only for 64-bit architectures");
+typedef SmiTagging<4> PlatformSmiTagging;
+#else
+typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
+#endif
+
+const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
+const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
+const int kSmiMinValue = (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
+const int kSmiMaxValue = -(kSmiMinValue + 1);
+constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
+constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
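The tagging arithmetic above is easy to sanity-check in isolation. A standalone sketch of the 32-bit scheme follows (constants restated locally so it compiles without the v8 headers; SmiTagging<4>'s layout of kSmiTagSize == 1, kSmiShiftSize == 0 and a 31-bit payload is assumed):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;  // the low bit is the tag; 0 means Smi
constexpr intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

intptr_t IntToSmi32(int value) {
  // Payload shifted up by one bit; the tag bit stays 0.
  return static_cast<intptr_t>(value) << kSmiTagSize;
}

int SmiToInt32(intptr_t tagged) {
  // Arithmetic (sign-extending) right shift restores the payload.
  return static_cast<int>(tagged >> kSmiTagSize);
}

constexpr bool IsValidSmi32(intptr_t value) {
  // The same overflow-safe check as SmiTagging<4>::IsValidSmi above.
  return static_cast<uintptr_t>(value) + 0x40000000U < 0x80000000U;
}

int main() {
  assert(SmiToInt32(IntToSmi32(42)) == 42);
  assert(SmiToInt32(IntToSmi32(-7)) == -7);
  assert((IntToSmi32(42) & kSmiTagMask) == 0);  // Smis end in a 0 bit
  assert(IsValidSmi32(0x3FFFFFFF));             //  2^30 - 1: largest Smi
  assert(!IsValidSmi32(0x40000000));            //  2^30: one past the range
  assert(IsValidSmi32(-0x40000000));            // -2^30: smallest Smi
  return 0;
}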
+
+/**
+ * This class exports constants and functionality from within v8 that
+ * are necessary to implement inline functions in the v8 API. Don't
+ * depend on functions and constants defined here.
+ */
+class Internals {
+ public:
+ // These values match non-compiler-dependent values defined within
+ // the implementation of v8.
+ static const int kHeapObjectMapOffset = 0;
+ static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
+ static const int kStringResourceOffset =
+ 1 * kApiPointerSize + 2 * kApiIntSize;
+
+ static const int kOddballKindOffset = 4 * kApiPointerSize + kApiDoubleSize;
+ static const int kForeignAddressOffset = kApiPointerSize;
+ static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
+ static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
+ static const int kContextHeaderSize = 2 * kApiPointerSize;
+ static const int kContextEmbedderDataIndex = 5;
+ static const int kFullStringRepresentationMask = 0x0f;
+ static const int kStringEncodingMask = 0x8;
+ static const int kExternalTwoByteRepresentationTag = 0x02;
+ static const int kExternalOneByteRepresentationTag = 0x0a;
+
+ static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
+ static const int kExternalMemoryOffset = 4 * kApiPointerSize;
+ static const int kExternalMemoryLimitOffset =
+ kExternalMemoryOffset + kApiInt64Size;
+ static const int kExternalMemoryAtLastMarkCompactOffset =
+ kExternalMemoryLimitOffset + kApiInt64Size;
+ static const int kIsolateRootsOffset = kExternalMemoryLimitOffset +
+ kApiInt64Size + kApiInt64Size +
+ kApiPointerSize + kApiPointerSize;
+ static const int kUndefinedValueRootIndex = 4;
+ static const int kTheHoleValueRootIndex = 5;
+ static const int kNullValueRootIndex = 6;
+ static const int kTrueValueRootIndex = 7;
+ static const int kFalseValueRootIndex = 8;
+ static const int kEmptyStringRootIndex = 9;
+
+ static const int kNodeClassIdOffset = 1 * kApiPointerSize;
+ static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
+ static const int kNodeStateMask = 0x7;
+ static const int kNodeStateIsWeakValue = 2;
+ static const int kNodeStateIsPendingValue = 3;
+ static const int kNodeStateIsNearDeathValue = 4;
+ static const int kNodeIsIndependentShift = 3;
+ static const int kNodeIsActiveShift = 4;
+
+ static const int kFirstNonstringType = 0x80;
+ static const int kOddballType = 0x83;
+ static const int kForeignType = 0x87;
+ static const int kJSSpecialApiObjectType = 0x410;
+ static const int kJSApiObjectType = 0x420;
+ static const int kJSObjectType = 0x421;
+
+ static const int kUndefinedOddballKind = 5;
+ static const int kNullOddballKind = 3;
+
+ static const uint32_t kNumIsolateDataSlots = 4;
+
+  // Soft limit for AdjustAmountOfExternalAllocatedMemory. Trigger an
+ // incremental GC once the external memory reaches this limit.
+ static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;
+
+ V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
+ V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
+#ifdef V8_ENABLE_CHECKS
+ CheckInitializedImpl(isolate);
+#endif
+ }
+
+ V8_INLINE static bool HasHeapObjectTag(const internal::Object* value) {
+ return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
+ kHeapObjectTag);
+ }
+
+ V8_INLINE static int SmiValue(const internal::Object* value) {
+ return PlatformSmiTagging::SmiToInt(value);
+ }
+
+ V8_INLINE static internal::Object* IntToSmi(int value) {
+ return PlatformSmiTagging::IntToSmi(value);
+ }
+
+ V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
+ return PlatformSmiTagging::IsValidSmi(value);
+ }
+
+ V8_INLINE static int GetInstanceType(const internal::Object* obj) {
+ typedef internal::Object O;
+ O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
+ return ReadField<uint16_t>(map, kMapInstanceTypeOffset);
+ }
+
+ V8_INLINE static int GetOddballKind(const internal::Object* obj) {
+ typedef internal::Object O;
+ return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
+ }
+
+ V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
+ int representation = (instance_type & kFullStringRepresentationMask);
+ return representation == kExternalTwoByteRepresentationTag;
+ }
+
+ V8_INLINE static uint8_t GetNodeFlag(internal::Object** obj, int shift) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ return *addr & static_cast<uint8_t>(1U << shift);
+ }
+
+ V8_INLINE static void UpdateNodeFlag(internal::Object** obj, bool value,
+ int shift) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ uint8_t mask = static_cast<uint8_t>(1U << shift);
+ *addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
+ }
+
+ V8_INLINE static uint8_t GetNodeState(internal::Object** obj) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ return *addr & kNodeStateMask;
+ }
+
+ V8_INLINE static void UpdateNodeState(internal::Object** obj, uint8_t value) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ *addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
+ }
+
+ V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot,
+ void* data) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
+ kIsolateEmbedderDataOffset + slot * kApiPointerSize;
+ *reinterpret_cast<void**>(addr) = data;
+ }
+
+ V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
+ uint32_t slot) {
+ const uint8_t* addr = reinterpret_cast<const uint8_t*>(isolate) +
+ kIsolateEmbedderDataOffset + slot * kApiPointerSize;
+ return *reinterpret_cast<void* const*>(addr);
+ }
+
+ V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate, int index) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
+ return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
+ }
+
+ template <typename T>
+ V8_INLINE static T ReadField(const internal::Object* ptr, int offset) {
+ const uint8_t* addr =
+ reinterpret_cast<const uint8_t*>(ptr) + offset - kHeapObjectTag;
+ return *reinterpret_cast<const T*>(addr);
+ }
+
+ template <typename T>
+ V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) {
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O* ctx = *reinterpret_cast<O* const*>(context);
+ int embedder_data_offset =
+ I::kContextHeaderSize +
+ (internal::kApiPointerSize * I::kContextEmbedderDataIndex);
+ O* embedder_data = I::ReadField<O*>(ctx, embedder_data_offset);
+ int value_offset =
+ I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index);
+ return I::ReadField<T>(embedder_data, value_offset);
+ }
+};
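One term worth pausing on in Internals::ReadField above is the subtraction of kHeapObjectTag: tagged HeapObject pointers carry a 1 in their low bits, so the object's real base address sits one byte below the raw pointer value. A minimal standalone sketch of that address arithmetic, using a fake 16-byte object rather than anything from a real V8 heap:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  constexpr int kHeapObjectTag = 1;

  // Fake "heap object": 16 aligned bytes with a field of 0x1234 at offset 8.
  alignas(8) unsigned char object[16] = {};
  const uint64_t field = 0x1234;
  std::memcpy(object + 8, &field, sizeof(field));

  // The kind of tagged pointer V8 passes around: base address | tag.
  const uintptr_t tagged =
      reinterpret_cast<uintptr_t>(object) | kHeapObjectTag;

  // What ReadField<uint64_t>(ptr, 8) boils down to: untag, offset, read.
  const unsigned char* addr =
      reinterpret_cast<const unsigned char*>(tagged) + 8 - kHeapObjectTag;
  uint64_t read;
  std::memcpy(&read, addr, sizeof(read));
  assert(read == 0x1234);
  return 0;
}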
+
+// Only perform cast check for types derived from v8::Data since
+// other types do not implement the Cast method.
+template <bool PerformCheck>
+struct CastCheck {
+ template <class T>
+ static void Perform(T* data);
+};
+
+template <>
+template <class T>
+void CastCheck<true>::Perform(T* data) {
+ T::Cast(data);
+}
+
+template <>
+template <class T>
+void CastCheck<false>::Perform(T* data) {}
+
+template <class T>
+V8_INLINE void PerformCastCheck(T* data) {
+ CastCheck<std::is_base_of<Data, T>::value>::Perform(data);
+}
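CastCheck above is a small compile-time dispatch trick: because Perform is specialized on a bool, T::Cast is only ever instantiated for types derived from v8::Data, so types with no Cast method still compile. The same pattern reduced to a standalone sketch, with toy Base/Unrelated types standing in for the v8 classes:

#include <iostream>
#include <type_traits>

struct Base {
  template <class T>
  static void Cast(T*) { std::cout << "checked\n"; }
};
struct Unrelated {};  // deliberately has no Cast()

template <bool PerformCheck>
struct Check {
  template <class T>
  static void Perform(T* data);
};

template <>
template <class T>
void Check<true>::Perform(T* data) {
  T::Cast(data);  // instantiated only when PerformCheck is true
}

template <>
template <class T>
void Check<false>::Perform(T*) {}  // no-op; T::Cast is never named

template <class T>
void MaybeCheck(T* data) {
  Check<std::is_base_of<Base, T>::value>::Perform(data);
}

int main() {
  Base b;
  Unrelated u;
  MaybeCheck(&b);  // runs the check
  MaybeCheck(&u);  // compiles even though Unrelated has no Cast()
  return 0;
}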
+
+} // namespace internal
+} // namespace v8
+
+#endif // INCLUDE_V8_INTERNAL_H_
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index cfeb13b658..d983c30249 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -322,7 +322,9 @@ class Platform {
    * |isolate|. Tasks posted for the same isolate should be executed in order of
* scheduling. The definition of "foreground" is opaque to V8.
*/
- virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
+ V8_DEPRECATE_SOON(
+ "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
+ virtual void CallOnForegroundThread(Isolate* isolate, Task* task)) = 0;
/**
* Schedules a task to be invoked on a foreground thread wrt a specific
@@ -330,8 +332,10 @@ class Platform {
    * Tasks posted for the same isolate should be executed in order of
* scheduling. The definition of "foreground" is opaque to V8.
*/
- virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
- double delay_in_seconds) = 0;
+ V8_DEPRECATE_SOON(
+ "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
+ virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
+ double delay_in_seconds)) = 0;
/**
* Schedules a task to be invoked on a foreground thread wrt a specific
@@ -341,7 +345,10 @@ class Platform {
* starved for an arbitrarily long time if no idle time is available.
* The definition of "foreground" is opaque to V8.
*/
- virtual void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) {
+ V8_DEPRECATE_SOON(
+ "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
+ virtual void CallIdleOnForegroundThread(Isolate* isolate,
+ IdleTask* task)) {
    // This must be overridden if |IdleTasksEnabled()|.
abort();
}
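For embedders, the V8_DEPRECATE_SOON messages above already name the replacement. A hedged migration sketch, assuming the GetForegroundTaskRunner(Isolate*) accessor those messages refer to (returning a std::shared_ptr<v8::TaskRunner> whose PostTask takes ownership of the task); PostToForeground is an illustrative name, not V8 API:

#include <memory>
#include <utility>

#include "v8-platform.h"  // the header being patched here

void PostToForeground(v8::Platform* platform, v8::Isolate* isolate,
                      std::unique_ptr<v8::Task> task) {
  // Deprecated style, ownership handed over as a raw pointer:
  //   platform->CallOnForegroundThread(isolate, task.release());
  // Replacement: fetch the per-isolate runner and post through it.
  std::shared_ptr<v8::TaskRunner> runner =
      platform->GetForegroundTaskRunner(isolate);
  runner->PostTask(std::move(task));
}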
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 3689a12272..c034518def 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -341,12 +341,6 @@ class V8_EXPORT CpuProfiler {
V8_DEPRECATED("Use Isolate::SetIdle(bool) instead.",
void SetIdle(bool is_idle));
- /**
- * Generate more detailed source positions to code objects. This results in
- * better results when mapping profiling samples to script source.
- */
- static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);
-
private:
CpuProfiler();
~CpuProfiler();
@@ -451,7 +445,7 @@ class V8_EXPORT OutputStream { // NOLINT
kContinue = 0,
kAbort = 1
};
- virtual ~OutputStream() {}
+ virtual ~OutputStream() = default;
/** Notify about the end of stream. */
virtual void EndOfStream() = 0;
/** Get preferred output chunk size. Called only once. */
@@ -545,7 +539,7 @@ class V8_EXPORT ActivityControl { // NOLINT
kContinue = 0,
kAbort = 1
};
- virtual ~ActivityControl() {}
+ virtual ~ActivityControl() = default;
/**
* Notify about current progress. The activity can be stopped by
* returning kAbort as the callback result.
@@ -631,7 +625,7 @@ class V8_EXPORT AllocationProfile {
*/
virtual Node* GetRootNode() = 0;
- virtual ~AllocationProfile() {}
+ virtual ~AllocationProfile() = default;
static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
@@ -799,15 +793,15 @@ class V8_EXPORT HeapProfiler {
virtual const char* GetName(Local<Object> object) = 0;
protected:
- virtual ~ObjectNameResolver() {}
+ virtual ~ObjectNameResolver() = default;
};
/**
* Takes a heap snapshot and returns it.
*/
const HeapSnapshot* TakeHeapSnapshot(
- ActivityControl* control = NULL,
- ObjectNameResolver* global_object_name_resolver = NULL);
+ ActivityControl* control = nullptr,
+ ObjectNameResolver* global_object_name_resolver = nullptr);
/**
* Starts tracking of heap objects population statistics. After calling
@@ -834,7 +828,7 @@ class V8_EXPORT HeapProfiler {
* method.
*/
SnapshotObjectId GetHeapStats(OutputStream* stream,
- int64_t* timestamp_us = NULL);
+ int64_t* timestamp_us = nullptr);
/**
* Stops tracking of heap objects population statistics, cleans up all
@@ -991,8 +985,8 @@ class V8_EXPORT RetainedObjectInfo { // NOLINT
virtual intptr_t GetSizeInBytes() { return -1; }
protected:
- RetainedObjectInfo() {}
- virtual ~RetainedObjectInfo() {}
+ RetainedObjectInfo() = default;
+ virtual ~RetainedObjectInfo() = default;
private:
RetainedObjectInfo(const RetainedObjectInfo&);
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index a306965122..96c9acbbdc 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -94,11 +94,11 @@ class DefaultPersistentValueMapTraits : public StdMapTraits<K, V> {
static WeakCallbackDataType* WeakCallbackParameter(
MapType* map, const K& key, Local<V> value) {
- return NULL;
+ return nullptr;
}
static MapType* MapFromWeakCallbackInfo(
const WeakCallbackInfo<WeakCallbackDataType>& data) {
- return NULL;
+ return nullptr;
}
static K KeyFromWeakCallbackInfo(
const WeakCallbackInfo<WeakCallbackDataType>& data) {
@@ -302,7 +302,7 @@ class PersistentValueMapBase {
static PersistentContainerValue ClearAndLeak(Global<V>* persistent) {
V* v = persistent->val_;
- persistent->val_ = 0;
+ persistent->val_ = nullptr;
return reinterpret_cast<PersistentContainerValue>(v);
}
@@ -633,7 +633,7 @@ class PersistentValueVector {
private:
static PersistentContainerValue ClearAndLeak(Global<V>* persistent) {
V* v = persistent->val_;
- persistent->val_ = 0;
+ persistent->val_ = nullptr;
return reinterpret_cast<PersistentContainerValue>(v);
}
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 63dc5e7a7b..8624767047 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 7
-#define V8_MINOR_VERSION 0
-#define V8_BUILD_NUMBER 276
-#define V8_PATCH_LEVEL 38
+#define V8_MINOR_VERSION 1
+#define V8_BUILD_NUMBER 302
+#define V8_PATCH_LEVEL 28
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
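Given the note above that these macros feed tool scripts and the build system, the usual embedder-side use is worth a line too: plain integer comparisons in the preprocessor to gate code on the 7.0 -> 7.1 bump this patch performs. MY_V8_AT_LEAST is an illustrative name, not something v8-version.h provides:

#include "v8-version.h"

#define MY_V8_AT_LEAST(major, minor)             \
  (V8_MAJOR_VERSION > (major) ||                 \
   (V8_MAJOR_VERSION == (major) && V8_MINOR_VERSION >= (minor)))

#if MY_V8_AT_LEAST(7, 1)
// Code that depends on 7.1 behavior goes here.
#endif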
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index e1951ec270..a4bbe1b0c4 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -22,42 +22,13 @@
#include <utility>
#include <vector>
-#include "v8-version.h" // NOLINT(build/include)
-#include "v8config.h" // NOLINT(build/include)
+#include "v8-internal.h" // NOLINT(build/include)
+#include "v8-version.h" // NOLINT(build/include)
+#include "v8config.h" // NOLINT(build/include)
// We reserve the V8_* prefix for macros defined in V8 public API and
// assume there are no name conflicts with the embedder's code.
-#ifdef V8_OS_WIN
-
-// Setup for Windows DLL export/import. When building the V8 DLL the
-// BUILDING_V8_SHARED needs to be defined. When building a program which uses
-// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
-// static library or building a program which uses the V8 static library neither
-// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
-#ifdef BUILDING_V8_SHARED
-# define V8_EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-# define V8_EXPORT __declspec(dllimport)
-#else
-# define V8_EXPORT
-#endif // BUILDING_V8_SHARED
-
-#else // V8_OS_WIN
-
-// Setup for Linux shared library export.
-#if V8_HAS_ATTRIBUTE_VISIBILITY
-# ifdef BUILDING_V8_SHARED
-# define V8_EXPORT __attribute__ ((visibility("default")))
-# else
-# define V8_EXPORT
-# endif
-#else
-# define V8_EXPORT
-#endif
-
-#endif // V8_OS_WIN
-
/**
* The v8 JavaScript engine.
*/
@@ -153,108 +124,13 @@ template<typename T> class CustomArguments;
class PropertyCallbackArguments;
class FunctionCallbackArguments;
class GlobalHandles;
+class ScopedExternalStringLock;
namespace wasm {
class NativeModule;
class StreamingDecoder;
} // namespace wasm
-/**
- * Configuration of tagging scheme.
- */
-const int kApiPointerSize = sizeof(void*); // NOLINT
-const int kApiDoubleSize = sizeof(double); // NOLINT
-const int kApiIntSize = sizeof(int); // NOLINT
-const int kApiInt64Size = sizeof(int64_t); // NOLINT
-
-// Tag information for HeapObject.
-const int kHeapObjectTag = 1;
-const int kWeakHeapObjectTag = 3;
-const int kHeapObjectTagSize = 2;
-const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
-
-// Tag information for Smi.
-const int kSmiTag = 0;
-const int kSmiTagSize = 1;
-const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
-
-template <size_t tagged_ptr_size>
-struct SmiTagging;
-
-template <int kSmiShiftSize>
-V8_INLINE internal::Object* IntToSmi(int value) {
- int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- intptr_t tagged_value =
- (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
- return reinterpret_cast<internal::Object*>(tagged_value);
-}
-
-// Smi constants for systems where tagged pointer is a 32-bit value.
-template <>
-struct SmiTagging<4> {
- enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
- static int SmiShiftSize() { return kSmiShiftSize; }
- static int SmiValueSize() { return kSmiValueSize; }
- V8_INLINE static int SmiToInt(const internal::Object* value) {
- int shift_bits = kSmiTagSize + kSmiShiftSize;
- // Throw away top 32 bits and shift down (requires >> to be sign extending).
- return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
- }
- V8_INLINE static internal::Object* IntToSmi(int value) {
- return internal::IntToSmi<kSmiShiftSize>(value);
- }
- V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
- // To be representable as an tagged small integer, the two
- // most-significant bits of 'value' must be either 00 or 11 due to
- // sign-extension. To check this we add 01 to the two
- // most-significant bits, and check if the most-significant bit is 0
- //
- // CAUTION: The original code below:
- // bool result = ((value + 0x40000000) & 0x80000000) == 0;
- // may lead to incorrect results according to the C language spec, and
- // in fact doesn't work correctly with gcc4.1.1 in some cases: The
- // compiler may produce undefined results in case of signed integer
- // overflow. The computation must be done w/ unsigned ints.
- return static_cast<uintptr_t>(value) + 0x40000000U < 0x80000000U;
- }
-};
-
-// Smi constants for systems where tagged pointer is a 64-bit value.
-template <>
-struct SmiTagging<8> {
- enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
- static int SmiShiftSize() { return kSmiShiftSize; }
- static int SmiValueSize() { return kSmiValueSize; }
- V8_INLINE static int SmiToInt(const internal::Object* value) {
- int shift_bits = kSmiTagSize + kSmiShiftSize;
- // Shift down and throw away top 32 bits.
- return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
- }
- V8_INLINE static internal::Object* IntToSmi(int value) {
- return internal::IntToSmi<kSmiShiftSize>(value);
- }
- V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
- // To be representable as a long smi, the value must be a 32-bit integer.
- return (value == static_cast<int32_t>(value));
- }
-};
-
-#if V8_COMPRESS_POINTERS
-static_assert(
- kApiPointerSize == kApiInt64Size,
- "Pointer compression can be enabled only for 64-bit architectures");
-typedef SmiTagging<4> PlatformSmiTagging;
-#else
-typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
-#endif
-
-const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
-const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
-const int kSmiMinValue = (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
-const int kSmiMaxValue = -(kSmiMinValue + 1);
-constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
-constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
-
} // namespace internal
namespace debug {
@@ -302,7 +178,7 @@ class ConsoleCallArguments;
template <class T>
class Local {
public:
- V8_INLINE Local() : val_(0) {}
+ V8_INLINE Local() : val_(nullptr) {}
template <class S>
V8_INLINE Local(Local<S> that)
: val_(reinterpret_cast<T*>(*that)) {
@@ -317,12 +193,12 @@ class Local {
/**
* Returns true if the handle is empty.
*/
- V8_INLINE bool IsEmpty() const { return val_ == 0; }
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
/**
* Sets the handle to be empty. IsEmpty() will then return true.
*/
- V8_INLINE void Clear() { val_ = 0; }
+ V8_INLINE void Clear() { val_ = nullptr; }
V8_INLINE T* operator->() const { return val_; }
@@ -338,8 +214,8 @@ class Local {
V8_INLINE bool operator==(const Local<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
- if (a == 0) return b == 0;
- if (b == 0) return false;
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
return *a == *b;
}
@@ -347,8 +223,8 @@ class Local {
const PersistentBase<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
- if (a == 0) return b == 0;
- if (b == 0) return false;
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
return *a == *b;
}
@@ -592,7 +468,7 @@ template <class T> class PersistentBase {
template <class S>
V8_INLINE void Reset(Isolate* isolate, const PersistentBase<S>& other);
- V8_INLINE bool IsEmpty() const { return val_ == NULL; }
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
V8_INLINE void Empty() { val_ = 0; }
V8_INLINE Local<T> Get(Isolate* isolate) const {
@@ -603,8 +479,8 @@ template <class T> class PersistentBase {
V8_INLINE bool operator==(const PersistentBase<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
- if (a == NULL) return b == NULL;
- if (b == NULL) return false;
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
return *a == *b;
}
@@ -612,8 +488,8 @@ template <class T> class PersistentBase {
V8_INLINE bool operator==(const Local<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
- if (a == NULL) return b == NULL;
- if (b == NULL) return false;
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
return *a == *b;
}
@@ -786,7 +662,7 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
/**
* A Persistent with no storage cell.
*/
- V8_INLINE Persistent() : PersistentBase<T>(0) { }
+ V8_INLINE Persistent() : PersistentBase<T>(nullptr) {}
/**
* Construct a Persistent from a Local.
* When the Local is non-empty, a new storage cell is created
@@ -813,7 +689,7 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
* traits class is called, allowing the setting of flags based on the
* copied Persistent.
*/
- V8_INLINE Persistent(const Persistent& that) : PersistentBase<T>(0) {
+ V8_INLINE Persistent(const Persistent& that) : PersistentBase<T>(nullptr) {
Copy(that);
}
template <class S, class M2>
@@ -979,7 +855,7 @@ class V8_EXPORT HandleScope {
void operator=(const HandleScope&) = delete;
protected:
- V8_INLINE HandleScope() {}
+ V8_INLINE HandleScope() = default;
void Initialize(Isolate* isolate);
@@ -1019,7 +895,7 @@ class V8_EXPORT HandleScope {
class V8_EXPORT EscapableHandleScope : public HandleScope {
public:
explicit EscapableHandleScope(Isolate* isolate);
- V8_INLINE ~EscapableHandleScope() {}
+ V8_INLINE ~EscapableHandleScope() = default;
/**
* Pushes the value into the previous scope and returns a handle to it.
@@ -1123,10 +999,6 @@ class V8_EXPORT PrimitiveArray {
public:
static Local<PrimitiveArray> New(Isolate* isolate, int length);
int Length() const;
- V8_DEPRECATED("Use Isolate* version",
- void Set(int index, Local<Primitive> item));
- V8_DEPRECATED("Use Isolate* version",
- Local<Primitive> Get(int index));
void Set(Isolate* isolate, int index, Local<Primitive> item);
Local<Primitive> Get(Isolate* isolate, int index);
};
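
With the deprecated overloads removed, both PrimitiveArray accessors now take
the Isolate* explicitly. A minimal migration sketch, assuming 'array', 'item'
and 'isolate' already exist:

    // Before: array->Set(0, item); / array->Get(0);
    array->Set(isolate, 0, item);
    v8::Local<v8::Primitive> first = array->Get(isolate, 0);
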
@@ -1393,7 +1265,7 @@ class V8_EXPORT ScriptCompiler {
};
CachedData()
- : data(NULL),
+ : data(nullptr),
length(0),
rejected(false),
buffer_policy(BufferNotOwned) {}
@@ -1424,9 +1296,9 @@ class V8_EXPORT ScriptCompiler {
public:
// Source takes ownership of CachedData.
V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
- CachedData* cached_data = NULL);
+ CachedData* cached_data = nullptr);
V8_INLINE Source(Local<String> source_string,
- CachedData* cached_data = NULL);
+ CachedData* cached_data = nullptr);
V8_INLINE ~Source();
// Ownership of the CachedData or its buffers is *not* transferred to the
@@ -1465,7 +1337,7 @@ class V8_EXPORT ScriptCompiler {
*/
class V8_EXPORT ExternalSourceStream {
public:
- virtual ~ExternalSourceStream() {}
+ virtual ~ExternalSourceStream() = default;
/**
* V8 calls this to request the next chunk of data from the embedder. This
@@ -1508,12 +1380,11 @@ class V8_EXPORT ScriptCompiler {
virtual void ResetToBookmark();
};
-
/**
* Source code which can be streamed into V8 in pieces. It will be parsed
- * while streaming. It can be compiled after the streaming is complete.
- * StreamedSource must be kept alive while the streaming task is ran (see
- * ScriptStreamingTask below).
+ * while streaming and compiled after parsing has completed. StreamedSource
+ * must be kept alive while the streaming task is run (see ScriptStreamingTask
+ * below).
*/
class V8_EXPORT StreamedSource {
public:
@@ -1522,29 +1393,35 @@ class V8_EXPORT ScriptCompiler {
StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
~StreamedSource();
- // Ownership of the CachedData or its buffers is *not* transferred to the
- // caller. The CachedData object is alive as long as the StreamedSource
- // object is alive.
- const CachedData* GetCachedData() const;
+ V8_DEPRECATED("No longer used", const CachedData* GetCachedData() const) {
+ return nullptr;
+ }
- internal::ScriptStreamingData* impl() const { return impl_; }
+ internal::ScriptStreamingData* impl() const { return impl_.get(); }
// Prevent copying.
StreamedSource(const StreamedSource&) = delete;
StreamedSource& operator=(const StreamedSource&) = delete;
private:
- internal::ScriptStreamingData* impl_;
+ std::unique_ptr<internal::ScriptStreamingData> impl_;
};
/**
* A streaming task which the embedder must run on a background thread to
* stream scripts into V8. Returned by ScriptCompiler::StartStreamingScript.
*/
- class ScriptStreamingTask {
+ class V8_EXPORT ScriptStreamingTask final {
public:
- virtual ~ScriptStreamingTask() {}
- virtual void Run() = 0;
+ void Run();
+
+ private:
+ friend class ScriptCompiler;
+
+ explicit ScriptStreamingTask(internal::ScriptStreamingData* data)
+ : data_(data) {}
+
+ internal::ScriptStreamingData* data_;
};
enum CompileOptions {
@@ -1833,8 +1710,6 @@ class V8_EXPORT StackTrace {
/**
* Returns a StackFrame at a particular index.
*/
- V8_DEPRECATED("Use Isolate version",
- Local<StackFrame> GetFrame(uint32_t index) const);
Local<StackFrame> GetFrame(Isolate* isolate, uint32_t index) const;
/**
@@ -1951,6 +1826,11 @@ struct SampleInfo {
// executing an external callback.
};
+struct MemoryRange {
+ const void* start;
+ size_t length_in_bytes;
+};
+
/**
* A JSON Parser and Stringifier.
*/
@@ -1993,7 +1873,7 @@ class V8_EXPORT ValueSerializer {
public:
class V8_EXPORT Delegate {
public:
- virtual ~Delegate() {}
+ virtual ~Delegate() = default;
/**
* Handles the case where a DataCloneError would be thrown in the structured
@@ -2130,7 +2010,7 @@ class V8_EXPORT ValueDeserializer {
public:
class V8_EXPORT Delegate {
public:
- virtual ~Delegate() {}
+ virtual ~Delegate() = default;
/**
* The embedder overrides this method to read some kind of host object, if
@@ -2514,8 +2394,9 @@ class V8_EXPORT Value : public Data {
V8_WARN_UNUSED_RESULT MaybeLocal<BigInt> ToBigInt(
Local<Context> context) const;
- V8_WARN_UNUSED_RESULT MaybeLocal<Boolean> ToBoolean(
- Local<Context> context) const;
+ V8_DEPRECATE_SOON("ToBoolean can never throw. Use Local version.",
+ V8_WARN_UNUSED_RESULT MaybeLocal<Boolean> ToBoolean(
+ Local<Context> context) const);
V8_WARN_UNUSED_RESULT MaybeLocal<Number> ToNumber(
Local<Context> context) const;
V8_WARN_UNUSED_RESULT MaybeLocal<String> ToString(
@@ -2530,25 +2411,17 @@ class V8_EXPORT Value : public Data {
Local<Context> context) const;
V8_WARN_UNUSED_RESULT MaybeLocal<Int32> ToInt32(Local<Context> context) const;
- V8_DEPRECATED("Use maybe version",
- Local<Boolean> ToBoolean(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<Number> ToNumber(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<String> ToString(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<Object> ToObject(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<Integer> ToInteger(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<Int32> ToInt32(Isolate* isolate) const);
-
- inline V8_DEPRECATED("Use maybe version",
- Local<Boolean> ToBoolean() const);
- inline V8_DEPRECATED("Use maybe version", Local<String> ToString() const);
- inline V8_DEPRECATED("Use maybe version", Local<Object> ToObject() const);
- inline V8_DEPRECATED("Use maybe version",
- Local<Integer> ToInteger() const);
+ Local<Boolean> ToBoolean(Isolate* isolate) const;
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Number> ToNumber(Isolate* isolate) const);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<String> ToString(Isolate* isolate) const);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Object> ToObject(Isolate* isolate) const);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Integer> ToInteger(Isolate* isolate) const);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Int32> ToInt32(Isolate* isolate) const);
/**
* Attempts to convert a string to an array index.
@@ -2557,7 +2430,11 @@ class V8_EXPORT Value : public Data {
V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToArrayIndex(
Local<Context> context) const;
- V8_WARN_UNUSED_RESULT Maybe<bool> BooleanValue(Local<Context> context) const;
+ bool BooleanValue(Isolate* isolate) const;
+
+ V8_DEPRECATE_SOON("BooleanValue can never throw. Use Isolate version.",
+ V8_WARN_UNUSED_RESULT Maybe<bool> BooleanValue(
+ Local<Context> context) const);
V8_WARN_UNUSED_RESULT Maybe<double> NumberValue(Local<Context> context) const;
V8_WARN_UNUSED_RESULT Maybe<int64_t> IntegerValue(
Local<Context> context) const;
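
Boolean conversion cannot throw, so the Maybe-returning forms above are on
their way out. Migration is mechanical; 'value', 'context' and 'isolate' are
assumed to exist in this sketch:

    // Soon deprecated: Maybe plumbing for an infallible conversion.
    bool before = value->BooleanValue(context).FromJust();
    // Replacement: takes the Isolate* and returns a plain bool.
    bool after = value->BooleanValue(isolate);
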
@@ -2565,14 +2442,7 @@ class V8_EXPORT Value : public Data {
Local<Context> context) const;
V8_WARN_UNUSED_RESULT Maybe<int32_t> Int32Value(Local<Context> context) const;
- V8_DEPRECATED("Use maybe version", bool BooleanValue() const);
- V8_DEPRECATED("Use maybe version", double NumberValue() const);
- V8_DEPRECATED("Use maybe version", int64_t IntegerValue() const);
- V8_DEPRECATED("Use maybe version", uint32_t Uint32Value() const);
- V8_DEPRECATED("Use maybe version", int32_t Int32Value() const);
-
/** JS == */
- V8_DEPRECATED("Use maybe version", bool Equals(Local<Value> that) const);
V8_WARN_UNUSED_RESULT Maybe<bool> Equals(Local<Context> context,
Local<Value> that) const;
bool StrictEquals(Local<Value> that) const;
@@ -2679,8 +2549,6 @@ class V8_EXPORT String : public Name {
* Returns the number of bytes in the UTF-8 encoded
* representation of this string.
*/
- V8_DEPRECATED("Use Isolate version instead", int Utf8Length() const);
-
int Utf8Length(Isolate* isolate) const;
/**
@@ -2737,23 +2605,12 @@ class V8_EXPORT String : public Name {
// 16-bit character codes.
int Write(Isolate* isolate, uint16_t* buffer, int start = 0, int length = -1,
int options = NO_OPTIONS) const;
- V8_DEPRECATED("Use Isolate* version",
- int Write(uint16_t* buffer, int start = 0, int length = -1,
- int options = NO_OPTIONS) const);
// One byte characters.
int WriteOneByte(Isolate* isolate, uint8_t* buffer, int start = 0,
int length = -1, int options = NO_OPTIONS) const;
- V8_DEPRECATED("Use Isolate* version",
- int WriteOneByte(uint8_t* buffer, int start = 0,
- int length = -1, int options = NO_OPTIONS)
- const);
// UTF-8 encoded characters.
int WriteUtf8(Isolate* isolate, char* buffer, int length = -1,
- int* nchars_ref = NULL, int options = NO_OPTIONS) const;
- V8_DEPRECATED("Use Isolate* version",
- int WriteUtf8(char* buffer, int length = -1,
- int* nchars_ref = NULL,
- int options = NO_OPTIONS) const);
+ int* nchars_ref = nullptr, int options = NO_OPTIONS) const;
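
A short migration sketch for the surviving Isolate*-taking overloads, using
std::vector for the buffer; 'str' is assumed to be a Local<v8::String> and
'isolate' the current isolate:

    // Size the buffer from Utf8Length(), then write the bytes (the -1
    // default length writes the whole string, NUL terminator included).
    std::vector<char> buf(str->Utf8Length(isolate) + 1);
    str->WriteUtf8(isolate, buf.data());
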
/**
* A zero length string.
@@ -2772,12 +2629,31 @@ class V8_EXPORT String : public Name {
class V8_EXPORT ExternalStringResourceBase { // NOLINT
public:
- virtual ~ExternalStringResourceBase() {}
+ virtual ~ExternalStringResourceBase() = default;
- virtual bool IsCompressible() const { return false; }
+ V8_DEPRECATE_SOON("Use IsCacheable().",
+ virtual bool IsCompressible() const) {
+ return false;
+ }
+
+ /**
+ * If a string is cacheable, the value returned by
+   * ExternalStringResource::data() may be cached; otherwise it is not
+ * expected to be stable beyond the current top-level task.
+ */
+ virtual bool IsCacheable() const {
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+ return !IsCompressible();
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+ }
protected:
- ExternalStringResourceBase() {}
+ ExternalStringResourceBase() = default;
/**
* Internally V8 will call this Dispose method when the external string
@@ -2787,6 +2663,24 @@ class V8_EXPORT String : public Name {
*/
virtual void Dispose() { delete this; }
+ /**
+ * For a non-cacheable string, the value returned by
+ * |ExternalStringResource::data()| has to be stable between |Lock()| and
+   * |Unlock()|, that is, the string must behave as if |IsCacheable()| returned
+ * true.
+ *
+ * These two functions must be thread-safe, and can be called from anywhere.
+ * They also must handle lock depth, in the sense that each can be called
+ * several times, from different threads, and unlocking should only happen
+ * when the balance of Lock() and Unlock() calls is 0.
+ */
+ virtual void Lock() const {}
+
+ /**
+ * Unlocks the string.
+ */
+ virtual void Unlock() const {}
+
// Disallow copying and assigning.
ExternalStringResourceBase(const ExternalStringResourceBase&) = delete;
void operator=(const ExternalStringResourceBase&) = delete;
@@ -2794,6 +2688,7 @@ class V8_EXPORT String : public Name {
private:
friend class internal::Heap;
friend class v8::String;
+ friend class internal::ScopedExternalStringLock;
};
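
One way to meet the depth-counting contract for Lock()/Unlock() is an atomic
counter around buffer-pinning hooks. A sketch only: the Pin()/Unpin() names
and the pinning mechanism are hypothetical, not part of the API:

    #include <atomic>
    #include <cstdint>

    class MyTwoByteResource : public v8::String::ExternalStringResource {
     public:
      const uint16_t* data() const override { return data_; }
      size_t length() const override { return length_; }
      bool IsCacheable() const override { return false; }

      void Lock() const override {
        // The first locker pins the buffer; nested calls only add depth.
        if (lock_depth_.fetch_add(1) == 0) Pin();
      }
      void Unlock() const override {
        // Only the Unlock() that rebalances the count to 0 unpins.
        if (lock_depth_.fetch_sub(1) == 1) Unpin();
      }

     private:
      void Pin() const { /* keep data_ stable and immovable */ }
      void Unpin() const { /* the buffer may move again */ }

      mutable std::atomic<int> lock_depth_{0};
      const uint16_t* data_ = nullptr;
      size_t length_ = 0;
    };
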
/**
@@ -2809,7 +2704,7 @@ class V8_EXPORT String : public Name {
* Override the destructor to manage the life cycle of the underlying
* buffer.
*/
- virtual ~ExternalStringResource() {}
+ ~ExternalStringResource() override = default;
/**
* The string data from the underlying buffer.
@@ -2822,7 +2717,7 @@ class V8_EXPORT String : public Name {
virtual size_t length() const = 0;
protected:
- ExternalStringResource() {}
+ ExternalStringResource() = default;
};
/**
@@ -2842,13 +2737,13 @@ class V8_EXPORT String : public Name {
* Override the destructor to manage the life cycle of the underlying
* buffer.
*/
- virtual ~ExternalOneByteStringResource() {}
+ ~ExternalOneByteStringResource() override = default;
/** The string data from the underlying buffer.*/
virtual const char* data() const = 0;
/** The number of Latin-1 characters in the string.*/
virtual size_t length() const = 0;
protected:
- ExternalOneByteStringResource() {}
+ ExternalOneByteStringResource() = default;
};
/**
@@ -2917,9 +2812,6 @@ class V8_EXPORT String : public Name {
*/
static Local<String> Concat(Isolate* isolate, Local<String> left,
Local<String> right);
- static V8_DEPRECATED("Use Isolate* version",
- Local<String> Concat(Local<String> left,
- Local<String> right));
/**
* Creates a new external string using the data defined in the given
@@ -2988,8 +2880,6 @@ class V8_EXPORT String : public Name {
*/
class V8_EXPORT Utf8Value {
public:
- V8_DEPRECATED("Use Isolate version",
- explicit Utf8Value(Local<v8::Value> obj));
Utf8Value(Isolate* isolate, Local<v8::Value> obj);
~Utf8Value();
char* operator*() { return str_; }
@@ -3013,7 +2903,6 @@ class V8_EXPORT String : public Name {
*/
class V8_EXPORT Value {
public:
- V8_DEPRECATED("Use Isolate version", explicit Value(Local<v8::Value> obj));
Value(Isolate* isolate, Local<v8::Value> obj);
~Value();
uint16_t* operator*() { return str_; }
@@ -3075,6 +2964,7 @@ class V8_EXPORT Symbol : public Name {
static Local<Symbol> ForApi(Isolate *isolate, Local<String> name);
// Well-known symbols
+ static Local<Symbol> GetAsyncIterator(Isolate* isolate);
static Local<Symbol> GetHasInstance(Isolate* isolate);
static Local<Symbol> GetIsConcatSpreadable(Isolate* isolate);
static Local<Symbol> GetIterator(Isolate* isolate);
@@ -3312,10 +3202,17 @@ enum PropertyFilter {
* Options for marking whether callbacks may trigger JS-observable side effects.
* Side-effect-free callbacks are whitelisted during debug evaluation with
* throwOnSideEffect. It applies when calling a Function, FunctionTemplate,
- * or an Accessor's getter callback. For Interceptors, please see
+ * or an Accessor callback. For Interceptors, please see
* PropertyHandlerFlags's kHasNoSideEffect.
+ * Callbacks that only cause side effects to the receiver are whitelisted if
+ * invoked on receiver objects that are created within the same debug-evaluate
+ * call, as these objects are temporary and the side effect does not escape.
*/
-enum class SideEffectType { kHasSideEffect, kHasNoSideEffect };
+enum class SideEffectType {
+ kHasSideEffect,
+ kHasNoSideEffect,
+ kHasSideEffectToReceiver
+};
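
For instance, a native data property whose getter is pure but whose setter
mutates only the receiver could be registered as below. The template object
'templ', the name handle 'name_str' and the SizeGetter/SizeSetter callbacks
are hypothetical; the parameter order follows the SetNativeDataProperty
signature further down in this header:

    templ->SetNativeDataProperty(
        name_str, SizeGetter, SizeSetter, v8::Local<v8::Value>(), v8::None,
        v8::Local<v8::AccessorSignature>(), v8::DEFAULT,
        v8::SideEffectType::kHasNoSideEffect,           // getter is pure
        v8::SideEffectType::kHasSideEffectToReceiver);  // setter only
                                                        // touches 'this'
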
/**
* Keys/Properties filter enums:
@@ -3439,7 +3336,7 @@ class V8_EXPORT Object : public Value {
V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
Local<Value> key);
- V8_DEPRECATED("Use maybe version", bool Delete(Local<Value> key));
+ V8_DEPRECATE_SOON("Use maybe version", bool Delete(Local<Value> key));
V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
Local<Value> key);
@@ -3453,10 +3350,12 @@ class V8_EXPORT Object : public Value {
*/
V8_WARN_UNUSED_RESULT Maybe<bool> SetAccessor(
Local<Context> context, Local<Name> name,
- AccessorNameGetterCallback getter, AccessorNameSetterCallback setter = 0,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = nullptr,
MaybeLocal<Value> data = MaybeLocal<Value>(),
AccessControl settings = DEFAULT, PropertyAttribute attribute = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect);
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
void SetAccessorProperty(Local<Name> name, Local<Function> getter,
Local<Function> setter = Local<Function>(),
@@ -3472,7 +3371,8 @@ class V8_EXPORT Object : public Value {
AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter = nullptr,
Local<Value> data = Local<Value>(), PropertyAttribute attributes = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect);
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
/**
* Attempts to create a property with the given name which behaves like a data
@@ -3486,7 +3386,8 @@ class V8_EXPORT Object : public Value {
Local<Context> context, Local<Name> name,
AccessorNameGetterCallback getter, Local<Value> data = Local<Value>(),
PropertyAttribute attributes = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect);
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
/**
* Functionality for private properties.
@@ -3779,12 +3680,6 @@ class V8_EXPORT Array : public Object {
*/
static Local<Array> New(Isolate* isolate, int length = 0);
- /**
- * Creates a JavaScript array out of a Local<Value> array in C++
- * with a known length.
- */
- static Local<Array> New(Isolate* isolate, Local<Value>* elements,
- size_t length);
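
With the bulk-construction overload gone, embedders create the array and fill
it element by element. A migration sketch; 'elements' (a Local<Value> array
of length 'n'), 'isolate' and 'context' are assumed to exist:

    v8::Local<v8::Array> array = v8::Array::New(isolate, static_cast<int>(n));
    for (size_t i = 0; i < n; ++i) {
      // Object::Set returns a Maybe<bool>; a real embedder should check it.
      array->Set(context, static_cast<uint32_t>(i), elements[i]).FromJust();
    }
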
V8_INLINE static Array* Cast(Value* obj);
private:
Array();
@@ -4518,7 +4413,7 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final {
void Abort(MaybeLocal<Value> exception);
Local<Promise> GetPromise();
- ~WasmModuleObjectBuilderStreaming();
+ ~WasmModuleObjectBuilderStreaming() = default;
private:
WasmModuleObjectBuilderStreaming(const WasmModuleObjectBuilderStreaming&) =
@@ -4577,7 +4472,7 @@ class V8_EXPORT ArrayBuffer : public Object {
*/
class V8_EXPORT Allocator { // NOLINT
public:
- virtual ~Allocator() {}
+ virtual ~Allocator() = default;
/**
* Allocate |length| bytes. Return NULL if allocation is not successful.
@@ -5238,8 +5133,6 @@ class V8_EXPORT BooleanObject : public Object {
class V8_EXPORT StringObject : public Object {
public:
static Local<Value> New(Isolate* isolate, Local<String> value);
- V8_DEPRECATED("Use Isolate* version",
- static Local<Value> New(Local<String> value));
Local<String> ValueOf() const;
@@ -5400,20 +5293,22 @@ class V8_EXPORT Template : public Data {
*/
void SetNativeDataProperty(
Local<String> name, AccessorGetterCallback getter,
- AccessorSetterCallback setter = 0,
+ AccessorSetterCallback setter = nullptr,
// TODO(dcarney): gcc can't handle Local below
Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
Local<AccessorSignature> signature = Local<AccessorSignature>(),
AccessControl settings = DEFAULT,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect);
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
void SetNativeDataProperty(
Local<Name> name, AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = 0,
+ AccessorNameSetterCallback setter = nullptr,
// TODO(dcarney): gcc can't handle Local below
Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
Local<AccessorSignature> signature = Local<AccessorSignature>(),
AccessControl settings = DEFAULT,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect);
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
/**
* Like SetNativeDataProperty, but V8 will replace the native data property
@@ -5422,7 +5317,8 @@ class V8_EXPORT Template : public Data {
void SetLazyDataProperty(
Local<Name> name, AccessorNameGetterCallback getter,
Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect);
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
/**
* During template instantiation, sets the value with the intrinsic property
@@ -5783,7 +5679,7 @@ class V8_EXPORT FunctionTemplate : public Template {
public:
/** Creates a function template.*/
static Local<FunctionTemplate> New(
- Isolate* isolate, FunctionCallback callback = 0,
+ Isolate* isolate, FunctionCallback callback = nullptr,
Local<Value> data = Local<Value>(),
Local<Signature> signature = Local<Signature>(), int length = 0,
ConstructorBehavior behavior = ConstructorBehavior::kAllow,
@@ -5964,11 +5860,11 @@ struct NamedPropertyHandlerConfiguration {
NamedPropertyHandlerConfiguration(
/** Note: getter is required */
- GenericNamedPropertyGetterCallback getter = 0,
- GenericNamedPropertySetterCallback setter = 0,
- GenericNamedPropertyQueryCallback query = 0,
- GenericNamedPropertyDeleterCallback deleter = 0,
- GenericNamedPropertyEnumeratorCallback enumerator = 0,
+ GenericNamedPropertyGetterCallback getter = nullptr,
+ GenericNamedPropertySetterCallback setter = nullptr,
+ GenericNamedPropertyQueryCallback query = nullptr,
+ GenericNamedPropertyDeleterCallback deleter = nullptr,
+ GenericNamedPropertyEnumeratorCallback enumerator = nullptr,
Local<Value> data = Local<Value>(),
PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
: getter(getter),
@@ -5976,8 +5872,8 @@ struct NamedPropertyHandlerConfiguration {
query(query),
deleter(deleter),
enumerator(enumerator),
- definer(0),
- descriptor(0),
+ definer(nullptr),
+ descriptor(nullptr),
data(data),
flags(flags) {}
@@ -5992,7 +5888,7 @@ struct NamedPropertyHandlerConfiguration {
PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
: getter(getter),
setter(setter),
- query(0),
+ query(nullptr),
deleter(deleter),
enumerator(enumerator),
definer(definer),
@@ -6034,11 +5930,11 @@ struct IndexedPropertyHandlerConfiguration {
IndexedPropertyHandlerConfiguration(
/** Note: getter is required */
- IndexedPropertyGetterCallback getter = 0,
- IndexedPropertySetterCallback setter = 0,
- IndexedPropertyQueryCallback query = 0,
- IndexedPropertyDeleterCallback deleter = 0,
- IndexedPropertyEnumeratorCallback enumerator = 0,
+ IndexedPropertyGetterCallback getter = nullptr,
+ IndexedPropertySetterCallback setter = nullptr,
+ IndexedPropertyQueryCallback query = nullptr,
+ IndexedPropertyDeleterCallback deleter = nullptr,
+ IndexedPropertyEnumeratorCallback enumerator = nullptr,
Local<Value> data = Local<Value>(),
PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
: getter(getter),
@@ -6046,8 +5942,8 @@ struct IndexedPropertyHandlerConfiguration {
query(query),
deleter(deleter),
enumerator(enumerator),
- definer(0),
- descriptor(0),
+ definer(nullptr),
+ descriptor(nullptr),
data(data),
flags(flags) {}
@@ -6062,7 +5958,7 @@ struct IndexedPropertyHandlerConfiguration {
PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
: getter(getter),
setter(setter),
- query(0),
+ query(nullptr),
deleter(deleter),
enumerator(enumerator),
definer(definer),
@@ -6134,16 +6030,20 @@ class V8_EXPORT ObjectTemplate : public Template {
*/
void SetAccessor(
Local<String> name, AccessorGetterCallback getter,
- AccessorSetterCallback setter = 0, Local<Value> data = Local<Value>(),
- AccessControl settings = DEFAULT, PropertyAttribute attribute = None,
+ AccessorSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None,
Local<AccessorSignature> signature = Local<AccessorSignature>(),
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect);
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
void SetAccessor(
Local<Name> name, AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = 0, Local<Value> data = Local<Value>(),
- AccessControl settings = DEFAULT, PropertyAttribute attribute = None,
+ AccessorNameSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None,
Local<AccessorSignature> signature = Local<AccessorSignature>(),
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect);
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
/**
* Sets a named property handler on the object template.
@@ -6177,10 +6077,10 @@ class V8_EXPORT ObjectTemplate : public Template {
// TODO(dcarney): deprecate
void SetIndexedPropertyHandler(
IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter = 0,
- IndexedPropertyQueryCallback query = 0,
- IndexedPropertyDeleterCallback deleter = 0,
- IndexedPropertyEnumeratorCallback enumerator = 0,
+ IndexedPropertySetterCallback setter = nullptr,
+ IndexedPropertyQueryCallback query = nullptr,
+ IndexedPropertyDeleterCallback deleter = nullptr,
+ IndexedPropertyEnumeratorCallback enumerator = nullptr,
Local<Value> data = Local<Value>()) {
SetHandler(IndexedPropertyHandlerConfiguration(getter, setter, query,
deleter, enumerator, data));
@@ -6320,11 +6220,11 @@ V8_DEPRECATE_SOON("Implementation detail", class)
V8_EXPORT ExternalOneByteStringResourceImpl
: public String::ExternalOneByteStringResource {
public:
- ExternalOneByteStringResourceImpl() : data_(0), length_(0) {}
+ ExternalOneByteStringResourceImpl() : data_(nullptr), length_(0) {}
ExternalOneByteStringResourceImpl(const char* data, size_t length)
: data_(data), length_(length) {}
- const char* data() const { return data_; }
- size_t length() const { return length_; }
+ const char* data() const override { return data_; }
+ size_t length() const override { return length_; }
private:
const char* data_;
@@ -6338,11 +6238,8 @@ class V8_EXPORT Extension { // NOLINT
public:
// Note that the strings passed into this constructor must live as long
// as the Extension itself.
- Extension(const char* name,
- const char* source = 0,
- int dep_count = 0,
- const char** deps = 0,
- int source_length = -1);
+ Extension(const char* name, const char* source = nullptr, int dep_count = 0,
+ const char** deps = nullptr, int source_length = -1);
virtual ~Extension() { delete source_; }
virtual Local<FunctionTemplate> GetNativeFunctionTemplate(
Isolate* isolate, Local<String> name) {
@@ -6567,6 +6464,15 @@ typedef void (*HostInitializeImportMetaObjectCallback)(Local<Context> context,
Local<Object> meta);
/**
+ * PrepareStackTraceCallback is called when the stack property of an error is
+ * first accessed. The return value will be used as the stack value. If this
+ * callback is registered, the |Error.prepareStackTrace| API will be disabled.
+ */
+typedef MaybeLocal<Value> (*PrepareStackTraceCallback)(Local<Context> context,
+ Local<Value> error,
+ Local<StackTrace> trace);
+
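
A sketch of wiring the hook up; the replacement stack text is illustrative
only and 'isolate' is assumed to be live:

    v8::MaybeLocal<v8::Value> PrepareStackTrace(
        v8::Local<v8::Context> context, v8::Local<v8::Value> error,
        v8::Local<v8::StackTrace> trace) {
      // Whatever is returned becomes the error's .stack value.
      v8::Local<v8::String> stack;
      if (!v8::String::NewFromUtf8(context->GetIsolate(), "<stack elided>",
                                   v8::NewStringType::kNormal)
               .ToLocal(&stack)) {
        return v8::MaybeLocal<v8::Value>();  // propagate the failure
      }
      return stack;
    }

    // Registered once per isolate:
    isolate->SetPrepareStackTraceCallback(PrepareStackTrace);
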
+/**
* PromiseHook with type kInit is called when a new promise is
* created. When a new promise is created as part of the chain in the
* case of Promise.then or in the intermediate promises created by
@@ -6994,7 +6900,7 @@ typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
*/
class V8_EXPORT ExternalResourceVisitor { // NOLINT
public:
- virtual ~ExternalResourceVisitor() {}
+ virtual ~ExternalResourceVisitor() = default;
virtual void VisitExternalString(Local<String> string) {}
};
@@ -7004,7 +6910,7 @@ class V8_EXPORT ExternalResourceVisitor { // NOLINT
*/
class V8_EXPORT PersistentHandleVisitor { // NOLINT
public:
- virtual ~PersistentHandleVisitor() {}
+ virtual ~PersistentHandleVisitor() = default;
virtual void VisitPersistentHandle(Persistent<Value>* value,
uint16_t class_id) {}
};
@@ -7074,8 +6980,8 @@ class V8_EXPORT EmbedderHeapTracer {
* embedder.
*/
V8_DEPRECATED("Use void AdvanceTracing(deadline_in_ms)",
- virtual bool AdvanceTracing(
- double deadline_in_ms, AdvanceTracingActions actions)) {
+ virtual bool AdvanceTracing(double deadline_in_ms,
+ AdvanceTracingActions actions)) {
return false;
}
@@ -7124,7 +7030,8 @@ class V8_EXPORT EmbedderHeapTracer {
* The embedder is expected to throw away all intermediate data and reset to
* the initial state.
*/
- virtual void AbortTracing() = 0;
+ V8_DEPRECATE_SOON("Obsolete as V8 will not abort tracing anymore.",
+ virtual void AbortTracing()) {}
/*
* Called by the embedder to request immediate finalization of the currently
@@ -7153,8 +7060,7 @@ class V8_EXPORT EmbedderHeapTracer {
/**
* Returns the number of wrappers that are still to be traced by the embedder.
*/
- V8_DEPRECATE_SOON("Use IsTracingDone",
- virtual size_t NumberOfWrappersToTrace()) {
+ V8_DEPRECATED("Use IsTracingDone", virtual size_t NumberOfWrappersToTrace()) {
return 0;
}
@@ -7458,6 +7364,23 @@ class V8_EXPORT Isolate {
kFunctionTokenOffsetTooLongForToString = 49,
kWasmSharedMemory = 50,
kWasmThreadOpcodes = 51,
+ kAtomicsNotify = 52,
+ kAtomicsWake = 53,
+ kCollator = 54,
+ kNumberFormat = 55,
+ kDateTimeFormat = 56,
+ kPluralRules = 57,
+ kRelativeTimeFormat = 58,
+ kLocale = 59,
+ kListFormat = 60,
+ kSegmenter = 61,
+ kStringLocaleCompare = 62,
+ kStringToLocaleUpperCase = 63,
+ kStringToLocaleLowerCase = 64,
+ kNumberToLocaleString = 65,
+ kDateToLocaleString = 66,
+ kDateToLocaleDateString = 67,
+ kDateToLocaleTimeString = 68,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
@@ -7546,6 +7469,12 @@ class V8_EXPORT Isolate {
HostInitializeImportMetaObjectCallback callback);
/**
+ * This specifies the callback called when the stack property of Error
+ * is accessed.
+ */
+ void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
+
+ /**
* Optional notification that the system is running low on memory.
* V8 uses these notifications to guide heuristics.
* It is allowed to call this function from another thread while
@@ -7782,6 +7711,11 @@ class V8_EXPORT Isolate {
*/
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+ /*
+ * Gets the currently active heap tracer for the isolate.
+ */
+ EmbedderHeapTracer* GetEmbedderHeapTracer();
+
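
A two-line sketch pairing the new getter with the existing setter; 'tracer'
is assumed to be a long-lived EmbedderHeapTracer subclass instance:

    isolate->SetEmbedderHeapTracer(&tracer);
    assert(isolate->GetEmbedderHeapTracer() == &tracer);  // needs <cassert>
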
/**
* Use for |AtomicsWaitCallback| to indicate the type of event it receives.
*/
@@ -8180,7 +8114,9 @@ class V8_EXPORT Isolate {
void SetStackLimit(uintptr_t stack_limit);
/**
- * Returns a memory range that can potentially contain jitted code.
+ * Returns a memory range that can potentially contain jitted code. Code for
+   * V8's 'builtins' will not be in this range if embedded builtins are
+   * enabled. Instead, see GetEmbeddedCodeRange.
*
* On Win64, embedders are advised to install function table callbacks for
* these ranges, as default SEH won't be able to unwind through jitted code.
@@ -8194,6 +8130,15 @@ class V8_EXPORT Isolate {
*/
void GetCodeRange(void** start, size_t* length_in_bytes);
+ /**
+ * Returns a memory range containing the code for V8's embedded functions
+ * (e.g. builtins) which are shared across isolates.
+ *
+ * If embedded builtins are disabled, then the memory range will be a null
+ * pointer with 0 length.
+ */
+ MemoryRange GetEmbeddedCodeRange();
+
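
A sketch of querying both ranges, e.g. when deciding where to register Win64
function table callbacks; 'isolate' is assumed to be live:

    void* jit_start = nullptr;
    size_t jit_length = 0;
    isolate->GetCodeRange(&jit_start, &jit_length);

    v8::MemoryRange embedded = isolate->GetEmbeddedCodeRange();
    if (embedded.length_in_bytes == 0) {
      // Embedded builtins are disabled; all code sits in the JIT range.
    }
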
/** Set the callback to invoke in case of fatal errors. */
void SetFatalErrorHandler(FatalErrorCallback that);
@@ -8933,7 +8878,7 @@ class V8_EXPORT TryCatch {
* of the C++ try catch handler itself.
*/
static void* JSStackComparableAddress(TryCatch* handler) {
- if (handler == NULL) return NULL;
+ if (handler == nullptr) return nullptr;
return handler->js_stack_comparable_address_;
}
@@ -8973,7 +8918,7 @@ class V8_EXPORT TryCatch {
*/
class V8_EXPORT ExtensionConfiguration {
public:
- ExtensionConfiguration() : name_count_(0), names_(NULL) { }
+ ExtensionConfiguration() : name_count_(0), names_(nullptr) {}
ExtensionConfiguration(int name_count, const char* names[])
: name_count_(name_count), names_(names) { }
@@ -9030,7 +8975,7 @@ class V8_EXPORT Context {
   * and only object identity will remain.
*/
static Local<Context> New(
- Isolate* isolate, ExtensionConfiguration* extensions = NULL,
+ Isolate* isolate, ExtensionConfiguration* extensions = nullptr,
MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
MaybeLocal<Value> global_object = MaybeLocal<Value>(),
DeserializeInternalFieldsCallback internal_fields_deserializer =
@@ -9369,201 +9314,6 @@ class V8_EXPORT Locker {
// --- Implementation ---
-
-namespace internal {
-
-/**
- * This class exports constants and functionality from within v8 that
- * is necessary to implement inline functions in the v8 api. Don't
- * depend on functions and constants defined here.
- */
-class Internals {
- public:
- // These values match non-compiler-dependent values defined within
- // the implementation of v8.
- static const int kHeapObjectMapOffset = 0;
- static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
- static const int kStringResourceOffset = 3 * kApiPointerSize;
-
- static const int kOddballKindOffset = 4 * kApiPointerSize + kApiDoubleSize;
- static const int kForeignAddressOffset = kApiPointerSize;
- static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
- static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
- static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 5;
- static const int kFullStringRepresentationMask = 0x0f;
- static const int kStringEncodingMask = 0x8;
- static const int kExternalTwoByteRepresentationTag = 0x02;
- static const int kExternalOneByteRepresentationTag = 0x0a;
-
- static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
- static const int kExternalMemoryOffset = 4 * kApiPointerSize;
- static const int kExternalMemoryLimitOffset =
- kExternalMemoryOffset + kApiInt64Size;
- static const int kExternalMemoryAtLastMarkCompactOffset =
- kExternalMemoryLimitOffset + kApiInt64Size;
- static const int kIsolateRootsOffset = kExternalMemoryLimitOffset +
- kApiInt64Size + kApiInt64Size +
- kApiPointerSize + kApiPointerSize;
- static const int kUndefinedValueRootIndex = 4;
- static const int kTheHoleValueRootIndex = 5;
- static const int kNullValueRootIndex = 6;
- static const int kTrueValueRootIndex = 7;
- static const int kFalseValueRootIndex = 8;
- static const int kEmptyStringRootIndex = 9;
-
- static const int kNodeClassIdOffset = 1 * kApiPointerSize;
- static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
- static const int kNodeStateMask = 0x7;
- static const int kNodeStateIsWeakValue = 2;
- static const int kNodeStateIsPendingValue = 3;
- static const int kNodeStateIsNearDeathValue = 4;
- static const int kNodeIsIndependentShift = 3;
- static const int kNodeIsActiveShift = 4;
-
- static const int kFirstNonstringType = 0x80;
- static const int kOddballType = 0x83;
- static const int kForeignType = 0x87;
- static const int kJSSpecialApiObjectType = 0x410;
- static const int kJSApiObjectType = 0x420;
- static const int kJSObjectType = 0x421;
-
- static const int kUndefinedOddballKind = 5;
- static const int kNullOddballKind = 3;
-
- static const uint32_t kNumIsolateDataSlots = 4;
-
- V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
- V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
-#ifdef V8_ENABLE_CHECKS
- CheckInitializedImpl(isolate);
-#endif
- }
-
- V8_INLINE static bool HasHeapObjectTag(const internal::Object* value) {
- return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
- kHeapObjectTag);
- }
-
- V8_INLINE static int SmiValue(const internal::Object* value) {
- return PlatformSmiTagging::SmiToInt(value);
- }
-
- V8_INLINE static internal::Object* IntToSmi(int value) {
- return PlatformSmiTagging::IntToSmi(value);
- }
-
- V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
- return PlatformSmiTagging::IsValidSmi(value);
- }
-
- V8_INLINE static int GetInstanceType(const internal::Object* obj) {
- typedef internal::Object O;
- O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
- return ReadField<uint16_t>(map, kMapInstanceTypeOffset);
- }
-
- V8_INLINE static int GetOddballKind(const internal::Object* obj) {
- typedef internal::Object O;
- return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
- }
-
- V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
- int representation = (instance_type & kFullStringRepresentationMask);
- return representation == kExternalTwoByteRepresentationTag;
- }
-
- V8_INLINE static uint8_t GetNodeFlag(internal::Object** obj, int shift) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
- return *addr & static_cast<uint8_t>(1U << shift);
- }
-
- V8_INLINE static void UpdateNodeFlag(internal::Object** obj,
- bool value, int shift) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
- uint8_t mask = static_cast<uint8_t>(1U << shift);
- *addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
- }
-
- V8_INLINE static uint8_t GetNodeState(internal::Object** obj) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
- return *addr & kNodeStateMask;
- }
-
- V8_INLINE static void UpdateNodeState(internal::Object** obj,
- uint8_t value) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
- *addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
- }
-
- V8_INLINE static void SetEmbedderData(v8::Isolate* isolate,
- uint32_t slot,
- void* data) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
- kIsolateEmbedderDataOffset + slot * kApiPointerSize;
- *reinterpret_cast<void**>(addr) = data;
- }
-
- V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
- uint32_t slot) {
- const uint8_t* addr = reinterpret_cast<const uint8_t*>(isolate) +
- kIsolateEmbedderDataOffset + slot * kApiPointerSize;
- return *reinterpret_cast<void* const*>(addr);
- }
-
- V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate,
- int index) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
- return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
- }
-
- template <typename T>
- V8_INLINE static T ReadField(const internal::Object* ptr, int offset) {
- const uint8_t* addr =
- reinterpret_cast<const uint8_t*>(ptr) + offset - kHeapObjectTag;
- return *reinterpret_cast<const T*>(addr);
- }
-
- template <typename T>
- V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) {
- typedef internal::Object O;
- typedef internal::Internals I;
- O* ctx = *reinterpret_cast<O* const*>(context);
- int embedder_data_offset = I::kContextHeaderSize +
- (internal::kApiPointerSize * I::kContextEmbedderDataIndex);
- O* embedder_data = I::ReadField<O*>(ctx, embedder_data_offset);
- int value_offset =
- I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index);
- return I::ReadField<T>(embedder_data, value_offset);
- }
-};
-
-// Only perform cast check for types derived from v8::Data since
-// other types do not implement the Cast method.
-template <bool PerformCheck>
-struct CastCheck {
- template <class T>
- static void Perform(T* data);
-};
-
-template <>
-template <class T>
-void CastCheck<true>::Perform(T* data) {
- T::Cast(data);
-}
-
-template <>
-template <class T>
-void CastCheck<false>::Perform(T* data) {}
-
-template <class T>
-V8_INLINE void PerformCastCheck(T* data) {
- CastCheck<std::is_base_of<Data, T>::value>::Perform(data);
-}
-
-} // namespace internal
-
-
template <class T>
Local<T> Local<T>::New(Isolate* isolate, Local<T> that) {
return New(isolate, that.val_);
@@ -9577,7 +9327,7 @@ Local<T> Local<T>::New(Isolate* isolate, const PersistentBase<T>& that) {
template <class T>
Local<T> Local<T>::New(Isolate* isolate, T* that) {
- if (that == NULL) return Local<T>();
+ if (that == nullptr) return Local<T>();
T* that_ptr = that;
internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
@@ -9621,7 +9371,7 @@ void* WeakCallbackInfo<T>::GetInternalField(int index) const {
template <class T>
T* PersistentBase<T>::New(Isolate* isolate, T* that) {
- if (that == NULL) return NULL;
+ if (that == nullptr) return nullptr;
internal::Object** p = reinterpret_cast<internal::Object**>(that);
return reinterpret_cast<T*>(
V8::GlobalizeReference(reinterpret_cast<internal::Isolate*>(isolate),
@@ -9672,7 +9422,7 @@ template <class T>
void PersistentBase<T>::Reset() {
if (this->IsEmpty()) return;
V8::DisposeGlobal(reinterpret_cast<internal::Object**>(this->val_));
- val_ = 0;
+ val_ = nullptr;
}
@@ -10239,30 +9989,6 @@ template <class T> Value* Value::Cast(T* value) {
}
-Local<Boolean> Value::ToBoolean() const {
- return ToBoolean(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<Boolean>());
-}
-
-
-Local<String> Value::ToString() const {
- return ToString(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<String>());
-}
-
-
-Local<Object> Value::ToObject() const {
- return ToObject(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<Object>());
-}
-
-
-Local<Integer> Value::ToInteger() const {
- return ToInteger(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<Integer>());
-}
-
-
Boolean* Boolean::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -10711,10 +10437,10 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
}
if (change_in_bytes < 0) {
- *external_memory_limit += change_in_bytes;
- }
-
- if (change_in_bytes > 0 && amount > *external_memory_limit) {
+ const int64_t lower_limit = *external_memory_limit + change_in_bytes;
+ if (lower_limit > I::kExternalAllocationSoftLimit)
+ *external_memory_limit = lower_limit;
+ } else if (change_in_bytes > 0 && amount > *external_memory_limit) {
ReportExternalAllocationLimitReached();
}
return *external_memory;
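
Usage is unchanged by this tweak; a sketch of mirroring an external buffer's
lifetime so the GC heuristics account for it ('isolate' assumed live,
<cstdlib> providing malloc/free):

    constexpr int64_t kBufferBytes = 16 * 1024 * 1024;
    void* backing = std::malloc(static_cast<size_t>(kBufferBytes));
    isolate->AdjustAmountOfExternalAllocatedMemory(kBufferBytes);
    // ... the buffer backs, say, an external ArrayBuffer ...
    std::free(backing);
    isolate->AdjustAmountOfExternalAllocatedMemory(-kBufferBytes);
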
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 75fd5aa7e7..93c4629825 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -420,6 +420,36 @@ namespace v8 { template <typename T> class AlignOfHelper { char c; T t; }; }
#define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
+#ifdef V8_OS_WIN
+
+// Setup for Windows DLL export/import. When building the V8 DLL,
+// BUILDING_V8_SHARED needs to be defined. When building a program which uses
+// the V8 DLL, USING_V8_SHARED needs to be defined. When either building the
+// V8 static library or building a program which uses the V8 static library,
+// neither BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
+#ifdef BUILDING_V8_SHARED
+# define V8_EXPORT __declspec(dllexport)
+#elif USING_V8_SHARED
+# define V8_EXPORT __declspec(dllimport)
+#else
+# define V8_EXPORT
+#endif // BUILDING_V8_SHARED
+
+#else // V8_OS_WIN
+
+// Setup for Linux shared library export.
+#if V8_HAS_ATTRIBUTE_VISIBILITY
+# ifdef BUILDING_V8_SHARED
+# define V8_EXPORT __attribute__ ((visibility("default")))
+# else
+# define V8_EXPORT
+# endif
+#else
+# define V8_EXPORT
+#endif
+
+#endif // V8_OS_WIN
+
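
A minimal embedder translation unit built against the V8 DLL; the cl command
line below is illustrative only:

    //   cl /DUSING_V8_SHARED /I include embedder.cc v8.dll.lib
    #include <cstdio>
    #include <v8.h>

    int main() {
      // With USING_V8_SHARED defined, V8_EXPORT expanded to
      // __declspec(dllimport), so GetVersion() resolves from the import
      // library at link time.
      std::printf("V8 %s\n", v8::V8::GetVersion());
      return 0;
    }
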
// clang-format on
#endif // V8CONFIG_H_
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index a0ede58b91..53ea0cdd44 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -108,6 +108,23 @@ verifiers {
triggered_by: "v8_win_rel_ng"
}
}
+ # TODO(machenbach): Remove after testing in practice and migrate to
+ # PRESUBMIT.py scripts.
+ buckets {
+ name: "luci.chromium.try"
+ builders {
+ name: "cast_shell_android"
+ experiment_percentage: 20
+ }
+ builders {
+ name: "cast_shell_linux"
+ experiment_percentage: 20
+ }
+ builders {
+ name: "linux-chromeos-rel"
+ experiment_percentage: 20
+ }
+ }
}
}
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 095aeefc5c..124524c552 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -27,24 +27,12 @@
'mips64el.debug': 'default_debug_mips64el',
'mips64el.optdebug': 'default_optdebug_mips64el',
'mips64el.release': 'default_release_mips64el',
- 'ppc.debug': 'default_debug_ppc',
- 'ppc.optdebug': 'default_optdebug_ppc',
- 'ppc.release': 'default_release_ppc',
- 'ppc.debug.sim': 'default_debug_ppc_sim',
- 'ppc.optdebug.sim': 'default_optdebug_ppc_sim',
- 'ppc.release.sim': 'default_release_ppc_sim',
'ppc64.debug': 'default_debug_ppc64',
'ppc64.optdebug': 'default_optdebug_ppc64',
'ppc64.release': 'default_release_ppc64',
'ppc64.debug.sim': 'default_debug_ppc64_sim',
'ppc64.optdebug.sim': 'default_optdebug_ppc64_sim',
'ppc64.release.sim': 'default_release_ppc64_sim',
- 's390.debug': 'default_debug_s390',
- 's390.optdebug': 'default_optdebug_s390',
- 's390.release': 'default_release_s390',
- 's390.debug.sim': 'default_debug_s390_sim',
- 's390.optdebug.sim': 'default_optdebug_s390_sim',
- 's390.release.sim': 'default_release_s390_sim',
's390x.debug': 'default_debug_s390x',
's390x.optdebug': 'default_optdebug_s390x',
's390x.release': 'default_release_s390x',
@@ -54,6 +42,7 @@
'x64.debug': 'default_debug_x64',
'x64.optdebug': 'default_optdebug_x64',
'x64.release': 'default_release_x64',
+ 'x64.release.sample': 'release_x64_sample',
},
'client.dynamorio': {
'linux-v8-dr': 'release_x64',
@@ -102,6 +91,8 @@
'V8 Linux gcc 4.8': 'release_x86_gcc',
'V8 Linux64 gcc 4.8 - debug': 'debug_x64_gcc',
# FYI.
+ 'V8 Linux - embedded builtins': 'release_x86_embedded_builtins',
+ 'V8 Linux - embedded builtins - debug': 'debug_x86_embedded_builtins',
'V8 Fuchsia': 'release_x64_fuchsia',
'V8 Fuchsia - debug': 'debug_x64_fuchsia',
'V8 Linux64 - cfi': 'release_x64_cfi',
@@ -138,8 +129,6 @@
'debug_simulate_arm64_asan_edge',
'V8 Clusterfuzz Linux ASAN arm - debug builder':
'debug_simulate_arm_asan_edge',
- 'V8 Clusterfuzz Linux ASAN mipsel - debug builder':
- 'debug_simulate_mipsel_asan_edge',
'V8 Clusterfuzz Linux64 CFI - release builder':
'release_x64_cfi_clusterfuzz',
'V8 Clusterfuzz Linux MSAN no origins':
@@ -169,11 +158,8 @@
'V8 Mips - builder': 'release_mips_no_snap_no_i18n',
'V8 Linux - mipsel - sim - builder': 'release_simulate_mipsel',
'V8 Linux - mips64el - sim - builder': 'release_simulate_mips64el',
- # PPC.
- 'V8 Linux - ppc - sim': 'release_simulate_ppc',
+ # IBM.
'V8 Linux - ppc64 - sim': 'release_simulate_ppc64',
- # S390.
- 'V8 Linux - s390 - sim': 'release_simulate_s390',
'V8 Linux - s390x - sim': 'release_simulate_s390x',
},
'client.v8.branches': {
@@ -193,12 +179,8 @@
'V8 mips64el - sim - stable branch': 'release_simulate_mips64el',
'V8 mipsel - sim - beta branch': 'release_simulate_mipsel',
'V8 mipsel - sim - stable branch': 'release_simulate_mipsel',
- 'V8 ppc - sim - beta branch': 'release_simulate_ppc',
- 'V8 ppc - sim - stable branch': 'release_simulate_ppc',
'V8 ppc64 - sim - beta branch': 'release_simulate_ppc64',
'V8 ppc64 - sim - stable branch': 'release_simulate_ppc64',
- 'V8 s390 - sim - beta branch': 'release_simulate_s390',
- 'V8 s390 - sim - stable branch': 'release_simulate_s390',
'V8 s390x - sim - beta branch': 'release_simulate_s390x',
'V8 s390x - sim - stable branch': 'release_simulate_s390x',
},
@@ -207,7 +189,9 @@
'v8_android_arm64_compile_dbg': 'debug_android_arm64',
'v8_android_arm64_n5x_rel_ng': 'release_android_arm64',
'v8_fuchsia_rel_ng': 'release_x64_fuchsia_trybot',
+ 'v8_linux_embedded_builtins_rel_ng': 'release_x86_embedded_builtins_trybot',
'v8_linux_rel_ng': 'release_x86_gcmole_trybot',
+ 'v8_linux_optional_rel_ng': 'release_x86_trybot',
'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa',
'v8_linux_nodcheck_rel_ng': 'release_x86_minimal_symbols',
'v8_linux_dbg_ng': 'debug_x86_trybot',
@@ -218,6 +202,7 @@
'v8_linux_gcc_compile_rel': 'release_x86_gcc_minimal_symbols',
'v8_linux_gcc_rel': 'release_x86_gcc_minimal_symbols',
'v8_linux_shared_compile_rel': 'release_x86_shared_verify_heap',
+ 'v8_linux64_compile_rel_xg': 'release_x64_test_features_trybot',
'v8_linux64_dbg_ng': 'debug_x64_trybot',
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
@@ -235,6 +220,7 @@
'v8_linux64_tsan_isolates_rel_ng':
'release_x64_tsan_minimal_symbols',
'v8_linux64_ubsan_rel_ng': 'release_x64_ubsan_vptr_minimal_symbols',
+ 'v8_odroid_arm_rel_ng': 'release_arm',
# TODO(machenbach): Remove after switching to x64 on infra side.
'v8_win_dbg': 'debug_x86_trybot',
'v8_win_compile_dbg': 'debug_x86_trybot',
@@ -280,7 +266,7 @@
'default_optdebug_android_arm': [
'debug', 'arm', 'android', 'v8_enable_slow_dchecks' ],
'default_release_android_arm': [
- 'release', 'arm', 'android'],
+ 'release', 'arm', 'android', 'android_strip_outputs'],
'default_debug_arm64': [
'debug', 'simulate_arm64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_arm64': [
@@ -299,18 +285,6 @@
'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks'],
'default_release_mips64el': [
'release', 'simulate_mips64el'],
- 'default_debug_ppc': [
- 'debug', 'ppc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
- 'default_optdebug_ppc': [
- 'debug', 'ppc', 'v8_enable_slow_dchecks'],
- 'default_release_ppc': [
- 'release', 'ppc'],
- 'default_debug_ppc_sim': [
- 'debug', 'simulate_ppc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
- 'default_optdebug_ppc_sim': [
- 'debug', 'simulate_ppc', 'v8_enable_slow_dchecks'],
- 'default_release_ppc_sim': [
- 'release', 'simulate_ppc'],
'default_debug_ppc64': [
'debug', 'ppc64', 'gcc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc64': [
@@ -323,18 +297,6 @@
'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks'],
'default_release_ppc64_sim': [
'release', 'simulate_ppc64'],
- 'default_debug_s390': [
- 'debug', 's390', 'v8_enable_slow_dchecks', 'v8_full_debug'],
- 'default_optdebug_s390': [
- 'debug', 's390', 'v8_enable_slow_dchecks'],
- 'default_release_s390': [
- 'release', 's390'],
- 'default_debug_s390_sim': [
- 'debug', 'simulate_s390', 'v8_enable_slow_dchecks', 'v8_full_debug'],
- 'default_optdebug_s390_sim': [
- 'debug', 'simulate_s390', 'v8_enable_slow_dchecks'],
- 'default_release_s390_sim': [
- 'release', 'simulate_s390'],
'default_debug_s390x': [
'debug', 's390x', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_s390x': [
@@ -353,6 +315,8 @@
'debug', 'x64', 'v8_enable_slow_dchecks'],
'default_release_x64': [
'release', 'x64'],
+ 'release_x64_sample': [
+ 'release', 'x64', 'sample'],
'default_debug_x86': [
'debug', 'x86', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_x86': [
@@ -373,8 +337,6 @@
'debug_simulate_arm64_no_snap': [
'debug', 'simulate_arm64', 'shared', 'goma', 'v8_optimized_debug',
'v8_snapshot_none'],
- 'debug_simulate_mipsel_asan_edge': [
- 'debug_bot', 'simulate_mipsel', 'asan', 'edge'],
# Release configs for simulators.
'release_simulate_arm': [
@@ -397,12 +359,8 @@
'release_bot', 'simulate_mipsel'],
'release_simulate_mips64el': [
'release_bot', 'simulate_mips64el'],
- 'release_simulate_ppc': [
- 'release_bot', 'simulate_ppc'],
'release_simulate_ppc64': [
'release_bot', 'simulate_ppc64'],
- 'release_simulate_s390': [
- 'release_bot', 'simulate_s390'],
'release_simulate_s390x': [
'release_bot', 'simulate_s390x'],
@@ -416,9 +374,11 @@
'release_arm': [
'release_bot', 'arm', 'hard_float'],
'release_android_arm': [
- 'release_bot', 'arm', 'android', 'minimal_symbols'],
+ 'release_bot', 'arm', 'android', 'minimal_symbols',
+ 'android_strip_outputs'],
'release_android_arm64': [
- 'release_bot', 'arm64', 'android', 'minimal_symbols'],
+ 'release_bot', 'arm64', 'android', 'minimal_symbols',
+ 'android_strip_outputs'],
# Release configs for x64.
'release_x64': [
@@ -519,6 +479,9 @@
# Debug configs for x86.
'debug_x86': [
'debug_bot', 'x86'],
+ 'debug_x86_embedded_builtins': [
+ 'debug_bot', 'x86', 'v8_enable_embedded_builtins',
+ 'v8_no_untrusted_code_mitigations'],
'debug_x86_minimal_symbols': [
'debug_bot', 'x86', 'minimal_symbols'],
'debug_x86_no_i18n': [
@@ -538,6 +501,12 @@
# Release configs for x86.
'release_x86': [
'release_bot', 'x86'],
+ 'release_x86_embedded_builtins': [
+ 'release_bot', 'x86', 'v8_enable_embedded_builtins',
+ 'v8_no_untrusted_code_mitigations'],
+ 'release_x86_embedded_builtins_trybot': [
+ 'release_trybot', 'x86', 'v8_enable_embedded_builtins',
+ 'v8_no_untrusted_code_mitigations'],
'release_x86_gcc': [
'release_bot', 'x86', 'gcc'],
'release_x86_gcc_minimal_symbols': [
@@ -580,6 +549,10 @@
'gn_args': 'target_os="android" v8_android_log_stdout=true',
},
+ 'android_strip_outputs': {
+ 'gn_args': 'android_unstripped_runtime_outputs=false',
+ },
+
'arm': {
'gn_args': 'target_cpu="arm"',
},
@@ -625,14 +598,10 @@
'gn_args': 'is_debug=true v8_enable_backtrace=true',
},
- 'v8_use_multi_snapshots': {
- 'gn_args': 'v8_use_multi_snapshots=true',
- },
-
'debug_bot': {
'mixins': [
'debug', 'shared', 'goma', 'v8_enable_slow_dchecks',
- 'v8_use_multi_snapshots', 'v8_optimized_debug'],
+ 'v8_optimized_debug'],
},
'debug_trybot': {
@@ -715,11 +684,11 @@
},
'release': {
- 'gn_args': 'is_debug=false android_unstripped_runtime_outputs=false',
+ 'gn_args': 'is_debug=false',
},
'release_bot': {
- 'mixins': ['release', 'static', 'goma', 'v8_use_multi_snapshots'],
+ 'mixins': ['release', 'static', 'goma'],
},
'release_trybot': {
@@ -747,18 +716,10 @@
'gn_args': 'target_cpu="x64" v8_target_cpu="mips64el"',
},
- 'simulate_ppc': {
- 'gn_args': 'target_cpu="x86" v8_target_cpu="ppc"',
- },
-
'simulate_ppc64': {
'gn_args': 'target_cpu="x64" v8_target_cpu="ppc64"',
},
- 'simulate_s390': {
- 'gn_args': 'target_cpu="x86" v8_target_cpu="s390"',
- },
-
'simulate_s390x': {
'gn_args': 'target_cpu="x64" v8_target_cpu="s390x"',
},
@@ -808,6 +769,10 @@
'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
},
+ 'v8_enable_embedded_builtins': {
+ 'gn_args': 'v8_enable_embedded_builtins=true',
+ },
+
'v8_enable_slow_dchecks': {
'gn_args': 'v8_enable_slow_dchecks=true',
},
@@ -853,6 +818,10 @@
'gn_args': 'v8_use_snapshot=false',
},
+ 'v8_no_untrusted_code_mitigations': {
+ 'gn_args': 'v8_untrusted_code_mitigations=false',
+ },
+
'v8_verify_heap': {
'gn_args': 'v8_enable_verify_heap=true',
},
@@ -861,18 +830,10 @@
'gn_args': 'v8_enable_verify_csa=true',
},
- 's390': {
- 'gn_args': 'target_cpu="s390x" v8_target_cpu="s390"',
- },
-
's390x': {
'gn_args': 'target_cpu="s390x" v8_target_cpu="s390x"',
},
- 'ppc': {
- 'gn_args': 'target_cpu="ppc"',
- },
-
'ppc64': {
'gn_args': 'target_cpu="ppc64" use_custom_libcxx=false',
},
@@ -885,5 +846,9 @@
'gn_args': 'target_cpu="x86"',
},
+ 'sample': {
+ 'gn_args': 'v8_monolithic=true is_component_build=false '
+ 'v8_use_external_startup_data=false use_custom_libcxx=false',
+ },
},
}
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 90cfd737f2..99873803c9 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -13,6 +13,7 @@ include_rules = [
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
"+src/heap/heap-write-barrier-inl.h",
+ "+src/heap/heap-write-barrier.h",
"-src/inspector",
"-src/interpreter",
"+src/interpreter/bytecode-array-accessor.h",
@@ -30,6 +31,7 @@ include_rules = [
"+testing/gtest/include/gtest/gtest_prod.h",
"-src/libplatform",
"-include/libplatform",
+ "+builtins-generated",
"+torque-generated"
]
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index da935f3652..226178394d 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -31,7 +31,8 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_is_special_data_property(true);
info->set_is_sloppy(false);
info->set_replace_on_access(false);
- info->set_has_no_side_effect(false);
+ info->set_getter_side_effect_type(SideEffectType::kHasSideEffect);
+ info->set_setter_side_effect_type(SideEffectType::kHasSideEffect);
name = factory->InternalizeName(name);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
@@ -70,7 +71,7 @@ bool Accessors::IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
default:
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return CheckForName(isolate, name, isolate->factory()->length_string(),
- String::kLengthOffset, FieldIndex::kTagged, index);
+ String::kLengthOffset, FieldIndex::kWord32, index);
}
return false;
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 69fdbbb74e..15c4773ec3 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -22,27 +22,28 @@ class JavaScriptFrame;
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
-#define ACCESSOR_INFO_LIST(V) \
- V(arguments_iterator, ArgumentsIterator) \
- V(array_length, ArrayLength) \
- V(bound_function_length, BoundFunctionLength) \
- V(bound_function_name, BoundFunctionName) \
- V(error_stack, ErrorStack) \
- V(function_arguments, FunctionArguments) \
- V(function_caller, FunctionCaller) \
- V(function_name, FunctionName) \
- V(function_length, FunctionLength) \
- V(function_prototype, FunctionPrototype) \
- V(string_length, StringLength)
-
-#define SIDE_EFFECT_FREE_ACCESSOR_INFO_LIST(V) \
- V(ArrayLength) \
- V(BoundFunctionLength) \
- V(BoundFunctionName) \
- V(FunctionName) \
- V(FunctionLength) \
- V(FunctionPrototype) \
- V(StringLength)
+// V(accessor_name, AccessorName, GetterSideEffectType, SetterSideEffectType)
+#define ACCESSOR_INFO_LIST_GENERATOR(V, _) \
+ V(_, arguments_iterator, ArgumentsIterator, kHasNoSideEffect, \
+ kHasSideEffectToReceiver) \
+ V(_, array_length, ArrayLength, kHasNoSideEffect, kHasSideEffectToReceiver) \
+ V(_, bound_function_length, BoundFunctionLength, kHasNoSideEffect, \
+ kHasSideEffectToReceiver) \
+ V(_, bound_function_name, BoundFunctionName, kHasNoSideEffect, \
+ kHasSideEffectToReceiver) \
+ V(_, error_stack, ErrorStack, kHasSideEffectToReceiver, \
+ kHasSideEffectToReceiver) \
+ V(_, function_arguments, FunctionArguments, kHasNoSideEffect, \
+ kHasSideEffectToReceiver) \
+ V(_, function_caller, FunctionCaller, kHasNoSideEffect, \
+ kHasSideEffectToReceiver) \
+ V(_, function_name, FunctionName, kHasNoSideEffect, \
+ kHasSideEffectToReceiver) \
+ V(_, function_length, FunctionLength, kHasNoSideEffect, \
+ kHasSideEffectToReceiver) \
+ V(_, function_prototype, FunctionPrototype, kHasNoSideEffect, \
+ kHasSideEffectToReceiver) \
+ V(_, string_length, StringLength, kHasNoSideEffect, kHasSideEffectToReceiver)
#define ACCESSOR_SETTER_LIST(V) \
V(ArrayLengthSetter) \
@@ -55,11 +56,11 @@ class JavaScriptFrame;
class Accessors : public AllStatic {
public:
-#define ACCESSOR_GETTER_DECLARATION(accessor_name, AccessorName) \
- static void AccessorName##Getter( \
- v8::Local<v8::Name> name, \
+#define ACCESSOR_GETTER_DECLARATION(_, accessor_name, AccessorName, ...) \
+ static void AccessorName##Getter( \
+ v8::Local<v8::Name> name, \
const v8::PropertyCallbackInfo<v8::Value>& info);
- ACCESSOR_INFO_LIST(ACCESSOR_GETTER_DECLARATION)
+ ACCESSOR_INFO_LIST_GENERATOR(ACCESSOR_GETTER_DECLARATION, /* not used */)
#undef ACCESSOR_GETTER_DECLARATION
#define ACCESSOR_SETTER_DECLARATION(accessor_name) \
@@ -71,7 +72,7 @@ class Accessors : public AllStatic {
static constexpr int kAccessorInfoCount =
#define COUNT_ACCESSOR(...) +1
- ACCESSOR_INFO_LIST(COUNT_ACCESSOR);
+ ACCESSOR_INFO_LIST_GENERATOR(COUNT_ACCESSOR, /* not used */);
#undef COUNT_ACCESSOR
static constexpr int kAccessorSetterCount =
@@ -118,9 +119,9 @@ class Accessors : public AllStatic {
AccessorNameBooleanSetterCallback setter);
private:
-#define ACCESSOR_INFO_DECLARATION(accessor_name, AccessorName) \
+#define ACCESSOR_INFO_DECLARATION(_, accessor_name, AccessorName, ...) \
static Handle<AccessorInfo> Make##AccessorName##Info(Isolate* isolate);
- ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
+ ACCESSOR_INFO_LIST_GENERATOR(ACCESSOR_INFO_DECLARATION, /* not used */)
#undef ACCESSOR_INFO_DECLARATION
friend class Heap;
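
The list above is a second-order macro: each call site supplies its own V to expand the entries, now with the getter and setter side-effect types threaded through as the fourth and fifth arguments. A minimal sketch of one such expansion, collecting the per-accessor side-effect pairs into a table (the entry shape and SideEffectType constants come from this patch; the table itself is illustrative, not V8 code):

    // Illustrative expansion of ACCESSOR_INFO_LIST_GENERATOR: collect the
    // getter/setter side-effect type of every accessor in declaration order.
    struct AccessorSideEffects {
      SideEffectType getter;
      SideEffectType setter;
    };
    #define ACCESSOR_SIDE_EFFECTS(_, accessor_name, AccessorName, GetterType, \
                                  SetterType)                                 \
      {SideEffectType::GetterType, SideEffectType::SetterType},
    constexpr AccessorSideEffects kAccessorSideEffects[] = {
        ACCESSOR_INFO_LIST_GENERATOR(ACCESSOR_SIDE_EFFECTS, /* not used */)};
    #undef ACCESSOR_SIDE_EFFECTS
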
diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc
index 2b0bf727e5..ad71a25a99 100644
--- a/deps/v8/src/address-map.cc
+++ b/deps/v8/src/address-map.cc
@@ -14,8 +14,8 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
map_ = isolate->root_index_map();
if (map_ != nullptr) return;
map_ = new HeapObjectToIndexHashMap();
- for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
+ for (RootIndex root_index = RootIndex::kFirstStrongRoot;
+ root_index <= RootIndex::kLastStrongRoot; ++root_index) {
Object* root = isolate->heap()->root(root_index);
if (!root->IsHeapObject()) continue;
// Omit root entries that can be written after initialization. They must
@@ -25,11 +25,12 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
HeapObject* heap_object = HeapObject::cast(root);
Maybe<uint32_t> maybe_index = map_->Get(heap_object);
+ uint32_t index = static_cast<uint32_t>(root_index);
if (maybe_index.IsJust()) {
// Some are initialized to a previous value in the root list.
- DCHECK_LT(maybe_index.FromJust(), i);
+ DCHECK_LT(maybe_index.FromJust(), index);
} else {
- map_->Set(heap_object, i);
+ map_->Set(heap_object, index);
}
} else {
// Immortal immovable root objects are constant and allocated on the first
diff --git a/deps/v8/src/address-map.h b/deps/v8/src/address-map.h
index 599e44724a..f7a1cc2ad9 100644
--- a/deps/v8/src/address-map.h
+++ b/deps/v8/src/address-map.h
@@ -56,11 +56,14 @@ class RootIndexMap {
public:
explicit RootIndexMap(Isolate* isolate);
- static const int kInvalidRootIndex = -1;
-
- int Lookup(HeapObject* obj) {
+ // Returns true on successful lookup and sets *|out_root_list|.
+ bool Lookup(HeapObject* obj, RootIndex* out_root_list) {
Maybe<uint32_t> maybe_index = map_->Get(obj);
- return maybe_index.IsJust() ? maybe_index.FromJust() : kInvalidRootIndex;
+ if (maybe_index.IsJust()) {
+ *out_root_list = static_cast<RootIndex>(maybe_index.FromJust());
+ return true;
+ }
+ return false;
}
private:
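
The Lookup change replaces the kInvalidRootIndex sentinel with a bool return and an out-parameter, which also lets the index be typed as RootIndex instead of a raw int. A hypothetical caller under the new signature (RootIndexMap and RootIndex as used above):

    RootIndexMap root_index_map(isolate);
    RootIndex root_index;
    if (root_index_map.Lookup(heap_object, &root_index)) {
      // heap_object is a constant root; refer to it by root_index.
    } else {
      // Not in the root list; take the regular serialization path.
    }
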
diff --git a/deps/v8/src/allocation-site-scopes-inl.h b/deps/v8/src/allocation-site-scopes-inl.h
new file mode 100644
index 0000000000..e114bb3885
--- /dev/null
+++ b/deps/v8/src/allocation-site-scopes-inl.h
@@ -0,0 +1,52 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ALLOCATION_SITE_SCOPES_INL_H_
+#define V8_ALLOCATION_SITE_SCOPES_INL_H_
+
+#include "src/allocation-site-scopes.h"
+
+#include "src/objects/allocation-site-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
+ if (top().is_null()) {
+ InitializeTraversal(top_site_);
+ } else {
+ // Advance current site
+ Object* nested_site = current()->nested_site();
+ // Something is wrong if we advance to the end of the list here.
+ update_current_site(AllocationSite::cast(nested_site));
+ }
+ return Handle<AllocationSite>(*current(), isolate());
+}
+
+void AllocationSiteUsageContext::ExitScope(Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {
+ // This assert ensures that we are pointing at the right sub-object in a
+ // recursive walk of a nested literal.
+ DCHECK(object.is_null() || *object == scope_site->boilerplate());
+}
+
+bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
+ if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
+ if (FLAG_allocation_site_pretenuring ||
+ AllocationSite::ShouldTrack(object->GetElementsKind())) {
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating Memento for %s %p\n",
+ object->IsJSArray() ? "JSArray" : "JSObject",
+ static_cast<void*>(*object));
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ALLOCATION_SITE_SCOPES_INL_H_
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h
index 60614c5e01..0a729948db 100644
--- a/deps/v8/src/allocation-site-scopes.h
+++ b/deps/v8/src/allocation-site-scopes.h
@@ -56,40 +56,12 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
top_site_(site),
activated_(activated) { }
- inline Handle<AllocationSite> EnterNewScope() {
- if (top().is_null()) {
- InitializeTraversal(top_site_);
- } else {
- // Advance current site
- Object* nested_site = current()->nested_site();
- // Something is wrong if we advance to the end of the list here.
- update_current_site(AllocationSite::cast(nested_site));
- }
- return Handle<AllocationSite>(*current(), isolate());
- }
+ inline Handle<AllocationSite> EnterNewScope();
inline void ExitScope(Handle<AllocationSite> scope_site,
- Handle<JSObject> object) {
- // This assert ensures that we are pointing at the right sub-object in a
- // recursive walk of a nested literal.
- DCHECK(object.is_null() || *object == scope_site->boilerplate());
- }
+ Handle<JSObject> object);
- bool ShouldCreateMemento(Handle<JSObject> object) {
- if (activated_ &&
- AllocationSite::CanTrack(object->map()->instance_type())) {
- if (FLAG_allocation_site_pretenuring ||
- AllocationSite::ShouldTrack(object->GetElementsKind())) {
- if (FLAG_trace_creation_allocation_sites) {
- PrintF("*** Creating Memento for %s %p\n",
- object->IsJSArray() ? "JSArray" : "JSObject",
- static_cast<void*>(*object));
- }
- return true;
- }
- }
- return false;
- }
+ inline bool ShouldCreateMemento(Handle<JSObject> object);
static const bool kCopying = true;
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 55c68dea89..6327a9c965 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
+#include "src/base/lsan-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
@@ -17,10 +18,6 @@
#include <malloc.h> // NOLINT
#endif
-#if defined(LEAK_SANITIZER)
-#include <sanitizer/lsan_interface.h>
-#endif
-
namespace v8 {
namespace internal {
@@ -51,21 +48,29 @@ struct InitializePageAllocator {
static v8::base::PageAllocator default_allocator;
page_allocator = &default_allocator;
}
+#if defined(LEAK_SANITIZER)
+ {
+ static v8::base::LsanPageAllocator lsan_allocator(page_allocator);
+ page_allocator = &lsan_allocator;
+ }
+#endif
*page_allocator_ptr = page_allocator;
}
};
static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
page_allocator = LAZY_INSTANCE_INITIALIZER;
-
-v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); }
-
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;
} // namespace
+v8::PageAllocator* GetPlatformPageAllocator() {
+ DCHECK_NOT_NULL(page_allocator.Get());
+ return page_allocator.Get();
+}
+
void* Malloced::New(size_t size) {
void* result = AllocWithRetry(size);
if (result == nullptr) {
@@ -131,68 +136,62 @@ void AlignedFree(void *ptr) {
#endif
}
-size_t AllocatePageSize() { return GetPageAllocator()->AllocatePageSize(); }
+size_t AllocatePageSize() {
+ return GetPlatformPageAllocator()->AllocatePageSize();
+}
-size_t CommitPageSize() { return GetPageAllocator()->CommitPageSize(); }
+size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }
void SetRandomMmapSeed(int64_t seed) {
- GetPageAllocator()->SetRandomMmapSeed(seed);
+ GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
}
-void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }
+void* GetRandomMmapAddr() {
+ return GetPlatformPageAllocator()->GetRandomMmapAddr();
+}
-void* AllocatePages(void* address, size_t size, size_t alignment,
+void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
+ size_t size, size_t alignment,
PageAllocator::Permission access) {
+ DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(address, AlignedAddress(address, alignment));
- DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
+ DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
- result =
- GetPageAllocator()->AllocatePages(address, size, alignment, access);
+ result = page_allocator->AllocatePages(address, size, alignment, access);
if (result != nullptr) break;
- size_t request_size = size + alignment - AllocatePageSize();
+ size_t request_size = size + alignment - page_allocator->AllocatePageSize();
if (!OnCriticalMemoryPressure(request_size)) break;
}
-#if defined(LEAK_SANITIZER)
- if (result != nullptr) {
- __lsan_register_root_region(result, size);
- }
-#endif
return result;
}
-bool FreePages(void* address, const size_t size) {
- DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
- bool result = GetPageAllocator()->FreePages(address, size);
-#if defined(LEAK_SANITIZER)
- if (result) {
- __lsan_unregister_root_region(address, size);
- }
-#endif
- return result;
+bool FreePages(v8::PageAllocator* page_allocator, void* address,
+ const size_t size) {
+ DCHECK_NOT_NULL(page_allocator);
+ DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
+ return page_allocator->FreePages(address, size);
}
-bool ReleasePages(void* address, size_t size, size_t new_size) {
+bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
+ size_t new_size) {
+ DCHECK_NOT_NULL(page_allocator);
DCHECK_LT(new_size, size);
- bool result = GetPageAllocator()->ReleasePages(address, size, new_size);
-#if defined(LEAK_SANITIZER)
- if (result) {
- __lsan_unregister_root_region(address, size);
- __lsan_register_root_region(address, new_size);
- }
-#endif
- return result;
+ return page_allocator->ReleasePages(address, size, new_size);
}
-bool SetPermissions(void* address, size_t size,
- PageAllocator::Permission access) {
- return GetPageAllocator()->SetPermissions(address, size, access);
+bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
+ size_t size, PageAllocator::Permission access) {
+ DCHECK_NOT_NULL(page_allocator);
+ return page_allocator->SetPermissions(address, size, access);
}
-byte* AllocatePage(void* address, size_t* allocated) {
- size_t page_size = AllocatePageSize();
- void* result =
- AllocatePages(address, page_size, page_size, PageAllocator::kReadWrite);
+byte* AllocatePage(v8::PageAllocator* page_allocator, void* address,
+ size_t* allocated) {
+ DCHECK_NOT_NULL(page_allocator);
+ size_t page_size = page_allocator->AllocatePageSize();
+ void* result = AllocatePages(page_allocator, address, page_size, page_size,
+ PageAllocator::kReadWrite);
if (result != nullptr) *allocated = page_size;
return static_cast<byte*>(result);
}
@@ -206,16 +205,17 @@ bool OnCriticalMemoryPressure(size_t length) {
return true;
}
-VirtualMemory::VirtualMemory() : address_(kNullAddress), size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
- : address_(kNullAddress), size_(0) {
- size_t page_size = AllocatePageSize();
- size_t alloc_size = RoundUp(size, page_size);
- address_ = reinterpret_cast<Address>(
- AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess));
- if (address_ != kNullAddress) {
- size_ = alloc_size;
+VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+ void* hint, size_t alignment)
+ : page_allocator_(page_allocator) {
+ DCHECK_NOT_NULL(page_allocator);
+ size_t page_size = page_allocator_->AllocatePageSize();
+ alignment = RoundUp(alignment, page_size);
+ size = RoundUp(size, page_size);
+ Address address = reinterpret_cast<Address>(AllocatePages(
+ page_allocator_, hint, size, alignment, PageAllocator::kNoAccess));
+ if (address != kNullAddress) {
+ region_ = base::AddressRegion(address, size);
}
}
@@ -226,30 +226,31 @@ VirtualMemory::~VirtualMemory() {
}
void VirtualMemory::Reset() {
- address_ = kNullAddress;
- size_ = 0;
+ page_allocator_ = nullptr;
+ region_ = base::AddressRegion();
}
bool VirtualMemory::SetPermissions(Address address, size_t size,
PageAllocator::Permission access) {
CHECK(InVM(address, size));
- bool result = v8::internal::SetPermissions(address, size, access);
+ bool result =
+ v8::internal::SetPermissions(page_allocator_, address, size, access);
DCHECK(result);
return result;
}
size_t VirtualMemory::Release(Address free_start) {
DCHECK(IsReserved());
- DCHECK(IsAddressAligned(free_start, CommitPageSize()));
+ DCHECK(IsAddressAligned(free_start, page_allocator_->CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
- const size_t free_size = size_ - (free_start - address_);
+
+ const size_t old_size = region_.size();
+ const size_t free_size = old_size - (free_start - region_.begin());
CHECK(InVM(free_start, free_size));
- DCHECK_LT(address_, free_start);
- DCHECK_LT(free_start, address_ + size_);
- CHECK(ReleasePages(reinterpret_cast<void*>(address_), size_,
- size_ - free_size));
- size_ -= free_size;
+ region_.set_size(old_size - free_size);
+ CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
+ old_size, region_.size()));
return free_size;
}
@@ -257,41 +258,21 @@ void VirtualMemory::Free() {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
- Address address = address_;
- size_t size = size_;
- CHECK(InVM(address, size));
+ v8::PageAllocator* page_allocator = page_allocator_;
+ base::AddressRegion region = region_;
Reset();
- // FreePages expects size to be aligned to allocation granularity. Trimming
- // may leave size at only commit granularity. Align it here.
- CHECK(FreePages(reinterpret_cast<void*>(address),
- RoundUp(size, AllocatePageSize())));
+  // FreePages expects size to be aligned to allocation granularity; however,
+ // ReleasePages may leave size at only commit granularity. Align it here.
+ CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
DCHECK(!IsReserved());
- address_ = from->address_;
- size_ = from->size_;
+ page_allocator_ = from->page_allocator_;
+ region_ = from->region_;
from->Reset();
}
-bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
- VirtualMemory vm(size, hint);
- if (vm.IsReserved()) {
- result->TakeControl(&vm);
- return true;
- }
- return false;
-}
-
-bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
- VirtualMemory* result) {
- VirtualMemory vm(size, hint, alignment);
- if (vm.IsReserved()) {
- result->TakeControl(&vm);
- return true;
- }
- return false;
-}
-
} // namespace internal
} // namespace v8
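
The scattered #if defined(LEAK_SANITIZER) bookkeeping above moves behind the new v8::base::LsanPageAllocator, installed as a wrapper around the platform allocator at initialization. The patch only shows the header being included; the sketch below illustrates the decorator pattern such a wrapper plausibly applies (the class body is an assumption, not the actual implementation; only the v8::PageAllocator interface and the LSan calls are taken from the sources):

    #if defined(LEAK_SANITIZER)
    #include <sanitizer/lsan_interface.h>
    #endif
    #include "include/v8-platform.h"

    // Sketch of a page-allocator decorator: forward every call to the wrapped
    // allocator and keep LeakSanitizer informed about the regions handed out.
    class LsanPageAllocatorSketch : public v8::PageAllocator {
     public:
      explicit LsanPageAllocatorSketch(v8::PageAllocator* inner)
          : inner_(inner) {}
      size_t AllocatePageSize() override { return inner_->AllocatePageSize(); }
      size_t CommitPageSize() override { return inner_->CommitPageSize(); }
      void SetRandomMmapSeed(int64_t seed) override {
        inner_->SetRandomMmapSeed(seed);
      }
      void* GetRandomMmapAddr() override { return inner_->GetRandomMmapAddr(); }
      void* AllocatePages(void* hint, size_t size, size_t alignment,
                          Permission access) override {
        void* result = inner_->AllocatePages(hint, size, alignment, access);
    #if defined(LEAK_SANITIZER)
        if (result != nullptr) __lsan_register_root_region(result, size);
    #endif
        return result;
      }
      bool FreePages(void* address, size_t size) override {
        bool ok = inner_->FreePages(address, size);
    #if defined(LEAK_SANITIZER)
        if (ok) __lsan_unregister_root_region(address, size);
    #endif
        return ok;
      }
      bool ReleasePages(void* address, size_t size, size_t new_size) override {
        // The real wrapper would also re-register the shrunk region with LSan.
        return inner_->ReleasePages(address, size, new_size);
      }
      bool SetPermissions(void* address, size_t size,
                          Permission access) override {
        return inner_->SetPermissions(address, size, access);
      }

     private:
      v8::PageAllocator* const inner_;
    };
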
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 8e17a35514..3a21310af8 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -6,6 +6,7 @@
#define V8_ALLOCATION_H_
#include "include/v8-platform.h"
+#include "src/base/address-region.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"
@@ -82,6 +83,9 @@ void* AllocWithRetry(size_t size);
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
+// Returns the platform page allocator instance. Guaranteed to be a valid pointer.
+V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
+
// Gets the page granularity for AllocatePages and FreePages. Addresses returned
// by AllocatePages and AllocatePage are aligned to this size.
V8_EXPORT_PRIVATE size_t AllocatePageSize();
@@ -101,14 +105,16 @@ V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
// AllocatePageSize(). Returns the address of the allocated memory, with the
// specified size and alignment, or nullptr on failure.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT void* AllocatePages(void* address, size_t size,
+V8_WARN_UNUSED_RESULT void* AllocatePages(v8::PageAllocator* page_allocator,
+ void* address, size_t size,
size_t alignment,
PageAllocator::Permission access);
// Frees memory allocated by a call to AllocatePages. |address| and |size| must
// be multiples of AllocatePageSize(). Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);
+V8_WARN_UNUSED_RESULT bool FreePages(v8::PageAllocator* page_allocator,
+ void* address, const size_t size);
// Releases memory that is no longer needed. The range specified by |address|
// and |size| must be an allocated memory region. |size| and |new_size| must be
@@ -116,7 +122,8 @@ V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);
// Released memory is left in an undefined state, so it should not be accessed.
// Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
+V8_WARN_UNUSED_RESULT bool ReleasePages(v8::PageAllocator* page_allocator,
+ void* address, size_t size,
size_t new_size);
// Sets permissions according to |access|. |address| and |size| must be
@@ -124,18 +131,21 @@ V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
// cause the memory contents to be lost. Returns true on success, otherwise
// false.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
+V8_WARN_UNUSED_RESULT bool SetPermissions(v8::PageAllocator* page_allocator,
+ void* address, size_t size,
PageAllocator::Permission access);
-inline bool SetPermissions(Address address, size_t size,
- PageAllocator::Permission access) {
- return SetPermissions(reinterpret_cast<void*>(address), size, access);
+inline bool SetPermissions(v8::PageAllocator* page_allocator, Address address,
+ size_t size, PageAllocator::Permission access) {
+ return SetPermissions(page_allocator, reinterpret_cast<void*>(address), size,
+ access);
}
// Convenience function that allocates a single system page with read and write
// permissions. |address| is a hint. Returns the base address of the memory and
// the page size via |allocated| on success. Returns nullptr on failure.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
+V8_WARN_UNUSED_RESULT byte* AllocatePage(v8::PageAllocator* page_allocator,
+ void* address, size_t* allocated);
// Function that may release reserved memory regions to allow failed allocations
// to succeed. |length| is the amount of memory needed. Returns |true| if memory
@@ -143,50 +153,67 @@ V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
// Represents and controls an area of reserved memory.
-class V8_EXPORT_PRIVATE VirtualMemory {
+class V8_EXPORT_PRIVATE VirtualMemory final {
public:
// Empty VirtualMemory object, controlling no reserved memory.
- VirtualMemory();
+ VirtualMemory() = default;
// Reserves virtual memory containing an area of the given size that is
- // aligned per alignment. This may not be at the position returned by
- // address().
- VirtualMemory(size_t size, void* hint, size_t alignment = AllocatePageSize());
+  // aligned per |alignment|, rounded up to the |page_allocator|'s allocate
+  // page size.
+ // This may not be at the position returned by address().
+ VirtualMemory(v8::PageAllocator* page_allocator, size_t size, void* hint,
+ size_t alignment = 1);
// Construct a virtual memory by assigning it some already mapped address
// and size.
- VirtualMemory(Address address, size_t size)
- : address_(address), size_(size) {}
+ VirtualMemory(v8::PageAllocator* page_allocator, Address address, size_t size)
+ : page_allocator_(page_allocator), region_(address, size) {
+ DCHECK_NOT_NULL(page_allocator);
+ }
// Releases the reserved memory, if any, controlled by this VirtualMemory
// object.
~VirtualMemory();
+ // Move constructor.
+ VirtualMemory(VirtualMemory&& other) V8_NOEXCEPT { TakeControl(&other); }
+
+ // Move assignment operator.
+ VirtualMemory& operator=(VirtualMemory&& other) V8_NOEXCEPT {
+ TakeControl(&other);
+ return *this;
+ }
+
// Returns whether the memory has been reserved.
- bool IsReserved() const { return address_ != kNullAddress; }
+ bool IsReserved() const { return region_.begin() != kNullAddress; }
// Initializes or resets an embedded VirtualMemory object.
void Reset();
+ v8::PageAllocator* page_allocator() { return page_allocator_; }
+
+ const base::AddressRegion& region() const { return region_; }
+
// Returns the start address of the reserved memory.
// If the memory was reserved with an alignment, this address is not
// necessarily aligned. The user might need to round it up to a multiple of
// the alignment to get the start of the aligned block.
Address address() const {
DCHECK(IsReserved());
- return address_;
+ return region_.begin();
}
Address end() const {
DCHECK(IsReserved());
- return address_ + size_;
+ return region_.end();
}
// Returns the size of the reserved memory. The returned value is only
// meaningful when IsReserved() returns true.
// If the memory was reserved with an alignment, this size may be larger
// than the requested size.
- size_t size() const { return size_; }
+ size_t size() const { return region_.size(); }
// Sets permissions according to the access argument. address and size must be
// multiples of CommitPageSize(). Returns true on success, otherwise false.
@@ -204,17 +231,16 @@ class V8_EXPORT_PRIVATE VirtualMemory {
void TakeControl(VirtualMemory* from);
bool InVM(Address address, size_t size) {
- return (address_ <= address) && ((address_ + size_) >= (address + size));
+ return region_.contains(address, size);
}
private:
- Address address_; // Start address of the virtual memory.
- size_t size_; // Size of the virtual memory.
-};
+ // Page allocator that controls the virtual memory.
+ v8::PageAllocator* page_allocator_ = nullptr;
+ base::AddressRegion region_;
-bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
-bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
- VirtualMemory* result);
+ DISALLOW_COPY_AND_ASSIGN(VirtualMemory);
+};
} // namespace internal
} // namespace v8
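
With AllocVirtualMemory and AlignedAllocVirtualMemory gone, reservation sites construct VirtualMemory directly, passing an explicit allocator, and hand ownership around via the new move operations. A hypothetical caller (GetPlatformPageAllocator, VirtualMemory, and PageAllocator::kReadWrite as declared above; KB is V8's usual 1024 constant):

    v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
    VirtualMemory reservation(page_allocator, 512 * KB, /* hint */ nullptr);
    if (reservation.IsReserved()) {
      // Moving transfers ownership; the moved-from object is Reset() and its
      // destructor will not free anything.
      VirtualMemory owner(std::move(reservation));
      CHECK(owner.SetPermissions(owner.address(), owner.size(),
                                 PageAllocator::kReadWrite));
    }  // |owner| frees the reservation when it goes out of scope.
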
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
index 89f606ed41..1e5d6b2aaa 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api-arguments-inl.h
@@ -8,6 +8,7 @@
#include "src/api-arguments.h"
#include "src/api-inl.h"
+#include "src/debug/debug.h"
#include "src/objects/api-callbacks.h"
#include "src/tracing/trace-event.h"
#include "src/vm-state-inl.h"
@@ -34,6 +35,10 @@ inline JSObject* PropertyCallbackArguments::holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
+inline Object* PropertyCallbackArguments::receiver() {
+ return Object::cast(this->begin()[T::kThisIndex]);
+}
+
inline JSObject* FunctionCallbackArguments::holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
@@ -47,14 +52,24 @@ inline JSObject* FunctionCallbackArguments::holder() {
DCHECK(!name->IsPrivate()); \
DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
-#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE, \
- CALLBACK_INFO) \
- if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects && \
- !ISOLATE->debug()->PerformSideEffectCheckForCallback(CALLBACK_INFO)) { \
- return RETURN_VALUE(); \
- } \
- VMState<EXTERNAL> state(ISOLATE); \
- ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
+#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE, \
+ CALLBACK_INFO, RECEIVER, ACCESSOR_KIND) \
+ if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects && \
+ !ISOLATE->debug()->PerformSideEffectCheckForCallback( \
+ CALLBACK_INFO, RECEIVER, Debug::k##ACCESSOR_KIND)) { \
+ return RETURN_VALUE(); \
+ } \
+ VMState<EXTERNAL> state(ISOLATE); \
+ ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
+ PropertyCallbackInfo<API_RETURN_TYPE> callback_info(begin());
+
+#define PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_VALUE, \
+ API_RETURN_TYPE) \
+ if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects) { \
+ return RETURN_VALUE(); \
+ } \
+ VMState<EXTERNAL> state(ISOLATE); \
+ ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
PropertyCallbackInfo<API_RETURN_TYPE> callback_info(begin());
#define CREATE_NAMED_CALLBACK(FUNCTION, TYPE, RETURN_TYPE, API_RETURN_TYPE, \
@@ -65,11 +80,13 @@ inline JSObject* FunctionCallbackArguments::holder() {
Isolate* isolate = this->isolate(); \
RuntimeCallTimerScope timer( \
isolate, RuntimeCallCounterId::kNamed##FUNCTION##Callback); \
+ Handle<Object> receiver_check_unsupported; \
GenericNamedProperty##FUNCTION##Callback f = \
ToCData<GenericNamedProperty##FUNCTION##Callback>( \
interceptor->TYPE()); \
PREPARE_CALLBACK_INFO(isolate, f, Handle<RETURN_TYPE>, API_RETURN_TYPE, \
- INFO_FOR_SIDE_EFFECT); \
+ INFO_FOR_SIDE_EFFECT, receiver_check_unsupported, \
+ NotAccessor); \
LOG(isolate, \
ApiNamedPropertyAccess("interceptor-named-" #TYPE, holder(), *name)); \
f(v8::Utils::ToLocal(name), callback_info); \
@@ -87,10 +104,12 @@ FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK)
Isolate* isolate = this->isolate(); \
RuntimeCallTimerScope timer( \
isolate, RuntimeCallCounterId::kIndexed##FUNCTION##Callback); \
+ Handle<Object> receiver_check_unsupported; \
IndexedProperty##FUNCTION##Callback f = \
ToCData<IndexedProperty##FUNCTION##Callback>(interceptor->TYPE()); \
PREPARE_CALLBACK_INFO(isolate, f, Handle<RETURN_TYPE>, API_RETURN_TYPE, \
- INFO_FOR_SIDE_EFFECT); \
+ INFO_FOR_SIDE_EFFECT, receiver_check_unsupported, \
+ NotAccessor); \
LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" #TYPE, \
holder(), index)); \
f(index, callback_info); \
@@ -108,9 +127,11 @@ Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
v8::FunctionCallback f =
v8::ToCData<v8::FunctionCallback>(handler->callback());
+ Handle<Object> receiver_check_unsupported;
if (isolate->debug_execution_mode() == DebugInfo::kSideEffects &&
!isolate->debug()->PerformSideEffectCheckForCallback(
- handle(handler, isolate))) {
+ handle(handler, isolate), receiver_check_unsupported,
+ Debug::kNotAccessor)) {
return Handle<Object>();
}
VMState<EXTERNAL> state(isolate);
@@ -167,10 +188,11 @@ Handle<Object> PropertyCallbackArguments::CallNamedDescriptor(
Handle<Object> PropertyCallbackArguments::BasicCallNamedGetterCallback(
GenericNamedPropertyGetterCallback f, Handle<Name> name,
- Handle<Object> info) {
+ Handle<Object> info, Handle<Object> receiver) {
DCHECK(!name->IsPrivate());
Isolate* isolate = this->isolate();
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value, info);
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value, info, receiver,
+ Getter);
f(v8::Utils::ToLocal(name), callback_info);
return GetReturnValue<Object>(isolate);
}
@@ -184,9 +206,8 @@ Handle<Object> PropertyCallbackArguments::CallNamedSetter(
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kNamedSetterCallback);
- Handle<Object> side_effect_check_not_supported;
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
- side_effect_check_not_supported);
+ PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
+ v8::Value);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
@@ -202,9 +223,8 @@ Handle<Object> PropertyCallbackArguments::CallNamedDefiner(
RuntimeCallCounterId::kNamedDefinerCallback);
GenericNamedPropertyDefinerCallback f =
ToCData<GenericNamedPropertyDefinerCallback>(interceptor->definer());
- Handle<Object> side_effect_check_not_supported;
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
- side_effect_check_not_supported);
+ PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
+ v8::Value);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-define", holder(), *name));
f(v8::Utils::ToLocal(name), desc, callback_info);
@@ -219,9 +239,8 @@ Handle<Object> PropertyCallbackArguments::CallIndexedSetter(
RuntimeCallCounterId::kIndexedSetterCallback);
IndexedPropertySetterCallback f =
ToCData<IndexedPropertySetterCallback>(interceptor->setter());
- Handle<Object> side_effect_check_not_supported;
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
- side_effect_check_not_supported);
+ PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
+ v8::Value);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
f(index, v8::Utils::ToLocal(value), callback_info);
@@ -237,9 +256,8 @@ Handle<Object> PropertyCallbackArguments::CallIndexedDefiner(
RuntimeCallCounterId::kIndexedDefinerCallback);
IndexedPropertyDefinerCallback f =
ToCData<IndexedPropertyDefinerCallback>(interceptor->definer());
- Handle<Object> side_effect_check_not_supported;
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
- side_effect_check_not_supported);
+ PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
+ v8::Value);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-define", holder(), index));
f(index, desc, callback_info);
@@ -275,7 +293,9 @@ Handle<Object> PropertyCallbackArguments::CallIndexedDescriptor(
Handle<Object> PropertyCallbackArguments::BasicCallIndexedGetterCallback(
IndexedPropertyGetterCallback f, uint32_t index, Handle<Object> info) {
Isolate* isolate = this->isolate();
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value, info);
+ Handle<Object> receiver_check_unsupported;
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value, info,
+ receiver_check_unsupported, Getter);
f(index, callback_info);
return GetReturnValue<Object>(isolate);
}
@@ -287,7 +307,9 @@ Handle<JSObject> PropertyCallbackArguments::CallPropertyEnumerator(
v8::ToCData<IndexedPropertyEnumeratorCallback>(interceptor->enumerator());
// TODO(cbruni): assert same type for indexed and named callback.
Isolate* isolate = this->isolate();
- PREPARE_CALLBACK_INFO(isolate, f, Handle<JSObject>, v8::Array, interceptor);
+ Handle<Object> receiver_check_unsupported;
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<JSObject>, v8::Array, interceptor,
+ receiver_check_unsupported, NotAccessor);
f(callback_info);
return GetReturnValue<JSObject>(isolate);
}
@@ -303,7 +325,8 @@ Handle<Object> PropertyCallbackArguments::CallAccessorGetter(
LOG(isolate, ApiNamedPropertyAccess("accessor-getter", holder(), *name));
AccessorNameGetterCallback f =
ToCData<AccessorNameGetterCallback>(info->getter());
- return BasicCallNamedGetterCallback(f, name, info);
+ return BasicCallNamedGetterCallback(f, name, info,
+ handle(receiver(), isolate));
}
Handle<Object> PropertyCallbackArguments::CallAccessorSetter(
@@ -314,15 +337,15 @@ Handle<Object> PropertyCallbackArguments::CallAccessorSetter(
RuntimeCallCounterId::kAccessorSetterCallback);
AccessorNameSetterCallback f =
ToCData<AccessorNameSetterCallback>(accessor_info->setter());
- Handle<Object> side_effect_check_not_supported;
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, void,
- side_effect_check_not_supported);
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, void, accessor_info,
+ handle(receiver(), isolate), Setter);
LOG(isolate, ApiNamedPropertyAccess("accessor-setter", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}
#undef PREPARE_CALLBACK_INFO
+#undef PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 0a0a7362c7..d8fc2b49ab 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -26,12 +26,12 @@ class CustomArguments : public CustomArgumentsBase {
public:
static const int kReturnValueOffset = T::kReturnValueIndex;
- ~CustomArguments() {
+ ~CustomArguments() override {
this->begin()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
- virtual inline void IterateInstance(RootVisitor* v) {
+ inline void IterateInstance(RootVisitor* v) override {
v->VisitRootPointers(Root::kRelocatable, nullptr, values_,
values_ + T::kArgsLength);
}
@@ -133,9 +133,10 @@ class PropertyCallbackArguments
IndexedPropertyGetterCallback f, uint32_t index, Handle<Object> info);
inline Handle<Object> BasicCallNamedGetterCallback(
GenericNamedPropertyGetterCallback f, Handle<Name> name,
- Handle<Object> info);
+ Handle<Object> info, Handle<Object> receiver = Handle<Object>());
inline JSObject* holder();
+ inline Object* receiver();
// Don't copy PropertyCallbackArguments, because they would both have the
// same prev_ pointer.
diff --git a/deps/v8/src/api-inl.h b/deps/v8/src/api-inl.h
index 50586814d8..5758729dd3 100644
--- a/deps/v8/src/api-inl.h
+++ b/deps/v8/src/api-inl.h
@@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/objects-inl.h"
+#include "src/objects/stack-frame-info.h"
namespace v8 {
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 977d6cdafc..11b63d56d8 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -114,9 +114,8 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
}
#endif
- MAYBE_RETURN_NULL(
- Object::AddDataProperty(&it, value, attributes, kThrowOnError,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED));
+ MAYBE_RETURN_NULL(Object::AddDataProperty(
+ &it, value, attributes, kThrowOnError, StoreOrigin::kNamed));
return value;
}
@@ -403,8 +402,10 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
}
Handle<JSObject> object;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, object,
- JSObject::New(constructor, new_target), JSObject);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, object,
+ JSObject::New(constructor, new_target, Handle<AllocationSite>::null()),
+ JSObject);
if (is_prototype) JSObject::OptimizeAsPrototype(object);
@@ -495,8 +496,15 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
parent_prototype);
}
}
+ InstanceType function_type =
+ (!data->needs_access_check() &&
+ data->named_property_handler()->IsUndefined(isolate) &&
+ data->indexed_property_handler()->IsUndefined(isolate))
+ ? JS_API_OBJECT_TYPE
+ : JS_SPECIAL_API_OBJECT_TYPE;
+
Handle<JSFunction> function = ApiNatives::CreateApiFunction(
- isolate, data, prototype, ApiNatives::JavaScriptObjectType, maybe_name);
+ isolate, data, prototype, function_type, maybe_name);
if (serial_number) {
// Cache the function.
CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited,
@@ -625,8 +633,7 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate,
Handle<JSFunction> ApiNatives::CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
- Handle<Object> prototype, ApiInstanceType instance_type,
- MaybeHandle<Name> maybe_name) {
+ Handle<Object> prototype, InstanceType type, MaybeHandle<Name> maybe_name) {
Handle<SharedFunctionInfo> shared =
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj,
maybe_name);
@@ -670,33 +677,10 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
immutable_proto = instance_template->immutable_proto();
}
- // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
- // JSObject::GetHeaderSize.
- int instance_size = kPointerSize * embedder_field_count;
- InstanceType type;
- switch (instance_type) {
- case JavaScriptObjectType:
- if (!obj->needs_access_check() &&
- obj->named_property_handler()->IsUndefined(isolate) &&
- obj->indexed_property_handler()->IsUndefined(isolate)) {
- type = JS_API_OBJECT_TYPE;
- } else {
- type = JS_SPECIAL_API_OBJECT_TYPE;
- }
- instance_size += JSObject::kHeaderSize;
- break;
- case GlobalObjectType:
- type = JS_GLOBAL_OBJECT_TYPE;
- instance_size += JSGlobalObject::kSize;
- break;
- case GlobalProxyType:
- type = JS_GLOBAL_PROXY_TYPE;
- instance_size += JSGlobalProxy::kSize;
- break;
- default:
- UNREACHABLE();
- break;
- }
+ // JS_FUNCTION_TYPE requires information about the prototype slot.
+ DCHECK_NE(JS_FUNCTION_TYPE, type);
+ int instance_size =
+ JSObject::GetHeaderSize(type) + kPointerSize * embedder_field_count;
Handle<Map> map = isolate->factory()->NewMap(type, instance_size,
TERMINAL_FAST_ELEMENTS_KIND);
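
With JSObject::GetHeaderSize generalized over instance types, the per-type switch collapses into one computation. As a hedged worked example (assuming a 64-bit build where kPointerSize is 8 and JSObject's header holds three pointers):

    // JS_API_OBJECT_TYPE with two embedder fields:
    //   instance_size = JSObject::GetHeaderSize(JS_API_OBJECT_TYPE)  // 24
    //                 + kPointerSize * embedder_field_count          // 16
    //                 = 40 bytes
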
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index e8bb32d40a..ff6cdc6c86 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -9,6 +9,7 @@
#include "src/base/macros.h"
#include "src/handles.h"
#include "src/maybe-handles.h"
+#include "src/objects.h"
#include "src/property-details.h"
namespace v8 {
@@ -33,15 +34,9 @@ class ApiNatives {
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> InstantiateRemoteObject(
Handle<ObjectTemplateInfo> data);
- enum ApiInstanceType {
- JavaScriptObjectType,
- GlobalObjectType,
- GlobalProxyType
- };
-
static Handle<JSFunction> CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
- Handle<Object> prototype, ApiInstanceType instance_type,
+ Handle<Object> prototype, InstanceType type,
MaybeHandle<Name> name = MaybeHandle<Name>());
static void AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 5ac9aec047..3f62a23d43 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -58,7 +58,9 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/templates.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
@@ -834,6 +836,7 @@ StartupData SnapshotCreator::CreateBlob(
}
data->created_ = true;
+ DCHECK(i::Snapshot::VerifyChecksum(&result));
return result;
}
@@ -876,12 +879,12 @@ void RegisteredExtension::UnregisterAll() {
namespace {
class ExtensionResource : public String::ExternalOneByteStringResource {
public:
- ExtensionResource() : data_(0), length_(0) {}
+ ExtensionResource() : data_(nullptr), length_(0) {}
ExtensionResource(const char* data, size_t length)
: data_(data), length_(length) {}
- const char* data() const { return data_; }
- size_t length() const { return length_; }
- virtual void Dispose() {}
+ const char* data() const override { return data_; }
+ size_t length() const override { return length_; }
+ void Dispose() override {}
private:
const char* data_;
@@ -1391,7 +1394,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
}
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
- if (callback != 0) {
+ if (callback != nullptr) {
Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type);
}
obj->set_length(length);
@@ -1676,7 +1679,8 @@ static void TemplateSetAccessor(
Template* template_obj, v8::Local<Name> name, Getter getter, Setter setter,
Data data, AccessControl settings, PropertyAttribute attribute,
v8::Local<AccessorSignature> signature, bool is_special_data_property,
- bool replace_on_access, SideEffectType getter_side_effect_type) {
+ bool replace_on_access, SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
auto info = Utils::OpenHandle(template_obj);
auto isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
@@ -1686,8 +1690,8 @@ static void TemplateSetAccessor(
is_special_data_property, replace_on_access);
accessor_info->set_initial_property_attributes(
static_cast<i::PropertyAttributes>(attribute));
- accessor_info->set_has_no_side_effect(getter_side_effect_type ==
- SideEffectType::kHasNoSideEffect);
+ accessor_info->set_getter_side_effect_type(getter_side_effect_type);
+ accessor_info->set_setter_side_effect_type(setter_side_effect_type);
i::ApiNatives::AddNativeDataProperty(isolate, info, accessor_info);
}
@@ -1695,29 +1699,34 @@ void Template::SetNativeDataProperty(
v8::Local<String> name, AccessorGetterCallback getter,
AccessorSetterCallback setter, v8::Local<Value> data,
PropertyAttribute attribute, v8::Local<AccessorSignature> signature,
- AccessControl settings, SideEffectType getter_side_effect_type) {
+ AccessControl settings, SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
- signature, true, false, getter_side_effect_type);
+ signature, true, false, getter_side_effect_type,
+ setter_side_effect_type);
}
void Template::SetNativeDataProperty(
v8::Local<Name> name, AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter, v8::Local<Value> data,
PropertyAttribute attribute, v8::Local<AccessorSignature> signature,
- AccessControl settings, SideEffectType getter_side_effect_type) {
+ AccessControl settings, SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
- signature, true, false, getter_side_effect_type);
+ signature, true, false, getter_side_effect_type,
+ setter_side_effect_type);
}
void Template::SetLazyDataProperty(v8::Local<Name> name,
AccessorNameGetterCallback getter,
v8::Local<Value> data,
PropertyAttribute attribute,
- SideEffectType getter_side_effect_type) {
+ SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
TemplateSetAccessor(this, name, getter,
static_cast<AccessorNameSetterCallback>(nullptr), data,
DEFAULT, attribute, Local<AccessorSignature>(), true,
- true, getter_side_effect_type);
+ true, getter_side_effect_type, setter_side_effect_type);
}
void Template::SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
@@ -1737,10 +1746,11 @@ void ObjectTemplate::SetAccessor(v8::Local<String> name,
v8::Local<Value> data, AccessControl settings,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature,
- SideEffectType getter_side_effect_type) {
+ SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
signature, i::FLAG_disable_old_api_accessors, false,
- getter_side_effect_type);
+ getter_side_effect_type, setter_side_effect_type);
}
void ObjectTemplate::SetAccessor(v8::Local<Name> name,
@@ -1749,10 +1759,11 @@ void ObjectTemplate::SetAccessor(v8::Local<Name> name,
v8::Local<Value> data, AccessControl settings,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature,
- SideEffectType getter_side_effect_type) {
+ SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
signature, i::FLAG_disable_old_api_accessors, false,
- getter_side_effect_type);
+ getter_side_effect_type, setter_side_effect_type);
}
template <typename Getter, typename Setter, typename Query, typename Descriptor,
@@ -1765,15 +1776,15 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE, i::TENURED));
obj->set_flags(0);
- if (getter != 0) SET_FIELD_WRAPPED(isolate, obj, set_getter, getter);
- if (setter != 0) SET_FIELD_WRAPPED(isolate, obj, set_setter, setter);
- if (query != 0) SET_FIELD_WRAPPED(isolate, obj, set_query, query);
- if (descriptor != 0)
+ if (getter != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_getter, getter);
+ if (setter != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_setter, setter);
+ if (query != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_query, query);
+ if (descriptor != nullptr)
SET_FIELD_WRAPPED(isolate, obj, set_descriptor, descriptor);
- if (remover != 0) SET_FIELD_WRAPPED(isolate, obj, set_deleter, remover);
- if (enumerator != 0)
+ if (remover != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_deleter, remover);
+ if (enumerator != nullptr)
SET_FIELD_WRAPPED(isolate, obj, set_enumerator, enumerator);
- if (definer != 0) SET_FIELD_WRAPPED(isolate, obj, set_definer, definer);
+ if (definer != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_definer, definer);
obj->set_can_intercept_symbols(
!(static_cast<int>(flags) &
static_cast<int>(PropertyHandlerFlags::kOnlyInterceptStrings)));
@@ -2001,24 +2012,15 @@ ScriptCompiler::CachedData::~CachedData() {
}
}
-
bool ScriptCompiler::ExternalSourceStream::SetBookmark() { return false; }
-
void ScriptCompiler::ExternalSourceStream::ResetToBookmark() { UNREACHABLE(); }
ScriptCompiler::StreamedSource::StreamedSource(ExternalSourceStream* stream,
Encoding encoding)
: impl_(new i::ScriptStreamingData(stream, encoding)) {}
-ScriptCompiler::StreamedSource::~StreamedSource() { delete impl_; }
-
-
-const ScriptCompiler::CachedData*
-ScriptCompiler::StreamedSource::GetCachedData() const {
- return impl_->cached_data.get();
-}
-
+ScriptCompiler::StreamedSource::~StreamedSource() = default;
Local<Script> UnboundScript::BindToCurrentContext() {
auto function_info =
@@ -2030,7 +2032,6 @@ Local<Script> UnboundScript::BindToCurrentContext() {
return ToApiHandle<Script>(function);
}
-
int UnboundScript::GetId() {
auto function_info =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
@@ -2157,10 +2158,6 @@ int PrimitiveArray::Length() const {
return array->length();
}
-void PrimitiveArray::Set(int index, Local<Primitive> item) {
- return Set(Isolate::GetCurrent(), index, item);
-}
-
void PrimitiveArray::Set(Isolate* v8_isolate, int index,
Local<Primitive> item) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@@ -2174,10 +2171,6 @@ void PrimitiveArray::Set(Isolate* v8_isolate, int index,
array->set(index, *i_item);
}
-Local<Primitive> PrimitiveArray::Get(int index) {
- return Get(Isolate::GetCurrent(), index);
-}
-
Local<Primitive> PrimitiveArray::Get(Isolate* v8_isolate, int index) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::Handle<i::FixedArray> array = Utils::OpenHandle(this);
@@ -2534,6 +2527,7 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
RETURN_ESCAPED(Utils::CallableToLocal(result));
}
+void ScriptCompiler::ScriptStreamingTask::Run() { data_->task->Run(); }
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
@@ -2544,10 +2538,13 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
// TODO(rmcilroy): remove CompileOptions from the API.
CHECK(options == ScriptCompiler::kNoCompileOptions);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- return i::Compiler::NewBackgroundCompileTask(source->impl(), isolate);
+ i::ScriptStreamingData* data = source->impl();
+ std::unique_ptr<i::BackgroundCompileTask> task =
+ base::make_unique<i::BackgroundCompileTask>(data, isolate);
+ data->task = std::move(task);
+ return new ScriptCompiler::ScriptStreamingTask(data);
}
-
MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
StreamedSource* v8_source,
Local<String> full_source_string,
@@ -2562,11 +2559,11 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
isolate, origin.ResourceName(), origin.ResourceLineOffset(),
origin.ResourceColumnOffset(), origin.SourceMapUrl(),
origin.HostDefinedOptions());
- i::ScriptStreamingData* streaming_data = v8_source->impl();
+ i::ScriptStreamingData* data = v8_source->impl();
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForStreamedScript(
- isolate, str, script_details, origin.Options(), streaming_data);
+ isolate, str, script_details, origin.Options(), data);
i::Handle<i::SharedFunctionInfo> result;
has_pending_exception = !maybe_function_info.ToHandle(&result);
@@ -2908,10 +2905,6 @@ void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
// --- S t a c k T r a c e ---
-Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
- return GetFrame(Isolate::GetCurrent(), index);
-}
-
Local<StackFrame> StackTrace::GetFrame(Isolate* v8_isolate,
uint32_t index) const {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@@ -3572,17 +3565,20 @@ MaybeLocal<BigInt> Value::ToBigInt(Local<Context> context) const {
RETURN_ESCAPED(result);
}
+bool Value::BooleanValue(Isolate* v8_isolate) const {
+ return Utils::OpenHandle(this)->BooleanValue(
+ reinterpret_cast<i::Isolate*>(v8_isolate));
+}
+
MaybeLocal<Boolean> Value::ToBoolean(Local<Context> context) const {
- auto obj = Utils::OpenHandle(this);
- if (obj->IsBoolean()) return ToApiHandle<Boolean>(obj);
- auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- auto val = isolate->factory()->ToBoolean(obj->BooleanValue(isolate));
- return ToApiHandle<Boolean>(val);
+ return ToBoolean(context->GetIsolate());
}
Local<Boolean> Value::ToBoolean(Isolate* v8_isolate) const {
- return ToBoolean(v8_isolate->GetCurrentContext()).ToLocalChecked();
+ auto isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ return ToApiHandle<Boolean>(
+ isolate->factory()->ToBoolean(BooleanValue(v8_isolate)));
}
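
Because ToBoolean cannot throw, the new isolate-only BooleanValue gives embedders an infallible alternative to the context-taking Maybe<bool> overload (the parameterless legacy overloads are removed below). A hypothetical embedder helper using it:

    #include "include/v8.h"

    // Boolean conversion needs no context: ToBoolean never throws.
    bool IsTruthy(v8::Isolate* isolate, v8::Local<v8::Value> value) {
      return value->BooleanValue(isolate);
    }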
@@ -3888,36 +3884,6 @@ void v8::RegExp::CheckCast(v8::Value* that) {
}
-bool Value::BooleanValue() const {
- return BooleanValue(Isolate::GetCurrent()->GetCurrentContext())
- .FromJust();
-}
-
-
-double Value::NumberValue() const {
- return NumberValue(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(std::numeric_limits<double>::quiet_NaN());
-}
-
-
-int64_t Value::IntegerValue() const {
- return IntegerValue(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(0);
-}
-
-
-uint32_t Value::Uint32Value() const {
- return Uint32Value(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(0);
-}
-
-
-int32_t Value::Int32Value() const {
- return Int32Value(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(0);
-}
-
-
Maybe<bool> Value::BooleanValue(Local<Context> context) const {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
return Just(Utils::OpenHandle(this)->BooleanValue(isolate));
@@ -4006,12 +3972,6 @@ MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
}
-bool Value::Equals(Local<Value> that) const {
- return Equals(Isolate::GetCurrent()->GetCurrentContext(), that)
- .FromMaybe(false);
-}
-
-
Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
auto self = Utils::OpenHandle(this);
@@ -4063,7 +4023,8 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception =
i::Runtime::SetObjectProperty(isolate, self, key_obj, value_obj,
- i::LanguageMode::kSloppy)
+ i::LanguageMode::kSloppy,
+ i::StoreOrigin::kMaybeKeyed)
.is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
@@ -4617,8 +4578,8 @@ static Maybe<bool> ObjectSetAccessor(
Local<Context> context, Object* self, Local<Name> name, Getter getter,
Setter setter, Data data, AccessControl settings,
PropertyAttribute attributes, bool is_special_data_property,
- bool replace_on_access,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect) {
+ bool replace_on_access, SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8_NO_SCRIPT(isolate, context, Object, SetAccessor, Nothing<bool>(),
i::HandleScope);
@@ -4629,8 +4590,8 @@ static Maybe<bool> ObjectSetAccessor(
i::Handle<i::AccessorInfo> info =
MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature,
is_special_data_property, replace_on_access);
- info->set_has_no_side_effect(getter_side_effect_type ==
- SideEffectType::kHasNoSideEffect);
+ info->set_getter_side_effect_type(getter_side_effect_type);
+ info->set_setter_side_effect_type(setter_side_effect_type);
if (info.is_null()) return Nothing<bool>();
bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
@@ -4653,11 +4614,12 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
AccessorNameSetterCallback setter,
MaybeLocal<Value> data, AccessControl settings,
PropertyAttribute attribute,
- SideEffectType getter_side_effect_type) {
+ SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
return ObjectSetAccessor(context, this, name, getter, setter,
data.FromMaybe(Local<Value>()), settings, attribute,
i::FLAG_disable_old_api_accessors, false,
- getter_side_effect_type);
+ getter_side_effect_type, setter_side_effect_type);
}
@@ -4684,19 +4646,22 @@ Maybe<bool> Object::SetNativeDataProperty(
v8::Local<v8::Context> context, v8::Local<Name> name,
AccessorNameGetterCallback getter, AccessorNameSetterCallback setter,
v8::Local<Value> data, PropertyAttribute attributes,
- SideEffectType getter_side_effect_type) {
+ SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
return ObjectSetAccessor(context, this, name, getter, setter, data, DEFAULT,
- attributes, true, false, getter_side_effect_type);
+ attributes, true, false, getter_side_effect_type,
+ setter_side_effect_type);
}
Maybe<bool> Object::SetLazyDataProperty(
v8::Local<v8::Context> context, v8::Local<Name> name,
AccessorNameGetterCallback getter, v8::Local<Value> data,
- PropertyAttribute attributes, SideEffectType getter_side_effect_type) {
+ PropertyAttribute attributes, SideEffectType getter_side_effect_type,
+ SideEffectType setter_side_effect_type) {
return ObjectSetAccessor(context, this, name, getter,
static_cast<AccessorNameSetterCallback>(nullptr),
data, DEFAULT, attributes, true, true,
- getter_side_effect_type);
+ getter_side_effect_type, setter_side_effect_type);
}
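All three installers above now thread a setter side-effect type through to AccessorInfo alongside the getter's (see set_getter_side_effect_type/set_setter_side_effect_type in the ObjectSetAccessor hunk). A hedged usage sketch; |obj|, |context| and |key| are assumed to be in scope and the callbacks are placeholders:

  void NameGetter(v8::Local<v8::Name> name,
                  const v8::PropertyCallbackInfo<v8::Value>& info) {}
  void NameSetter(v8::Local<v8::Name> name, v8::Local<v8::Value> value,
                  const v8::PropertyCallbackInfo<void>& info) {}

  bool ok = obj->SetAccessor(context, key, NameGetter, NameSetter,
                             v8::MaybeLocal<v8::Value>(), v8::DEFAULT, v8::None,
                             v8::SideEffectType::kHasNoSideEffect,  // getter
                             v8::SideEffectType::kHasSideEffect)    // setter
                .FromJust();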
Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
@@ -5343,10 +5308,6 @@ bool String::ContainsOnlyOneByte() const {
return helper.Check(*str);
}
-int String::Utf8Length() const {
- return Utf8Length(Isolate::GetCurrent());
-}
-
int String::Utf8Length(Isolate* isolate) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
str = i::String::Flatten(reinterpret_cast<i::Isolate*>(isolate), str);
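The removal of Utf8Length() here follows the commit-wide pattern: convenience overloads that fetched Isolate::GetCurrent() internally are deleted (WriteUtf8, WriteOneByte, Write, Concat, StringObject::New and String::Utf8Value/Value below), and callers pass the isolate explicitly. Migration sketch, assuming a live |isolate|:

  v8::Local<v8::String> s =
      v8::String::NewFromUtf8(isolate, "hi", v8::NewStringType::kNormal)
          .ToLocalChecked();
  int len = s->Utf8Length(isolate);           // was: s->Utf8Length()
  char buf[8];
  s->WriteUtf8(isolate, buf, sizeof(buf));    // was: s->WriteUtf8(buf, ...)
  v8::String::Utf8Value utf8(isolate, s);     // was: Utf8Value(s)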
@@ -5570,14 +5531,6 @@ static bool RecursivelySerializeToUtf8(i::String* current,
return true;
}
-
-int String::WriteUtf8(char* buffer, int capacity,
- int* nchars_ref, int options) const {
- return WriteUtf8(Isolate::GetCurrent(),
- buffer, capacity, nchars_ref, options);
-}
-
-
int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
int* nchars_ref, int options) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
@@ -5645,18 +5598,6 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string,
}
-int String::WriteOneByte(uint8_t* buffer, int start,
- int length, int options) const {
- return WriteOneByte(Isolate::GetCurrent(), buffer, start, length, options);
-}
-
-
-int String::Write(uint16_t* buffer, int start, int length,
- int options) const {
- return Write(Isolate::GetCurrent(), buffer, start, length, options);
-}
-
-
int String::WriteOneByte(Isolate* isolate, uint8_t* buffer, int start,
int length, int options) const {
return WriteHelper(reinterpret_cast<i::Isolate*>(isolate), this, buffer,
@@ -6010,16 +5951,16 @@ HeapStatistics::HeapStatistics()
malloced_memory_(0),
external_memory_(0),
peak_malloced_memory_(0),
- does_zap_garbage_(0),
+ does_zap_garbage_(false),
number_of_native_contexts_(0),
number_of_detached_contexts_(0) {}
-HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0),
- space_size_(0),
- space_used_size_(0),
- space_available_size_(0),
- physical_space_size_(0) { }
-
+HeapSpaceStatistics::HeapSpaceStatistics()
+ : space_name_(nullptr),
+ space_size_(0),
+ space_used_size_(0),
+ space_available_size_(0),
+ physical_space_size_(0) {}
HeapObjectStatistics::HeapObjectStatistics()
: object_type_(nullptr),
@@ -6604,11 +6545,6 @@ MaybeLocal<String> String::NewFromTwoByte(Isolate* isolate,
return result;
}
-Local<String> v8::String::Concat(Local<String> left,
- Local<String> right) {
- return Concat(Isolate::GetCurrent(), left, right);
-}
-
Local<String> v8::String::Concat(Isolate* v8_isolate, Local<String> left,
Local<String> right) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@@ -6793,7 +6729,6 @@ double v8::NumberObject::ValueOf() const {
}
Local<v8::Value> v8::BigIntObject::New(Isolate* isolate, int64_t value) {
- CHECK(i::FLAG_harmony_bigint);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, BigIntObject, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@@ -6835,11 +6770,6 @@ bool v8::BooleanObject::ValueOf() const {
}
-Local<v8::Value> v8::StringObject::New(Local<String> value) {
- return New(Isolate::GetCurrent(), value);
-}
-
-
Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
Local<String> value) {
i::Handle<i::String> string = Utils::OpenHandle(*value);
@@ -6981,23 +6911,6 @@ Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
return Utils::ToLocal(obj);
}
-Local<v8::Array> v8::Array::New(Isolate* isolate, Local<Value>* elements,
- size_t length) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Factory* factory = i_isolate->factory();
- LOG_API(i_isolate, Array, New);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- int len = static_cast<int>(length);
-
- i::Handle<i::FixedArray> result = factory->NewFixedArray(len);
- for (int i = 0; i < len; i++) {
- i::Handle<i::Object> element = Utils::OpenHandle(*elements[i]);
- result->set(i, *element);
- }
-
- return Utils::ToLocal(
- factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, len));
-}
uint32_t v8::Array::Length() const {
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
@@ -7103,30 +7016,30 @@ i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
i::Factory* factory = isolate->factory();
i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(table_obj),
isolate);
- const bool collect_keys =
- kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kKeys;
- const bool collect_values =
- kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kValues;
- int capacity = table->UsedCapacity();
- int max_length =
- (capacity - offset) * ((collect_keys && collect_values) ? 2 : 1);
- i::Handle<i::FixedArray> result = factory->NewFixedArray(max_length);
+ if (offset >= table->NumberOfElements()) return factory->NewJSArray(0);
+ int length = (table->NumberOfElements() - offset) *
+ (kind == MapAsArrayKind::kEntries ? 2 : 1);
+ i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
int result_index = 0;
{
i::DisallowHeapAllocation no_gc;
+ int capacity = table->UsedCapacity();
i::Oddball* the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
- for (int i = offset; i < capacity; ++i) {
+ for (int i = 0; i < capacity; ++i) {
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
- if (collect_keys) result->set(result_index++, key);
- if (collect_values) result->set(result_index++, table->ValueAt(i));
+ if (offset-- > 0) continue;
+ if (kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kKeys) {
+ result->set(result_index++, key);
+ }
+ if (kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kValues) {
+ result->set(result_index++, table->ValueAt(i));
+ }
}
}
- DCHECK_GE(max_length, result_index);
- if (result_index == 0) return factory->NewJSArray(0);
- result->Shrink(isolate, result_index);
- return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS,
- result_index);
+ DCHECK_EQ(result_index, result->length());
+ DCHECK_EQ(result_index, length);
+ return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, length);
}
} // namespace
@@ -7211,26 +7124,24 @@ i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object* table_obj,
i::Factory* factory = isolate->factory();
i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(table_obj),
isolate);
- // Elements skipped by |offset| may already be deleted.
- int capacity = table->UsedCapacity();
- int max_length = capacity - offset;
- if (max_length == 0) return factory->NewJSArray(0);
- i::Handle<i::FixedArray> result = factory->NewFixedArray(max_length);
+ int length = table->NumberOfElements() - offset;
+ if (length <= 0) return factory->NewJSArray(0);
+ i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
int result_index = 0;
{
i::DisallowHeapAllocation no_gc;
+ int capacity = table->UsedCapacity();
i::Oddball* the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
- for (int i = offset; i < capacity; ++i) {
+ for (int i = 0; i < capacity; ++i) {
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
+ if (offset-- > 0) continue;
result->set(result_index++, key);
}
}
- DCHECK_GE(max_length, result_index);
- if (result_index == 0) return factory->NewJSArray(0);
- result->Shrink(isolate, result_index);
- return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS,
- result_index);
+ DCHECK_EQ(result_index, result->length());
+ DCHECK_EQ(result_index, length);
+ return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, length);
}
} // namespace
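Both rewrites (MapAsArray above, SetAsArray here) change the meaning of |offset| from a raw hash-table slot index to a count of live entries to skip, and size the result exactly up front instead of over-allocating and shrinking. A standalone illustration of the new skip contract, where std::optional models deleted-entry holes (not V8 code):

  #include <optional>
  #include <vector>

  std::vector<int> CollectFrom(const std::vector<std::optional<int>>& slots,
                               int offset) {
    std::vector<int> out;
    for (const auto& slot : slots) {
      if (!slot.has_value()) continue;  // holes are never counted
      if (offset-- > 0) continue;       // offset consumes live entries only
      out.push_back(*slot);
    }
    return out;
  }
  // CollectFrom({{1}, std::nullopt, {2}, {3}}, 1) yields {2, 3}:
  // the hole between the first and second live entries does not eat the offset.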
@@ -7501,7 +7412,7 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
reinterpret_cast<i::Isolate*>(isolate)->global_handles()->Create(
*Utils::OpenHandle(*promise))) {}
- ~AsyncCompilationResolver() {
+ ~AsyncCompilationResolver() override {
i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
}
@@ -7540,9 +7451,6 @@ void WasmModuleObjectBuilderStreaming::Finish() {
void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal<Value> exception) {
}
-WasmModuleObjectBuilderStreaming::~WasmModuleObjectBuilderStreaming() {
-}
-
// static
v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
return new ArrayBufferAllocator();
@@ -7602,9 +7510,8 @@ void ArrayBufferDeleter(void* buffer, size_t length, void* info) {
v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
- size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
Contents contents(
- self->backing_store(), byte_length, self->allocation_base(),
+ self->backing_store(), self->byte_length(), self->allocation_base(),
self->allocation_length(),
self->is_wasm_memory() ? Allocator::AllocationMode::kReservation
: Allocator::AllocationMode::kNormal,
@@ -7632,7 +7539,7 @@ void v8::ArrayBuffer::Neuter() {
size_t v8::ArrayBuffer::ByteLength() const {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
- return static_cast<size_t>(obj->byte_length()->Number());
+ return obj->byte_length();
}
@@ -7656,6 +7563,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
ArrayBufferCreationMode mode) {
// Embedders must guarantee that the external backing store is valid.
CHECK(byte_length == 0 || data != nullptr);
+ CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@@ -7687,9 +7595,8 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
- size_t byte_offset = i::NumberToSize(self->byte_offset());
- size_t bytes_to_copy =
- i::Min(byte_length, i::NumberToSize(self->byte_length()));
+ size_t byte_offset = self->byte_offset();
+ size_t bytes_to_copy = i::Min(byte_length, self->byte_length());
if (bytes_to_copy) {
i::DisallowHeapAllocation no_gc;
i::Isolate* isolate = self->GetIsolate();
@@ -7720,19 +7627,19 @@ bool v8::ArrayBufferView::HasBuffer() const {
size_t v8::ArrayBufferView::ByteOffset() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
- return static_cast<size_t>(obj->byte_offset()->Number());
+ return obj->WasNeutered() ? 0 : obj->byte_offset();
}
size_t v8::ArrayBufferView::ByteLength() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
- return static_cast<size_t>(obj->byte_length()->Number());
+ return obj->WasNeutered() ? 0 : obj->byte_length();
}
size_t v8::TypedArray::Length() {
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
- return obj->length_value();
+ return obj->WasNeutered() ? 0 : obj->length_value();
}
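The three accessors above now short-circuit for neutered views instead of reading stale header fields. An embedder sketch, hedged: Neuter() in this release requires an external, neuterable buffer, hence the Externalize() call first.

  v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 16);
  v8::Local<v8::Uint8Array> view = v8::Uint8Array::New(ab, 0, 16);
  v8::ArrayBuffer::Contents contents = ab->Externalize();  // caller owns the store
  ab->Neuter();
  // view->ByteOffset(), view->ByteLength() and view->Length() all return 0 now.
  // The embedder must eventually free contents.Data() itself.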
static_assert(v8::TypedArray::kMaxLength == i::Smi::kMaxValue,
@@ -7840,9 +7747,8 @@ v8::SharedArrayBuffer::Contents::Contents(
v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
- size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
Contents contents(
- self->backing_store(), byte_length, self->allocation_base(),
+ self->backing_store(), self->byte_length(), self->allocation_base(),
self->allocation_length(),
self->is_wasm_memory()
? ArrayBuffer::Allocator::AllocationMode::kReservation
@@ -7858,7 +7764,7 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
size_t v8::SharedArrayBuffer::ByteLength() const {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
- return static_cast<size_t>(obj->byte_length()->Number());
+ return obj->byte_length();
}
Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
@@ -7912,8 +7818,8 @@ Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
- return Utils::ToLocal(i_isolate->SymbolFor(
- i::Heap::kPublicSymbolTableRootIndex, i_name, false));
+ return Utils::ToLocal(
+ i_isolate->SymbolFor(i::RootIndex::kPublicSymbolTable, i_name, false));
}
@@ -7921,10 +7827,11 @@ Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
return Utils::ToLocal(
- i_isolate->SymbolFor(i::Heap::kApiSymbolTableRootIndex, i_name, false));
+ i_isolate->SymbolFor(i::RootIndex::kApiSymbolTable, i_name, false));
}
#define WELL_KNOWN_SYMBOLS(V) \
+ V(AsyncIterator, async_iterator) \
V(HasInstance, has_instance) \
V(IsConcatSpreadable, is_concat_spreadable) \
V(Iterator, iterator) \
@@ -7961,8 +7868,8 @@ Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
- Local<Symbol> result = Utils::ToLocal(i_isolate->SymbolFor(
- i::Heap::kApiPrivateSymbolTableRootIndex, i_name, true));
+ Local<Symbol> result = Utils::ToLocal(
+ i_isolate->SymbolFor(i::RootIndex::kApiPrivateSymbolTable, i_name, true));
return v8::Local<Private>(reinterpret_cast<Private*>(*result));
}
@@ -8003,7 +7910,6 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
}
Local<BigInt> v8::BigInt::New(Isolate* isolate, int64_t value) {
- CHECK(i::FLAG_harmony_bigint);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
i::Handle<i::BigInt> result = i::BigInt::FromInt64(internal_isolate, value);
@@ -8011,7 +7917,6 @@ Local<BigInt> v8::BigInt::New(Isolate* isolate, int64_t value) {
}
Local<BigInt> v8::BigInt::NewFromUnsigned(Isolate* isolate, uint64_t value) {
- CHECK(i::FLAG_harmony_bigint);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
i::Handle<i::BigInt> result = i::BigInt::FromUint64(internal_isolate, value);
@@ -8021,7 +7926,6 @@ Local<BigInt> v8::BigInt::NewFromUnsigned(Isolate* isolate, uint64_t value) {
MaybeLocal<BigInt> v8::BigInt::NewFromWords(Local<Context> context,
int sign_bit, int word_count,
const uint64_t* words) {
- CHECK(i::FLAG_harmony_bigint);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8_NO_SCRIPT(isolate, context, BigInt, NewFromWords,
MaybeLocal<BigInt>(), InternalEscapableScope);
@@ -8186,6 +8090,11 @@ void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
isolate->heap()->SetEmbedderHeapTracer(tracer);
}
+EmbedderHeapTracer* Isolate::GetEmbedderHeapTracer() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->heap()->GetEmbedderHeapTracer();
+}
+
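The setter finally gets a matching getter. Sketch, assuming |tracer| points to a concrete v8::EmbedderHeapTracer subclass the embedder already implements (the subclass itself is elided here):

  isolate->SetEmbedderHeapTracer(tracer);
  // New: the wiring can be read back, e.g. for assertions or teardown logic.
  v8::EmbedderHeapTracer* installed = isolate->GetEmbedderHeapTracer();
  assert(installed == tracer);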
void Isolate::SetGetExternallyAllocatedMemoryInBytesCallback(
GetExternallyAllocatedMemoryInBytesCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8225,9 +8134,9 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
kGCCallbackFlagForced);
} else {
DCHECK_EQ(kFullGarbageCollection, type);
- reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
- i::Heap::kAbortIncrementalMarkingMask,
- i::GarbageCollectionReason::kTesting, kGCCallbackFlagForced);
+ reinterpret_cast<i::Isolate*>(this)->heap()->PreciseCollectAllGarbage(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting,
+ kGCCallbackFlagForced);
}
}
@@ -8296,7 +8205,11 @@ void Isolate::Initialize(Isolate* isolate,
if (params.entry_hook || !i::Snapshot::Initialize(i_isolate)) {
// If snapshot data was provided and we failed to deserialize, it must
// have been corrupted.
- CHECK_NULL(i_isolate->snapshot_blob());
+ if (i_isolate->snapshot_blob() != nullptr) {
+ FATAL(
+ "Failed to deserialize the V8 snapshot blob. This can mean that the "
+ "snapshot blob file is corrupted or missing.");
+ }
base::ElapsedTimer timer;
if (i::FLAG_profile_deserialization) timer.Start();
i_isolate->Init(nullptr);

@@ -8366,6 +8279,11 @@ void Isolate::SetHostInitializeImportMetaObjectCallback(
isolate->SetHostInitializeImportMetaObjectCallback(callback);
}
+void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetPrepareStackTraceCallback(callback);
+}
+
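This exposes Error.prepareStackTrace-style customization to embedders. A hedged sketch of a conforming callback (signature as declared in this release's v8.h; the body is illustrative only):

  v8::MaybeLocal<v8::Value> PrepareStackTrace(v8::Local<v8::Context> context,
                                              v8::Local<v8::Value> error,
                                              v8::Local<v8::Array> sites) {
    // The returned value becomes error.stack for |error|; |sites| holds the
    // CallSite objects.
    return v8::String::NewFromUtf8(context->GetIsolate(), "stack withheld",
                                   v8::NewStringType::kNormal)
        .ToLocalChecked();
  }

  // Installed once, during isolate setup:
  isolate->SetPrepareStackTraceCallback(PrepareStackTrace);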
Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate* isolate,
Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
@@ -8791,17 +8709,17 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) {
void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- if (isolate->heap()->memory_allocator()->code_range()->valid()) {
- *start = reinterpret_cast<void*>(
- isolate->heap()->memory_allocator()->code_range()->start());
- *length_in_bytes =
- isolate->heap()->memory_allocator()->code_range()->size();
- } else {
- *start = nullptr;
- *length_in_bytes = 0;
- }
+ const base::AddressRegion& code_range =
+ isolate->heap()->memory_allocator()->code_range();
+ *start = reinterpret_cast<void*>(code_range.begin());
+ *length_in_bytes = code_range.size();
}
+MemoryRange Isolate::GetEmbeddedCodeRange() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return {reinterpret_cast<const void*>(isolate->embedded_blob()),
+ isolate->embedded_blob_size()};
+}
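Together with GetCodeRange above, external unwinders and profilers can now map PCs in both the dynamic code space and the embedded-builtins blob. Usage sketch (field names per v8::MemoryRange in this release's v8.h):

  void* start = nullptr;
  size_t size = 0;
  isolate->GetCodeRange(&start, &size);                        // JIT code range
  v8::MemoryRange embedded = isolate->GetEmbeddedCodeRange();  // builtins blob
  // embedded.start and embedded.length_in_bytes may describe an empty range
  // when V8 runs without an embedded blob.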
#define CALLBACK_SETTER(ExternalName, Type, InternalName) \
void Isolate::Set##ExternalName(Type callback) { \
@@ -8986,9 +8904,6 @@ bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8Isolate) {
return isolate->IsRunningMicrotasks();
}
-String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
- : Utf8Value(Isolate::GetCurrent(), obj) {}
-
String::Utf8Value::Utf8Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
: str_(nullptr), length_(0) {
if (obj.IsEmpty()) return;
@@ -9008,9 +8923,6 @@ String::Utf8Value::~Utf8Value() {
i::DeleteArray(str_);
}
-String::Value::Value(v8::Local<v8::Value> obj)
- : Value(Isolate::GetCurrent(), obj) {}
-
String::Value::Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
: str_(nullptr), length_(0) {
if (obj.IsEmpty()) return;
@@ -9192,7 +9104,10 @@ int debug::Script::ColumnOffset() const {
std::vector<int> debug::Script::LineEnds() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
- if (script->type() == i::Script::TYPE_WASM) return std::vector<int>();
+ if (script->type() == i::Script::TYPE_WASM &&
+ this->SourceMappingURL().IsEmpty()) {
+ return std::vector<int>();
+ }
i::Isolate* isolate = script->GetIsolate();
i::HandleScope scope(isolate);
i::Script::InitLineEnds(script);
@@ -9281,7 +9196,8 @@ bool debug::Script::GetPossibleBreakpoints(
std::vector<debug::BreakLocation>* locations) const {
CHECK(!start.IsEmpty());
i::Handle<i::Script> script = Utils::OpenHandle(this);
- if (script->type() == i::Script::TYPE_WASM) {
+ if (script->type() == i::Script::TYPE_WASM &&
+ this->SourceMappingURL().IsEmpty()) {
i::WasmModuleObject* module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
return module_object->GetPossibleBreakpoints(start, end, locations);
@@ -9332,9 +9248,13 @@ bool debug::Script::GetPossibleBreakpoints(
int debug::Script::GetSourceOffset(const debug::Location& location) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
- return i::WasmModuleObject::cast(script->wasm_module_object())
- ->GetFunctionOffset(location.GetLineNumber()) +
- location.GetColumnNumber();
+ if (this->SourceMappingURL().IsEmpty()) {
+ return i::WasmModuleObject::cast(script->wasm_module_object())
+ ->GetFunctionOffset(location.GetLineNumber()) +
+ location.GetColumnNumber();
+ }
+ DCHECK_EQ(0, location.GetLineNumber());
+ return location.GetColumnNumber();
}
int line = std::max(location.GetLineNumber() - script->line_offset(), 0);
@@ -9777,10 +9697,10 @@ int debug::GetNativeAccessorDescriptor(v8::Local<v8::Context> context,
}
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
int result = 0;
-#define IS_BUILTIN_ACESSOR(name, _) \
+#define IS_BUILTIN_ACESSOR(_, name, ...) \
if (*structure == *isolate->factory()->name##_accessor()) \
result |= static_cast<int>(debug::NativeAccessorType::IsBuiltin);
- ACCESSOR_INFO_LIST(IS_BUILTIN_ACESSOR)
+ ACCESSOR_INFO_LIST_GENERATOR(IS_BUILTIN_ACESSOR, /* not used */)
#undef IS_BUILTIN_ACESSOR
i::Handle<i::AccessorInfo> accessor_info =
i::Handle<i::AccessorInfo>::cast(structure);
@@ -9826,7 +9746,7 @@ debug::PostponeInterruptsScope::PostponeInterruptsScope(v8::Isolate* isolate)
new i::PostponeInterruptsScope(reinterpret_cast<i::Isolate*>(isolate),
i::StackGuard::API_INTERRUPT)) {}
-debug::PostponeInterruptsScope::~PostponeInterruptsScope() {}
+debug::PostponeInterruptsScope::~PostponeInterruptsScope() = default;
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
@@ -9950,6 +9870,47 @@ debug::TypeProfile::ScriptData debug::TypeProfile::GetScriptData(
return ScriptData(i, type_profile_);
}
+v8::MaybeLocal<v8::Value> debug::WeakMap::Get(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> key) {
+ PREPARE_FOR_EXECUTION(context, WeakMap, Get, Value);
+ auto self = Utils::OpenHandle(this);
+ Local<Value> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
+ has_pending_exception =
+ !ToLocal<Value>(i::Execution::Call(isolate, isolate->weakmap_get(), self,
+ arraysize(argv), argv),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
+v8::MaybeLocal<debug::WeakMap> debug::WeakMap::Set(
+ v8::Local<v8::Context> context, v8::Local<v8::Value> key,
+ v8::Local<v8::Value> value) {
+ PREPARE_FOR_EXECUTION(context, WeakMap, Set, WeakMap);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
+ Utils::OpenHandle(*value)};
+ has_pending_exception = !i::Execution::Call(isolate, isolate->weakmap_set(),
+ self, arraysize(argv), argv)
+ .ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(WeakMap);
+ RETURN_ESCAPED(Local<WeakMap>::Cast(Utils::ToLocal(result)));
+}
+
+Local<debug::WeakMap> debug::WeakMap::New(v8::Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, WeakMap, New);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::Handle<i::JSWeakMap> obj = i_isolate->factory()->NewJSWeakMap();
+ return ToApiHandle<debug::WeakMap>(obj);
+}
+
+debug::WeakMap* debug::WeakMap::Cast(v8::Value* value) {
+ return static_cast<debug::WeakMap*>(value);
+}
+
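Usage sketch for the new inspector-side handle type; |isolate|, |context|, |key| and |value| are assumed to be in scope, and error paths are collapsed with ToLocalChecked():

  v8::Local<v8::debug::WeakMap> map = v8::debug::WeakMap::New(isolate);
  map->Set(context, key, value).ToLocalChecked();  // returns the map itself
  v8::Local<v8::Value> found =
      map->Get(context, key).ToLocalChecked();     // undefined when absent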
const char* CpuProfileNode::GetFunctionNameStr() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return node->entry()->name();
@@ -10134,11 +10095,6 @@ void CpuProfiler::SetIdle(bool is_idle) {
isolate->SetIdle(is_idle);
}
-void CpuProfiler::UseDetailedSourcePositionsForProfiling(Isolate* isolate) {
- reinterpret_cast<i::Isolate*>(isolate)
- ->set_detailed_source_positions_for_profiling(true);
-}
-
uintptr_t CodeEvent::GetCodeStartAddress() {
return reinterpret_cast<i::CodeEvent*>(this)->code_start_address;
}
@@ -10546,9 +10502,9 @@ void EmbedderHeapTracer::GarbageCollectionForTesting(
CHECK(i::FLAG_expose_gc);
i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
heap->SetEmbedderStackStateForNextFinalizaton(stack_state);
- heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask,
- i::GarbageCollectionReason::kTesting,
- kGCCallbackFlagForced);
+ heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting,
+ kGCCallbackFlagForced);
}
bool EmbedderHeapTracer::AdvanceTracing(double deadline_in_ms) {
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index ae0ce350a4..e5f5c7da70 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -116,6 +116,7 @@ class RegisteredExtension {
V(Proxy, JSProxy) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
+ V(debug::WeakMap, JSWeakMap) \
V(Promise, JSPromise) \
V(Primitive, Object) \
V(PrimitiveArray, FixedArray) \
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 0bfdd770f5..db1ee5467c 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -27,7 +27,7 @@ namespace internal {
// Note that length_ (whose value is in the integer range) is defined
// as intptr_t to provide endian-neutrality on 64-bit archs.
-class Arguments BASE_EMBEDDED {
+class Arguments {
public:
Arguments(int length, Object** arguments)
: length_(length), arguments_(arguments) {
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 163fa4c219..758fcd1a68 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -46,6 +46,7 @@
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/string-constants.h"
namespace v8 {
namespace internal {
@@ -417,6 +418,13 @@ Operand Operand::EmbeddedCode(CodeStub* stub) {
return result;
}
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(str);
+ return result;
+}
+
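Deferred string constants follow the same HeapObjectRequest path as heap numbers and code stubs: record the request at assembly time, materialize it in AllocateAndInstallRequestedHeapObjects once an isolate exists (the arm64 counterpart appears below). In-tree usage sketch; GetStringConstant() is a hypothetical stand-in for wherever the lowering obtains the constant:

  const StringConstantBase* str = GetStringConstant();  // hypothetical source
  masm->mov(r0, Operand::EmbeddedStringConstant(str));
  // Assembler::GetCode(isolate, ...) later patches the constant-pool slot
  // with str->AllocateStringConstant(isolate).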
MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am)
: rn_(rn), rm_(no_reg), offset_(offset), am_(am) {
// Accesses below the stack pointer are not safe, and are prohibited by the
@@ -472,6 +480,7 @@ void NeonMemOperand::SetAlignment(int align) {
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
@@ -483,6 +492,12 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
+ case HeapObjectRequest::kStringConstant: {
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ object = str->AllocateStringConstant(isolate);
+ break;
+ }
}
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
Memory<Address>(constant_pool_entry_address(pc, 0 /* unused */)) =
@@ -1418,7 +1433,7 @@ int Assembler::branch_offset(Label* L) {
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
- RecordRelocInfo(rmode);
+ if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
const bool b_imm_check = is_int24(imm24);
@@ -1432,7 +1447,7 @@ void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
}
void Assembler::bl(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
- RecordRelocInfo(rmode);
+ if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
const bool bl_imm_check = is_int24(imm24);
@@ -5103,13 +5118,7 @@ void Assembler::dq(uint64_t value) {
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- if (options().disable_reloc_info_for_patching) return;
- if (RelocInfo::IsNone(rmode) ||
- // Don't record external references unless the heap will be serialized.
- (RelocInfo::IsOnlyForSerializer(rmode) &&
- !options().record_reloc_info_for_serialization && !emit_debug_code())) {
- return;
- }
+ if (!ShouldRecordRelocInfo(rmode)) return;
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
reloc_info_writer.Write(&rinfo);
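The deleted early-outs move into a shared ShouldRecordRelocInfo predicate on the assembler base (the arm64 RecordRelocInfo below is refactored the same way). Reconstructed from the lines removed here, the helper is equivalent to:

  bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const {
    DCHECK(!RelocInfo::IsNone(rmode));  // callers now filter IsNone themselves
    if (options().disable_reloc_info_for_patching) return false;
    // Don't record external references unless the heap will be serialized.
    return !RelocInfo::IsOnlyForSerializer(rmode) ||
           options().record_reloc_info_for_serialization || emit_debug_code();
  }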
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index fb36702882..1bfa58b853 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -393,7 +393,7 @@ enum Coprocessor {
// Machine instruction Operands
// Class Operand represents a shifter operand in data processing instructions
-class Operand BASE_EMBEDDED {
+class Operand {
public:
// immediate
V8_INLINE explicit Operand(int32_t immediate,
@@ -425,6 +425,7 @@ class Operand BASE_EMBEDDED {
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
+ static Operand EmbeddedStringConstant(const StringConstantBase* str);
// Return true if this is a register operand.
bool IsRegister() const {
@@ -498,7 +499,7 @@ class Operand BASE_EMBEDDED {
// Class MemOperand represents a memory operand in load and store instructions
-class MemOperand BASE_EMBEDDED {
+class MemOperand {
public:
// [rn +/- offset] Offset/NegOffset
// [rn +/- offset]! PreIndex/NegPreIndex
@@ -557,7 +558,7 @@ class MemOperand BASE_EMBEDDED {
// Class NeonMemOperand represents a memory operand in load and
// store NEON instructions
-class NeonMemOperand BASE_EMBEDDED {
+class NeonMemOperand {
public:
// [rn {:align}] Offset
// [rn {:align}]! PostIndex
@@ -580,7 +581,7 @@ class NeonMemOperand BASE_EMBEDDED {
// Class NeonListOperand represents a list of NEON registers
-class NeonListOperand BASE_EMBEDDED {
+class NeonListOperand {
public:
explicit NeonListOperand(DoubleRegister base, int register_count = 1)
: base_(base), register_count_(register_count) {}
@@ -1693,7 +1694,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
friend class UseScratchRegisterScope;
};
-class EnsureSpace BASE_EMBEDDED {
+class EnsureSpace {
public:
V8_INLINE explicit EnsureSpace(Assembler* assembler);
};
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index bb5becefb8..c7eaef1325 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -131,7 +131,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
IsolateAddressId::kPendingExceptionAddress, isolate())));
}
__ str(r0, MemOperand(scratch));
- __ LoadRoot(r0, Heap::kExceptionRootIndex);
+ __ LoadRoot(r0, RootIndex::kException);
__ b(&exit);
// Invoke: Link this frame into the handler chain.
@@ -418,7 +418,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ LeaveExitFrame(false, r4, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
- __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r4, RootIndex::kTheHoleValue);
__ Move(r6, ExternalReference::scheduled_exception_address(isolate));
__ ldr(r5, MemOperand(r6));
__ cmp(r4, r5);
@@ -469,14 +469,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
// new target
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// call data
__ push(call_data);
Register scratch0 = call_data;
Register scratch1 = r5;
- __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch0, RootIndex::kUndefinedValue);
// return value
__ push(scratch0);
// return value default
@@ -549,7 +549,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Push data from AccessorInfo.
__ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ push(scratch);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(isolate()));
__ Push(scratch, holder);
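These LoadRoot/PushRoot hunks are instances of a commit-wide rename: the untyped Heap::RootListIndex constants become the scoped RootIndex enum, and the same substitution repeats through the macro-assemblers and code stubs below. The pattern in macro-assembler code:

  __ LoadRoot(r0, RootIndex::kException);   // was Heap::kExceptionRootIndex
  __ PushRoot(RootIndex::kUndefinedValue);  // was Heap::kUndefinedValueRootIndex
  // RootIndex is an enum class, so stray integer conversions no longer compile.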
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 39f756d152..7dc4ced321 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -9,7 +9,6 @@
#include "src/arm/assembler-arm-inl.h"
#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
-#include "src/isolate.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -19,17 +18,17 @@ namespace internal {
#if defined(V8_HOST_ARCH_ARM)
-MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
- MemCopyUint8Function stub) {
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
Register dest = r0;
Register src = r1;
@@ -166,11 +165,12 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ Ret();
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@@ -178,16 +178,17 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// Convert 8 to 16. The number of characters to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
- Isolate* isolate, MemCopyUint16Uint8Function stub) {
+ MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
Register dest = r0;
Register src = r1;
@@ -256,25 +257,27 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
}
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif
-UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
return nullptr;
#else
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
__ MovFromFloatParameter(d0);
__ vsqrt(d0, d0);
@@ -282,12 +285,13 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 8af455fc6e..f3be7a7c4a 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -88,9 +88,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments (on the stack, not including receiver)
// r1 : the target to call
- // r2 : arguments list (FixedArray)
// r4 : arguments list length (untagged)
- Register registers[] = {r1, r0, r2, r4};
+ // r2 : arguments list (FixedArray)
+ Register registers[] = {r1, r0, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -125,9 +125,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific(
// r0 : number of arguments (on the stack, not including receiver)
// r1 : the target to call
// r3 : the new target
- // r2 : arguments list (FixedArray)
// r4 : arguments list length (untagged)
- Register registers[] = {r1, r3, r0, r2, r4};
+ // r2 : arguments list (FixedArray)
+ Register registers[] = {r1, r3, r0, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -193,7 +193,7 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // JSFunction
@@ -237,10 +237,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (not including receiver)
- r3, // new target
+ r4, // address of the first argument
r1, // constructor to call
+ r3, // new target
r2, // allocation site feedback if available, undefined otherwise
- r4 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 09db465d59..cdf9dad1d9 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -130,7 +130,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- Heap::kBuiltinsConstantsTableRootIndex));
+ RootIndex::kBuiltinsConstantsTable));
// The ldr call below could end up clobbering ip when the offset does not fit
// into 12 bits (and thus needs to be loaded from the constant pool). In that
@@ -147,7 +147,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
reg = r7;
}
- LoadRoot(reg, Heap::kBuiltinsConstantsTableRootIndex);
+ LoadRoot(reg, RootIndex::kBuiltinsConstantsTable);
ldr(destination, MemOperand(reg, offset));
if (could_clobber_ip) {
@@ -527,7 +527,7 @@ void MacroAssembler::Store(Register src,
}
}
-void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), cond);
}
@@ -615,8 +615,6 @@ void TurboAssembler::CallRecordWriteStub(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register isolate_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
@@ -628,7 +626,6 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter);
Pop(object_parameter);
- Move(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -1520,7 +1517,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
- LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ LoadRoot(r3, RootIndex::kUndefinedValue);
}
Label done;
@@ -1642,9 +1639,7 @@ void MacroAssembler::CompareInstanceType(Register map,
cmp(type_reg, Operand(type));
}
-
-void MacroAssembler::CompareRoot(Register obj,
- Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(obj != scratch);
@@ -2053,7 +2048,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
if (emit_debug_code()) {
Label done_checking;
AssertNotSmi(object);
- CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ CompareRoot(object, RootIndex::kUndefinedValue);
b(eq, &done_checking);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 055b6e6fbc..ef75c3fe4c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -71,6 +71,9 @@ enum TargetAddressStorageMode {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
+ TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
+ : TurboAssemblerBase(options, buffer, buffer_size) {}
+
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
@@ -481,11 +484,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
// Load an object from the root table.
- void LoadRoot(Register destination, Heap::RootListIndex index) override {
+ void LoadRoot(Register destination, RootIndex index) override {
LoadRoot(destination, index, al);
}
- void LoadRoot(Register destination, Heap::RootListIndex index,
- Condition cond);
+ void LoadRoot(Register destination, RootIndex index, Condition cond);
// Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
@@ -566,10 +568,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
+ MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
+ : TurboAssembler(options, buffer, size) {}
+
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
+
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
@@ -713,8 +719,8 @@ class MacroAssembler : public TurboAssembler {
// Compare the object in a register to a value from the root list.
// Acquires a scratch register.
- void CompareRoot(Register obj, Heap::RootListIndex index);
- void PushRoot(Heap::RootListIndex index) {
+ void CompareRoot(Register obj, RootIndex index);
+ void PushRoot(RootIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
@@ -722,14 +728,13 @@ class MacroAssembler : public TurboAssembler {
}
// Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
CompareRoot(with, index);
b(eq, if_equal);
}
// Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(Register with, Heap::RootListIndex index,
- Label* if_not_equal) {
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
CompareRoot(with, index);
b(ne, if_not_equal);
}
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index b1e8421876..e9d74104d3 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -3212,15 +3212,15 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
- lazily_initialize_fast_sqrt(isolate_);
+ lazily_initialize_fast_sqrt();
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm).get_scalar();
- double dd_value = fast_sqrt(dm_value, isolate_);
+ double dd_value = fast_sqrt(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m).get_scalar();
- float sd_value = fast_sqrt(sm_value, isolate_);
+ float sd_value = fast_sqrt(sm_value);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
@@ -5282,10 +5282,10 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
src[i] = bit_cast<uint32_t>(result);
}
} else {
- lazily_initialize_fast_sqrt(isolate_);
+ lazily_initialize_fast_sqrt();
for (int i = 0; i < 4; i++) {
float radicand = bit_cast<float>(src[i]);
- float result = 1.0f / fast_sqrt(radicand, isolate_);
+ float result = 1.0f / fast_sqrt(radicand);
result = canonicalizeNaN(result);
src[i] = bit_cast<uint32_t>(result);
}
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 52df8143ef..5a163b06fd 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -341,7 +341,9 @@ Immediate Operand::immediate_for_heap_object_request() const {
DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT) ||
(heap_object_request().kind() == HeapObjectRequest::kCodeStub &&
- immediate_.rmode() == RelocInfo::CODE_TARGET));
+ immediate_.rmode() == RelocInfo::CODE_TARGET) ||
+ (heap_object_request().kind() == HeapObjectRequest::kStringConstant &&
+ immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT));
return immediate_;
}
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index d41b1a7d7f..eb581b472b 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -36,6 +36,7 @@
#include "src/code-stubs.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
+#include "src/string-constants.h"
namespace v8 {
namespace internal {
@@ -583,6 +584,7 @@ void Assembler::Reset() {
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
switch (request.kind()) {
@@ -601,6 +603,13 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->GetCode());
break;
}
+ case HeapObjectRequest::kStringConstant: {
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ set_target_address_at(pc, 0 /* unused */,
+ str->AllocateStringConstant(isolate).address());
+ break;
+ }
}
}
}
@@ -1717,6 +1726,13 @@ Operand Operand::EmbeddedCode(CodeStub* stub) {
return result;
}
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.heap_object_request_.emplace(str);
+ DCHECK(result.IsHeapObjectRequest());
+ return result;
+}
+
void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
if (operand.IsHeapObjectRequest()) {
RequestHeapObject(operand.heap_object_request());
@@ -4751,14 +4767,6 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
ConstantPoolMode constant_pool_mode) {
- // Non-relocatable constants should not end up in the literal pool.
- DCHECK(!RelocInfo::IsNone(rmode));
- if (options().disable_reloc_info_for_patching) return;
-
- // We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
- bool write_reloc_info = true;
-
if ((rmode == RelocInfo::COMMENT) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
@@ -4772,23 +4780,22 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else if (constant_pool_mode == NEEDS_POOL_ENTRY) {
- write_reloc_info = constpool_.RecordEntry(data, rmode);
+ bool new_constpool_entry = constpool_.RecordEntry(data, rmode);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
+ if (!new_constpool_entry) return;
}
// For modes that cannot use the constant pool, a different sequence of
// instructions will be emitted by this function's caller.
- if (write_reloc_info) {
- // Don't record external references unless the heap will be serialized.
- if (RelocInfo::IsOnlyForSerializer(rmode) &&
- !options().record_reloc_info_for_serialization && !emit_debug_code()) {
- return;
- }
- DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
- reloc_info_writer.Write(&rinfo);
- }
+ if (!ShouldRecordRelocInfo(rmode)) return;
+
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
+
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
+ reloc_info_writer.Write(&rinfo);
}
void Assembler::near_jump(int offset, RelocInfo::Mode rmode) {
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index b42b80f9ca..0432708fd1 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -718,6 +718,7 @@ class Operand {
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
+ static Operand EmbeddedStringConstant(const StringConstantBase* str);
inline bool IsHeapObjectRequest() const;
inline HeapObjectRequest heap_object_request() const;
@@ -3624,8 +3625,7 @@ class PatchingAssembler : public Assembler {
void PatchSubSp(uint32_t immediate);
};
-
-class EnsureSpace BASE_EMBEDDED {
+class EnsureSpace {
public:
explicit EnsureSpace(Assembler* assembler) {
assembler->CheckBufferSpace();
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 328983f42c..9b8114c9bf 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -124,7 +124,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
IsolateAddressId::kPendingExceptionAddress, isolate())));
}
__ Str(code_entry, MemOperand(x10));
- __ LoadRoot(x0, Heap::kExceptionRootIndex);
+ __ LoadRoot(x0, RootIndex::kException);
__ B(&exit);
// Invoke: Link this frame into the handler chain.
@@ -434,8 +434,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
__ Ldr(x5, MemOperand(x5));
- __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
- &promote_scheduled_exception);
+ __ JumpIfNotRoot(x5, RootIndex::kTheHoleValue, &promote_scheduled_exception);
__ DropSlots(stack_space);
__ Ret();
@@ -484,7 +483,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
Register undef = x7;
- __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(undef, RootIndex::kUndefinedValue);
// Push new target, call data.
__ Push(undef, call_data);
@@ -562,7 +561,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
name));
__ Ldr(data, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(undef, RootIndex::kUndefinedValue);
__ Mov(isolate_address, ExternalReference::isolate_address(isolate()));
__ Ldr(name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index ad77033280..180e3f54b7 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -8,7 +8,6 @@
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/simulator-arm64.h"
#include "src/codegen.h"
-#include "src/isolate.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -16,9 +15,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
- return nullptr;
-}
+UnaryMathFunction CreateSqrtFunction() { return nullptr; }
#undef __
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 389f4818d5..1d238e2d32 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -291,10 +291,8 @@ M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
#define DECLARE_INSTRUCTION_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1) \
DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2)
-#define NOTHING(A, B)
INSTRUCTION_FIELDS_LIST(DECLARE_INSTRUCTION_FIELDS_OFFSETS)
SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
-#undef NOTHING
#undef DECLARE_FIELDS_OFFSETS
#undef DECLARE_INSTRUCTION_FIELDS_OFFSETS
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index bb1c22aff5..905cc51a57 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -89,9 +89,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments (on the stack, not including receiver)
// x1 : the target to call
- // x2 : arguments list (FixedArray)
// x4 : arguments list length (untagged)
- Register registers[] = {x1, x0, x2, x4};
+ // x2 : arguments list (FixedArray)
+ Register registers[] = {x1, x0, x4, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -126,9 +126,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific(
// x0 : number of arguments (on the stack, not including receiver)
// x1 : the target to call
// x3 : the new target
- // x2 : arguments list (FixedArray)
// x4 : arguments list length (untagged)
- Register registers[] = {x1, x3, x0, x2, x4};
+ // x2 : arguments list (FixedArray)
+ Register registers[] = {x1, x3, x0, x4, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -198,7 +198,7 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // JSFunction
@@ -242,10 +242,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
- x3, // new target
+ x4, // address of the first argument
x1, // constructor to call
+ x3, // new target
x2, // allocation site feedback if available, undefined otherwise
- x4 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index b15ab47473..97a75e5758 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1516,7 +1516,7 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
Fsub(dst, src, fp_zero);
}
-void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
// TODO(jbramley): Most root values are constants, and can be synthesized
// without a load. Refer to the ARM back end for details.
Ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
@@ -1646,7 +1646,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
Register scratch = temps.AcquireX();
Label done_checking;
AssertNotSmi(object);
- JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
+ JumpIfRoot(object, RootIndex::kUndefinedValue, &done_checking);
Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
@@ -1727,7 +1727,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
}
void MacroAssembler::JumpToInstructionStream(Address entry) {
- Mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Ldr(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Br(kOffHeapTrampolineRegister);
}
@@ -1806,8 +1806,8 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- Heap::kBuiltinsConstantsTableRootIndex));
- LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
+ RootIndex::kBuiltinsConstantsTable));
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
Ldr(destination,
FieldMemOperand(destination,
FixedArray::kHeaderSize + constant_index * kPointerSize));
@@ -1905,7 +1905,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(scratch, cond);
return;
}
@@ -1963,7 +1963,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
return;
}
@@ -2225,7 +2225,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
- LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+ LoadRoot(x3, RootIndex::kUndefinedValue);
}
Label done;
@@ -2597,8 +2597,7 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
DecodeField<Map::ElementsKindBits>(result);
}
-void MacroAssembler::CompareRoot(const Register& obj,
- Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
DCHECK(!AreAliased(obj, temp));
@@ -2606,17 +2605,13 @@ void MacroAssembler::CompareRoot(const Register& obj,
Cmp(obj, temp);
}
-
-void MacroAssembler::JumpIfRoot(const Register& obj,
- Heap::RootListIndex index,
+void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index,
Label* if_equal) {
CompareRoot(obj, index);
B(eq, if_equal);
}
-
-void MacroAssembler::JumpIfNotRoot(const Register& obj,
- Heap::RootListIndex index,
+void MacroAssembler::JumpIfNotRoot(const Register& obj, RootIndex index,
Label* if_not_equal) {
CompareRoot(obj, index);
B(ne, if_not_equal);
@@ -2823,8 +2818,6 @@ void TurboAssembler::CallRecordWriteStub(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register isolate_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
@@ -2834,7 +2827,6 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter, object_parameter);
- Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -2915,8 +2907,7 @@ void TurboAssembler::AssertUnreachable(AbortReason reason) {
if (emit_debug_code()) Abort(reason);
}
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index,
+void MacroAssembler::AssertRegisterIsRoot(Register reg, RootIndex index,
AbortReason reason) {
if (emit_debug_code()) {
CompareRoot(reg, index);
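
Note: the Heap::RootListIndex to RootIndex renames that dominate this file replace a nested unscoped enum with a standalone scoped enum. A minimal standalone sketch of why that matters (hypothetical declarations, not V8's actual ones):

    #include <cstdint>

    // Old shape: an unscoped enum nested in Heap; values convert to int
    // freely and drag a Heap dependency into every signature using them.
    struct Heap {
      enum RootListIndex { kUndefinedValueRootIndex, kNullValueRootIndex };
    };

    // New shape: a standalone scoped enum with an explicit underlying type.
    enum class RootIndex : std::uint16_t { kUndefinedValue, kNullValue };

    void LoadRoot(RootIndex index) { static_cast<void>(index); }

    int main() {
      LoadRoot(RootIndex::kUndefinedValue);  // OK
      // LoadRoot(0);                        // error: no implicit conversion
    }
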
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index a2862748a6..8648ff0439 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -180,6 +180,9 @@ enum PreShiftImmMode {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
+ TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
+ : TurboAssemblerBase(options, buffer, buffer_size) {}
+
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
@@ -1126,7 +1129,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef DECLARE_FUNCTION
// Load an object from the root table.
- void LoadRoot(Register destination, Heap::RootListIndex index) override;
+ void LoadRoot(Register destination, RootIndex index) override;
inline void Ret(const Register& xn = lr);
@@ -1262,10 +1265,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
class MacroAssembler : public TurboAssembler {
public:
+ MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
+ : TurboAssembler(options, buffer, size) {}
+
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
+
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
@@ -1821,17 +1828,13 @@ class MacroAssembler : public TurboAssembler {
void LoadElementsKindFromMap(Register result, Register map);
// Compare the object in a register to a value from the root list.
- void CompareRoot(const Register& obj, Heap::RootListIndex index);
+ void CompareRoot(const Register& obj, RootIndex index);
// Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(const Register& obj,
- Heap::RootListIndex index,
- Label* if_equal);
+ void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal);
// Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(const Register& obj,
- Heap::RootListIndex index,
- Label* if_not_equal);
+ void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);
// Compare the contents of a register with an operand, and branch to true,
// false or fall through, depending on condition.
@@ -1944,7 +1947,7 @@ class MacroAssembler : public TurboAssembler {
// Debugging.
void AssertRegisterIsRoot(
- Register reg, Heap::RootListIndex index,
+ Register reg, RootIndex index,
AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot);
// Abort if the specified register contains the invalid color bit pattern.
@@ -2025,7 +2028,7 @@ class MacroAssembler : public TurboAssembler {
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
// emitted is what you specified when creating the scope.
-class InstructionAccurateScope BASE_EMBEDDED {
+class InstructionAccurateScope {
public:
explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
: tasm_(tasm)
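
Note: the two constructors added in this header let a TurboAssembler or MacroAssembler be created from AssemblerOptions alone, without an Isolate. The delegation pattern, sketched standalone with hypothetical types:

    struct Options { bool enable_simulator_code = false; };

    class AssemblerBase {
     public:
      AssemblerBase(const Options& options, void* buffer, int size)
          : options_(options), buffer_(buffer), size_(size) {}

     private:
      Options options_;
      void* buffer_;
      int size_;
    };

    class Assembler : public AssemblerBase {
     public:
      // Isolate-free path: forwards straight to the base class, so code can
      // be assembled in contexts that have no Isolate at all.
      Assembler(const Options& options, void* buffer, int size)
          : AssemblerBase(options, buffer, size) {}
    };
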
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index fd973c8a36..aea4c0a21b 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -391,7 +391,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
return MaybeHandle<Object>();
}
memory->set_is_growable(false);
- size_t size = NumberToSize(memory->byte_length());
+ size_t size = memory->byte_length();
// Check the asm.js heap size against the valid limits.
if (!IsValidAsmjsMemorySize(size)) {
ReportInstantiationFailure(script, position, "Invalid heap size");
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 0f216052f3..2037a0ec8f 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -44,10 +44,24 @@
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
+#include "src/string-constants.h"
namespace v8 {
namespace internal {
+AssemblerOptions AssemblerOptions::EnableV8AgnosticCode() const {
+ AssemblerOptions options = *this;
+ options.v8_agnostic_code = true;
+ options.record_reloc_info_for_serialization = false;
+ options.enable_root_array_delta_access = false;
+ // Inherit |enable_simulator_code| value.
+ options.isolate_independent_code = false;
+ options.inline_offheap_trampolines = false;
+ // Inherit |code_range_start| value.
+ // Inherit |use_pc_relative_calls_and_jumps| value.
+ return options;
+}
+
AssemblerOptions AssemblerOptions::Default(
Isolate* isolate, bool explicitly_support_serialization) {
AssemblerOptions options;
@@ -61,9 +75,12 @@ AssemblerOptions AssemblerOptions::Default(
options.enable_simulator_code = !serializer;
#endif
options.inline_offheap_trampolines = !serializer;
+
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
- options.code_range_start =
- isolate->heap()->memory_allocator()->code_range()->start();
+ const base::AddressRegion& code_range =
+ isolate->heap()->memory_allocator()->code_range();
+ DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty());
+ options.code_range_start = code_range.begin();
#endif
return options;
}
@@ -355,6 +372,13 @@ HeapObjectRequest::HeapObjectRequest(CodeStub* code_stub, int offset)
DCHECK_NOT_NULL(value_.code_stub);
}
+HeapObjectRequest::HeapObjectRequest(const StringConstantBase* string,
+ int offset)
+ : kind_(kStringConstant), offset_(offset) {
+ value_.string = string;
+ DCHECK_NOT_NULL(value_.string);
+}
+
// Platform specific but identical code for all the platforms.
void Assembler::RecordDeoptReason(DeoptimizeReason reason,
@@ -381,11 +405,13 @@ void Assembler::DataAlign(int m) {
}
void AssemblerBase::RequestHeapObject(HeapObjectRequest request) {
+ DCHECK(!options().v8_agnostic_code);
request.set_offset(pc_offset());
heap_object_requests_.push_front(request);
}
int AssemblerBase::AddCodeTarget(Handle<Code> target) {
+ DCHECK(!options().v8_agnostic_code);
int current = static_cast<int>(code_targets_.size());
if (current > 0 && !target.is_null() &&
code_targets_.back().address() == target.address()) {
@@ -398,6 +424,7 @@ int AssemblerBase::AddCodeTarget(Handle<Code> target) {
}
Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
+ DCHECK(!options().v8_agnostic_code);
DCHECK_LE(0, code_target_index);
DCHECK_LT(code_target_index, code_targets_.size());
return code_targets_[code_target_index];
@@ -405,6 +432,7 @@ Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
void AssemblerBase::UpdateCodeTarget(intptr_t code_target_index,
Handle<Code> code) {
+ DCHECK(!options().v8_agnostic_code);
DCHECK_LE(0, code_target_index);
DCHECK_LT(code_target_index, code_targets_.size());
code_targets_[code_target_index] = code;
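
Note: EnableV8AgnosticCode() returns a restricted copy of the options rather than mutating them, and the new DCHECK(!options().v8_agnostic_code) guards assert up front that isolate-dependent machinery (heap object requests, code targets) is never reached in that mode. The copy-and-restrict pattern, sketched with hypothetical fields:

    #include <cassert>

    struct AsmOptions {
      bool v8_agnostic_code = false;
      bool record_reloc_info_for_serialization = true;
      bool enable_root_array_delta_access = true;

      // Returns a restricted copy; the original stays usable unchanged.
      AsmOptions EnableAgnosticCode() const {
        AsmOptions copy = *this;
        copy.v8_agnostic_code = true;
        copy.record_reloc_info_for_serialization = false;
        copy.enable_root_array_delta_access = false;
        return copy;
      }
    };

    void RequestHeapObject(const AsmOptions& options) {
      // Heap-object requests need an Isolate, so they are illegal in
      // agnostic mode; fail loudly at the entry point.
      assert(!options.v8_agnostic_code);
    }
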
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index b108c5dfff..a2a1c73191 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -67,6 +67,7 @@ class Isolate;
class SCTableReference;
class SourcePosition;
class StatsCounter;
+class StringConstantBase;
// -----------------------------------------------------------------------------
// Optimization for far-jmp like instructions that can be replaced by shorter.
@@ -97,8 +98,9 @@ class HeapObjectRequest {
public:
explicit HeapObjectRequest(double heap_number, int offset = -1);
explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
+ explicit HeapObjectRequest(const StringConstantBase* string, int offset = -1);
- enum Kind { kHeapNumber, kCodeStub };
+ enum Kind { kHeapNumber, kCodeStub, kStringConstant };
Kind kind() const { return kind_; }
double heap_number() const {
@@ -111,6 +113,11 @@ class HeapObjectRequest {
return value_.code_stub;
}
+ const StringConstantBase* string() const {
+ DCHECK_EQ(kind(), kStringConstant);
+ return value_.string;
+ }
+
// The code buffer offset at the time of the request.
int offset() const {
DCHECK_GE(offset_, 0);
@@ -128,6 +135,7 @@ class HeapObjectRequest {
union {
double heap_number;
CodeStub* code_stub;
+ const StringConstantBase* string;
} value_;
int offset_;
@@ -139,6 +147,9 @@ class HeapObjectRequest {
enum class CodeObjectRequired { kNo, kYes };
struct V8_EXPORT_PRIVATE AssemblerOptions {
+ // Prohibits using any V8-specific features of the assembler (isolates,
+ // heap objects, external references, etc.).
+ // Prohibits using any V8-specific features of the assembler (isolates,
+ bool v8_agnostic_code = false;
// Recording reloc info for external references and off-heap targets is
// needed whenever code is serialized, e.g. into the snapshot or as a WASM
// module. This flag allows this reloc info to be disabled for code that
@@ -168,6 +179,9 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
// the instruction immediates.
bool use_pc_relative_calls_and_jumps = false;
+ // Constructs V8-agnostic set of options from current state.
+ AssemblerOptions EnableV8AgnosticCode() const;
+
static AssemblerOptions Default(
Isolate* isolate, bool explicitly_support_serialization = false);
};
@@ -268,13 +282,23 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
}
}
- // {RequestHeapObject} records the need for a future heap number allocation or
- // code stub generation. After code assembly, each platform's
- // {Assembler::AllocateAndInstallRequestedHeapObjects} will allocate these
- // objects and place them where they are expected (determined by the pc offset
- // associated with each request).
+ // {RequestHeapObject} records the need for a future heap number allocation,
+ // code stub generation or string allocation. After code assembly, each
+ // platform's {Assembler::AllocateAndInstallRequestedHeapObjects} will
+ // allocate these objects and place them where they are expected (determined
+ // by the pc offset associated with each request).
void RequestHeapObject(HeapObjectRequest request);
+ bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const {
+ DCHECK(!RelocInfo::IsNone(rmode));
+ if (options().disable_reloc_info_for_patching) return false;
+ if (RelocInfo::IsOnlyForSerializer(rmode) &&
+ !options().record_reloc_info_for_serialization && !emit_debug_code()) {
+ return false;
+ }
+ return true;
+ }
+
private:
// Before we copy code into the code space, we sometimes cannot encode
// call/jump code targets as we normally would, as the difference between the
@@ -301,7 +325,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
};
// Avoids emitting debug code during the lifetime of this scope object.
-class DontEmitDebugCodeScope BASE_EMBEDDED {
+class DontEmitDebugCodeScope {
public:
explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
: assembler_(assembler), old_value_(assembler->emit_debug_code()) {
@@ -332,7 +356,7 @@ class PredictableCodeSizeScope {
// Enable a specified feature within a scope.
-class CpuFeatureScope BASE_EMBEDDED {
+class CpuFeatureScope {
public:
enum CheckPolicy {
kCheckSupported,
@@ -350,12 +374,12 @@ class CpuFeatureScope BASE_EMBEDDED {
#else
CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
CheckPolicy check = kCheckSupported) {}
- // Define a destructor to avoid unused variable warnings.
- ~CpuFeatureScope() {}
+ ~CpuFeatureScope() { // NOLINT (modernize-use-equals-default)
+ // Define a destructor to avoid unused variable warnings.
+ }
#endif
};
-
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
@@ -420,7 +444,7 @@ class CpuFeatures : public AllStatic {
// Utility functions
// Computes pow(x, y) with the special cases in the spec for Math.pow.
-double power_helper(Isolate* isolate, double x, double y);
+double power_helper(double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
@@ -430,7 +454,7 @@ double power_double_double(double x, double y);
class ConstantPoolEntry {
public:
- ConstantPoolEntry() {}
+ ConstantPoolEntry() = default;
ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
RelocInfo::Mode rmode = RelocInfo::NONE)
: position_(position),
@@ -447,7 +471,7 @@ class ConstantPoolEntry {
int position() const { return position_; }
bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
bool is_merged() const { return merged_index_ >= 0; }
- int merged_index(void) const {
+ int merged_index() const {
DCHECK(is_merged());
return merged_index_;
}
@@ -456,7 +480,7 @@ class ConstantPoolEntry {
merged_index_ = index;
DCHECK(is_merged());
}
- int offset(void) const {
+ int offset() const {
DCHECK_GE(merged_index_, 0);
return merged_index_;
}
@@ -493,7 +517,7 @@ class ConstantPoolEntry {
// -----------------------------------------------------------------------------
// Embedded constant pool support
-class ConstantPoolBuilder BASE_EMBEDDED {
+class ConstantPoolBuilder {
public:
ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);
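
Note: with the new kStringConstant member, HeapObjectRequest remains a hand-rolled tagged union: a Kind discriminant, a union payload, and accessors that assert the kind before touching the matching member. A minimal standalone sketch:

    #include <cassert>

    class Request {
     public:
      enum Kind { kHeapNumber, kStringConstant };

      explicit Request(double number) : kind_(kHeapNumber) {
        value_.number = number;
      }
      explicit Request(const char* string) : kind_(kStringConstant) {
        value_.string = string;
      }

      Kind kind() const { return kind_; }

      double heap_number() const {
        assert(kind_ == kHeapNumber);  // reading the wrong member is UB
        return value_.number;
      }
      const char* string() const {
        assert(kind_ == kStringConstant);
        return value_.string;
      }

     private:
      Kind kind_;
      union {
        double number;
        const char* string;
      } value_;
    };
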
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index acf7649792..b64f95dfa5 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -77,7 +77,9 @@ class PerThreadAssertScopeDebugOnly : public
#else
class PerThreadAssertScopeDebugOnly {
public:
- PerThreadAssertScopeDebugOnly() { }
+ PerThreadAssertScopeDebugOnly() { // NOLINT (modernize-use-equals-default)
+ // Define a constructor to avoid unused variable warnings.
+ }
void Release() {}
#endif
};
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
index 5cb1e87d23..7e3a25890b 100644
--- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
@@ -14,7 +14,7 @@ AstFunctionLiteralIdReindexer::AstFunctionLiteralIdReindexer(size_t stack_limit,
int delta)
: AstTraversalVisitor(stack_limit), delta_(delta) {}
-AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() {}
+AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() = default;
void AstFunctionLiteralIdReindexer::Reindex(Expression* pattern) {
Visit(pattern);
diff --git a/deps/v8/src/ast/ast-source-ranges.h b/deps/v8/src/ast/ast-source-ranges.h
index cf7bab53da..60222a4035 100644
--- a/deps/v8/src/ast/ast-source-ranges.h
+++ b/deps/v8/src/ast/ast-source-ranges.h
@@ -21,8 +21,9 @@ struct SourceRange {
static SourceRange OpenEnded(int32_t start) {
return SourceRange(start, kNoSourcePosition);
}
- static SourceRange ContinuationOf(const SourceRange& that) {
- return that.IsEmpty() ? Empty() : OpenEnded(that.end);
+ static SourceRange ContinuationOf(const SourceRange& that,
+ int end = kNoSourcePosition) {
+ return that.IsEmpty() ? Empty() : SourceRange(that.end, end);
}
int32_t start, end;
};
@@ -56,7 +57,7 @@ enum class SourceRangeKind {
class AstNodeSourceRanges : public ZoneObject {
public:
- virtual ~AstNodeSourceRanges() {}
+ virtual ~AstNodeSourceRanges() = default;
virtual SourceRange GetRange(SourceRangeKind kind) = 0;
};
@@ -65,7 +66,7 @@ class BinaryOperationSourceRanges final : public AstNodeSourceRanges {
explicit BinaryOperationSourceRanges(const SourceRange& right_range)
: right_range_(right_range) {}
- SourceRange GetRange(SourceRangeKind kind) {
+ SourceRange GetRange(SourceRangeKind kind) override {
DCHECK_EQ(kind, SourceRangeKind::kRight);
return right_range_;
}
@@ -79,7 +80,7 @@ class ContinuationSourceRanges : public AstNodeSourceRanges {
explicit ContinuationSourceRanges(int32_t continuation_position)
: continuation_position_(continuation_position) {}
- SourceRange GetRange(SourceRangeKind kind) {
+ SourceRange GetRange(SourceRangeKind kind) override {
DCHECK_EQ(kind, SourceRangeKind::kContinuation);
return SourceRange::OpenEnded(continuation_position_);
}
@@ -99,7 +100,7 @@ class CaseClauseSourceRanges final : public AstNodeSourceRanges {
explicit CaseClauseSourceRanges(const SourceRange& body_range)
: body_range_(body_range) {}
- SourceRange GetRange(SourceRangeKind kind) {
+ SourceRange GetRange(SourceRangeKind kind) override {
DCHECK_EQ(kind, SourceRangeKind::kBody);
return body_range_;
}
@@ -114,7 +115,7 @@ class ConditionalSourceRanges final : public AstNodeSourceRanges {
const SourceRange& else_range)
: then_range_(then_range), else_range_(else_range) {}
- SourceRange GetRange(SourceRangeKind kind) {
+ SourceRange GetRange(SourceRangeKind kind) override {
switch (kind) {
case SourceRangeKind::kThen:
return then_range_;
@@ -136,7 +137,7 @@ class IfStatementSourceRanges final : public AstNodeSourceRanges {
const SourceRange& else_range)
: then_range_(then_range), else_range_(else_range) {}
- SourceRange GetRange(SourceRangeKind kind) {
+ SourceRange GetRange(SourceRangeKind kind) override {
switch (kind) {
case SourceRangeKind::kElse:
return else_range_;
@@ -162,7 +163,7 @@ class IterationStatementSourceRanges final : public AstNodeSourceRanges {
explicit IterationStatementSourceRanges(const SourceRange& body_range)
: body_range_(body_range) {}
- SourceRange GetRange(SourceRangeKind kind) {
+ SourceRange GetRange(SourceRangeKind kind) override {
switch (kind) {
case SourceRangeKind::kBody:
return body_range_;
@@ -198,7 +199,7 @@ class NaryOperationSourceRanges final : public AstNodeSourceRanges {
void AddRange(const SourceRange& range) { ranges_.push_back(range); }
size_t RangeCount() const { return ranges_.size(); }
- SourceRange GetRange(SourceRangeKind kind) { UNREACHABLE(); }
+ SourceRange GetRange(SourceRangeKind kind) override { UNREACHABLE(); }
private:
ZoneVector<SourceRange> ranges_;
@@ -227,7 +228,7 @@ class TryCatchStatementSourceRanges final : public AstNodeSourceRanges {
explicit TryCatchStatementSourceRanges(const SourceRange& catch_range)
: catch_range_(catch_range) {}
- SourceRange GetRange(SourceRangeKind kind) {
+ SourceRange GetRange(SourceRangeKind kind) override {
switch (kind) {
case SourceRangeKind::kCatch:
return catch_range_;
@@ -247,7 +248,7 @@ class TryFinallyStatementSourceRanges final : public AstNodeSourceRanges {
explicit TryFinallyStatementSourceRanges(const SourceRange& finally_range)
: finally_range_(finally_range) {}
- SourceRange GetRange(SourceRangeKind kind) {
+ SourceRange GetRange(SourceRangeKind kind) override {
switch (kind) {
case SourceRangeKind::kFinally:
return finally_range_;
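
Note: most hunks in ast-source-ranges.h only add override to GetRange implementations that already overrode the base class. The keyword turns a silently-added overload into a compile error; a minimal sketch:

    struct Base {
      virtual ~Base() = default;
      virtual int GetRange(int kind) = 0;
    };

    struct Derived : Base {
      // The compiler now verifies this really overrides Base::GetRange.
      int GetRange(int kind) override { return kind; }

      // A mismatched signature would previously compile as a new overload;
      // with override it is rejected (uncomment to see the error):
      // int GetRange(long kind) override;
    };
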
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 8cf81b24a5..67ea77bfbf 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -242,6 +242,17 @@ const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
return result;
}
+const AstRawString* AstValueFactory::CloneFromOtherFactory(
+ const AstRawString* raw_string) {
+ const AstRawString* result = GetString(
+ raw_string->hash_field(), raw_string->is_one_byte(),
+ Vector<const byte>(raw_string->raw_data(), raw_string->byte_length()));
+ // Check that we aren't trying to clone a string that is already in this
+ // ast-value-factory.
+ DCHECK_NE(result, raw_string);
+ return result;
+}
+
AstConsString* AstValueFactory::NewConsString() {
AstConsString* new_string = new (zone_) AstConsString;
DCHECK_NOT_NULL(new_string);
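
Note: CloneFromOtherFactory re-interns a raw string by content and hash rather than sharing the pointer, since each factory owns its own zone memory; the DCHECK_NE mirrors that the clone must not alias the source. A toy sketch of cross-arena re-interning (simplified, std containers standing in for zones):

    #include <cassert>
    #include <set>
    #include <string>

    class StringFactory {
     public:
      // Interns by value: the returned pointer is owned by this factory,
      // never by the factory the contents came from.
      const std::string* GetString(const std::string& contents) {
        return &*strings_.insert(contents).first;
      }
      const std::string* CloneFromOtherFactory(const std::string* other) {
        const std::string* result = GetString(*other);
        assert(result != other);  // must not alias the other factory's data
        return result;
      }

     private:
      std::set<std::string> strings_;
    };

    int main() {
      StringFactory a, b;
      const std::string* s = a.GetString("await");
      const std::string* t = b.CloneFromOtherFactory(s);
      assert(*t == "await" && t != s);
    }
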
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index e85b0675bf..726d961362 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -194,48 +194,49 @@ class AstBigInt {
};
// For generating constants.
-#define AST_STRING_CONSTANTS(F) \
- F(anonymous_function, "(anonymous function)") \
- F(arguments, "arguments") \
- F(async, "async") \
- F(await, "await") \
- F(bigint, "bigint") \
- F(boolean, "boolean") \
- F(constructor, "constructor") \
- F(default, "default") \
- F(done, "done") \
- F(dot, ".") \
- F(dot_for, ".for") \
- F(dot_generator_object, ".generator_object") \
- F(dot_iterator, ".iterator") \
- F(dot_result, ".result") \
- F(dot_switch_tag, ".switch_tag") \
- F(dot_catch, ".catch") \
- F(empty, "") \
- F(eval, "eval") \
- F(function, "function") \
- F(get_space, "get ") \
- F(length, "length") \
- F(let, "let") \
- F(name, "name") \
- F(native, "native") \
- F(new_target, ".new.target") \
- F(next, "next") \
- F(number, "number") \
- F(object, "object") \
- F(proto, "__proto__") \
- F(prototype, "prototype") \
- F(return, "return") \
- F(set_space, "set ") \
- F(star_default_star, "*default*") \
- F(string, "string") \
- F(symbol, "symbol") \
- F(this, "this") \
- F(this_function, ".this_function") \
- F(throw, "throw") \
- F(undefined, "undefined") \
- F(use_asm, "use asm") \
- F(use_strict, "use strict") \
+#define AST_STRING_CONSTANTS(F) \
+ F(anonymous_function, "(anonymous function)") \
+ F(arguments, "arguments") \
+ F(async, "async") \
+ F(await, "await") \
+ F(bigint, "bigint") \
+ F(boolean, "boolean") \
+ F(constructor, "constructor") \
+ F(default, "default") \
+ F(done, "done") \
+ F(dot, ".") \
+ F(dot_for, ".for") \
+ F(dot_generator_object, ".generator_object") \
+ F(dot_iterator, ".iterator") \
+ F(dot_promise, ".promise") \
+ F(dot_result, ".result") \
+ F(dot_switch_tag, ".switch_tag") \
+ F(dot_catch, ".catch") \
+ F(empty, "") \
+ F(eval, "eval") \
+ F(function, "function") \
+ F(get_space, "get ") \
+ F(length, "length") \
+ F(let, "let") \
+ F(name, "name") \
+ F(native, "native") \
+ F(new_target, ".new.target") \
+ F(next, "next") \
+ F(number, "number") \
+ F(object, "object") \
+ F(proto, "__proto__") \
+ F(prototype, "prototype") \
+ F(return, "return") \
+ F(set_space, "set ") \
+ F(star_default_star, "*default*") \
+ F(string, "string") \
+ F(symbol, "symbol") \
+ F(this, "this") \
+ F(this_function, ".this_function") \
+ F(throw, "throw") \
+ F(undefined, "undefined") \
+ F(use_asm, "use asm") \
+ F(use_strict, "use strict") \
F(value, "value")
class AstStringConstants final {
@@ -297,10 +298,15 @@ class AstValueFactory {
return GetTwoByteStringInternal(literal);
}
const AstRawString* GetString(Handle<String> literal);
+
+ // Clones an AstRawString from another ast value factory, adding it to this
+ // factory and returning the clone.
+ const AstRawString* CloneFromOtherFactory(const AstRawString* raw_string);
+
V8_EXPORT_PRIVATE AstConsString* NewConsString();
- AstConsString* NewConsString(const AstRawString* str);
- AstConsString* NewConsString(const AstRawString* str1,
- const AstRawString* str2);
+ V8_EXPORT_PRIVATE AstConsString* NewConsString(const AstRawString* str);
+ V8_EXPORT_PRIVATE AstConsString* NewConsString(const AstRawString* str1,
+ const AstRawString* str2);
V8_EXPORT_PRIVATE void Internalize(Isolate* isolate);
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 5a4add6039..617a26b937 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -551,12 +551,6 @@ bool ObjectLiteral::IsFastCloningSupported() const {
ConstructorBuiltins::kMaximumClonedShallowObjectProperties;
}
-bool ArrayLiteral::is_empty() const {
- DCHECK(is_initialized());
- return values()->is_empty() && (boilerplate_description().is_null() ||
- boilerplate_description()->is_empty());
-}
-
int ArrayLiteral::InitDepthAndFlags() {
if (is_initialized()) return depth();
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 6c1e989d30..6cc2cbc8ec 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -383,7 +383,7 @@ class DoExpression final : public Expression {
class Declaration : public AstNode {
public:
- typedef ThreadedList<Declaration> List;
+ typedef base::ThreadedList<Declaration> List;
VariableProxy* proxy() const { return proxy_; }
@@ -397,6 +397,7 @@ class Declaration : public AstNode {
Declaration** next() { return &next_; }
Declaration* next_;
friend List;
+ friend base::ThreadedListTraits<Declaration>;
};
class VariableDeclaration : public Declaration {
@@ -1477,8 +1478,6 @@ class ArrayLiteral final : public AggregateLiteral {
int first_spread_index() const { return first_spread_index_; }
- bool is_empty() const;
-
// Populate the depth field and flags, returns the depth.
int InitDepthAndFlags();
@@ -1578,8 +1577,15 @@ class VariableProxy final : public Expression {
// Bind this proxy to the variable var.
void BindTo(Variable* var);
- void set_next_unresolved(VariableProxy* next) { next_unresolved_ = next; }
- VariableProxy* next_unresolved() { return next_unresolved_; }
+ V8_INLINE VariableProxy* next_unresolved() { return next_unresolved_; }
+
+ // Provides an access type for the ThreadedList used by the PreParsers
+ // expressions, lists, and formal parameters.
+ struct PreParserNext {
+ static VariableProxy** next(VariableProxy* t) {
+ return t->pre_parser_expr_next();
+ }
+ };
private:
friend class AstNodeFactory;
@@ -1590,7 +1596,8 @@ class VariableProxy final : public Expression {
int start_position)
: Expression(start_position, kVariableProxy),
raw_name_(name),
- next_unresolved_(nullptr) {
+ next_unresolved_(nullptr),
+ pre_parser_expr_next_(nullptr) {
bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false) |
@@ -1613,9 +1620,15 @@ class VariableProxy final : public Expression {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
};
+
+ V8_INLINE VariableProxy** next() { return &next_unresolved_; }
VariableProxy* next_unresolved_;
-};
+ VariableProxy** pre_parser_expr_next() { return &pre_parser_expr_next_; }
+ VariableProxy* pre_parser_expr_next_;
+
+ friend base::ThreadedListTraits<VariableProxy>;
+};
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2248,7 +2261,7 @@ class FunctionLiteral final : public Expression {
void mark_as_iife() { bit_field_ = IIFEBit::update(bit_field_, true); }
bool is_iife() const { return IIFEBit::decode(bit_field_); }
- bool is_top_level() const {
+ bool is_toplevel() const {
return function_literal_id() == FunctionLiteral::kIdTypeTopLevel;
}
bool is_wrapped() const { return function_type() == kWrapped; }
@@ -2308,7 +2321,7 @@ class FunctionLiteral final : public Expression {
// - (function() { ... })();
// - var x = function() { ... }();
bool ShouldEagerCompile() const;
- void SetShouldEagerCompile();
+ V8_EXPORT_PRIVATE void SetShouldEagerCompile();
FunctionType function_type() const {
return FunctionTypeBits::decode(bit_field_);
@@ -2736,7 +2749,7 @@ class TemplateLiteral final : public Expression {
// class SpecificVisitor : public AstVisitor<SpecificVisitor> { ... }
template <class Subclass>
-class AstVisitor BASE_EMBEDDED {
+class AstVisitor {
public:
void Visit(AstNode* node) { impl()->Visit(node); }
@@ -2823,7 +2836,7 @@ class AstVisitor BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// AstNode factory
-class AstNodeFactory final BASE_EMBEDDED {
+class AstNodeFactory final {
public:
AstNodeFactory(AstValueFactory* ast_value_factory, Zone* zone)
: zone_(zone), ast_value_factory_(ast_value_factory) {}
@@ -3330,7 +3343,6 @@ class AstNodeFactory final BASE_EMBEDDED {
}
Zone* zone() const { return zone_; }
- void set_zone(Zone* zone) { zone_ = zone; }
private:
// This zone may be deallocated upon returning from parsing a function body
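
Note: VariableProxy now carries two independent intrusive next pointers (next_unresolved_ for the scope's unresolved list, pre_parser_expr_next_ for preparser lists), and a list picks its chain through an accessor struct such as PreParserNext. The idea, sketched standalone (simplified; not V8's base::ThreadedList):

    // One node can sit on two intrusive lists at once; each list is
    // parameterized by an accessor naming the next-pointer slot to use.
    struct Node {
      int value = 0;
      Node* next_a = nullptr;
      Node* next_b = nullptr;

      struct NextA { static Node** next(Node* n) { return &n->next_a; } };
      struct NextB { static Node** next(Node* n) { return &n->next_b; } };
    };

    template <typename T, typename Next>
    class IntrusiveList {
     public:
      void AddFront(T* node) {
        *Next::next(node) = head_;
        head_ = node;
      }
      T* first() const { return head_; }

     private:
      T* head_ = nullptr;
    };

    int main() {
      Node n1, n2;
      IntrusiveList<Node, Node::NextA> unresolved;
      IntrusiveList<Node, Node::NextB> preparser;
      unresolved.AddFront(&n1);
      preparser.AddFront(&n1);  // same node, different chain
      preparser.AddFront(&n2);
    }
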
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index d2e56a9335..f9c2243099 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -31,7 +31,7 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
InitializeAstVisitor(isolate);
}
-CallPrinter::~CallPrinter() {}
+CallPrinter::~CallPrinter() = default;
CallPrinter::ErrorHint CallPrinter::GetErrorHint() const {
if (is_call_error_) {
@@ -666,7 +666,7 @@ void AstPrinter::PrintLiteral(const AstConsString* value, bool quote) {
//-----------------------------------------------------------------------------
-class IndentedScope BASE_EMBEDDED {
+class IndentedScope {
public:
IndentedScope(AstPrinter* printer, const char* txt)
: ast_printer_(printer) {
diff --git a/deps/v8/src/ast/scopes-inl.h b/deps/v8/src/ast/scopes-inl.h
new file mode 100644
index 0000000000..a70166c5ca
--- /dev/null
+++ b/deps/v8/src/ast/scopes-inl.h
@@ -0,0 +1,66 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_SCOPES_INL_H_
+#define V8_AST_SCOPES_INL_H_
+
+#include "src/ast/scopes.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+void Scope::ResolveScopesThenForEachVariable(DeclarationScope* max_outer_scope,
+ T variable_proxy_stackvisitor,
+ ParseInfo* info) {
+ // Module variables must be allocated before variable resolution
+ // to ensure that UpdateNeedsHoleCheck() can detect import variables.
+ if (info != nullptr && is_module_scope()) {
+ AsModuleScope()->AllocateModuleVariables();
+ }
+ // Lazy parsed declaration scopes are already partially analyzed. If there are
+ // unresolved references remaining, they just need to be resolved in outer
+ // scopes.
+ Scope* lookup =
+ is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
+ ? outer_scope()
+ : this;
+
+ for (VariableProxy *proxy = unresolved_list_.first(), *next = nullptr;
+ proxy != nullptr; proxy = next) {
+ next = proxy->next_unresolved();
+
+ DCHECK(!proxy->is_resolved());
+ Variable* var =
+ lookup->LookupRecursive(info, proxy, max_outer_scope->outer_scope());
+ if (var == nullptr) {
+ variable_proxy_stackvisitor(proxy);
+ } else if (var != Scope::kDummyPreParserVariable &&
+ var != Scope::kDummyPreParserLexicalVariable) {
+ if (info != nullptr) {
+ // In this case we need to leave scopes in a way that they can be
+ // allocated. If we resolved variables from lazy parsed scopes, we need
+ // to context allocate the var.
+ ResolveTo(info, proxy, var);
+ if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
+ } else {
+ var->set_is_used();
+ if (proxy->is_assigned()) var->set_maybe_assigned();
+ }
+ }
+ }
+
+ // Clear unresolved_list_ as it's in an inconsistent state.
+ unresolved_list_.Clear();
+
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ scope->ResolveScopesThenForEachVariable(max_outer_scope,
+ variable_proxy_stackvisitor, info);
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_AST_SCOPES_INL_H_
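
Note: the new ResolveScopesThenForEachVariable replaces FetchFreeVariables, which threaded every free proxy onto a returned linked list; callers now inject a callable that runs once per free variable (see the CollectNonLocals and AnalyzePartially hunks below). The shape of that refactor, as a minimal sketch:

    #include <string>
    #include <vector>

    // Before: traversal returned its results as a list the caller walked.
    // After: the caller passes behavior in, so no intermediate list exists.
    template <typename Visitor>
    void ForEachFreeVariable(const std::vector<std::string>& names,
                             Visitor visit) {
      for (const std::string& name : names) visit(name);
    }

    int main() {
      std::vector<std::string> free_vars = {"x", "y"};
      std::vector<std::string> non_locals;
      ForEachFreeVariable(free_vars, [&](const std::string& name) {
        non_locals.push_back(name);  // mirrors StringSet::Add above
      });
    }
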
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 74d50c44de..e9fb195609 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -8,6 +8,7 @@
#include "src/accessors.h"
#include "src/ast/ast.h"
+#include "src/ast/scopes-inl.h"
#include "src/base/optional.h"
#include "src/bootstrapper.h"
#include "src/counters.h"
@@ -23,15 +24,11 @@ namespace v8 {
namespace internal {
namespace {
-void* kDummyPreParserVariable = reinterpret_cast<void*>(0x1);
-void* kDummyPreParserLexicalVariable = reinterpret_cast<void*>(0x2);
-
bool IsLexical(Variable* variable) {
- if (variable == kDummyPreParserLexicalVariable) return true;
- if (variable == kDummyPreParserVariable) return false;
+ if (variable == Scope::kDummyPreParserLexicalVariable) return true;
+ if (variable == Scope::kDummyPreParserVariable) return false;
return IsLexicalVariableMode(variable->mode());
}
-
} // namespace
// ----------------------------------------------------------------------------
@@ -76,8 +73,9 @@ Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name,
if (p->value == nullptr) {
// The variable has not been declared yet -> insert it.
DCHECK_EQ(name, p->key);
- p->value = mode == VariableMode::kVar ? kDummyPreParserVariable
- : kDummyPreParserLexicalVariable;
+ p->value = mode == VariableMode::kVar
+ ? Scope::kDummyPreParserVariable
+ : Scope::kDummyPreParserLexicalVariable;
}
return reinterpret_cast<Variable*>(p->value);
}
@@ -154,7 +152,7 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
Scope::Snapshot::Snapshot(Scope* scope)
: outer_scope_(scope),
top_inner_scope_(scope->inner_scope_),
- top_unresolved_(scope->unresolved_),
+ top_unresolved_(scope->unresolved_list_.first()),
top_local_(scope->GetClosureScope()->locals_.end()),
top_decl_(scope->GetClosureScope()->decls_.end()),
outer_scope_calls_eval_(scope->scope_calls_eval_) {
@@ -310,6 +308,8 @@ void DeclarationScope::SetDefaults() {
has_arguments_parameter_ = false;
scope_uses_super_property_ = false;
has_rest_ = false;
+ has_promise_ = false;
+ has_generator_object_ = false;
sloppy_block_function_map_ = nullptr;
receiver_ = nullptr;
new_target_ = nullptr;
@@ -319,7 +319,7 @@ void DeclarationScope::SetDefaults() {
should_eager_compile_ = false;
was_lazily_parsed_ = false;
is_skipped_function_ = false;
- produced_preparsed_scope_data_ = nullptr;
+ preparsed_scope_data_builder_ = nullptr;
#ifdef DEBUG
DeclarationScope* outer_declaration_scope =
outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
@@ -337,7 +337,7 @@ void Scope::SetDefaults() {
#endif
inner_scope_ = nullptr;
sibling_ = nullptr;
- unresolved_ = nullptr;
+ unresolved_list_.Clear();
start_position_ = kNoSourcePosition;
end_position_ = kNoSourcePosition;
@@ -779,6 +779,7 @@ Variable* DeclarationScope::DeclareGeneratorObjectVar(
Variable* result = EnsureRareData()->generator_object =
NewTemporary(name, kNotAssigned);
result->set_is_used();
+ has_generator_object_ = true;
return result;
}
@@ -787,6 +788,7 @@ Variable* DeclarationScope::DeclarePromiseVar(const AstRawString* name) {
DCHECK_NULL(promise_var());
Variable* result = EnsureRareData()->promise = NewTemporary(name);
result->set_is_used();
+ has_promise_ = true;
return result;
}
@@ -834,16 +836,9 @@ Scope* Scope::FinalizeBlockScope() {
}
// Move unresolved variables
- if (unresolved_ != nullptr) {
- if (outer_scope()->unresolved_ != nullptr) {
- VariableProxy* unresolved = unresolved_;
- while (unresolved->next_unresolved() != nullptr) {
- unresolved = unresolved->next_unresolved();
- }
- unresolved->set_next_unresolved(outer_scope()->unresolved_);
- }
- outer_scope()->unresolved_ = unresolved_;
- unresolved_ = nullptr;
+ if (!unresolved_list_.is_empty()) {
+ outer_scope()->unresolved_list_.Prepend(std::move(unresolved_list_));
+ unresolved_list_.Clear();
}
if (inner_scope_calls_eval_) outer_scope()->inner_scope_calls_eval_ = true;
@@ -887,7 +882,7 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
DCHECK_EQ(new_parent->outer_scope_, outer_scope_);
DCHECK_EQ(new_parent, new_parent->GetClosureScope());
DCHECK_NULL(new_parent->inner_scope_);
- DCHECK_NULL(new_parent->unresolved_);
+ DCHECK(new_parent->unresolved_list_.is_empty());
DCHECK(new_parent->locals_.is_empty());
Scope* inner_scope = new_parent->sibling_;
if (inner_scope != top_inner_scope_) {
@@ -910,14 +905,21 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
new_parent->sibling_ = top_inner_scope_;
}
- if (outer_scope_->unresolved_ != top_unresolved_) {
- VariableProxy* last = outer_scope_->unresolved_;
- while (last->next_unresolved() != top_unresolved_) {
- last = last->next_unresolved();
+ if (outer_scope_->unresolved_list_.first() != top_unresolved_) {
+ // If the marked VariableProxy (the snapshotted one) is not the first, we
+ // need to find it and move all VariableProxies up to that point into
+ // new_parent, then restore the snapshotted state by reinitializing the
+ // outer_scope list.
+ {
+ auto iter = outer_scope_->unresolved_list_.begin();
+ while (*iter != top_unresolved_) {
+ ++iter;
+ }
+ outer_scope_->unresolved_list_.Rewind(iter);
}
- last->set_next_unresolved(nullptr);
- new_parent->unresolved_ = outer_scope_->unresolved_;
- outer_scope_->unresolved_ = top_unresolved_;
+
+ new_parent->unresolved_list_ = std::move(outer_scope_->unresolved_list_);
+ outer_scope_->unresolved_list_.ReinitializeHead(top_unresolved_);
}
// TODO(verwaest): This currently only moves do-expression declared variables
@@ -1261,8 +1263,7 @@ void Scope::DeclareCatchVariableName(const AstRawString* name) {
void Scope::AddUnresolved(VariableProxy* proxy) {
DCHECK(!already_resolved_);
DCHECK(!proxy->is_resolved());
- proxy->set_next_unresolved(unresolved_);
- unresolved_ = proxy;
+ unresolved_list_.AddFront(proxy);
}
Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
@@ -1274,22 +1275,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
}
bool Scope::RemoveUnresolved(VariableProxy* var) {
- if (unresolved_ == var) {
- unresolved_ = var->next_unresolved();
- var->set_next_unresolved(nullptr);
- return true;
- }
- VariableProxy* current = unresolved_;
- while (current != nullptr) {
- VariableProxy* next = current->next_unresolved();
- if (var == next) {
- current->set_next_unresolved(next->next_unresolved());
- var->set_next_unresolved(nullptr);
- return true;
- }
- current = next;
- }
- return false;
+ return unresolved_list_.Remove(var);
}
Variable* Scope::NewTemporary(const AstRawString* name) {
@@ -1483,11 +1469,12 @@ Scope* Scope::GetOuterScopeWithContext() {
Handle<StringSet> DeclarationScope::CollectNonLocals(
Isolate* isolate, ParseInfo* info, Handle<StringSet> non_locals) {
- VariableProxy* free_variables = FetchFreeVariables(this, info);
- for (VariableProxy* proxy = free_variables; proxy != nullptr;
- proxy = proxy->next_unresolved()) {
- non_locals = StringSet::Add(isolate, non_locals, proxy->name());
- }
+ ResolveScopesThenForEachVariable(this,
+ [=, &non_locals](VariableProxy* proxy) {
+ non_locals = StringSet::Add(
+ isolate, non_locals, proxy->name());
+ },
+ info);
return non_locals;
}
@@ -1504,10 +1491,15 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
decls_.Clear();
locals_.Clear();
inner_scope_ = nullptr;
- unresolved_ = nullptr;
+ unresolved_list_.Clear();
sloppy_block_function_map_ = nullptr;
rare_data_ = nullptr;
has_rest_ = false;
+ has_promise_ = false;
+ has_generator_object_ = false;
+
+ DCHECK_NE(zone_, ast_value_factory->zone());
+ zone_->ReleaseMemory();
if (aborted) {
// Prepare scope for use in the outer zone.
@@ -1532,7 +1524,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
void Scope::SavePreParsedScopeData() {
DCHECK(FLAG_preparser_scope_analysis);
- if (ProducedPreParsedScopeData::ScopeIsSkippableFunctionScope(this)) {
+ if (PreParsedScopeDataBuilder::ScopeIsSkippableFunctionScope(this)) {
AsDeclarationScope()->SavePreParsedScopeDataForDeclarationScope();
}
@@ -1542,30 +1534,33 @@ void Scope::SavePreParsedScopeData() {
}
void DeclarationScope::SavePreParsedScopeDataForDeclarationScope() {
- if (produced_preparsed_scope_data_ != nullptr) {
+ if (preparsed_scope_data_builder_ != nullptr) {
DCHECK(FLAG_preparser_scope_analysis);
- produced_preparsed_scope_data_->SaveScopeAllocationData(this);
+ preparsed_scope_data_builder_->SaveScopeAllocationData(this);
}
}
void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
DCHECK(!force_eager_compilation_);
- VariableProxy* unresolved = nullptr;
-
- if (!outer_scope_->is_script_scope() ||
- (FLAG_preparser_scope_analysis &&
- produced_preparsed_scope_data_ != nullptr &&
- produced_preparsed_scope_data_->ContainsInnerFunctions())) {
+ base::ThreadedList<VariableProxy> new_unresolved_list;
+ if (!IsArrowFunction(function_kind_) &&
+ (!outer_scope_->is_script_scope() ||
+ (FLAG_preparser_scope_analysis &&
+ preparsed_scope_data_builder_ != nullptr &&
+ preparsed_scope_data_builder_->ContainsInnerFunctions()))) {
// Try to resolve unresolved variables for this Scope and migrate those
// which cannot be resolved inside. It doesn't make sense to try to resolve
// them in the outer Scopes here, because they are incomplete.
- for (VariableProxy* proxy = FetchFreeVariables(this); proxy != nullptr;
- proxy = proxy->next_unresolved()) {
- DCHECK(!proxy->is_resolved());
- VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
- copy->set_next_unresolved(unresolved);
- unresolved = copy;
- }
+ ResolveScopesThenForEachVariable(
+ this, [=, &new_unresolved_list](VariableProxy* proxy) {
+ // Don't copy unresolved references to the script scope, unless it's a
+ // reference to a private field. In that case keep it so we can fail
+ // later.
+ if (!outer_scope_->is_script_scope() || proxy->is_private_field()) {
+ VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
+ new_unresolved_list.AddFront(copy);
+ }
+ });
// Migrate function_ to the right Zone.
if (function_ != nullptr) {
@@ -1586,7 +1581,7 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
ResetAfterPreparsing(ast_node_factory->ast_value_factory(), false);
- unresolved_ = unresolved;
+ unresolved_list_ = std::move(new_unresolved_list);
}
#ifdef DEBUG
@@ -1673,8 +1668,8 @@ void PrintMap(int indent, const char* label, VariableMap* map, bool locals,
for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var == function_var) continue;
- if (var == kDummyPreParserVariable ||
- var == kDummyPreParserLexicalVariable) {
+ if (var == Scope::kDummyPreParserVariable ||
+ var == Scope::kDummyPreParserLexicalVariable) {
continue;
}
bool local = !IsDynamicVariableMode(var->mode());
@@ -2045,8 +2040,7 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
// scopes.
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
DCHECK_EQ(variables_.occupancy(), 0);
- for (VariableProxy* proxy = unresolved_; proxy != nullptr;
- proxy = proxy->next_unresolved()) {
+ for (VariableProxy* proxy : unresolved_list_) {
Variable* var = outer_scope()->LookupRecursive(info, proxy, nullptr);
if (var == nullptr) {
DCHECK(proxy->is_private_field());
@@ -2060,8 +2054,7 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
}
} else {
// Resolve unresolved variables for this scope.
- for (VariableProxy* proxy = unresolved_; proxy != nullptr;
- proxy = proxy->next_unresolved()) {
+ for (VariableProxy* proxy : unresolved_list_) {
if (!ResolveVariable(info, proxy)) return false;
}
@@ -2074,57 +2067,6 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
return true;
}
-VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
- ParseInfo* info,
- VariableProxy* stack) {
- // Module variables must be allocated before variable resolution
- // to ensure that UpdateNeedsHoleCheck() can detect import variables.
- if (info != nullptr && is_module_scope()) {
- AsModuleScope()->AllocateModuleVariables();
- }
- // Lazy parsed declaration scopes are already partially analyzed. If there are
- // unresolved references remaining, they just need to be resolved in outer
- // scopes.
- Scope* lookup =
- is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
- ? outer_scope()
- : this;
- for (VariableProxy *proxy = unresolved_, *next = nullptr; proxy != nullptr;
- proxy = next) {
- next = proxy->next_unresolved();
- DCHECK(!proxy->is_resolved());
- Variable* var =
- lookup->LookupRecursive(info, proxy, max_outer_scope->outer_scope());
- if (var == nullptr) {
- proxy->set_next_unresolved(stack);
- stack = proxy;
- } else if (var != kDummyPreParserVariable &&
- var != kDummyPreParserLexicalVariable) {
- if (info != nullptr) {
- // In this case we need to leave scopes in a way that they can be
- // allocated. If we resolved variables from lazy parsed scopes, we need
- // to context allocate the var.
- ResolveTo(info, proxy, var);
- if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
- } else {
- var->set_is_used();
- if (proxy->is_assigned()) {
- var->set_maybe_assigned();
- }
- }
- }
- }
-
- // Clear unresolved_ as it's in an inconsistent state.
- unresolved_ = nullptr;
-
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- stack = scope->FetchFreeVariables(max_outer_scope, info, stack);
- }
-
- return stack;
-}
-
bool Scope::MustAllocate(Variable* var) {
if (var == kDummyPreParserLexicalVariable || var == kDummyPreParserVariable) {
return true;
@@ -2236,6 +2178,24 @@ void DeclarationScope::AllocateReceiver() {
AllocateParameter(receiver(), -1);
}
+void DeclarationScope::AllocatePromise() {
+ if (!has_promise_) return;
+ DCHECK_NOT_NULL(promise_var());
+ DCHECK_EQ(this, promise_var()->scope());
+ AllocateStackSlot(promise_var());
+ DCHECK_EQ(VariableLocation::LOCAL, promise_var()->location());
+ DCHECK_EQ(kPromiseVarIndex, promise_var()->index());
+}
+
+void DeclarationScope::AllocateGeneratorObject() {
+ if (!has_generator_object_) return;
+ DCHECK_NOT_NULL(generator_object_var());
+ DCHECK_EQ(this, generator_object_var()->scope());
+ AllocateStackSlot(generator_object_var());
+ DCHECK_EQ(VariableLocation::LOCAL, generator_object_var()->location());
+ DCHECK_EQ(kGeneratorObjectVarIndex, generator_object_var()->index());
+}
+
void Scope::AllocateNonParameterLocal(Variable* var) {
DCHECK(var->scope() == this);
if (var->IsUnallocated() && MustAllocate(var)) {
@@ -2304,6 +2264,19 @@ void Scope::AllocateVariablesRecursively() {
return;
}
+ // Make sure to allocate the .promise (for async functions) or
+ // .generator_object (for async generators) first, so that it
+ // gets the required stack slot 0 in case it's needed. See
+ // http://bit.ly/v8-zero-cost-async-stack-traces for details.
+ if (is_function_scope()) {
+ FunctionKind kind = GetClosureScope()->function_kind();
+ if (IsAsyncGeneratorFunction(kind)) {
+ AsDeclarationScope()->AllocateGeneratorObject();
+ } else if (IsAsyncFunction(kind)) {
+ AsDeclarationScope()->AllocatePromise();
+ }
+ }
+
// Allocate variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
scope->AllocateVariablesRecursively();
@@ -2410,5 +2383,9 @@ int Scope::ContextLocalCount() const {
(is_function_var_in_context ? 1 : 0);
}
+void* const Scope::kDummyPreParserVariable = reinterpret_cast<void*>(0x1);
+void* const Scope::kDummyPreParserLexicalVariable =
+ reinterpret_cast<void*>(0x2);
+
} // namespace internal
} // namespace v8
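
Note: the allocation-order change above guarantees that .promise (async functions) or .generator_object (async generators) always receives stack slot 0, so the zero-cost async stack trace machinery can locate it by index alone. A toy allocator showing the reserve-slot-zero-first discipline (hypothetical, not V8's allocator):

    #include <cassert>
    #include <string>
    #include <vector>

    class SlotAllocator {
     public:
      // Must run before any other allocation so external code can rely on
      // the variable living at index 0.
      void AllocateFixedSlotZero(const std::string& name) {
        assert(slots_.empty());
        slots_.push_back(name);
      }
      int Allocate(const std::string& name) {
        slots_.push_back(name);
        return static_cast<int>(slots_.size()) - 1;
      }
      const std::string& at(int index) const { return slots_[index]; }

     private:
      std::vector<std::string> slots_;
    };

    int main() {
      SlotAllocator scope;
      scope.AllocateFixedSlotZero(".promise");  // before all other locals
      scope.Allocate("x");
      assert(scope.at(0) == ".promise");
    }
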
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index f43761af58..0b88cc027c 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -20,8 +20,7 @@ class AstValueFactory;
class AstRawString;
class Declaration;
class ParseInfo;
-class PreParsedScopeData;
-class ProducedPreParsedScopeData;
+class PreParsedScopeDataBuilder;
class SloppyBlockFunctionStatement;
class Statement;
class StringSet;
@@ -103,7 +102,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void SetScopeName(const AstRawString* scope_name) {
scope_name_ = scope_name;
}
- void set_needs_migration() { needs_migration_ = true; }
#endif
// TODO(verwaest): Is this needed on Scope?
@@ -114,7 +112,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
ModuleScope* AsModuleScope();
const ModuleScope* AsModuleScope() const;
- class Snapshot final BASE_EMBEDDED {
+ class Snapshot final {
public:
explicit Snapshot(Scope* scope);
~Snapshot();
@@ -125,8 +123,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Scope* outer_scope_;
Scope* top_inner_scope_;
VariableProxy* top_unresolved_;
- ThreadedList<Variable>::Iterator top_local_;
- ThreadedList<Declaration>::Iterator top_decl_;
+ base::ThreadedList<Variable>::Iterator top_local_;
+ base::ThreadedList<Declaration>::Iterator top_decl_;
const bool outer_scope_calls_eval_;
};
@@ -203,9 +201,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void DeclareCatchVariableName(const AstRawString* name);
// Declarations list.
- ThreadedList<Declaration>* declarations() { return &decls_; }
+ base::ThreadedList<Declaration>* declarations() { return &decls_; }
- ThreadedList<Variable>* locals() { return &locals_; }
+ base::ThreadedList<Variable>* locals() { return &locals_; }
// Create a new unresolved variable.
VariableProxy* NewUnresolved(AstNodeFactory* factory,
@@ -218,8 +216,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
DCHECK(!already_resolved_);
DCHECK_EQ(factory->zone(), zone());
VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_pos);
- proxy->set_next_unresolved(unresolved_);
- unresolved_ = proxy;
+ AddUnresolved(proxy);
return proxy;
}
@@ -480,6 +477,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return false;
}
+ static void* const kDummyPreParserVariable;
+ static void* const kDummyPreParserLexicalVariable;
+
protected:
explicit Scope(Zone* zone);
@@ -522,12 +522,12 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
VariableMap variables_;
// In case of non-scopeinfo-backed scopes, this contains the variables of the
// map above in order of addition.
- ThreadedList<Variable> locals_;
+ base::ThreadedList<Variable> locals_;
// Unresolved variables referred to from this scope. The proxies themselves
// form a linked list of all unresolved proxies.
- VariableProxy* unresolved_;
+ base::ThreadedList<VariableProxy> unresolved_list_;
// Declarations.
- ThreadedList<Declaration> decls_;
+ base::ThreadedList<Declaration> decls_;
// Serialized scope info support.
Handle<ScopeInfo> scope_info_;
@@ -597,9 +597,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Finds free variables of this scope. This mutates the unresolved variables
// list along the way, so full resolution cannot be done afterwards.
// If a ParseInfo* is passed, non-free variables will be resolved.
- VariableProxy* FetchFreeVariables(DeclarationScope* max_outer_scope,
- ParseInfo* info = nullptr,
- VariableProxy* stack = nullptr);
+ template <typename T>
+ void ResolveScopesThenForEachVariable(DeclarationScope* max_outer_scope,
+ T variable_proxy_stackvisitor,
+ ParseInfo* info = nullptr);
// Predicates.
bool MustAllocate(Variable* var);
@@ -682,6 +683,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
}
bool is_being_lazily_parsed() const { return is_being_lazily_parsed_; }
#endif
+ void set_zone(Zone* zone) {
+#ifdef DEBUG
+ needs_migration_ = true;
+#endif
+ zone_ = zone;
+ }
bool ShouldEagerCompile() const;
void set_should_eager_compile();
@@ -759,11 +766,22 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// literals, or nullptr. Only valid for function scopes.
Variable* function_var() const { return function_; }
+ // The variable holding the JSGeneratorObject for generator, async
+ // and async generator functions, and modules. Only valid for
+ // function and module scopes.
Variable* generator_object_var() const {
DCHECK(is_function_scope() || is_module_scope());
return GetRareVariable(RareVariable::kGeneratorObject);
}
+ // For async generators, the .generator_object variable is always
+ // allocated to a fixed stack slot, such that the stack trace
+ // construction logic can access it.
+ static constexpr int kGeneratorObjectVarIndex = 0;
+
+ // The variable holding the promise returned from async functions.
+ // Only valid for function scopes in async functions (i.e. not
+ // for async generators).
Variable* promise_var() const {
DCHECK(is_function_scope());
DCHECK(IsAsyncFunction(function_kind_));
@@ -771,6 +789,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
return GetRareVariable(RareVariable::kPromise);
}
+ // For async functions, the .promise variable is always allocated
+ // to a fixed stack slot, such that the stack trace construction
+ // logic can access it.
+ static constexpr int kPromiseVarIndex = 0;
+
// Parameters. The left-most parameter has index 0.
// Only valid for function and module scopes.
Variable* parameter(int index) const {
@@ -898,6 +921,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
void AllocateLocals();
void AllocateParameterLocals();
void AllocateReceiver();
+ void AllocatePromise();
+ void AllocateGeneratorObject();
void ResetAfterPreparsing(AstValueFactory* ast_value_factory, bool aborted);
@@ -919,13 +944,13 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// saved in produced_preparsed_scope_data_.
void SavePreParsedScopeDataForDeclarationScope();
- void set_produced_preparsed_scope_data(
- ProducedPreParsedScopeData* produced_preparsed_scope_data) {
- produced_preparsed_scope_data_ = produced_preparsed_scope_data;
+ void set_preparsed_scope_data_builder(
+ PreParsedScopeDataBuilder* preparsed_scope_data_builder) {
+ preparsed_scope_data_builder_ = preparsed_scope_data_builder;
}
- ProducedPreParsedScopeData* produced_preparsed_scope_data() const {
- return produced_preparsed_scope_data_;
+ PreParsedScopeDataBuilder* preparsed_scope_data_builder() const {
+ return preparsed_scope_data_builder_;
}
private:
@@ -954,6 +979,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool force_eager_compilation_ : 1;
// This function scope has a rest parameter.
bool has_rest_ : 1;
+ // This function scope has a .promise variable.
+ bool has_promise_ : 1;
+ // This function scope has a .generator_object variable.
+ bool has_generator_object_ : 1;
// This scope has a parameter called "arguments".
bool has_arguments_parameter_ : 1;
// This scope uses "super" property ('super.foo').
@@ -981,7 +1010,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
Variable* arguments_;
// For producing the scope allocation data during preparsing.
- ProducedPreParsedScopeData* produced_preparsed_scope_data_;
+ PreParsedScopeDataBuilder* preparsed_scope_data_builder_;
struct RareData : public ZoneObject {
// Convenience variable; Subclass constructor only
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index 10ac5c48a5..d33062973b 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -181,7 +181,7 @@ class Variable final : public ZoneObject {
: kNeedsInitialization;
}
- typedef ThreadedList<Variable> List;
+ typedef base::ThreadedList<Variable> List;
private:
Scope* scope_;
@@ -215,6 +215,7 @@ class Variable final : public ZoneObject {
ForceHoleInitializationField::kNext, 1> {};
Variable** next() { return &next_; }
friend List;
+ friend base::ThreadedListTraits<Variable>;
};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/base/address-region.h b/deps/v8/src/base/address-region.h
new file mode 100644
index 0000000000..6b733cfe4d
--- /dev/null
+++ b/deps/v8/src/base/address-region.h
@@ -0,0 +1,70 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_ADDRESS_REGION_H_
+#define V8_BASE_ADDRESS_REGION_H_
+
+#include <iostream>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// Helper class representing an address region of certain size.
+class AddressRegion {
+ public:
+ typedef uintptr_t Address;
+
+ AddressRegion() = default;
+
+ AddressRegion(Address address, size_t size)
+ : address_(address), size_(size) {}
+
+ Address begin() const { return address_; }
+ Address end() const { return address_ + size_; }
+
+ size_t size() const { return size_; }
+ void set_size(size_t size) { size_ = size; }
+
+ bool is_empty() const { return size_ == 0; }
+
+ bool contains(Address address) const {
+ STATIC_ASSERT(std::is_unsigned<Address>::value);
+ return (address - begin()) < size();
+ }
+
+ bool contains(Address address, size_t size) const {
+ STATIC_ASSERT(std::is_unsigned<Address>::value);
+ Address offset = address - begin();
+ return (offset < size_) && (offset + size <= size_);
+ }
+
+ bool contains(AddressRegion region) const {
+ return contains(region.address_, region.size_);
+ }
+
+ bool operator==(AddressRegion other) const {
+ return address_ == other.address_ && size_ == other.size_;
+ }
+
+ bool operator!=(AddressRegion other) const {
+ return address_ != other.address_ || size_ != other.size_;
+ }
+
+ private:
+ Address address_ = 0;
+ size_t size_ = 0;
+};
+ASSERT_TRIVIALLY_COPYABLE(AddressRegion);
+
+inline std::ostream& operator<<(std::ostream& out, AddressRegion region) {
+ return out << "[" << reinterpret_cast<void*>(region.begin()) << "+"
+ << region.size() << "]";
+}
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_ADDRESS_REGION_H_
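
A minimal usage sketch of the new AddressRegion helper, assuming only the header added above (function name and values are illustrative; note the half-open [begin, end) semantics of contains()):

#include "src/base/address-region.h"
#include "src/base/logging.h"

void AddressRegionSketch() {
  v8::base::AddressRegion region(0x1000, 0x100);  // [0x1000, 0x1100)
  CHECK(region.contains(0x1000));          // begin() is inside the region.
  CHECK(!region.contains(0x1100));         // end() is one past the region.
  CHECK(region.contains(0x1080, 0x80));    // [0x1080, 0x1100) fits exactly.
  CHECK(!region.contains(0x1080, 0x81));   // One byte too long.
}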
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index d81c537e57..90681b8a35 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -377,6 +377,22 @@ class AtomicElement {
T value_;
};
+template <typename T,
+ typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
+inline void CheckedIncrement(std::atomic<T>* number, T amount) {
+ const T old = number->fetch_add(amount);
+ DCHECK_GE(old + amount, old);
+ USE(old);
+}
+
+template <typename T,
+ typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
+inline void CheckedDecrement(std::atomic<T>* number, T amount) {
+ const T old = number->fetch_sub(amount);
+ DCHECK_GE(old, amount);
+ USE(old);
+}
+
} // namespace base
} // namespace v8
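
A short sketch of the new CheckedIncrement/CheckedDecrement helpers, which guard an unsigned atomic counter against wrap-around in debug builds (the counter and function names are hypothetical):

#include <atomic>
#include "src/base/atomic-utils.h"

void CounterSketch() {
  std::atomic<size_t> live_bytes{0};
  v8::base::CheckedIncrement(&live_bytes, size_t{4096});  // 0 -> 4096
  v8::base::CheckedDecrement(&live_bytes, size_t{4096});  // 4096 -> 0
  // Decrementing below zero would wrap the unsigned counter; in a debug
  // build the DCHECK_GE(old, amount) above would fire:
  // v8::base::CheckedDecrement(&live_bytes, size_t{1});
}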
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 731a7181d7..147a1730b2 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -146,6 +146,14 @@ constexpr inline bool IsPowerOfTwo(T value) {
V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
// Same for 64 bit integers. |value| must be <= 2^63
V8_BASE_EXPORT uint64_t RoundUpToPowerOfTwo64(uint64_t value);
+// Same for size_t integers.
+inline size_t RoundUpToPowerOfTwo(size_t value) {
+ if (sizeof(size_t) == sizeof(uint64_t)) {
+ return RoundUpToPowerOfTwo64(value);
+ } else {
+ return RoundUpToPowerOfTwo32(value);
+ }
+}
// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
// less than or equal to |value|. If you pass in a |value| that is already a
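
The new size_t overload simply dispatches on the platform word size; a sketch of the expected results, assuming src/base/bits.h (function name illustrative):

#include "src/base/bits.h"

void RoundUpToPowerOfTwoSketch() {
  using v8::base::bits::RoundUpToPowerOfTwo;
  size_t a = RoundUpToPowerOfTwo(size_t{3});     // 4
  size_t b = RoundUpToPowerOfTwo(size_t{4096});  // 4096: powers of two pass through
  // On 64-bit targets this forwards to RoundUpToPowerOfTwo64; on 32-bit
  // targets, to RoundUpToPowerOfTwo32.
  (void)a;
  (void)b;
}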
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
new file mode 100644
index 0000000000..ca9dde25f7
--- /dev/null
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -0,0 +1,101 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bounded-page-allocator.h"
+
+namespace v8 {
+namespace base {
+
+BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator,
+ Address start, size_t size,
+ size_t allocate_page_size)
+ : allocate_page_size_(allocate_page_size),
+ commit_page_size_(page_allocator->CommitPageSize()),
+ page_allocator_(page_allocator),
+ region_allocator_(start, size, allocate_page_size_) {
+ CHECK_NOT_NULL(page_allocator);
+ CHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
+ CHECK(IsAligned(allocate_page_size_, commit_page_size_));
+}
+
+BoundedPageAllocator::Address BoundedPageAllocator::begin() const {
+ return region_allocator_.begin();
+}
+
+size_t BoundedPageAllocator::size() const { return region_allocator_.size(); }
+
+void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
+ size_t alignment,
+ PageAllocator::Permission access) {
+ LockGuard<Mutex> guard(&mutex_);
+ CHECK(IsAligned(alignment, region_allocator_.page_size()));
+
+  // The region allocator does not support alignments bigger than its own
+  // allocation alignment.
+ CHECK_LE(alignment, allocate_page_size_);
+
+ // TODO(ishell): Consider using randomized version here.
+ Address address = region_allocator_.AllocateRegion(size);
+ if (address == RegionAllocator::kAllocationFailure) {
+ return nullptr;
+ }
+ CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
+ access));
+ return reinterpret_cast<void*>(address);
+}
+
+bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
+ LockGuard<Mutex> guard(&mutex_);
+
+ Address address = reinterpret_cast<Address>(raw_address);
+ size_t freed_size = region_allocator_.FreeRegion(address);
+ if (freed_size != size) return false;
+ CHECK(page_allocator_->SetPermissions(raw_address, size,
+ PageAllocator::kNoAccess));
+ return true;
+}
+
+bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
+ size_t new_size) {
+ Address address = reinterpret_cast<Address>(raw_address);
+ CHECK(IsAligned(address, allocate_page_size_));
+
+ DCHECK_LT(new_size, size);
+ DCHECK(IsAligned(size - new_size, commit_page_size_));
+
+ // Check if we freed any allocatable pages by this release.
+ size_t allocated_size = RoundUp(size, allocate_page_size_);
+ size_t new_allocated_size = RoundUp(new_size, allocate_page_size_);
+
+#ifdef DEBUG
+ {
+    // There must be an allocated region at the given |address| of a size not
+    // smaller than |size|.
+ LockGuard<Mutex> guard(&mutex_);
+ CHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
+ }
+#endif
+
+ if (new_allocated_size < allocated_size) {
+ LockGuard<Mutex> guard(&mutex_);
+ region_allocator_.TrimRegion(address, new_allocated_size);
+ }
+
+  // Keep the region in "used" state, just uncommit some pages.
+ Address free_address = address + new_size;
+ size_t free_size = size - new_size;
+ return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
+ free_size, PageAllocator::kNoAccess);
+}
+
+bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) {
+ DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
+ DCHECK(IsAligned(size, commit_page_size_));
+ DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
+ return page_allocator_->SetPermissions(address, size, access);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h
new file mode 100644
index 0000000000..e3d928618b
--- /dev/null
+++ b/deps/v8/src/base/bounded-page-allocator.h
@@ -0,0 +1,79 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
+#define V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
+
+#include "include/v8-platform.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/region-allocator.h"
+
+namespace v8 {
+namespace base {
+
+// This is a v8::PageAllocator implementation that allocates pages within the
+// pre-reserved region of virtual space. This class requires the virtual space
+// to be kept reserved during the lifetime of this object.
+// The main applications of the bounded page allocator are:
+// - V8 heap pointer compression, which requires the whole V8 heap to be
+//   allocated within a contiguous range of virtual address space,
+// - executable page allocation, which allows the use of PC-relative 32-bit
+//   code displacement on certain 64-bit platforms.
+// The bounded page allocator uses another page allocator instance for the
+// actual page allocations.
+// The implementation is thread-safe.
+class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
+ public:
+ typedef uintptr_t Address;
+
+ BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
+ size_t size, size_t allocate_page_size);
+ ~BoundedPageAllocator() override = default;
+
+ // These functions are not inlined to avoid https://crbug.com/v8/8275.
+ Address begin() const;
+ size_t size() const;
+
+ // Returns true if given address is in the range controlled by the bounded
+ // page allocator instance.
+ bool contains(Address address) const {
+ return region_allocator_.contains(address);
+ }
+
+ size_t AllocatePageSize() override { return allocate_page_size_; }
+
+ size_t CommitPageSize() override { return commit_page_size_; }
+
+ void SetRandomMmapSeed(int64_t seed) override {
+ page_allocator_->SetRandomMmapSeed(seed);
+ }
+
+ void* GetRandomMmapAddr() override {
+ return page_allocator_->GetRandomMmapAddr();
+ }
+
+ void* AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) override;
+
+ bool FreePages(void* address, size_t size) override;
+
+ bool ReleasePages(void* address, size_t size, size_t new_size) override;
+
+ bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) override;
+
+ private:
+ v8::base::Mutex mutex_;
+ const size_t allocate_page_size_;
+ const size_t commit_page_size_;
+ v8::PageAllocator* const page_allocator_;
+ v8::base::RegionAllocator region_allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundedPageAllocator);
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
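
A sketch of the intended wiring: the bounded allocator carves pages out of a range that must stay reserved for its whole lifetime. The reservation flow below is illustrative (function name hypothetical; error handling minimal):

#include "src/base/bounded-page-allocator.h"
#include "src/base/page-allocator.h"

void BoundedPageAllocatorSketch() {
  v8::base::PageAllocator os_allocator;  // platform-backed implementation
  const size_t page = os_allocator.AllocatePageSize();
  const size_t kReservationSize = 64 * page;  // illustrative

  // Reserve (but do not commit) the range the bounded allocator will manage.
  void* reservation = os_allocator.AllocatePages(
      nullptr, kReservationSize, page, v8::PageAllocator::kNoAccess);
  if (reservation == nullptr) return;

  v8::base::BoundedPageAllocator bounded(
      &os_allocator, reinterpret_cast<uintptr_t>(reservation),
      kReservationSize, page);

  // Pages handed out by |bounded| always fall inside the reservation.
  void* p = bounded.AllocatePages(nullptr, page, page,
                                  v8::PageAllocator::kReadWrite);
  if (p != nullptr) bounded.FreePages(p, page);
  os_allocator.FreePages(reservation, kReservationSize);
}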
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index df0d1110a5..695e67a618 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -196,9 +196,9 @@
#endif
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
-#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 1
+#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK true
#else
-#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0
+#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK false
#endif
// Number of bits to represent the page size for paged spaces. The value of 19
diff --git a/deps/v8/src/base/debug/stack_trace.cc b/deps/v8/src/base/debug/stack_trace.cc
index 2a3fb87a19..cbf00ad17c 100644
--- a/deps/v8/src/base/debug/stack_trace.cc
+++ b/deps/v8/src/base/debug/stack_trace.cc
@@ -21,7 +21,7 @@ StackTrace::StackTrace(const void* const* trace, size_t count) {
count_ = count;
}
-StackTrace::~StackTrace() {}
+StackTrace::~StackTrace() = default;
const void* const* StackTrace::Addresses(size_t* count) const {
*count = count_;
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
index 51b821bdd1..ed602af547 100644
--- a/deps/v8/src/base/debug/stack_trace_posix.cc
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -61,7 +61,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding);
namespace {
volatile sig_atomic_t in_signal_handler = 0;
-bool dump_stack_in_signal_handler = 1;
+bool dump_stack_in_signal_handler = true;
// The prefix used for mangled symbols, per the Itanium C++ ABI:
// http://www.codesourcery.com/cxx-abi/abi.html#mangling
@@ -104,7 +104,7 @@ void DemangleSymbols(std::string* text) {
// Try to demangle the mangled symbol candidate.
int status = 0;
std::unique_ptr<char, FreeDeleter> demangled_symbol(
- abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, 0, &status));
+ abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, nullptr, &status));
if (status == 0) { // Demangling is successful.
// Remove the mangled symbol.
text->erase(mangled_start, mangled_end - mangled_start);
@@ -125,7 +125,7 @@ class BacktraceOutputHandler {
virtual void HandleOutput(const char* output) = 0;
protected:
- virtual ~BacktraceOutputHandler() {}
+ virtual ~BacktraceOutputHandler() = default;
};
#if HAVE_EXECINFO_H
@@ -266,7 +266,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
public:
- PrintBacktraceOutputHandler() {}
+ PrintBacktraceOutputHandler() = default;
void HandleOutput(const char* output) override {
// NOTE: This code MUST be async-signal safe (it's used by in-process
diff --git a/deps/v8/src/base/debug/stack_trace_win.cc b/deps/v8/src/base/debug/stack_trace_win.cc
index 3fe66d97ad..6b22131233 100644
--- a/deps/v8/src/base/debug/stack_trace_win.cc
+++ b/deps/v8/src/base/debug/stack_trace_win.cc
@@ -7,13 +7,6 @@
#include "src/base/debug/stack_trace.h"
-// This file can't use "src/base/win32-headers.h" because it defines symbols
-// that lead to compilation errors. But `NOMINMAX` should be defined to disable
-// defining of the `min` and `max` MACROS.
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-
#include <windows.h>
#include <dbghelp.h>
#include <Shlwapi.h>
diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc
index 7a1cc175cb..8c5641569d 100644
--- a/deps/v8/src/base/ieee754.cc
+++ b/deps/v8/src/base/ieee754.cc
@@ -90,7 +90,7 @@ typedef union {
ew_u.value = (d); \
(ix0) = ew_u.parts.msw; \
(ix1) = ew_u.parts.lsw; \
- } while (0)
+ } while (false)
/* Get a 64-bit int from a double. */
#define EXTRACT_WORD64(ix, d) \
@@ -98,7 +98,7 @@ typedef union {
ieee_double_shape_type ew_u; \
ew_u.value = (d); \
(ix) = ew_u.xparts.w; \
- } while (0)
+ } while (false)
/* Get the more significant 32 bit int from a double. */
@@ -107,7 +107,7 @@ typedef union {
ieee_double_shape_type gh_u; \
gh_u.value = (d); \
(i) = gh_u.parts.msw; \
- } while (0)
+ } while (false)
/* Get the less significant 32 bit int from a double. */
@@ -116,7 +116,7 @@ typedef union {
ieee_double_shape_type gl_u; \
gl_u.value = (d); \
(i) = gl_u.parts.lsw; \
- } while (0)
+ } while (false)
/* Set a double from two 32 bit ints. */
@@ -126,7 +126,7 @@ typedef union {
iw_u.parts.msw = (ix0); \
iw_u.parts.lsw = (ix1); \
(d) = iw_u.value; \
- } while (0)
+ } while (false)
/* Set a double from a 64-bit int. */
#define INSERT_WORD64(d, ix) \
@@ -134,7 +134,7 @@ typedef union {
ieee_double_shape_type iw_u; \
iw_u.xparts.w = (ix); \
(d) = iw_u.value; \
- } while (0)
+ } while (false)
/* Set the more significant 32 bits of a double from an int. */
@@ -144,7 +144,7 @@ typedef union {
sh_u.value = (d); \
sh_u.parts.msw = (v); \
(d) = sh_u.value; \
- } while (0)
+ } while (false)
/* Set the less significant 32 bits of a double from an int. */
@@ -154,7 +154,7 @@ typedef union {
sl_u.value = (d); \
sl_u.parts.lsw = (v); \
(d) = sl_u.value; \
- } while (0)
+ } while (false)
/* Support macro. */
@@ -1210,9 +1210,9 @@ double atan(double x) {
if (ix > 0x7FF00000 || (ix == 0x7FF00000 && (low != 0)))
return x + x; /* NaN */
if (hx > 0)
- return atanhi[3] + *(volatile double *)&atanlo[3];
+ return atanhi[3] + *const_cast<volatile double*>(&atanlo[3]);
else
- return -atanhi[3] - *(volatile double *)&atanlo[3];
+ return -atanhi[3] - *const_cast<volatile double*>(&atanlo[3]);
}
if (ix < 0x3FDC0000) { /* |x| < 0.4375 */
if (ix < 0x3E400000) { /* |x| < 2^-27 */
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index baf6b12ccb..9a9538d065 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -49,7 +49,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
if (V8_UNLIKELY(!(condition))) { \
FATAL("Check failed: %s.", message); \
} \
- } while (0)
+ } while (false)
#define CHECK(condition) CHECK_WITH_MSG(condition, #condition)
#ifdef DEBUG
@@ -59,7 +59,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
if (V8_UNLIKELY(!(condition))) { \
V8_Dcheck(__FILE__, __LINE__, message); \
} \
- } while (0)
+ } while (false)
#define DCHECK(condition) DCHECK_WITH_MSG(condition, #condition)
// Helper macro for binary operators.
@@ -73,7 +73,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
FATAL("Check failed: %s.", _msg->c_str()); \
delete _msg; \
} \
- } while (0)
+ } while (false)
#define DCHECK_OP(name, op, lhs, rhs) \
do { \
@@ -84,7 +84,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
V8_Dcheck(__FILE__, __LINE__, _msg->c_str()); \
delete _msg; \
} \
- } while (0)
+ } while (false)
#else
@@ -98,7 +98,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
typename ::v8::base::pass_value_or_ref<decltype(rhs)>::type>((lhs), \
(rhs)); \
CHECK_WITH_MSG(_cmp, #lhs " " #op " " #rhs); \
- } while (0)
+ } while (false)
#define DCHECK_WITH_MSG(condition, msg) void(0);
diff --git a/deps/v8/src/base/lsan-page-allocator.cc b/deps/v8/src/base/lsan-page-allocator.cc
new file mode 100644
index 0000000000..4840c7ea80
--- /dev/null
+++ b/deps/v8/src/base/lsan-page-allocator.cc
@@ -0,0 +1,59 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/lsan-page-allocator.h"
+
+#include "src/base/logging.h"
+
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+LsanPageAllocator::LsanPageAllocator(v8::PageAllocator* page_allocator)
+ : page_allocator_(page_allocator),
+ allocate_page_size_(page_allocator_->AllocatePageSize()),
+ commit_page_size_(page_allocator_->CommitPageSize()) {
+ DCHECK_NOT_NULL(page_allocator);
+}
+
+void* LsanPageAllocator::AllocatePages(void* address, size_t size,
+ size_t alignment,
+ PageAllocator::Permission access) {
+ void* result =
+ page_allocator_->AllocatePages(address, size, alignment, access);
+#if defined(LEAK_SANITIZER)
+ if (result != nullptr) {
+ __lsan_register_root_region(result, size);
+ }
+#endif
+ return result;
+}
+
+bool LsanPageAllocator::FreePages(void* address, size_t size) {
+ bool result = page_allocator_->FreePages(address, size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(address, size);
+ }
+#endif
+ return result;
+}
+
+bool LsanPageAllocator::ReleasePages(void* address, size_t size,
+ size_t new_size) {
+ bool result = page_allocator_->ReleasePages(address, size, new_size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(address, size);
+ __lsan_register_root_region(address, new_size);
+ }
+#endif
+ return result;
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/lsan-page-allocator.h b/deps/v8/src/base/lsan-page-allocator.h
new file mode 100644
index 0000000000..d95c7fbf1e
--- /dev/null
+++ b/deps/v8/src/base/lsan-page-allocator.h
@@ -0,0 +1,56 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_LSAN_PAGE_ALLOCATOR_H_
+#define V8_BASE_LSAN_PAGE_ALLOCATOR_H_
+
+#include "include/v8-platform.h"
+#include "src/base/base-export.h"
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+// This is a v8::PageAllocator implementation that decorates provided page
+// allocator object with leak sanitizer notifications when LEAK_SANITIZER
+// is defined.
+class V8_BASE_EXPORT LsanPageAllocator
+ : public NON_EXPORTED_BASE(::v8::PageAllocator) {
+ public:
+ LsanPageAllocator(v8::PageAllocator* page_allocator);
+ ~LsanPageAllocator() override = default;
+
+ size_t AllocatePageSize() override { return allocate_page_size_; }
+
+ size_t CommitPageSize() override { return commit_page_size_; }
+
+ void SetRandomMmapSeed(int64_t seed) override {
+ return page_allocator_->SetRandomMmapSeed(seed);
+ }
+
+ void* GetRandomMmapAddr() override {
+ return page_allocator_->GetRandomMmapAddr();
+ }
+
+ void* AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) override;
+
+ bool FreePages(void* address, size_t size) override;
+
+ bool ReleasePages(void* address, size_t size, size_t new_size) override;
+
+ bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) override {
+ return page_allocator_->SetPermissions(address, size, access);
+ }
+
+ private:
+ v8::PageAllocator* const page_allocator_;
+ const size_t allocate_page_size_;
+ const size_t commit_page_size_;
+};
+
+} // namespace base
+} // namespace v8
+#endif // V8_BASE_LSAN_PAGE_ALLOCATOR_H_
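
The decorator forwards every call to the wrapped allocator and, when built with LEAK_SANITIZER, registers/unregisters the pages as leak-check root regions. A minimal sketch (function name hypothetical):

#include "src/base/lsan-page-allocator.h"
#include "src/base/page-allocator.h"

void LsanPageAllocatorSketch() {
  v8::base::PageAllocator os_allocator;
  v8::base::LsanPageAllocator lsan(&os_allocator);  // transparent wrapper

  const size_t page = lsan.AllocatePageSize();
  void* p = lsan.AllocatePages(nullptr, page, page,
                               v8::PageAllocator::kReadWrite);
  // Under LEAK_SANITIZER, |p| is now a registered root region; without the
  // define, this behaves exactly like calling |os_allocator| directly.
  if (p != nullptr) lsan.FreePages(p, page);
}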
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 081018cc2e..8a2efe61a9 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -14,6 +14,9 @@
// No-op macro which is used to work around MSVC's funky VA_ARGS support.
#define EXPAND(x) x
+// This macro does nothing. That's all.
+#define NOTHING(...)
+
// TODO(all) Replace all uses of this macro with C++'s offsetof. To do that, we
// have to make sure that only standard-layout types and simple field
// designators are used.
@@ -195,8 +198,9 @@ V8_INLINE Dest bit_cast(Source const& source) {
#define V8_IMMEDIATE_CRASH() ((void(*)())0)()
#endif
-
-// TODO(all) Replace all uses of this macro with static_assert, remove macro.
+// A convenience wrapper around static_assert without a string message argument.
+// Once C++17 becomes the default, this macro can be removed in favor of the
+// new static_assert(condition) overload.
#define STATIC_ASSERT(test) static_assert(test, #test)
namespace v8 {
@@ -276,6 +280,12 @@ struct Use {
(void)unused_tmp_array_for_use_macro; \
} while (false)
+// Evaluate the instantiations of an expression with parameter packs.
+// Since USE has left-to-right evaluation order of its arguments,
+// the parameter pack is iterated from left to right and side effects
+// have defined behavior.
+#define ITERATE_PACK(...) USE(0, ((__VA_ARGS__), 0)...)
+
} // namespace base
} // namespace v8
@@ -346,47 +356,37 @@ V8_INLINE A implicit_cast(A x) {
// write V8_2PART_UINT64_C(0x12345678,90123456);
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
-
-// Compute the 0-relative offset of some absolute value x of type T.
-// This allows conversion of Addresses and integral types into
-// 0-relative int offsets.
-template <typename T>
-constexpr inline intptr_t OffsetFrom(T x) {
- return x - static_cast<T>(0);
-}
-
-
-// Compute the absolute value of type T for some 0-relative offset x.
-// This allows conversion of 0-relative int offsets into Addresses and
-// integral types.
-template <typename T>
-constexpr inline T AddressFrom(intptr_t x) {
- return static_cast<T>(static_cast<T>(0) + x);
-}
-
-
// Return the largest multiple of m which is <= x.
template <typename T>
inline T RoundDown(T x, intptr_t m) {
+ STATIC_ASSERT(std::is_integral<T>::value);
// m must be a power of two.
DCHECK(m != 0 && ((m & (m - 1)) == 0));
- return AddressFrom<T>(OffsetFrom(x) & -m);
+ return x & -m;
}
template <intptr_t m, typename T>
constexpr inline T RoundDown(T x) {
+ STATIC_ASSERT(std::is_integral<T>::value);
// m must be a power of two.
STATIC_ASSERT(m != 0 && ((m & (m - 1)) == 0));
- return AddressFrom<T>(OffsetFrom(x) & -m);
+ return x & -m;
}
// Return the smallest multiple of m which is >= x.
template <typename T>
inline T RoundUp(T x, intptr_t m) {
+ STATIC_ASSERT(std::is_integral<T>::value);
return RoundDown<T>(static_cast<T>(x + m - 1), m);
}
template <intptr_t m, typename T>
constexpr inline T RoundUp(T x) {
- return RoundDown<m, T>(static_cast<T>(x + m - 1));
+ STATIC_ASSERT(std::is_integral<T>::value);
+ return RoundDown<m, T>(static_cast<T>(x + (m - 1)));
+}
+
+template <typename T, typename U>
+inline bool IsAligned(T value, U alignment) {
+ return (value & (alignment - 1)) == 0;
}
inline void* AlignedAddress(void* address, size_t alignment) {
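
With OffsetFrom/AddressFrom removed, RoundDown/RoundUp are integral-only, which is why the platform allocators elsewhere in this diff cast pointers through uintptr_t before rounding. A small sketch of these helpers and of ITERATE_PACK, assuming they remain visible at file scope via src/base/macros.h (function names hypothetical):

#include <iostream>
#include "src/base/macros.h"

template <typename... Args>
void PrintAll(Args... args) {
  // Left-to-right evaluation of the pack, one statement per element.
  ITERATE_PACK(std::cout << args << " ");
}

void RoundingSketch() {
  uintptr_t addr = 0x1234;
  uintptr_t down = RoundDown(addr, 0x1000);   // 0x1000
  uintptr_t up = RoundUp(addr, 0x1000);       // 0x2000
  bool ok = IsAligned(down, size_t{0x1000});  // true
  // Pointers now need an explicit round-trip through uintptr_t:
  void* p = reinterpret_cast<void*>(up);
  (void)p;
  (void)ok;
  PrintAll(1, 2.5, "three");  // prints: 1 2.5 three
}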
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
index 6f5276843d..7dfef2d31f 100644
--- a/deps/v8/src/base/optional.h
+++ b/deps/v8/src/base/optional.h
@@ -123,7 +123,7 @@ class Optional {
public:
using value_type = T;
- constexpr Optional() {}
+ constexpr Optional() = default;
constexpr Optional(base::nullopt_t) {} // NOLINT(runtime/explicit)
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index 25ee2e4721..c25104739d 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -24,11 +24,9 @@ STATIC_ASSERT_ENUM(PageAllocator::kReadExecute,
#undef STATIC_ASSERT_ENUM
-size_t PageAllocator::AllocatePageSize() {
- return base::OS::AllocatePageSize();
-}
-
-size_t PageAllocator::CommitPageSize() { return base::OS::CommitPageSize(); }
+PageAllocator::PageAllocator()
+ : allocate_page_size_(base::OS::AllocatePageSize()),
+ commit_page_size_(base::OS::CommitPageSize()) {}
void PageAllocator::SetRandomMmapSeed(int64_t seed) {
base::OS::SetRandomMmapSeed(seed);
diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h
index ff817cdba2..68e17db494 100644
--- a/deps/v8/src/base/page-allocator.h
+++ b/deps/v8/src/base/page-allocator.h
@@ -15,11 +15,12 @@ namespace base {
class V8_BASE_EXPORT PageAllocator
: public NON_EXPORTED_BASE(::v8::PageAllocator) {
public:
- virtual ~PageAllocator() = default;
+ PageAllocator();
+ ~PageAllocator() override = default;
- size_t AllocatePageSize() override;
+ size_t AllocatePageSize() override { return allocate_page_size_; }
- size_t CommitPageSize() override;
+ size_t CommitPageSize() override { return commit_page_size_; }
void SetRandomMmapSeed(int64_t seed) override;
@@ -34,6 +35,10 @@ class V8_BASE_EXPORT PageAllocator
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override;
+
+ private:
+ const size_t allocate_page_size_;
+ const size_t commit_page_size_;
};
} // namespace base
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS
index 5deaa67ce7..cbaed6105d 100644
--- a/deps/v8/src/base/platform/OWNERS
+++ b/deps/v8/src/base/platform/OWNERS
@@ -3,4 +3,6 @@ set noparent
hpayer@chromium.org
mlippautz@chromium.org
+per-file platform-fuchsia.cc=wez@chromium.org
+
# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index d1979fb9d8..713ee404bd 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -57,8 +57,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
strlen(kVirtualMemoryName));
uintptr_t reservation;
uint32_t prot = GetProtectionFromMemoryPermission(access);
- zx_status_t status = zx_vmar_map_old(zx_vmar_root_self(), 0, vmo, 0,
- request_size, prot, &reservation);
+ zx_status_t status = zx_vmar_map(zx_vmar_root_self(), prot, 0, vmo, 0,
+ request_size, &reservation);
// Either the vmo is now referenced by the vmar, or we failed and are bailing,
// so close the vmo either way.
zx_handle_close(vmo);
@@ -67,7 +67,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
}
uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = RoundUp(base, alignment);
+ uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
+ RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
@@ -114,9 +115,8 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
uint32_t prot = GetProtectionFromMemoryPermission(access);
- return zx_vmar_protect_old(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size,
- prot) == ZX_OK;
+ return zx_vmar_protect(zx_vmar_root_self(), prot,
+ reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 725ad0c6eb..10815f29c5 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -27,14 +27,6 @@
#include <sys/types.h> // mmap & munmap
#include <unistd.h> // sysconf
-// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
-// Old versions of the C library <signal.h> didn't define the type.
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- (defined(__arm__) || defined(__aarch64__)) && \
- !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
-#include <asm/sigcontext.h> // NOLINT
-#endif
-
#include <cmath>
#undef MAP_TYPE
diff --git a/deps/v8/src/base/platform/platform-posix-time.h b/deps/v8/src/base/platform/platform-posix-time.h
index 4d3373715b..7814296b83 100644
--- a/deps/v8/src/base/platform/platform-posix-time.h
+++ b/deps/v8/src/base/platform/platform-posix-time.h
@@ -15,7 +15,7 @@ class PosixDefaultTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time_ms) override;
double LocalTimeOffset(double time_ms, bool is_utc) override;
- ~PosixDefaultTimezoneCache() override {}
+ ~PosixDefaultTimezoneCache() override = default;
};
} // namespace base
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index cb25196970..c93974bcfc 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -86,7 +86,7 @@ namespace base {
namespace {
// 0 is never a valid thread id.
-const pthread_t kNoThread = (pthread_t) 0;
+const pthread_t kNoThread = static_cast<pthread_t>(0);
bool g_hard_abort = false;
@@ -254,10 +254,6 @@ void* OS::GetRandomMmapAddr() {
// Little-endian Linux: 46 bits of virtual addressing.
raw_addr &= uint64_t{0x3FFFFFFF0000};
#endif
-#elif V8_TARGET_ARCH_MIPS64
- // We allocate code in 256 MB aligned segments because of optimizations using
- // J instruction that require that all code is within a single 256 MB segment
- raw_addr &= uint64_t{0x3FFFE0000000};
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to allow kernel chance to
@@ -267,6 +263,10 @@ void* OS::GetRandomMmapAddr() {
// 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
// to fulfill request.
raw_addr &= 0x1FFFF000;
+#elif V8_TARGET_ARCH_MIPS64
+ // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
+ // to fulfill request.
+ raw_addr &= uint64_t{0xFFFFFF0000};
#else
raw_addr &= 0x3FFFF000;
@@ -313,7 +313,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// Unmap memory allocated before the aligned base address.
uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
+ uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
+ RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
if (aligned_base != base) {
DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);
diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h
index 55861bc9ac..8cf5e54604 100644
--- a/deps/v8/src/base/platform/platform-posix.h
+++ b/deps/v8/src/base/platform/platform-posix.h
@@ -15,7 +15,7 @@ class PosixTimezoneCache : public TimezoneCache {
public:
double DaylightSavingsOffset(double time_ms) override;
void Clear() override {}
- ~PosixTimezoneCache() override {}
+ ~PosixTimezoneCache() override = default;
protected:
static const int msPerSecond = 1000;
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 2e56ac5df1..11a008e6c6 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -822,7 +822,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// If address is suitably aligned, we're done.
- uint8_t* aligned_base = RoundUp(base, alignment);
+ uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
+ RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
if (base == aligned_base) return reinterpret_cast<void*>(base);
// Otherwise, free it and try a larger allocation.
@@ -843,7 +844,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// Try to trim the allocation by freeing the padded allocation and then
// calling VirtualAlloc at the aligned base.
CHECK(Free(base, padded_size));
- aligned_base = RoundUp(base, alignment);
+ aligned_base = reinterpret_cast<uint8_t*>(
+ RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
base = reinterpret_cast<uint8_t*>(
VirtualAlloc(aligned_base, size, flags, protect));
// We might not get the reduced allocation due to a race. In that case,
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 51b6014821..f9d01edf00 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -188,7 +188,7 @@ class V8_BASE_EXPORT OS {
class V8_BASE_EXPORT MemoryMappedFile {
public:
- virtual ~MemoryMappedFile() {}
+ virtual ~MemoryMappedFile() = default;
virtual void* memory() const = 0;
virtual size_t size() const = 0;
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 5950664523..a7e50f5880 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -91,7 +91,9 @@ void Semaphore::Signal() {
// This check may fail with <libc-2.21, which we use on the try bots, if the
// semaphore is destroyed while sem_post is still executed. A work around is
// to extend the lifetime of the semaphore.
- CHECK_EQ(0, result);
+ if (result != 0) {
+ FATAL("Error when signaling semaphore, errno: %d", errno);
+ }
}
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index 161092ad8b..9e99166487 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -105,10 +105,7 @@ class V8_BASE_EXPORT TimeDelta final {
static TimeDelta FromTimespec(struct timespec ts);
struct timespec ToTimespec() const;
- TimeDelta& operator=(const TimeDelta& other) {
- delta_ = other.delta_;
- return *this;
- }
+ TimeDelta& operator=(const TimeDelta& other) = default;
// Computations with other deltas.
TimeDelta operator+(const TimeDelta& other) const {
diff --git a/deps/v8/src/base/region-allocator.cc b/deps/v8/src/base/region-allocator.cc
new file mode 100644
index 0000000000..46ceca1857
--- /dev/null
+++ b/deps/v8/src/base/region-allocator.cc
@@ -0,0 +1,291 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/region-allocator.h"
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// If |free_size| < |region_size| * |kMaxLoadFactorForRandomization|, stop
+// trying to randomize region allocation.
+constexpr double kMaxLoadFactorForRandomization = 0.40;
+
+// Max number of attempts to allocate page at random address.
+constexpr int kMaxRandomizationAttempts = 3;
+
+RegionAllocator::RegionAllocator(Address memory_region_begin,
+ size_t memory_region_size, size_t page_size)
+ : whole_region_(memory_region_begin, memory_region_size, false),
+ region_size_in_pages_(size() / page_size),
+ max_load_for_randomization_(
+ static_cast<size_t>(size() * kMaxLoadFactorForRandomization)),
+ free_size_(0),
+ page_size_(page_size) {
+ CHECK_LT(begin(), end());
+ CHECK(base::bits::IsPowerOfTwo(page_size_));
+ CHECK(IsAligned(size(), page_size_));
+ CHECK(IsAligned(begin(), page_size_));
+
+ // Initial region.
+ Region* region = new Region(whole_region_);
+
+ all_regions_.insert(region);
+
+ FreeListAddRegion(region);
+}
+
+RegionAllocator::~RegionAllocator() {
+ for (Region* region : all_regions_) {
+ delete region;
+ }
+}
+
+RegionAllocator::AllRegionsSet::iterator RegionAllocator::FindRegion(
+ Address address) {
+ if (!whole_region_.contains(address)) return all_regions_.end();
+
+ Region key(address, 0, false);
+ AllRegionsSet::iterator iter = all_regions_.upper_bound(&key);
+ // Regions in |all_regions_| are compared by end() values and key's end()
+ // points exactly to the address we are querying, so the upper_bound will
+ // find the region whose |end()| is greater than the requested address.
+ DCHECK_NE(iter, all_regions_.end());
+ DCHECK((*iter)->contains(address));
+ return iter;
+}
+
+void RegionAllocator::FreeListAddRegion(Region* region) {
+ free_size_ += region->size();
+ free_regions_.insert(region);
+}
+
+RegionAllocator::Region* RegionAllocator::FreeListFindRegion(size_t size) {
+ Region key(0, size, false);
+ auto iter = free_regions_.lower_bound(&key);
+ return iter == free_regions_.end() ? nullptr : *iter;
+}
+
+void RegionAllocator::FreeListRemoveRegion(Region* region) {
+ DCHECK(!region->is_used());
+ auto iter = free_regions_.find(region);
+ DCHECK_NE(iter, free_regions_.end());
+ DCHECK_EQ(region, *iter);
+ DCHECK_LE(region->size(), free_size_);
+ free_size_ -= region->size();
+ free_regions_.erase(iter);
+}
+
+RegionAllocator::Region* RegionAllocator::Split(Region* region,
+ size_t new_size) {
+ DCHECK(IsAligned(new_size, page_size_));
+ DCHECK_NE(new_size, 0);
+ DCHECK_GT(region->size(), new_size);
+
+  // Create a new region and put it into the lists after |region|.
+ bool used = region->is_used();
+ Region* new_region =
+ new Region(region->begin() + new_size, region->size() - new_size, used);
+ if (!used) {
+    // Remove the region from the free list before updating its size.
+ FreeListRemoveRegion(region);
+ }
+ region->set_size(new_size);
+
+ all_regions_.insert(new_region);
+
+ if (!used) {
+ FreeListAddRegion(region);
+ FreeListAddRegion(new_region);
+ }
+ return new_region;
+}
+
+void RegionAllocator::Merge(AllRegionsSet::iterator prev_iter,
+ AllRegionsSet::iterator next_iter) {
+ Region* prev = *prev_iter;
+ Region* next = *next_iter;
+ DCHECK_EQ(prev->end(), next->begin());
+ prev->set_size(prev->size() + next->size());
+
+ all_regions_.erase(next_iter); // prev_iter stays valid.
+
+ // The |next| region must already not be in the free list.
+ DCHECK_EQ(free_regions_.find(next), free_regions_.end());
+ delete next;
+}
+
+RegionAllocator::Address RegionAllocator::AllocateRegion(size_t size) {
+ DCHECK_NE(size, 0);
+ DCHECK(IsAligned(size, page_size_));
+
+ Region* region = FreeListFindRegion(size);
+ if (region == nullptr) return kAllocationFailure;
+
+ if (region->size() != size) {
+ Split(region, size);
+ }
+ DCHECK(IsAligned(region->begin(), page_size_));
+ DCHECK_EQ(region->size(), size);
+
+ // Mark region as used.
+ FreeListRemoveRegion(region);
+ region->set_is_used(true);
+ return region->begin();
+}
+
+RegionAllocator::Address RegionAllocator::AllocateRegion(
+ RandomNumberGenerator* rng, size_t size) {
+ if (free_size() >= max_load_for_randomization_) {
+ // There is enough free space for trying to randomize the address.
+ size_t random = 0;
+
+ for (int i = 0; i < kMaxRandomizationAttempts; i++) {
+ rng->NextBytes(&random, sizeof(random));
+ size_t random_offset = page_size_ * (random % region_size_in_pages_);
+ Address address = begin() + random_offset;
+ if (AllocateRegionAt(address, size)) {
+ return address;
+ }
+ }
+ // Fall back to free list allocation.
+ }
+ return AllocateRegion(size);
+}
+
+bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) {
+ DCHECK(IsAligned(requested_address, page_size_));
+ DCHECK_NE(size, 0);
+ DCHECK(IsAligned(size, page_size_));
+
+ Address requested_end = requested_address + size;
+ DCHECK_LE(requested_end, end());
+
+ Region* region;
+ {
+ AllRegionsSet::iterator region_iter = FindRegion(requested_address);
+ if (region_iter == all_regions_.end()) {
+ return false;
+ }
+ region = *region_iter;
+ }
+ if (region->is_used() || region->end() < requested_end) {
+ return false;
+ }
+ // Found free region that includes the requested one.
+ if (region->begin() != requested_address) {
+ // Split the region at the |requested_address| boundary.
+ size_t new_size = requested_address - region->begin();
+ DCHECK(IsAligned(new_size, page_size_));
+ region = Split(region, new_size);
+ }
+ if (region->end() != requested_end) {
+ // Split the region at the |requested_end| boundary.
+ Split(region, size);
+ }
+ DCHECK_EQ(region->begin(), requested_address);
+ DCHECK_EQ(region->size(), size);
+
+ // Mark region as used.
+ FreeListRemoveRegion(region);
+ region->set_is_used(true);
+ return true;
+}
+
+size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
+ DCHECK(IsAligned(new_size, page_size_));
+
+ AllRegionsSet::iterator region_iter = FindRegion(address);
+ if (region_iter == all_regions_.end()) {
+ return 0;
+ }
+ Region* region = *region_iter;
+ if (region->begin() != address || !region->is_used()) {
+ return 0;
+ }
+
+ // The region must not be in the free list.
+ DCHECK_EQ(free_regions_.find(*region_iter), free_regions_.end());
+
+ if (new_size > 0) {
+ region = Split(region, new_size);
+ ++region_iter;
+ }
+ size_t size = region->size();
+ region->set_is_used(false);
+
+ // Merge current region with the surrounding ones if they are free.
+ if (region->end() != whole_region_.end()) {
+ // There must be a range after the current one.
+ AllRegionsSet::iterator next_iter = std::next(region_iter);
+ DCHECK_NE(next_iter, all_regions_.end());
+ if (!(*next_iter)->is_used()) {
+      // The |next| region object will be deleted during the merge, so remove
+      // it from the free list.
+ FreeListRemoveRegion(*next_iter);
+ Merge(region_iter, next_iter);
+ }
+ }
+ if (new_size == 0 && region->begin() != whole_region_.begin()) {
+ // There must be a range before the current one.
+ AllRegionsSet::iterator prev_iter = std::prev(region_iter);
+ DCHECK_NE(prev_iter, all_regions_.end());
+ if (!(*prev_iter)->is_used()) {
+      // The |prev| region's size will change; we'll have to re-insert it at
+      // the proper place in the free list.
+ FreeListRemoveRegion(*prev_iter);
+ Merge(prev_iter, region_iter);
+ // |prev| region becomes the current region.
+ region_iter = prev_iter;
+ region = *region_iter;
+ }
+ }
+ FreeListAddRegion(region);
+ return size;
+}
+
+size_t RegionAllocator::CheckRegion(Address address) {
+ AllRegionsSet::iterator region_iter = FindRegion(address);
+ if (region_iter == all_regions_.end()) {
+ return 0;
+ }
+ Region* region = *region_iter;
+ if (region->begin() != address || !region->is_used()) {
+ return 0;
+ }
+ return region->size();
+}
+
+void RegionAllocator::Region::Print(std::ostream& os) const {
+ std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
+ os << "[" << begin() << ", " << end() << "), size: " << size();
+ os << ", " << (is_used() ? "used" : "free");
+ os.flags(flags);
+}
+
+void RegionAllocator::Print(std::ostream& os) const {
+ std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
+ os << "RegionAllocator: [" << begin() << ", " << end() << ")";
+ os << "\nsize: " << size();
+ os << "\nfree_size: " << free_size();
+ os << "\npage_size: " << page_size_;
+
+ os << "\nall regions: ";
+ for (const Region* region : all_regions_) {
+ os << "\n ";
+ region->Print(os);
+ }
+
+ os << "\nfree regions: ";
+ for (const Region* region : free_regions_) {
+ os << "\n ";
+ region->Print(os);
+ }
+ os << "\n";
+ os.flags(flags);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h
new file mode 100644
index 0000000000..fb51472fa9
--- /dev/null
+++ b/deps/v8/src/base/region-allocator.h
@@ -0,0 +1,164 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_REGION_ALLOCATOR_H_
+#define V8_BASE_REGION_ALLOCATOR_H_
+
+#include <set>
+
+#include "src/base/address-region.h"
+#include "src/base/utils/random-number-generator.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
+
+namespace v8 {
+namespace base {
+
+// Helper class for managing used/free regions within the
+// [address, address+size) region. The minimum allocation unit is |page_size|.
+// Requested allocation sizes are rounded up to |page_size|.
+// The region allocation algorithm implements a best-fit strategy with
+// coalescing: it tries to find the smallest suitable free region upon
+// allocation and tries to merge a region with its neighbors upon freeing.
+//
+// This class does not perform any actual region reservation.
+// Not thread-safe.
+class V8_BASE_EXPORT RegionAllocator final {
+ public:
+ typedef uintptr_t Address;
+
+ static constexpr Address kAllocationFailure = static_cast<Address>(-1);
+
+ RegionAllocator(Address address, size_t size, size_t page_size);
+ ~RegionAllocator();
+
+  // Allocates a region of |size| (must be |page_size|-aligned). Returns
+ // the address of the region on success or kAllocationFailure.
+ Address AllocateRegion(size_t size);
+ // Same as above but tries to randomize the region displacement.
+ Address AllocateRegion(RandomNumberGenerator* rng, size_t size);
+
+ // Allocates region of |size| at |requested_address| if it's free. Both the
+ // address and the size must be |page_size|-aligned. On success returns
+ // true.
+  // This kind of allocation is supposed to be used during the setup phase to
+  // mark certain regions as used or for randomizing region displacement.
+ bool AllocateRegionAt(Address requested_address, size_t size);
+
+  // Frees the region at the given |address| and returns its size.
+  // There must be a used region starting at the given address; otherwise
+  // nothing will be freed and 0 will be returned.
+ size_t FreeRegion(Address address) { return TrimRegion(address, 0); }
+
+ // Decreases size of the previously allocated region at |address|, returns
+ // freed size. |new_size| must be |page_size|-aligned and
+ // less than or equal to current region's size. Setting new size to zero
+ // frees the region.
+ size_t TrimRegion(Address address, size_t new_size);
+
+ // If there is a used region starting at given address returns its size
+ // otherwise 0.
+ size_t CheckRegion(Address address);
+
+ Address begin() const { return whole_region_.begin(); }
+ Address end() const { return whole_region_.end(); }
+ size_t size() const { return whole_region_.size(); }
+
+ bool contains(Address address) const {
+ return whole_region_.contains(address);
+ }
+
+ bool contains(Address address, size_t size) const {
+ return whole_region_.contains(address, size);
+ }
+
+  // Total size of not yet acquired regions.
+ size_t free_size() const { return free_size_; }
+
+ // The alignment of the allocated region's addresses and granularity of
+ // the allocated region's sizes.
+ size_t page_size() const { return page_size_; }
+
+ void Print(std::ostream& os) const;
+
+ private:
+ class Region : public AddressRegion {
+ public:
+ Region(Address address, size_t size, bool is_used)
+ : AddressRegion(address, size), is_used_(is_used) {}
+
+ bool is_used() const { return is_used_; }
+ void set_is_used(bool used) { is_used_ = used; }
+
+ void Print(std::ostream& os) const;
+
+ private:
+ bool is_used_;
+ };
+
+ // The whole region.
+ const Region whole_region_;
+
+ // Number of |page_size_| in the whole region.
+ const size_t region_size_in_pages_;
+
+  // If the free size is less than this value, stop trying to randomize the
+  // allocation addresses.
+ const size_t max_load_for_randomization_;
+
+ // Size of all free regions.
+ size_t free_size_;
+
+  // Minimum region size. Must be a power of 2.
+ const size_t page_size_;
+
+ struct AddressEndOrder {
+ bool operator()(const Region* a, const Region* b) const {
+ return a->end() < b->end();
+ }
+ };
+ // All regions ordered by addresses.
+ typedef std::set<Region*, AddressEndOrder> AllRegionsSet;
+ AllRegionsSet all_regions_;
+
+ struct SizeAddressOrder {
+ bool operator()(const Region* a, const Region* b) const {
+ if (a->size() != b->size()) return a->size() < b->size();
+ return a->begin() < b->begin();
+ }
+ };
+ // Free regions ordered by sizes and addresses.
+ std::set<Region*, SizeAddressOrder> free_regions_;
+
+  // Returns an iterator to the region containing the given address, or
+  // all_regions_.end() if there is none.
+ AllRegionsSet::iterator FindRegion(Address address);
+
+ // Adds given region to the set of free regions.
+ void FreeListAddRegion(Region* region);
+
+ // Finds best-fit free region for given size.
+ Region* FreeListFindRegion(size_t size);
+
+ // Removes given region from the set of free regions.
+ void FreeListRemoveRegion(Region* region);
+
+  // Splits the given |region| in two: one of size |new_size| and a new one
+  // covering the rest. The new region is returned.
+ Region* Split(Region* region, size_t new_size);
+
+  // For two adjacent regions, merges |next| into |prev| and deletes |next|.
+ void Merge(AllRegionsSet::iterator prev_iter,
+ AllRegionsSet::iterator next_iter);
+
+ FRIEND_TEST(RegionAllocatorTest, AllocateRegionRandom);
+ FRIEND_TEST(RegionAllocatorTest, Fragmentation);
+ FRIEND_TEST(RegionAllocatorTest, FindRegion);
+ FRIEND_TEST(RegionAllocatorTest, Contains);
+
+ DISALLOW_COPY_AND_ASSIGN(RegionAllocator);
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_REGION_ALLOCATOR_H_
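
A short sketch of the bookkeeping behavior, assuming the header above (addresses illustrative; no memory is actually reserved, the allocator only tracks regions):

#include "src/base/logging.h"
#include "src/base/region-allocator.h"

void RegionAllocatorSketch() {
  constexpr size_t kPageSize = 4096;
  v8::base::RegionAllocator ra(0x10000, 16 * kPageSize, kPageSize);

  // Best-fit allocation out of the free list.
  v8::base::RegionAllocator::Address a = ra.AllocateRegion(2 * kPageSize);
  CHECK_NE(a, v8::base::RegionAllocator::kAllocationFailure);

  // Pin a specific sub-range, e.g. while laying out a reservation.
  CHECK(ra.AllocateRegionAt(0x10000 + 8 * kPageSize, kPageSize));

  // Shrink the first region to one page, then free it; freeing coalesces
  // the region with its free neighbors.
  ra.TrimRegion(a, kPageSize);
  CHECK_EQ(kPageSize, ra.FreeRegion(a));
}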
diff --git a/deps/v8/src/base/safe_math.h b/deps/v8/src/base/safe_math.h
index 62a2f723f2..700bc3387f 100644
--- a/deps/v8/src/base/safe_math.h
+++ b/deps/v8/src/base/safe_math.h
@@ -49,7 +49,7 @@ class CheckedNumeric {
public:
typedef T type;
- CheckedNumeric() {}
+ CheckedNumeric() = default;
// Copy constructor.
template <typename Src>
diff --git a/deps/v8/src/base/threaded-list.h b/deps/v8/src/base/threaded-list.h
new file mode 100644
index 0000000000..d54bcb8f70
--- /dev/null
+++ b/deps/v8/src/base/threaded-list.h
@@ -0,0 +1,267 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_THREADED_LIST_H_
+#define V8_BASE_THREADED_LIST_H_
+
+#include <iterator>
+
+#include "src/base/compiler-specific.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+template <typename T>
+struct ThreadedListTraits {
+ static T** next(T* t) { return t->next(); }
+};
+
+// Represents an intrusive linked list that threads through its nodes.
+// Entries in the list are pointers to nodes. By default, nodes need to have a
+// T** next() method that returns the location where the next value is stored.
+// The default can be overridden by providing a custom ThreadedListTraits class.
+template <typename T, typename BaseClass,
+ typename TLTraits = ThreadedListTraits<T>>
+class ThreadedListBase final : public BaseClass {
+ public:
+ ThreadedListBase() : head_(nullptr), tail_(&head_) {}
+ void Add(T* v) {
+ DCHECK_NULL(*tail_);
+ DCHECK_NULL(*TLTraits::next(v));
+ *tail_ = v;
+ tail_ = TLTraits::next(v);
+ }
+
+ void AddFront(T* v) {
+ DCHECK_NULL(*TLTraits::next(v));
+ DCHECK_NOT_NULL(v);
+ T** const next = TLTraits::next(v);
+
+ *next = head_;
+ if (head_ == nullptr) tail_ = next;
+ head_ = v;
+ }
+
+  // Reinitializes the head to a new node; this costs O(n).
+ void ReinitializeHead(T* v) {
+ head_ = v;
+ T* current = v;
+ if (current != nullptr) { // Find tail
+ T* tmp;
+ while ((tmp = *TLTraits::next(current))) {
+ current = tmp;
+ }
+ tail_ = TLTraits::next(current);
+ } else {
+ tail_ = &head_;
+ }
+ }
+
+ void DropHead() {
+ DCHECK_NOT_NULL(head_);
+
+ T* old_head = head_;
+ head_ = *TLTraits::next(head_);
+ if (head_ == nullptr) tail_ = &head_;
+ *TLTraits::next(old_head) = nullptr;
+ }
+
+ void Append(ThreadedListBase&& list) {
+ *tail_ = list.head_;
+ tail_ = list.tail_;
+ list.Clear();
+ }
+
+ void Prepend(ThreadedListBase&& list) {
+ if (list.head_ == nullptr) return;
+
+ T* new_head = list.head_;
+ *list.tail_ = head_;
+ if (head_ == nullptr) {
+ tail_ = list.tail_;
+ }
+ head_ = new_head;
+ list.Clear();
+ }
+
+ void Clear() {
+ head_ = nullptr;
+ tail_ = &head_;
+ }
+
+ ThreadedListBase& operator=(ThreadedListBase&& other) V8_NOEXCEPT {
+ head_ = other.head_;
+ tail_ = other.head_ ? other.tail_ : &head_;
+#ifdef DEBUG
+ other.Clear();
+#endif
+ return *this;
+ }
+
+ ThreadedListBase(ThreadedListBase&& other) V8_NOEXCEPT
+ : head_(other.head_),
+ tail_(other.head_ ? other.tail_ : &head_) {
+#ifdef DEBUG
+ other.Clear();
+#endif
+ }
+
+ bool Remove(T* v) {
+ T* current = first();
+ if (current == v) {
+ DropHead();
+ return true;
+ }
+
+ while (current != nullptr) {
+ T* next = *TLTraits::next(current);
+ if (next == v) {
+ *TLTraits::next(current) = *TLTraits::next(next);
+ *TLTraits::next(next) = nullptr;
+
+ if (TLTraits::next(next) == tail_) {
+ tail_ = TLTraits::next(current);
+ }
+ return true;
+ }
+ current = next;
+ }
+ return false;
+ }
+
+ class Iterator final {
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using difference_type = std::ptrdiff_t;
+ using value_type = T*;
+ using reference = value_type;
+ using pointer = value_type*;
+
+ public:
+ Iterator& operator++() {
+ entry_ = TLTraits::next(*entry_);
+ return *this;
+ }
+ bool operator==(const Iterator& other) const {
+ return entry_ == other.entry_;
+ }
+ bool operator!=(const Iterator& other) const {
+ return entry_ != other.entry_;
+ }
+ T* operator*() { return *entry_; }
+ T* operator->() { return *entry_; }
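+ // Replaces the element at this iterator's position with |entry|,
+ // splicing |entry| into the chain in place of the old node.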
+ Iterator& operator=(T* entry) {
+ T* next = *TLTraits::next(*entry_);
+ *TLTraits::next(entry) = next;
+ *entry_ = entry;
+ return *this;
+ }
+
+ private:
+ explicit Iterator(T** entry) : entry_(entry) {}
+
+ T** entry_;
+
+ friend class ThreadedListBase;
+ };
+
+ class ConstIterator final {
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using difference_type = std::ptrdiff_t;
+ using value_type = T*;
+ using reference = const value_type;
+ using pointer = const value_type*;
+
+ public:
+ ConstIterator& operator++() {
+ entry_ = TLTraits::next(*entry_);
+ return *this;
+ }
+ bool operator==(const ConstIterator& other) const {
+ return entry_ == other.entry_;
+ }
+ bool operator!=(const ConstIterator& other) const {
+ return entry_ != other.entry_;
+ }
+ const T* operator*() const { return *entry_; }
+
+ private:
+ explicit ConstIterator(T* const* entry) : entry_(entry) {}
+
+ T* const* entry_;
+
+ friend class ThreadedListBase;
+ };
+
+ Iterator begin() { return Iterator(&head_); }
+ Iterator end() { return Iterator(tail_); }
+
+ ConstIterator begin() const { return ConstIterator(&head_); }
+ ConstIterator end() const { return ConstIterator(tail_); }
+
+ // Rewinds the list's tail to the reset point, i.e., cuts off the rest of
+ // the list, including the element at reset_point.
+ void Rewind(Iterator reset_point) {
+ tail_ = reset_point.entry_;
+ *tail_ = nullptr;
+ }
+
+ // Moves the tail of from_list, starting at from_location, to the end of
+ // this list.
+ void MoveTail(ThreadedListBase* from_list, Iterator from_location) {
+ if (from_list->end() != from_location) {
+ DCHECK_NULL(*tail_);
+ *tail_ = *from_location;
+ tail_ = from_list->tail_;
+ from_list->Rewind(from_location);
+ }
+ }
+
+ bool is_empty() const { return head_ == nullptr; }
+
+ T* first() const { return head_; }
+
+ // Slow. For testing purposes.
+ int LengthForTest() {
+ int result = 0;
+ for (Iterator t = begin(); t != end(); ++t) ++result;
+ return result;
+ }
+
+ T* AtForTest(int i) {
+ Iterator t = begin();
+ while (i-- > 0) ++t;
+ return *t;
+ }
+
+ bool Verify() {
+ T* last = this->first();
+ if (last == nullptr) {
+ CHECK_EQ(&head_, tail_);
+ } else {
+ while (*TLTraits::next(last) != nullptr) {
+ last = *TLTraits::next(last);
+ }
+ CHECK_EQ(TLTraits::next(last), tail_);
+ }
+ return true;
+ }
+
+ private:
+ T* head_;
+ T** tail_;
+ DISALLOW_COPY_AND_ASSIGN(ThreadedListBase);
+};
+
+struct EmptyBase {};
+
+template <typename T, typename TLTraits = ThreadedListTraits<T>>
+using ThreadedList = ThreadedListBase<T, EmptyBase, TLTraits>;
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_THREADED_LIST_H_
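A minimal usage sketch of the new ThreadedList (illustrative only; the Node struct and Example function are hypothetical, not part of this patch):

    #include "src/base/threaded-list.h"

    struct Node {
      int value = 0;
      Node* next_ = nullptr;
      // Hook required by the default ThreadedListTraits: the address of the
      // slot holding the next node pointer.
      Node** next() { return &next_; }
    };

    void Example() {
      v8::base::ThreadedList<Node> list;
      Node a, b;
      a.value = 1;
      b.value = 2;
      list.Add(&a);       // list: a
      list.AddFront(&b);  // list: b -> a
      for (Node* n : list) {
        (void)n;  // visits b (value 2), then a (value 1)
      }
      list.Remove(&b);    // list: a
    }

Since the list threads through the nodes themselves, no per-entry allocation is needed; the trade-off is that a node can sit in at most one such list at a time.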
diff --git a/deps/v8/src/base/timezone-cache.h b/deps/v8/src/base/timezone-cache.h
index 96ad7bb41f..3d97eee126 100644
--- a/deps/v8/src/base/timezone-cache.h
+++ b/deps/v8/src/base/timezone-cache.h
@@ -27,7 +27,7 @@ class TimezoneCache {
virtual void Clear() = 0;
// Called when tearing down the isolate
- virtual ~TimezoneCache() {}
+ virtual ~TimezoneCache() = default;
};
} // namespace base
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index afe5a1f098..a3313f4e88 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -99,7 +99,7 @@ int RandomNumberGenerator::NextInt(int max) {
double RandomNumberGenerator::NextDouble() {
XorShift128(&state0_, &state1_);
- return ToDouble(state0_, state1_);
+ return ToDouble(state0_);
}
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index b4b67970c7..45ec259305 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -108,11 +108,10 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
int64_t initial_seed() const { return initial_seed_; }
// Static and exposed for external use.
- static inline double ToDouble(uint64_t state0, uint64_t state1) {
+ static inline double ToDouble(uint64_t state0) {
// Exponent for double values for [1.0 .. 2.0)
static const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};
- static const uint64_t kMantissaMask = uint64_t{0x000FFFFFFFFFFFFF};
- uint64_t random = ((state0 + state1) & kMantissaMask) | kExponentBits;
+ uint64_t random = (state0 >> 12) | kExponentBits;
return bit_cast<double>(random) - 1;
}
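The new ToDouble uses the standard exponent-bit trick: fixing the exponent field to 0x3FF yields a double in [1.0, 2.0) whose mantissa is filled from the top 52 bits of state0, and subtracting 1 maps the result to [0.0, 1.0). A self-contained sketch of the same computation (assumes IEEE-754 doubles; std::memcpy stands in for V8's bit_cast):

    #include <cstdint>
    #include <cstring>

    double ToDoubleSketch(uint64_t state0) {
      // Sign and exponent occupy the top 12 bits of a double;
      // 0x3FF0000000000000 encodes an exponent for the range [1.0, 2.0).
      const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};
      // Shift right by 12 so the high bits of state0 fill the 52-bit
      // mantissa, then OR in the fixed exponent.
      uint64_t random = (state0 >> 12) | kExponentBits;
      double d;
      std::memcpy(&d, &random, sizeof(d));
      return d - 1.0;  // [1.0, 2.0) -> [0.0, 1.0)
    }

Compared with the replaced version, only state0 is consumed, and the high (better-mixed) bits of the xorshift state land in the mantissa instead of being masked away.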
@@ -128,6 +127,8 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
*state1 = s1;
}
+ static uint64_t MurmurHash3(uint64_t);
+
private:
static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
static const int64_t kAddend = 0xb;
@@ -135,8 +136,6 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
int Next(int bits) V8_WARN_UNUSED_RESULT;
- static uint64_t MurmurHash3(uint64_t);
-
int64_t initial_seed_;
uint64_t state0_;
uint64_t state1_;
diff --git a/deps/v8/src/basic-block-profiler.cc b/deps/v8/src/basic-block-profiler.cc
index eaecd5dc68..d79dbcdfa8 100644
--- a/deps/v8/src/basic-block-profiler.cc
+++ b/deps/v8/src/basic-block-profiler.cc
@@ -27,9 +27,6 @@ BasicBlockProfiler::Data::Data(size_t n_blocks)
block_rpo_numbers_(n_blocks_),
counts_(n_blocks_, 0) {}
-BasicBlockProfiler::Data::~Data() {}
-
-
static void InsertIntoString(std::ostringstream* os, std::string* string) {
string->insert(0, os->str());
}
@@ -68,10 +65,6 @@ void BasicBlockProfiler::Data::ResetCounts() {
}
}
-
-BasicBlockProfiler::BasicBlockProfiler() {}
-
-
BasicBlockProfiler::Data* BasicBlockProfiler::NewData(size_t n_blocks) {
base::LockGuard<base::Mutex> lock(&data_list_mutex_);
Data* data = new Data(n_blocks);
diff --git a/deps/v8/src/basic-block-profiler.h b/deps/v8/src/basic-block-profiler.h
index 975840e46e..835dda5356 100644
--- a/deps/v8/src/basic-block-profiler.h
+++ b/deps/v8/src/basic-block-profiler.h
@@ -36,7 +36,7 @@ class BasicBlockProfiler {
const BasicBlockProfiler::Data& s);
explicit Data(size_t n_blocks);
- ~Data();
+ ~Data() = default;
void ResetCounts();
@@ -51,7 +51,7 @@ class BasicBlockProfiler {
typedef std::list<Data*> DataList;
- BasicBlockProfiler();
+ BasicBlockProfiler() = default;
~BasicBlockProfiler();
V8_EXPORT_PRIVATE static BasicBlockProfiler* Get();
diff --git a/deps/v8/src/bit-vector.h b/deps/v8/src/bit-vector.h
index ef87600753..5be3198cc6 100644
--- a/deps/v8/src/bit-vector.h
+++ b/deps/v8/src/bit-vector.h
@@ -21,7 +21,7 @@ class BitVector : public ZoneObject {
};
// Iterator for the elements of this BitVector.
- class Iterator BASE_EMBEDDED {
+ class Iterator {
public:
explicit Iterator(BitVector* target)
: target_(target),
@@ -31,7 +31,7 @@ class BitVector : public ZoneObject {
current_(-1) {
Advance();
}
- ~Iterator() {}
+ ~Iterator() = default;
bool Done() const { return current_index_ >= target_->data_length_; }
void Advance();
@@ -305,10 +305,9 @@ class BitVector : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(BitVector);
};
-
-class GrowableBitVector BASE_EMBEDDED {
+class GrowableBitVector {
public:
- class Iterator BASE_EMBEDDED {
+ class Iterator {
public:
Iterator(const GrowableBitVector* target, Zone* zone)
: it_(target->bits_ == nullptr ? new (zone) BitVector(1, zone)
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 30450b133b..d9104863aa 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -19,22 +19,30 @@
#include "src/extensions/trigger-failure-extension.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
+#include "src/math-random.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments.h"
+#include "src/objects/builtin-function-id.h"
#include "src/objects/hash-table-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-break-iterator.h"
#include "src/objects/js-collator.h"
+#include "src/objects/js-date-time-format.h"
#include "src/objects/js-list-format.h"
#include "src/objects/js-locale.h"
+#include "src/objects/js-number-format.h"
+#include "src/objects/js-plural-rules.h"
#endif // V8_INTL_SUPPORT
-#include "src/objects/js-array-buffer-inl.h"
-#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-plural-rules.h"
#include "src/objects/js-relative-time-format.h"
+#include "src/objects/js-segmenter.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/templates.h"
#include "src/snapshot/natives.h"
@@ -89,7 +97,7 @@ Handle<String> Bootstrapper::GetNativeSource(NativeType type, int index) {
new NativesExternalStringResource(type, index);
Handle<ExternalOneByteString> source_code =
isolate_->factory()->NewNativeSourceString(resource);
- DCHECK(source_code->is_short());
+ DCHECK(source_code->is_uncached());
return source_code;
}
@@ -146,8 +154,7 @@ void Bootstrapper::TearDown() {
extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical
}
-
-class Genesis BASE_EMBEDDED {
+class Genesis {
public:
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
@@ -156,7 +163,7 @@ class Genesis BASE_EMBEDDED {
GlobalContextType context_type);
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template);
- ~Genesis() { }
+ ~Genesis() = default;
Isolate* isolate() const { return isolate_; }
Factory* factory() const { return isolate_->factory(); }
@@ -503,10 +510,9 @@ V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
const char* function_name, Builtins::Name call, int len, bool adapt,
PropertyAttributes attrs = DONT_ENUM,
BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
- // Function name does not have to be internalized.
return SimpleInstallFunction(
isolate, base, property_name,
- isolate->factory()->NewStringFromAsciiChecked(function_name), call, len,
+ isolate->factory()->InternalizeUtf8String(function_name), call, len,
adapt, attrs, id);
}
@@ -587,8 +593,7 @@ V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(
V8_NOINLINE void InstallConstant(Isolate* isolate, Handle<JSObject> holder,
const char* name, Handle<Object> value) {
JSObject::AddProperty(
- isolate, holder, isolate->factory()->NewStringFromAsciiChecked(name),
- value,
+ isolate, holder, isolate->factory()->InternalizeUtf8String(name), value,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
}
@@ -855,11 +860,10 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
factory()->NewJSObject(isolate()->object_function(), TENURED);
JSObject::ForceSetPrototype(generator_function_prototype, empty);
- JSObject::AddProperty(
- isolate(), generator_function_prototype,
- factory()->to_string_tag_symbol(),
- factory()->NewStringFromAsciiChecked("GeneratorFunction"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(isolate(), generator_function_prototype,
+ factory()->to_string_tag_symbol(),
+ factory()->InternalizeUtf8String("GeneratorFunction"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
JSObject::AddProperty(isolate(), generator_function_prototype,
factory()->prototype_string(),
generator_object_prototype,
@@ -871,7 +875,7 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
JSObject::AddProperty(isolate(), generator_object_prototype,
factory()->to_string_tag_symbol(),
- factory()->NewStringFromAsciiChecked("Generator"),
+ factory()->InternalizeUtf8String("Generator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
SimpleInstallFunction(isolate(), generator_object_prototype, "next",
Builtins::kGeneratorPrototypeNext, 1, false);
@@ -951,7 +955,7 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
JSObject::AddProperty(
isolate(), async_from_sync_iterator_prototype,
factory()->to_string_tag_symbol(),
- factory()->NewStringFromAsciiChecked("Async-from-Sync Iterator"),
+ factory()->InternalizeUtf8String("Async-from-Sync Iterator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
JSObject::ForceSetPrototype(async_from_sync_iterator_prototype,
@@ -1001,7 +1005,7 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
JSObject::AddProperty(isolate(), async_generator_object_prototype,
factory()->to_string_tag_symbol(),
- factory()->NewStringFromAsciiChecked("AsyncGenerator"),
+ factory()->InternalizeUtf8String("AsyncGenerator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
SimpleInstallFunction(isolate(), async_generator_object_prototype, "next",
Builtins::kAsyncGeneratorPrototypeNext, 1, false);
@@ -1056,7 +1060,7 @@ void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
JSObject::AddProperty(isolate(), async_function_prototype,
factory()->to_string_tag_symbol(),
- factory()->NewStringFromAsciiChecked("AsyncFunction"),
+ factory()->InternalizeUtf8String("AsyncFunction"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
Handle<Map> map;
@@ -1263,7 +1267,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
isolate());
js_global_object_function = ApiNatives::CreateApiFunction(
isolate(), js_global_object_constructor, factory()->the_hole_value(),
- ApiNatives::GlobalObjectType);
+ JS_GLOBAL_OBJECT_TYPE);
}
js_global_object_function->initial_map()->set_is_prototype_map(true);
@@ -1289,7 +1293,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
FunctionTemplateInfo::cast(data->constructor()), isolate());
global_proxy_function = ApiNatives::CreateApiFunction(
isolate(), global_constructor, factory()->the_hole_value(),
- ApiNatives::GlobalProxyType);
+ JS_GLOBAL_PROXY_TYPE);
}
global_proxy_function->initial_map()->set_is_access_check_needed(true);
global_proxy_function->initial_map()->set_has_hidden_prototype(true);
@@ -1731,6 +1735,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kArrayPrototypeFind, 1, false);
SimpleInstallFunction(isolate_, proto, "findIndex",
Builtins::kArrayPrototypeFindIndex, 1, false);
+ SimpleInstallFunction(isolate_, proto, "lastIndexOf",
+ Builtins::kArrayPrototypeLastIndexOf, 1, false);
SimpleInstallFunction(isolate_, proto, "pop", Builtins::kArrayPrototypePop,
0, false);
SimpleInstallFunction(isolate_, proto, "push",
@@ -1739,19 +1745,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kArrayPrototypeReverse, 0, false);
SimpleInstallFunction(isolate_, proto, "shift",
Builtins::kArrayPrototypeShift, 0, false);
- SimpleInstallFunction(isolate_, proto, "unshift", Builtins::kArrayUnshift,
- 1, false);
+ SimpleInstallFunction(isolate_, proto, "unshift",
+ Builtins::kArrayPrototypeUnshift, 1, false);
SimpleInstallFunction(isolate_, proto, "slice",
Builtins::kArrayPrototypeSlice, 2, false);
SimpleInstallFunction(isolate_, proto, "sort",
Builtins::kArrayPrototypeSort, 1, false);
- if (FLAG_enable_experimental_builtins) {
- SimpleInstallFunction(isolate_, proto, "splice",
- Builtins::kArraySpliceTorque, 2, false);
- } else {
- SimpleInstallFunction(isolate_, proto, "splice", Builtins::kArraySplice,
- 2, false);
- }
+ SimpleInstallFunction(isolate_, proto, "splice", Builtins::kArraySplice, 2,
+ false);
SimpleInstallFunction(isolate_, proto, "includes", Builtins::kArrayIncludes,
1, false);
SimpleInstallFunction(isolate_, proto, "indexOf", Builtins::kArrayIndexOf,
@@ -1861,14 +1862,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, number_fun, "parseFloat",
Builtins::kNumberParseFloat, 1, true);
JSObject::AddProperty(isolate_, global_object,
- factory->NewStringFromAsciiChecked("parseFloat"),
+ factory->InternalizeUtf8String("parseFloat"),
parse_float_fun, DONT_ENUM);
// Install Number.parseInt and Global.parseInt.
Handle<JSFunction> parse_int_fun = SimpleInstallFunction(
isolate_, number_fun, "parseInt", Builtins::kNumberParseInt, 2, true);
JSObject::AddProperty(isolate_, global_object,
- factory->NewStringFromAsciiChecked("parseInt"),
+ factory->InternalizeUtf8String("parseInt"),
parse_int_fun, DONT_ENUM);
// Install Number constants
@@ -1879,14 +1880,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<Object> infinity = factory->infinity_value();
Handle<Object> nan = factory->nan_value();
- Handle<String> nan_name = factory->NewStringFromAsciiChecked("NaN");
+ Handle<String> nan_name = factory->InternalizeUtf8String("NaN");
JSObject::AddProperty(
- isolate_, number_fun, factory->NewStringFromAsciiChecked("MAX_VALUE"),
+ isolate_, number_fun, factory->InternalizeUtf8String("MAX_VALUE"),
factory->NewNumber(kMaxValue),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- isolate_, number_fun, factory->NewStringFromAsciiChecked("MIN_VALUE"),
+ isolate_, number_fun, factory->InternalizeUtf8String("MIN_VALUE"),
factory->NewNumber(kMinValue),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
@@ -1894,37 +1895,36 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
isolate_, number_fun,
- factory->NewStringFromAsciiChecked("NEGATIVE_INFINITY"),
+ factory->InternalizeUtf8String("NEGATIVE_INFINITY"),
factory->NewNumber(-V8_INFINITY),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
isolate_, number_fun,
- factory->NewStringFromAsciiChecked("POSITIVE_INFINITY"), infinity,
+ factory->InternalizeUtf8String("POSITIVE_INFINITY"), infinity,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
isolate_, number_fun,
- factory->NewStringFromAsciiChecked("MAX_SAFE_INTEGER"),
+ factory->InternalizeUtf8String("MAX_SAFE_INTEGER"),
factory->NewNumber(kMaxSafeInteger),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
isolate_, number_fun,
- factory->NewStringFromAsciiChecked("MIN_SAFE_INTEGER"),
+ factory->InternalizeUtf8String("MIN_SAFE_INTEGER"),
factory->NewNumber(kMinSafeInteger),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- isolate_, number_fun, factory->NewStringFromAsciiChecked("EPSILON"),
+ isolate_, number_fun, factory->InternalizeUtf8String("EPSILON"),
factory->NewNumber(kEPS),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- isolate_, global, factory->NewStringFromAsciiChecked("Infinity"),
- infinity,
+ isolate_, global, factory->InternalizeUtf8String("Infinity"), infinity,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
isolate_, global, nan_name, nan,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
JSObject::AddProperty(
- isolate_, global, factory->NewStringFromAsciiChecked("undefined"),
+ isolate_, global, factory->InternalizeUtf8String("undefined"),
factory->undefined_value(),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
}
@@ -2086,10 +2086,23 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeToString, 0, true);
SimpleInstallFunction(isolate_, prototype, "trim",
Builtins::kStringPrototypeTrim, 0, false);
- SimpleInstallFunction(isolate_, prototype, "trimLeft",
- Builtins::kStringPrototypeTrimStart, 0, false);
- SimpleInstallFunction(isolate_, prototype, "trimRight",
- Builtins::kStringPrototypeTrimEnd, 0, false);
+
+ // Install `String.prototype.trimStart` with `trimLeft` alias.
+ Handle<JSFunction> trim_start_fun =
+ SimpleInstallFunction(isolate_, prototype, "trimStart",
+ Builtins::kStringPrototypeTrimStart, 0, false);
+ JSObject::AddProperty(isolate_, prototype,
+ factory->InternalizeUtf8String("trimLeft"),
+ trim_start_fun, DONT_ENUM);
+
+ // Install `String.prototype.trimEnd` with `trimRight` alias.
+ Handle<JSFunction> trim_end_fun =
+ SimpleInstallFunction(isolate_, prototype, "trimEnd",
+ Builtins::kStringPrototypeTrimEnd, 0, false);
+ JSObject::AddProperty(isolate_, prototype,
+ factory->InternalizeUtf8String("trimRight"),
+ trim_end_fun, DONT_ENUM);
+
SimpleInstallFunction(isolate_, prototype, "toLocaleLowerCase",
Builtins::kStringPrototypeToLocaleLowerCase, 0,
false);
@@ -2126,7 +2139,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::AddProperty(
isolate_, string_iterator_prototype, factory->to_string_tag_symbol(),
- factory->NewStringFromAsciiChecked("String Iterator"),
+ factory->InternalizeUtf8String("String Iterator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
SimpleInstallFunction(isolate_, string_iterator_prototype, "next",
@@ -2134,12 +2147,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
BuiltinFunctionId::kStringIteratorNext);
Handle<JSFunction> string_iterator_function = CreateFunction(
- isolate_, factory->NewStringFromAsciiChecked("StringIterator"),
+ isolate_, factory->InternalizeUtf8String("StringIterator"),
JS_STRING_ITERATOR_TYPE, JSStringIterator::kSize, 0,
string_iterator_prototype, Builtins::kIllegal);
string_iterator_function->shared()->set_native(false);
- native_context()->set_string_iterator_map(
+ native_context()->set_initial_string_iterator_map(
string_iterator_function->initial_map());
+ native_context()->set_initial_string_iterator_prototype(
+ *string_iterator_prototype);
}
{ // --- S y m b o l ---
@@ -2186,14 +2201,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
isolate_, prototype, factory->to_string_tag_symbol(),
- factory->NewStringFromAsciiChecked("Symbol"),
+ factory->InternalizeUtf8String("Symbol"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
// Install the Symbol.prototype methods.
SimpleInstallFunction(isolate_, prototype, "toString",
- Builtins::kSymbolPrototypeToString, 0, true);
+ Builtins::kSymbolPrototypeToString, 0, true,
+ BuiltinFunctionId::kSymbolPrototypeToString);
SimpleInstallFunction(isolate_, prototype, "valueOf",
- Builtins::kSymbolPrototypeValueOf, 0, true);
+ Builtins::kSymbolPrototypeValueOf, 0, true,
+ BuiltinFunctionId::kSymbolPrototypeValueOf);
// Install the @@toPrimitive function.
Handle<JSFunction> to_primitive = InstallFunction(
@@ -2319,6 +2336,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, prototype, "toJSON",
Builtins::kDatePrototypeToJson, 1, false);
+#ifdef V8_INTL_SUPPORT
+ SimpleInstallFunction(isolate_, prototype, "toLocaleString",
+ Builtins::kDatePrototypeToLocaleString, 0, false);
+ SimpleInstallFunction(isolate_, prototype, "toLocaleDateString",
+ Builtins::kDatePrototypeToLocaleDateString, 0, false);
+ SimpleInstallFunction(isolate_, prototype, "toLocaleTimeString",
+ Builtins::kDatePrototypeToLocaleTimeString, 0, false);
+#else
// Install Intl fallback functions.
SimpleInstallFunction(isolate_, prototype, "toLocaleString",
Builtins::kDatePrototypeToString, 0, false);
@@ -2326,6 +2351,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kDatePrototypeToDateString, 0, false);
SimpleInstallFunction(isolate_, prototype, "toLocaleTimeString",
Builtins::kDatePrototypeToTimeString, 0, false);
+#endif // V8_INTL_SUPPORT
// Install the @@toPrimitive function.
Handle<JSFunction> to_primitive = InstallFunction(
@@ -2734,7 +2760,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kJsonStringify, 3, true);
JSObject::AddProperty(
isolate_, json_object, factory->to_string_tag_symbol(),
- factory->NewStringFromAsciiChecked("JSON"),
+ factory->InternalizeUtf8String("JSON"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
@@ -2815,7 +2841,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewNumber(std::sqrt(2.0)));
JSObject::AddProperty(
isolate_, math, factory->to_string_tag_symbol(),
- factory->NewStringFromAsciiChecked("Math"),
+ factory->InternalizeUtf8String("Math"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
@@ -2869,6 +2895,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kConsoleProfileEnd, 1, false, NONE);
SimpleInstallFunction(isolate_, console, "time", Builtins::kConsoleTime, 1,
false, NONE);
+ SimpleInstallFunction(isolate_, console, "timeLog",
+ Builtins::kConsoleTimeLog, 1, false, NONE);
SimpleInstallFunction(isolate_, console, "timeEnd",
Builtins::kConsoleTimeEnd, 1, false, NONE);
SimpleInstallFunction(isolate_, console, "timeStamp",
@@ -2877,7 +2905,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kConsoleContext, 1, true, NONE);
JSObject::AddProperty(
isolate_, console, factory->to_string_tag_symbol(),
- factory->NewStringFromAsciiChecked("Object"),
+ factory->InternalizeUtf8String("Object"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
@@ -2890,11 +2918,22 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<JSFunction> date_time_format_constructor = InstallFunction(
- isolate_, intl, "DateTimeFormat", JS_OBJECT_TYPE, DateFormat::kSize,
- 0, factory->the_hole_value(), Builtins::kIllegal);
+ isolate_, intl, "DateTimeFormat", JS_INTL_DATE_TIME_FORMAT_TYPE,
+ JSDateTimeFormat::kSize, 0, factory->the_hole_value(),
+ Builtins::kDateTimeFormatConstructor);
+ date_time_format_constructor->shared()->set_length(0);
+ date_time_format_constructor->shared()->DontAdaptArguments();
+ InstallWithIntrinsicDefaultProto(
+ isolate_, date_time_format_constructor,
+ Context::INTL_DATE_TIME_FORMAT_FUNCTION_INDEX);
+
native_context()->set_intl_date_time_format_function(
*date_time_format_constructor);
+ SimpleInstallFunction(
+ isolate(), date_time_format_constructor, "supportedLocalesOf",
+ Builtins::kDateTimeFormatSupportedLocalesOf, 1, false);
+
Handle<JSObject> prototype(
JSObject::cast(date_time_format_constructor->prototype()), isolate_);
@@ -2904,6 +2943,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
+ Builtins::kDateTimeFormatPrototypeResolvedOptions,
+ 0, false);
+
SimpleInstallFunction(isolate_, prototype, "formatToParts",
Builtins::kDateTimeFormatPrototypeFormatToParts, 1,
false);
@@ -2911,21 +2954,22 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallGetter(isolate_, prototype,
factory->InternalizeUtf8String("format"),
Builtins::kDateTimeFormatPrototypeFormat, false);
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
- isolate_, Builtins::kDateTimeFormatInternalFormat,
- factory->empty_string(), 1);
- native_context()->set_date_format_internal_format_shared_fun(*info);
- }
}
{
Handle<JSFunction> number_format_constructor = InstallFunction(
- isolate_, intl, "NumberFormat", JS_OBJECT_TYPE, NumberFormat::kSize,
- 0, factory->the_hole_value(), Builtins::kIllegal);
- native_context()->set_intl_number_format_function(
- *number_format_constructor);
+ isolate_, intl, "NumberFormat", JS_INTL_NUMBER_FORMAT_TYPE,
+ JSNumberFormat::kSize, 0, factory->the_hole_value(),
+ Builtins::kNumberFormatConstructor);
+ number_format_constructor->shared()->set_length(0);
+ number_format_constructor->shared()->DontAdaptArguments();
+ InstallWithIntrinsicDefaultProto(
+ isolate_, number_format_constructor,
+ Context::INTL_NUMBER_FORMAT_FUNCTION_INDEX);
+
+ SimpleInstallFunction(
+ isolate(), number_format_constructor, "supportedLocalesOf",
+ Builtins::kNumberFormatSupportedLocalesOf, 1, false);
Handle<JSObject> prototype(
JSObject::cast(number_format_constructor->prototype()), isolate_);
@@ -2936,20 +2980,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
+ Builtins::kNumberFormatPrototypeResolvedOptions, 0,
+ false);
+
SimpleInstallFunction(isolate_, prototype, "formatToParts",
Builtins::kNumberFormatPrototypeFormatToParts, 1,
false);
SimpleInstallGetter(isolate_, prototype,
factory->InternalizeUtf8String("format"),
Builtins::kNumberFormatPrototypeFormatNumber, false);
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
- isolate_, Builtins::kNumberFormatInternalFormatNumber,
- factory->empty_string(), 1);
- native_context()->set_number_format_internal_format_number_shared_fun(
- *info);
- }
}
{
@@ -2960,6 +3000,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallWithIntrinsicDefaultProto(isolate_, collator_constructor,
Context::INTL_COLLATOR_FUNCTION_INDEX);
+ SimpleInstallFunction(isolate(), collator_constructor,
+ "supportedLocalesOf",
+ Builtins::kCollatorSupportedLocalesOf, 1, false);
+
Handle<JSObject> prototype(
JSObject::cast(collator_constructor->prototype()), isolate_);
@@ -2969,25 +3013,28 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
+ Builtins::kCollatorPrototypeResolvedOptions, 0,
+ false);
+
SimpleInstallGetter(isolate_, prototype,
factory->InternalizeUtf8String("compare"),
Builtins::kCollatorPrototypeCompare, false);
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
- isolate_, Builtins::kCollatorInternalCompare,
- factory->empty_string(), 2);
- native_context()->set_collator_internal_compare_shared_fun(*info);
- }
}
{
- Handle<JSFunction> v8_break_iterator_constructor =
- InstallFunction(isolate_, intl, "v8BreakIterator", JS_OBJECT_TYPE,
- V8BreakIterator::kSize, 0, factory->the_hole_value(),
- Builtins::kIllegal);
- native_context()->set_intl_v8_break_iterator_function(
- *v8_break_iterator_constructor);
+ Handle<JSFunction> v8_break_iterator_constructor = InstallFunction(
+ isolate_, intl, "v8BreakIterator", JS_INTL_V8_BREAK_ITERATOR_TYPE,
+ JSV8BreakIterator::kSize, 0, factory->the_hole_value(),
+ Builtins::kV8BreakIteratorConstructor);
+ v8_break_iterator_constructor->shared()->DontAdaptArguments();
+ InstallWithIntrinsicDefaultProto(
+ isolate_, v8_break_iterator_constructor,
+ Context::INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX);
+
+ SimpleInstallFunction(
+ isolate_, v8_break_iterator_constructor, "supportedLocalesOf",
+ Builtins::kV8BreakIteratorSupportedLocalesOf, 1, false);
Handle<JSObject> prototype(
JSObject::cast(v8_break_iterator_constructor->prototype()), isolate_);
@@ -2998,17 +3045,29 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
+ Builtins::kV8BreakIteratorPrototypeResolvedOptions,
+ 0, false);
+
SimpleInstallGetter(isolate_, prototype,
factory->InternalizeUtf8String("adoptText"),
- Builtins::kBreakIteratorPrototypeAdoptText, false);
+ Builtins::kV8BreakIteratorPrototypeAdoptText, false);
- {
- Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
- isolate_, Builtins::kBreakIteratorInternalAdoptText,
- factory->empty_string(), 1);
- native_context()->set_break_iterator_internal_adopt_text_shared_fun(
- *info);
- }
+ SimpleInstallGetter(isolate_, prototype,
+ factory->InternalizeUtf8String("first"),
+ Builtins::kV8BreakIteratorPrototypeFirst, false);
+
+ SimpleInstallGetter(isolate_, prototype,
+ factory->InternalizeUtf8String("next"),
+ Builtins::kV8BreakIteratorPrototypeNext, false);
+
+ SimpleInstallGetter(isolate_, prototype,
+ factory->InternalizeUtf8String("current"),
+ Builtins::kV8BreakIteratorPrototypeCurrent, false);
+
+ SimpleInstallGetter(isolate_, prototype,
+ factory->InternalizeUtf8String("breakType"),
+ Builtins::kV8BreakIteratorPrototypeBreakType, false);
}
{
@@ -3021,6 +3080,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, plural_rules_constructor,
Context::INTL_PLURAL_RULES_FUNCTION_INDEX);
+ SimpleInstallFunction(isolate(), plural_rules_constructor,
+ "supportedLocalesOf",
+ Builtins::kPluralRulesSupportedLocalesOf, 1, false);
+
Handle<JSObject> prototype(
JSObject::cast(plural_rules_constructor->prototype()), isolate_);
@@ -3029,6 +3092,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, factory->to_string_tag_symbol(),
factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
+ Builtins::kPluralRulesPrototypeResolvedOptions, 0,
+ false);
+
+ SimpleInstallFunction(isolate_, prototype, "select",
+ Builtins::kPluralRulesPrototypeSelect, 1, false);
}
}
#endif // V8_INTL_SUPPORT
@@ -3043,7 +3113,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> array_buffer_noinit_fun = SimpleCreateFunction(
isolate_,
- factory->NewStringFromAsciiChecked(
+ factory->InternalizeUtf8String(
"arrayBufferConstructor_DoNotInitialize"),
Builtins::kArrayBufferConstructor_DoNotInitialize, 1, false);
native_context()->set_array_buffer_noinit_fun(*array_buffer_noinit_fun);
@@ -3088,7 +3158,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, atomics_object, "wake",
Builtins::kAtomicsWake, 3, true);
SimpleInstallFunction(isolate_, atomics_object, "notify",
- Builtins::kAtomicsWake, 3, true);
+ Builtins::kAtomicsNotify, 3, true);
}
{ // -- T y p e d A r r a y
@@ -3213,7 +3283,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
isolate_, prototype, factory->to_string_tag_symbol(),
- factory->NewStringFromAsciiChecked("DataView"),
+ factory->InternalizeUtf8String("DataView"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
// Install the "buffer", "byteOffset" and "byteLength" getters
@@ -3260,6 +3330,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kDataViewPrototypeGetFloat64, 1, false);
SimpleInstallFunction(isolate_, prototype, "setFloat64",
Builtins::kDataViewPrototypeSetFloat64, 2, false);
+ SimpleInstallFunction(isolate_, prototype, "getBigInt64",
+ Builtins::kDataViewPrototypeGetBigInt64, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "setBigInt64",
+ Builtins::kDataViewPrototypeSetBigInt64, 2, false);
+ SimpleInstallFunction(isolate_, prototype, "getBigUint64",
+ Builtins::kDataViewPrototypeGetBigUint64, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "setBigUint64",
+ Builtins::kDataViewPrototypeSetBigUint64, 2, false);
}
{ // -- M a p
@@ -3321,6 +3399,48 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallSpeciesGetter(isolate_, js_map_fun);
}
+ { // -- B i g I n t
+ Handle<JSFunction> bigint_fun = InstallFunction(
+ isolate_, global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
+ factory->the_hole_value(), Builtins::kBigIntConstructor);
+ bigint_fun->shared()->set_builtin_function_id(
+ BuiltinFunctionId::kBigIntConstructor);
+ bigint_fun->shared()->DontAdaptArguments();
+ bigint_fun->shared()->set_length(1);
+ InstallWithIntrinsicDefaultProto(isolate_, bigint_fun,
+ Context::BIGINT_FUNCTION_INDEX);
+
+ // Install the properties of the BigInt constructor.
+ // asUintN(bits, bigint)
+ SimpleInstallFunction(isolate_, bigint_fun, "asUintN",
+ Builtins::kBigIntAsUintN, 2, false);
+ // asIntN(bits, bigint)
+ SimpleInstallFunction(isolate_, bigint_fun, "asIntN",
+ Builtins::kBigIntAsIntN, 2, false);
+
+ // Set up the %BigIntPrototype%.
+ Handle<JSObject> prototype(JSObject::cast(bigint_fun->instance_prototype()),
+ isolate_);
+ JSFunction::SetPrototype(bigint_fun, prototype);
+
+ // Install the properties of the BigInt.prototype.
+ // "constructor" is created implicitly by InstallFunction() above.
+ // toLocaleString([reserved1 [, reserved2]])
+ SimpleInstallFunction(isolate_, prototype, "toLocaleString",
+ Builtins::kBigIntPrototypeToLocaleString, 0, false);
+ // toString([radix])
+ SimpleInstallFunction(isolate_, prototype, "toString",
+ Builtins::kBigIntPrototypeToString, 0, false);
+ // valueOf()
+ SimpleInstallFunction(isolate_, prototype, "valueOf",
+ Builtins::kBigIntPrototypeValueOf, 0, false);
+ // @@toStringTag
+ JSObject::AddProperty(
+ isolate_, prototype, factory->to_string_tag_symbol(),
+ factory->BigInt_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ }
+
{ // -- S e t
Handle<JSFunction> js_set_fun =
InstallFunction(isolate_, global, "Set", JS_SET_TYPE, JSSet::kSize, 0,
@@ -3435,8 +3555,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, prototype, "delete",
Builtins::kWeakMapPrototypeDelete, 1, true);
- SimpleInstallFunction(isolate_, prototype, "get", Builtins::kWeakMapGet, 1,
- true);
+ Handle<JSFunction> weakmap_get = SimpleInstallFunction(
+ isolate_, prototype, "get", Builtins::kWeakMapGet, 1, true);
+ native_context()->set_weakmap_get(*weakmap_get);
SimpleInstallFunction(isolate_, prototype, "has", Builtins::kWeakMapHas, 1,
true);
Handle<JSFunction> weakmap_set = SimpleInstallFunction(
@@ -3445,7 +3566,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::AddProperty(
isolate_, prototype, factory->to_string_tag_symbol(),
- factory->NewStringFromAsciiChecked("WeakMap"),
+ factory->InternalizeUtf8String("WeakMap"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
native_context()->set_initial_weakmap_prototype_map(prototype->map());
@@ -3476,7 +3597,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::AddProperty(
isolate_, prototype, factory->to_string_tag_symbol(),
- factory->NewStringFromAsciiChecked("WeakSet"),
+ factory->InternalizeUtf8String("WeakSet"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
native_context()->set_initial_weakset_prototype_map(prototype->map());
@@ -3852,8 +3973,7 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
bool Genesis::CallUtilsFunction(Isolate* isolate, const char* name) {
Handle<JSObject> utils =
Handle<JSObject>::cast(isolate->natives_utils_object());
- Handle<String> name_string =
- isolate->factory()->NewStringFromAsciiChecked(name);
+ Handle<String> name_string = isolate->factory()->InternalizeUtf8String(name);
Handle<Object> fun = JSObject::GetDataProperty(utils, name_string);
Handle<Object> receiver = isolate->factory()->undefined_value();
Handle<Object> args[] = {utils};
@@ -3975,17 +4095,17 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Factory* factory = isolate->factory();
HandleScope scope(isolate);
Handle<NativeContext> native_context = isolate->native_context();
-#define EXPORT_PRIVATE_SYMBOL(NAME) \
- Handle<String> NAME##_name = factory->NewStringFromAsciiChecked(#NAME); \
+#define EXPORT_PRIVATE_SYMBOL(_, NAME) \
+ Handle<String> NAME##_name = factory->InternalizeUtf8String(#NAME); \
JSObject::AddProperty(isolate, container, NAME##_name, factory->NAME(), NONE);
- PRIVATE_SYMBOL_LIST(EXPORT_PRIVATE_SYMBOL)
+ PRIVATE_SYMBOL_LIST_GENERATOR(EXPORT_PRIVATE_SYMBOL, /* not used */)
#undef EXPORT_PRIVATE_SYMBOL
-#define EXPORT_PUBLIC_SYMBOL(NAME, DESCRIPTION) \
- Handle<String> NAME##_name = factory->NewStringFromAsciiChecked(#NAME); \
+#define EXPORT_PUBLIC_SYMBOL(_, NAME, DESCRIPTION) \
+ Handle<String> NAME##_name = factory->InternalizeUtf8String(#NAME); \
JSObject::AddProperty(isolate, container, NAME##_name, factory->NAME(), NONE);
- PUBLIC_SYMBOL_LIST(EXPORT_PUBLIC_SYMBOL)
- WELL_KNOWN_SYMBOL_LIST(EXPORT_PUBLIC_SYMBOL)
+ PUBLIC_SYMBOL_LIST_GENERATOR(EXPORT_PUBLIC_SYMBOL, /* not used */)
+ WELL_KNOWN_SYMBOL_LIST_GENERATOR(EXPORT_PUBLIC_SYMBOL, /* not used */)
#undef EXPORT_PUBLIC_SYMBOL
Handle<JSObject> iterator_prototype(
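The reworked symbol-list macros use the X-macro-with-threaded-argument pattern: each list macro takes the per-entry macro plus an extra argument that it passes through unchanged. A minimal standalone illustration (the macro names here are hypothetical, not the real V8 lists):

    // Hypothetical stand-in for PRIVATE_SYMBOL_LIST_GENERATOR(V, _):
    #define MY_SYMBOL_LIST_GENERATOR(V, _) \
      V(_, foo_symbol)                     \
      V(_, bar_symbol)

    // Each expansion site supplies its own per-entry macro; the first
    // parameter is threaded through untouched.
    #define DECLARE_NAME(_, NAME) const char* NAME##_name = #NAME;
    MY_SYMBOL_LIST_GENERATOR(DECLARE_NAME, /* not used */)
    #undef DECLARE_NAME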
@@ -4238,6 +4358,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Builtins::kCallSitePrototypeGetScriptNameOrSourceURL},
{"getThis", Builtins::kCallSitePrototypeGetThis},
{"getTypeName", Builtins::kCallSitePrototypeGetTypeName},
+ {"isAsync", Builtins::kCallSitePrototypeIsAsync},
{"isConstructor", Builtins::kCallSitePrototypeIsConstructor},
{"isEval", Builtins::kCallSitePrototypeIsEval},
{"isNative", Builtins::kCallSitePrototypeIsNative},
@@ -4261,7 +4382,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_public_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_static_fields)
@@ -4269,6 +4389,9 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_separator)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_json_stringify)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_await_optimization)
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
@@ -4305,40 +4428,6 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
}
}
-void Genesis::InitializeGlobal_harmony_string_trimming() {
- if (!FLAG_harmony_string_trimming) return;
-
- Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
- Factory* factory = isolate()->factory();
-
- Handle<JSObject> string_prototype(
- native_context()->initial_string_prototype(), isolate());
-
- {
- Handle<String> trim_left_name = factory->InternalizeUtf8String("trimLeft");
- Handle<String> trim_start_name =
- factory->InternalizeUtf8String("trimStart");
- Handle<JSFunction> trim_left_fun = Handle<JSFunction>::cast(
- JSObject::GetProperty(isolate_, string_prototype, trim_left_name)
- .ToHandleChecked());
- JSObject::AddProperty(isolate_, string_prototype, trim_start_name,
- trim_left_fun, DONT_ENUM);
- trim_left_fun->shared()->SetName(*trim_start_name);
- }
-
- {
- Handle<String> trim_right_name =
- factory->InternalizeUtf8String("trimRight");
- Handle<String> trim_end_name = factory->InternalizeUtf8String("trimEnd");
- Handle<JSFunction> trim_right_fun = Handle<JSFunction>::cast(
- JSObject::GetProperty(isolate_, string_prototype, trim_right_name)
- .ToHandleChecked());
- JSObject::AddProperty(isolate_, string_prototype, trim_end_name,
- trim_right_fun, DONT_ENUM);
- trim_right_fun->shared()->SetName(*trim_end_name);
- }
-}
-
void Genesis::InitializeGlobal_harmony_array_prototype_values() {
if (!FLAG_harmony_array_prototype_values) return;
Handle<JSFunction> array_constructor(native_context()->array_function(),
@@ -4425,7 +4514,7 @@ void Genesis::InitializeGlobal_harmony_string_matchall() {
JSObject::AddProperty(
isolate(), regexp_string_iterator_prototype,
factory()->to_string_tag_symbol(),
- factory()->NewStringFromAsciiChecked("RegExp String Iterator"),
+ factory()->InternalizeUtf8String("RegExp String Iterator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
SimpleInstallFunction(isolate(), regexp_string_iterator_prototype, "next",
@@ -4433,7 +4522,7 @@ void Genesis::InitializeGlobal_harmony_string_matchall() {
true);
Handle<JSFunction> regexp_string_iterator_function = CreateFunction(
- isolate(), factory()->NewStringFromAsciiChecked("RegExpStringIterator"),
+ isolate(), factory()->InternalizeUtf8String("RegExpStringIterator"),
JS_REGEXP_STRING_ITERATOR_TYPE, JSRegExpStringIterator::kSize, 0,
regexp_string_iterator_prototype, Builtins::kIllegal);
regexp_string_iterator_function->shared()->set_native(false);
@@ -4449,126 +4538,6 @@ void Genesis::InitializeGlobal_harmony_string_matchall() {
}
}
-void Genesis::InitializeGlobal_harmony_bigint() {
- Factory* factory = isolate()->factory();
- Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
- if (!FLAG_harmony_bigint) {
- // Typed arrays are installed by default; remove them if the flag is off.
- CHECK(JSObject::DeleteProperty(
- global, factory->InternalizeUtf8String("BigInt64Array"))
- .ToChecked());
- CHECK(JSObject::DeleteProperty(
- global, factory->InternalizeUtf8String("BigUint64Array"))
- .ToChecked());
- return;
- }
-
- Handle<JSFunction> bigint_fun = InstallFunction(
- isolate(), global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
- factory->the_hole_value(), Builtins::kBigIntConstructor);
- bigint_fun->shared()->set_builtin_function_id(
- BuiltinFunctionId::kBigIntConstructor);
- bigint_fun->shared()->DontAdaptArguments();
- bigint_fun->shared()->set_length(1);
- InstallWithIntrinsicDefaultProto(isolate(), bigint_fun,
- Context::BIGINT_FUNCTION_INDEX);
-
- // Install the properties of the BigInt constructor.
- // asUintN(bits, bigint)
- SimpleInstallFunction(isolate(), bigint_fun, "asUintN",
- Builtins::kBigIntAsUintN, 2, false);
- // asIntN(bits, bigint)
- SimpleInstallFunction(isolate(), bigint_fun, "asIntN",
- Builtins::kBigIntAsIntN, 2, false);
-
- // Set up the %BigIntPrototype%.
- Handle<JSObject> prototype(JSObject::cast(bigint_fun->instance_prototype()),
- isolate());
- JSFunction::SetPrototype(bigint_fun, prototype);
-
- // Install the properties of the BigInt.prototype.
- // "constructor" is created implicitly by InstallFunction() above.
- // toLocaleString([reserved1 [, reserved2]])
- SimpleInstallFunction(isolate(), prototype, "toLocaleString",
- Builtins::kBigIntPrototypeToLocaleString, 0, false);
- // toString([radix])
- SimpleInstallFunction(isolate(), prototype, "toString",
- Builtins::kBigIntPrototypeToString, 0, false);
- // valueOf()
- SimpleInstallFunction(isolate(), prototype, "valueOf",
- Builtins::kBigIntPrototypeValueOf, 0, false);
- // @@toStringTag
- JSObject::AddProperty(isolate(), prototype, factory->to_string_tag_symbol(),
- factory->BigInt_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
-
- // Install 64-bit DataView accessors.
- // TODO(jkummerow): Move these to the "DataView" section when dropping the
- // FLAG_harmony_bigint.
- Handle<JSObject> dataview_prototype(
- JSObject::cast(native_context()->data_view_fun()->instance_prototype()),
- isolate());
- SimpleInstallFunction(isolate(), dataview_prototype, "getBigInt64",
- Builtins::kDataViewPrototypeGetBigInt64, 1, false);
- SimpleInstallFunction(isolate(), dataview_prototype, "setBigInt64",
- Builtins::kDataViewPrototypeSetBigInt64, 2, false);
- SimpleInstallFunction(isolate(), dataview_prototype, "getBigUint64",
- Builtins::kDataViewPrototypeGetBigUint64, 1, false);
- SimpleInstallFunction(isolate(), dataview_prototype, "setBigUint64",
- Builtins::kDataViewPrototypeSetBigUint64, 2, false);
-}
-
-void Genesis::InitializeGlobal_harmony_await_optimization() {
- if (!FLAG_harmony_await_optimization) return;
-
- // async/await
- Handle<JSFunction> await_caught_function = SimpleCreateFunction(
- isolate(), factory()->empty_string(),
- Builtins::kAsyncFunctionAwaitCaughtOptimized, 2, false);
- native_context()->set_async_function_await_caught(*await_caught_function);
-
- Handle<JSFunction> await_uncaught_function = SimpleCreateFunction(
- isolate(), factory()->empty_string(),
- Builtins::kAsyncFunctionAwaitUncaughtOptimized, 2, false);
- native_context()->set_async_function_await_uncaught(*await_uncaught_function);
-
- // async generators
- Handle<JSObject> async_iterator_prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
-
- SimpleInstallFunction(
- isolate(), async_iterator_prototype, factory()->async_iterator_symbol(),
- "[Symbol.asyncIterator]", Builtins::kReturnReceiver, 0, true);
-
- Handle<JSObject> async_from_sync_iterator_prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- SimpleInstallFunction(
- isolate(), async_from_sync_iterator_prototype, factory()->next_string(),
- Builtins::kAsyncFromSyncIteratorPrototypeNextOptimized, 1, true);
- SimpleInstallFunction(
- isolate(), async_from_sync_iterator_prototype, factory()->return_string(),
- Builtins::kAsyncFromSyncIteratorPrototypeReturnOptimized, 1, true);
- SimpleInstallFunction(
- isolate(), async_from_sync_iterator_prototype, factory()->throw_string(),
- Builtins::kAsyncFromSyncIteratorPrototypeThrowOptimized, 1, true);
-
- JSObject::AddProperty(
- isolate(), async_from_sync_iterator_prototype,
- factory()->to_string_tag_symbol(),
- factory()->NewStringFromAsciiChecked("Async-from-Sync Iterator"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
-
- JSObject::ForceSetPrototype(async_from_sync_iterator_prototype,
- async_iterator_prototype);
-
- Handle<Map> async_from_sync_iterator_map = factory()->NewMap(
- JS_ASYNC_FROM_SYNC_ITERATOR_TYPE, JSAsyncFromSyncIterator::kSize);
- Map::SetPrototype(isolate(), async_from_sync_iterator_map,
- async_from_sync_iterator_prototype);
- native_context()->set_async_from_sync_iterator_map(
- *async_from_sync_iterator_map);
-}
-
#ifdef V8_INTL_SUPPORT
void Genesis::InitializeGlobal_harmony_intl_list_format() {
if (!FLAG_harmony_intl_list_format) return;
@@ -4586,13 +4555,16 @@ void Genesis::InitializeGlobal_harmony_intl_list_format() {
list_format_fun->shared()->set_length(0);
list_format_fun->shared()->DontAdaptArguments();
+ SimpleInstallFunction(isolate(), list_format_fun, "supportedLocalesOf",
+ Builtins::kListFormatSupportedLocalesOf, 1, false);
+
// Setup %ListFormatPrototype%.
Handle<JSObject> prototype(
JSObject::cast(list_format_fun->instance_prototype()), isolate());
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
- factory()->NewStringFromStaticChars("Intl.ListFormat"),
+ factory()->InternalizeUtf8String("Intl.ListFormat"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
@@ -4628,7 +4600,7 @@ void Genesis::InitializeGlobal_harmony_locale() {
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
- factory()->NewStringFromAsciiChecked("Locale"),
+ factory()->InternalizeUtf8String("Intl.Locale"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
SimpleInstallFunction(isolate(), prototype, "toString",
@@ -4687,6 +4659,10 @@ void Genesis::InitializeGlobal_harmony_intl_relative_time_format() {
relative_time_format_fun->shared()->set_length(0);
relative_time_format_fun->shared()->DontAdaptArguments();
+ SimpleInstallFunction(
+ isolate(), relative_time_format_fun, "supportedLocalesOf",
+ Builtins::kRelativeTimeFormatSupportedLocalesOf, 1, false);
+
// Setup %RelativeTimeFormatPrototype%.
Handle<JSObject> prototype(
JSObject::cast(relative_time_format_fun->instance_prototype()),
@@ -4695,7 +4671,7 @@ void Genesis::InitializeGlobal_harmony_intl_relative_time_format() {
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
isolate(), prototype, factory()->to_string_tag_symbol(),
- factory()->NewStringFromStaticChars("Intl.RelativeTimeFormat"),
+ factory()->InternalizeUtf8String("Intl.RelativeTimeFormat"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
@@ -4708,6 +4684,37 @@ void Genesis::InitializeGlobal_harmony_intl_relative_time_format() {
false);
}
+void Genesis::InitializeGlobal_harmony_intl_segmenter() {
+ if (!FLAG_harmony_intl_segmenter) return;
+ Handle<JSObject> intl = Handle<JSObject>::cast(
+ JSReceiver::GetProperty(
+ isolate(),
+ Handle<JSReceiver>(native_context()->global_object(), isolate()),
+ factory()->InternalizeUtf8String("Intl"))
+ .ToHandleChecked());
+
+ Handle<JSFunction> segmenter_fun = InstallFunction(
+ isolate(), intl, "Segmenter", JS_INTL_SEGMENTER_TYPE, JSSegmenter::kSize,
+ 0, factory()->the_hole_value(), Builtins::kSegmenterConstructor);
+ segmenter_fun->shared()->set_length(0);
+ segmenter_fun->shared()->DontAdaptArguments();
+
+ SimpleInstallFunction(isolate(), segmenter_fun, "supportedLocalesOf",
+ Builtins::kSegmenterSupportedLocalesOf, 1, false);
+
+ // Setup %SegmenterPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(segmenter_fun->instance_prototype()), isolate());
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
+ factory()->NewStringFromStaticChars("Intl.Segmenter"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
+ Builtins::kSegmenterPrototypeResolvedOptions, 0, false);
+}
+
#endif // V8_INTL_SUPPORT
Handle<JSFunction> Genesis::CreateArrayBuffer(
@@ -4826,7 +4833,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Builtins::kPromiseInternalConstructor, 1, true);
promise_internal_constructor->shared()->set_native(false);
InstallFunction(isolate(), extras_utils, promise_internal_constructor,
- factory()->NewStringFromAsciiChecked("createPromise"));
+ factory()->InternalizeUtf8String("createPromise"));
// v8.rejectPromise(promise, reason)
Handle<JSFunction> promise_internal_reject =
@@ -4834,7 +4841,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Builtins::kPromiseInternalReject, 2, true);
promise_internal_reject->shared()->set_native(false);
InstallFunction(isolate(), extras_utils, promise_internal_reject,
- factory()->NewStringFromAsciiChecked("rejectPromise"));
+ factory()->InternalizeUtf8String("rejectPromise"));
// v8.resolvePromise(promise, resolution)
Handle<JSFunction> promise_internal_resolve =
@@ -4842,10 +4849,10 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Builtins::kPromiseInternalResolve, 2, true);
promise_internal_resolve->shared()->set_native(false);
InstallFunction(isolate(), extras_utils, promise_internal_resolve,
- factory()->NewStringFromAsciiChecked("resolvePromise"));
+ factory()->InternalizeUtf8String("resolvePromise"));
InstallFunction(isolate(), extras_utils, isolate()->is_promise(),
- factory()->NewStringFromAsciiChecked("isPromise"));
+ factory()->InternalizeUtf8String("isPromise"));
int builtin_index = Natives::GetDebuggerCount();
// Only run prologue.js at this point.
@@ -5708,6 +5715,7 @@ Genesis::Genesis(
DCHECK_EQ(0u, context_snapshot_index);
// We get here if there was no context snapshot.
CreateRoots();
+ MathRandom::InitializeContext(isolate, native_context());
Handle<JSFunction> empty_function = CreateEmptyFunction();
CreateSloppyModeFunctionMaps(empty_function);
CreateStrictModeFunctionMaps(empty_function);
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index e3ba8c06f2..4ad02eb836 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -18,7 +18,7 @@ namespace internal {
// (array.js, etc.) to precompiled functions. Instead of mapping
// names to functions it might make sense to let the JS2C tool
// generate an index for each native JS file.
-class SourceCodeCache final BASE_EMBEDDED {
+class SourceCodeCache final {
public:
explicit SourceCodeCache(Script::Type type) : type_(type), cache_(nullptr) {}
@@ -122,8 +122,7 @@ class Bootstrapper final {
DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
};
-
-class BootstrapperActive final BASE_EMBEDDED {
+class BootstrapperActive final {
public:
explicit BootstrapperActive(Bootstrapper* bootstrapper)
: bootstrapper_(bootstrapper) {
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index c18811a4b6..92cb6df45d 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -60,8 +60,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- // tail call a stub
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
RelocInfo::CODE_TARGET);
}
@@ -122,7 +120,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiUntag(r0);
// The receiver for the builtin/api call.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
// Set up pointer to last argument.
__ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -169,6 +167,20 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Jump(lr);
}
+void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch, Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here, which will cause scratch to become negative.
+ __ sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ __ b(le, stack_overflow); // Signed comparison.
+}
+
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
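
The relocated Generate_StackOverflowCheck computes the remaining headroom as sp minus the real stack limit and compares it, signed, against the space the arguments need. A minimal sketch of the same arithmetic in plain C++ (function and parameter names are hypothetical):

#include <cstdint>

// The subtraction may legitimately go negative when the stack is already
// blown, which is why the emitted branch is the signed b(le) rather than
// an unsigned comparison.
bool WouldOverflowStack(uintptr_t sp, uintptr_t real_stack_limit,
                        uintptr_t num_args, unsigned pointer_size_log2) {
  intptr_t headroom = static_cast<intptr_t>(sp - real_stack_limit);
  return headroom <= static_cast<intptr_t>(num_args << pointer_size_log2);
}
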
@@ -188,7 +200,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label post_instantiation_deopt_entry, not_create_implicit_receiver;
// Preserve the incoming parameters on the stack.
- __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r4, RootIndex::kTheHoleValue);
__ SmiTag(r0);
__ Push(cp, r0, r1, r4, r3);
@@ -214,7 +226,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Else: use TheHoleValue as receiver for constructor call
__ bind(&not_create_implicit_receiver);
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r0, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
// -- r0: receiver
@@ -253,6 +265,19 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Set up pointer to last argument.
__ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
+ __ b(&enough_stack_space);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ bkpt(0);
+
+ __ bind(&enough_stack_space);
+
// Copy arguments and receiver to the expression stack.
Label loop, entry;
__ mov(r5, r0);
@@ -303,7 +328,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label use_receiver, do_throw, leave_frame;
// If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(r0, Heap::kUndefinedValueRootIndex, &use_receiver);
+ __ JumpIfRoot(r0, RootIndex::kUndefinedValue, &use_receiver);
// Otherwise we do a smi check and fall through to check if the return value
// is a valid receiver.
@@ -325,7 +350,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// on-stack receiver as the result.
__ bind(&use_receiver);
__ ldr(r0, MemOperand(sp, 0 * kPointerSize));
- __ JumpIfRoot(r0, Heap::kTheHoleValueRootIndex, &do_throw);
+ __ JumpIfRoot(r0, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_frame);
// Restore smi-tagged arguments count from the frame.
@@ -399,7 +424,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(sp, RootIndex::kRealStackLimit);
__ b(lo, &stack_overflow);
// Push receiver.
@@ -466,7 +491,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r4);
// Push hole as receiver since we do not use it for stepping.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r1);
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
@@ -497,21 +522,6 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch,
- Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
- __ b(le, stack_overflow); // Signed comparison.
-}
-
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
@@ -573,7 +583,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r4, RootIndex::kUndefinedValue);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
__ mov(r8, Operand(r4));
@@ -878,7 +888,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ sub(r9, sp, Operand(r4));
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(r2, RootIndex::kRealStackLimit);
__ cmp(r9, Operand(r2));
__ b(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -887,7 +897,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
Label loop_check;
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r9, RootIndex::kUndefinedValue);
__ b(&loop_check, al);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
@@ -907,7 +917,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne);
// Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
@@ -987,7 +997,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ mov(r3, r0); // Argument count is correct.
}
@@ -1201,7 +1211,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ push(r4);
}
for (int i = 0; i < 3 - j; ++i) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
}
if (j < 3) {
__ jmp(&args_done);
@@ -1302,15 +1312,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Ret();
}
-static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
- bool has_handler_frame) {
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
- if (has_handler_frame) {
- __ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
+ __ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset));
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -1327,11 +1332,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ bind(&skip);
- // Drop any potential handler frame that is be sitting on top of the actual
+ // Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
- if (has_handler_frame) {
- __ LeaveFrame(StackFrame::STUB);
- }
+ __ LeaveFrame(StackFrame::STUB);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
@@ -1354,14 +1357,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
}
}
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, false);
-}
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, true);
-}
-
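
With the non-interpreter entry point removed, the surviving builtin can assume a handler (STUB) frame is always on top, so the function is unconditionally loaded one frame up. A toy model of the two emitted loads (offsets are stand-ins):

#include <cstdint>

// ldr r0, [fp + kCallerFPOffset]   ; step over the handler frame
// ldr r0, [r0 + kFunctionOffset]   ; function slot of the JS frame below
uintptr_t LookupOsrFunction(const uint8_t* fp, int caller_fp_offset,
                            int function_offset) {
  const uint8_t* caller_fp =
      *reinterpret_cast<const uint8_t* const*>(fp + caller_fp_offset);
  return *reinterpret_cast<const uintptr_t*>(caller_fp + function_offset);
}
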
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1375,7 +1370,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r5, RootIndex::kUndefinedValue);
__ mov(r2, r5);
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver
__ sub(r4, r0, Operand(1), SetCC);
@@ -1398,8 +1393,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r2, RootIndex::kNullValue, &no_arguments);
+ __ JumpIfRoot(r2, RootIndex::kUndefinedValue, &no_arguments);
// 4a. Apply the receiver to the given argArray.
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
@@ -1422,7 +1417,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Label done;
__ cmp(r0, Operand::Zero());
__ b(ne, &done);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ add(r0, r0, Operand(1));
__ bind(&done);
}
@@ -1471,7 +1466,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r5, r1);
__ mov(r2, r1);
__ sub(r4, r0, Operand(1), SetCC);
@@ -1513,7 +1508,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r2, r1);
__ str(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver
__ sub(r4, r0, Operand(1), SetCC);
@@ -1600,26 +1595,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&ok);
}
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // The stack might already be overflowed here which will cause 'scratch' to
- // become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, Operand(r4, LSL, kPointerSizeLog2));
- __ b(gt, &done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
+ Label stack_overflow;
+ Generate_StackOverflowCheck(masm, r4, scratch, &stack_overflow);
// Push arguments onto the stack (thisArgument is already on the stack).
{
__ mov(r6, Operand(0));
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r5, RootIndex::kTheHoleValue);
Label done, loop;
__ bind(&loop);
__ cmp(r6, r4);
@@ -1627,7 +1609,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2));
__ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ cmp(scratch, r5);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue, eq);
__ Push(scratch);
__ add(r6, r6, Operand(1));
__ b(&loop);
@@ -1637,6 +1619,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
}
// static
@@ -1771,9 +1756,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ b(hs, &done_convert);
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
- __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex,
- &convert_global_proxy);
- __ JumpIfNotRoot(r3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ JumpIfRoot(r3, RootIndex::kUndefinedValue, &convert_global_proxy);
+ __ JumpIfNotRoot(r3, RootIndex::kNullValue, &convert_to_object);
__ bind(&convert_global_proxy);
{
// Patch receiver to global proxy.
@@ -1859,8 +1843,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
- __ b(gt, &done); // Signed comparison.
+ __ CompareRoot(sp, RootIndex::kRealStackLimit);
+ __ b(hs, &done);
// Restore the stack pointer.
__ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
{
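
Note the condition change in this hunk: sp is compared directly against the limit with no subtraction, so the comparison is between two addresses and should be unsigned, hence b(gt) becoming b(hs) (higher-or-same). A one-line sketch of the intended predicate:

#include <cstdint>

// "Still fits" means sp >= limit as unsigned addresses (ARM condition hs).
bool StackAboveRealLimit(uintptr_t sp, uintptr_t real_stack_limit) {
  return sp >= real_stack_limit;
}
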
@@ -1987,7 +1971,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// Calling convention for function specific ConstructStubs require
// r2 to contain either an AllocationSite or undefined.
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r2, RootIndex::kUndefinedValue);
Label call_generic_stub;
@@ -2165,7 +2149,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r1: function
// r2: expected number of arguments
// r3: new target (passed through to callee)
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
// Adjust for frame.
__ sub(r4, r4,
@@ -2331,7 +2315,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Check result for exception sentinel.
Label exception_returned;
- __ CompareRoot(r0, Heap::kExceptionRootIndex);
+ __ CompareRoot(r0, RootIndex::kException);
__ b(eq, &exception_returned);
// Check that there is no pending exception, otherwise we
@@ -2342,7 +2326,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
__ Move(r3, pending_exception_address);
__ ldr(r3, MemOperand(r3));
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r3, RootIndex::kTheHoleValue);
// Cannot use check here, as it attempts to generate a call into the runtime.
__ b(eq, &okay);
__ stop("Unexpected pending exception");
@@ -2401,9 +2385,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
// Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
- // both configurations. It is safe to always do this, because the underlying
- // register is caller-saved and can be arbitrarily clobbered.
+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+ // with both configurations. It is safe to always do this, because the
+ // underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
@@ -2585,6 +2569,10 @@ namespace {
void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
ElementsKind kind) {
+ // Load undefined into the allocation site parameter as required by
+ // ArrayNArgumentsConstructor.
+ __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
+
__ cmp(r0, Operand(1));
__ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 61fee9013b..0d51d9decf 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -55,7 +55,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
__ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
RelocInfo::CODE_TARGET);
}
@@ -129,7 +128,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Claim(slot_count);
// Preserve the incoming parameters on the stack.
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(x10, RootIndex::kTheHoleValue);
// Compute a pointer to the slot immediately above the location on the
// stack to which arguments will be later copied.
@@ -195,6 +194,24 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Ret();
}
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Label* stack_overflow) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+ Label enough_stack_space;
+ __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here, which will cause scratch to become negative.
+ __ Sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ __ B(le, stack_overflow);
+}
+
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -249,7 +266,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Else: use TheHoleValue as receiver for constructor call
__ Bind(&not_create_implicit_receiver);
- __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(x0, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
// -- x0: receiver
@@ -303,6 +320,19 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// slots for the arguments. If the number of arguments was odd, the last
// argument will overwrite one of the receivers pushed above.
__ Bic(x10, x12, 1);
+
+ // Check if we have enough stack space to push all arguments.
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, x10, &stack_overflow);
+ __ B(&enough_stack_space);
+
+ __ Bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable();
+
+ __ Bind(&enough_stack_space);
__ Claim(x10);
// Copy the arguments.
@@ -342,7 +372,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label use_receiver, do_throw, leave_frame;
// If the result is undefined, we jump out to using the implicit receiver.
- __ CompareRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ CompareRoot(x0, RootIndex::kUndefinedValue);
__ B(eq, &use_receiver);
// Otherwise we do a smi check and fall through to check if the return value
@@ -364,7 +394,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// on-stack receiver as the result.
__ Bind(&use_receiver);
__ Peek(x0, 0 * kPointerSize);
- __ CompareRoot(x0, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(x0, RootIndex::kTheHoleValue);
__ B(eq, &do_throw);
__ Bind(&leave_frame);
@@ -425,7 +455,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(sp, RootIndex::kRealStackLimit);
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
@@ -508,7 +538,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Push hole as receiver since we do not use it for stepping.
- __ LoadRoot(x5, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(x5, RootIndex::kTheHoleValue);
__ Push(x1, padreg, x4, x5);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(padreg, x1);
@@ -534,24 +564,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
}
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Label* stack_overflow) {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
-
- // Check the stack for overflow.
- // We are not trying to catch interruptions (e.g. debug break and
- // preemption) here, so the "real stack limit" is checked.
- Label enough_stack_space;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ Sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
- __ B(le, stack_overflow);
-}
-
// Input:
// x0: new.target.
// x1: function.
@@ -639,7 +651,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
// The original values have been saved in JSEntryStub::GenerateBody().
- __ LoadRoot(x19, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(x19, RootIndex::kUndefinedValue);
__ Mov(x20, x19);
__ Mov(x21, x19);
__ Mov(x22, x19);
@@ -957,7 +969,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ Sub(x10, sp, Operand(x11));
- __ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(x10, RootIndex::kRealStackLimit);
__ B(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&ok);
@@ -966,7 +978,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Note: there should always be at least one stack slot for the return
// register in the register file.
Label loop_header;
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(x10, RootIndex::kUndefinedValue);
__ Lsr(x11, x11, kPointerSizeLog2);
// Round up the number of registers to a multiple of 2, to align the stack
// to 16 bytes.
@@ -988,7 +1000,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Bind(&no_incoming_new_target_or_generator_register);
// Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
@@ -1081,7 +1093,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Store "undefined" as the receiver arg if we need to.
Register receiver = x14;
- __ LoadRoot(receiver, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(receiver, RootIndex::kUndefinedValue);
__ SlotAddress(stack_addr, num_args);
__ Str(receiver, MemOperand(stack_addr));
__ Mov(slots_to_copy, num_args);
@@ -1300,7 +1312,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
Register scratch1 = x12;
Register scratch2 = x13;
Register scratch3 = x14;
- __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(undef, RootIndex::kUndefinedValue);
Label at_least_one_arg;
Label three_args;
@@ -1452,15 +1464,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Ret();
}
-static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
- bool has_handler_frame) {
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
- if (has_handler_frame) {
- __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
+ __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1476,11 +1483,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ Bind(&skip);
- // Drop any potential handler frame that is be sitting on top of the actual
+ // Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
- if (has_handler_frame) {
- __ LeaveFrame(StackFrame::STUB);
- }
+ __ LeaveFrame(StackFrame::STUB);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
@@ -1501,14 +1506,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ Ret();
}
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, false);
-}
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, true);
-}
-
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1526,8 +1523,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register undefined_value = x3;
Register null_value = x4;
- __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+ __ LoadRoot(null_value, RootIndex::kNullValue);
// 1. Load receiver into x1, argArray into x2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
@@ -1609,7 +1606,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Label non_zero;
Register scratch = x10;
__ Cbnz(argc, &non_zero);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
// Overwrite receiver with undefined, which will be the new receiver.
// We do not need to overwrite the padding slot above it with anything.
__ Poke(scratch, 0);
@@ -1666,7 +1663,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register this_argument = x4;
Register undefined_value = x3;
- __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
// 1. Load target into x1 (if present), argumentsList into x2 (if present),
// remove all arguments from the stack (including the receiver), and push
@@ -1743,7 +1740,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register new_target = x3;
Register undefined_value = x4;
- __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
// 1. Load target into x1 (if present), argumentsList into x2 (if present),
// new.target into x3 (if present, otherwise use target), remove all
@@ -1933,21 +1930,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Register argc = x0;
Register len = x4;
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
- // Make x10 the space we have left. The stack might already be overflowed
- // here which will cause x10 to become negative.
- __ Sub(x10, sp, x10);
- // Check if the arguments will overflow the stack.
- __ Cmp(x10, Operand(len, LSL, kPointerSizeLog2));
- __ B(gt, &done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ Bind(&done);
- }
+ Label stack_overflow;
+ Generate_StackOverflowCheck(masm, len, &stack_overflow);
// Skip argument setup if we don't need to push any varargs.
Label done;
@@ -1963,8 +1947,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Register undefined_value = x12;
Register scratch = x13;
__ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
- __ LoadRoot(the_hole_value, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(the_hole_value, RootIndex::kTheHoleValue);
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
// We do not use the CompareRoot macro as it would do a LoadRoot behind the
// scenes and we want to avoid that in a loop.
// TODO(all): Consider using Ldp and Stp.
@@ -1980,6 +1964,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
}
// static
@@ -2121,9 +2108,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ B(hs, &done_convert);
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
- __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex,
- &convert_global_proxy);
- __ JumpIfNotRoot(x3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ JumpIfRoot(x3, RootIndex::kUndefinedValue, &convert_global_proxy);
+ __ JumpIfNotRoot(x3, RootIndex::kNullValue, &convert_to_object);
__ Bind(&convert_global_proxy);
{
// Patch receiver to global proxy.
@@ -2211,13 +2197,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
Label done;
- __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(x10, RootIndex::kRealStackLimit);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
__ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
- __ B(gt, &done); // Signed comparison.
+ __ B(hs, &done);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&done);
}
@@ -2379,7 +2365,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// Calling convention for function specific ConstructStubs require
// x2 to contain either an AllocationSite or undefined.
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(x2, RootIndex::kUndefinedValue);
Label call_generic_stub;
@@ -2586,7 +2572,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
__ RecordComment("-- Fill slots with undefined --");
__ Sub(copy_end, copy_to, Operand(scratch1, LSL, kPointerSizeLog2));
- __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch1, RootIndex::kUndefinedValue);
Label fill;
__ Bind(&fill);
@@ -2856,7 +2842,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Check result for exception sentinel.
Label exception_returned;
- __ CompareRoot(result, Heap::kExceptionRootIndex);
+ __ CompareRoot(result, RootIndex::kException);
__ B(eq, &exception_returned);
// The call succeeded, so unwind the stack and return.
@@ -2922,9 +2908,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Bind(&not_js_frame);
// Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
- // both configurations. It is safe to always do this, because the underlying
- // register is caller-saved and can be arbitrarily clobbered.
+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+ // with both configurations. It is safe to always do this, because the
+ // underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
@@ -3131,6 +3117,9 @@ void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
__ Bind(&n_case);
// N arguments.
+ // Load undefined into the allocation site parameter as required by
+ // ArrayNArgumentsConstructor.
+ __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
__ Jump(code, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq
index 8406123b20..6b9ba934a4 100644
--- a/deps/v8/src/builtins/array-copywithin.tq
+++ b/deps/v8/src/builtins/array-copywithin.tq
@@ -4,7 +4,7 @@
module array {
macro ConvertToRelativeIndex(index: Number, length: Number): Number {
- return index < 0 ? max(index + length, 0) : min(index, length);
+ return index < 0 ? Max(index + length, 0) : Min(index, length);
}
// https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin
@@ -17,32 +17,32 @@ module array {
const length: Number = GetLengthProperty(context, object);
// 3. Let relativeTarget be ? ToInteger(target).
- const relative_target: Number = ToInteger_Inline(context, arguments[0]);
+ const relativeTarget: Number = ToInteger_Inline(context, arguments[0]);
// 4. If relativeTarget < 0, let to be max((len + relativeTarget), 0);
// else let to be min(relativeTarget, len).
- let to: Number = ConvertToRelativeIndex(relative_target, length);
+ let to: Number = ConvertToRelativeIndex(relativeTarget, length);
// 5. Let relativeStart be ? ToInteger(start).
- const relative_start: Number = ToInteger_Inline(context, arguments[1]);
+ const relativeStart: Number = ToInteger_Inline(context, arguments[1]);
// 6. If relativeStart < 0, let from be max((len + relativeStart), 0);
// else let from be min(relativeStart, len).
- let from: Number = ConvertToRelativeIndex(relative_start, length);
+ let from: Number = ConvertToRelativeIndex(relativeStart, length);
// 7. If end is undefined, let relativeEnd be len;
// else let relativeEnd be ? ToInteger(end).
- let relative_end: Number = length;
+ let relativeEnd: Number = length;
if (arguments[2] != Undefined) {
- relative_end = ToInteger_Inline(context, arguments[2]);
+ relativeEnd = ToInteger_Inline(context, arguments[2]);
}
// 8. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
// else let final be min(relativeEnd, len).
- const final: Number = ConvertToRelativeIndex(relative_end, length);
+ const final: Number = ConvertToRelativeIndex(relativeEnd, length);
// 9. Let count be min(final-from, len-to).
- let count: Number = min(final - from, length - to);
+ let count: Number = Min(final - from, length - to);
// 10. If from<to and to<from+count, then.
let direction: Number = 1;
@@ -63,15 +63,15 @@ module array {
// a. Let fromKey be ! ToString(from).
// b. Let toKey be ! ToString(to).
// c. Let fromPresent be ? HasProperty(O, fromKey).
- const from_present: Boolean = HasProperty(context, object, from);
+ const fromPresent: Boolean = HasProperty(context, object, from);
// d. If fromPresent is true, then.
- if (from_present == True) {
+ if (fromPresent == True) {
// i. Let fromVal be ? Get(O, fromKey).
- const from_val: Object = GetProperty(context, object, from);
+ const fromVal: Object = GetProperty(context, object, from);
// ii. Perform ? Set(O, toKey, fromVal, true).
- SetProperty(context, object, to, from_val);
+ SetProperty(context, object, to, fromVal);
} else {
// i. Perform ? DeletePropertyOrThrow(O, toKey).
DeleteProperty(context, object, to, kStrict);
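
The Max/Min intrinsics renamed above implement the spec's relative-index clamping. A minimal plain-C++ sketch with a worked example (double stands in for Torque's Number type):

#include <algorithm>

double ConvertToRelativeIndex(double index, double length) {
  return index < 0 ? std::max(index + length, 0.0)
                   : std::min(index, length);
}
// With length 5: index 2 -> 2, index 7 -> 5 (clamped),
// index -2 -> 3, index -9 -> 0 (clamped).
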
diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq
index c0e19c0803..5a189a517f 100644
--- a/deps/v8/src/builtins/array-foreach.tq
+++ b/deps/v8/src/builtins/array-foreach.tq
@@ -5,20 +5,21 @@
module array {
macro ArrayForEachTorqueContinuation(
context: Context, o: JSReceiver, len: Number, callbackfn: Callable,
- thisArg: Object, initial_k: Smi): Object {
+ thisArg: Object, initialK: Number): Object {
// 5. Let k be 0.
// 6. Repeat, while k < len
- for (let k: Smi = initial_k; k < len; k = k + 1) {
+ for (let k: Number = initialK; k < len; k = k + 1) {
// 6a. Let Pk be ! ToString(k).
- const pK: String = ToString_Inline(context, k);
+ // k is guaranteed to be a non-negative integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
// 6b. Let kPresent be ? HasProperty(O, Pk).
- const kPresent: Boolean = HasProperty(context, o, pK);
+ const kPresent: Boolean = HasProperty_Inline(context, o, k);
// 6c. If kPresent is true, then
if (kPresent == True) {
// 6c. i. Let kValue be ? Get(O, Pk).
- const kValue: Object = GetProperty(context, o, pK);
+ const kValue: Object = GetProperty(context, o, k);
// 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
Call(context, callbackfn, thisArg, kValue, k, o);
@@ -32,10 +33,10 @@ module array {
javascript builtin ArrayForEachLoopEagerDeoptContinuation(
context: Context, receiver: Object, callback: Object, thisArg: Object,
initialK: Object, length: Object): Object {
- // The unsafe cast is safe because all continuation points in forEach are
+ // The unsafe Cast is safe because all continuation points in forEach are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
- const jsreceiver: JSReceiver = unsafe_cast<JSReceiver>(receiver);
+ const jsreceiver: JSReceiver = UnsafeCast<JSReceiver>(receiver);
return ArrayForEachLoopContinuation(
context, jsreceiver, callback, thisArg, Undefined, jsreceiver, initialK,
length, Undefined);
@@ -44,10 +45,10 @@ module array {
javascript builtin ArrayForEachLoopLazyDeoptContinuation(
context: Context, receiver: Object, callback: Object, thisArg: Object,
initialK: Object, length: Object, result: Object): Object {
- // The unsafe cast is safe because all continuation points in forEach are
+ // The unsafe Cast is safe because all continuation points in forEach are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
- const jsreceiver: JSReceiver = unsafe_cast<JSReceiver>(receiver);
+ const jsreceiver: JSReceiver = UnsafeCast<JSReceiver>(receiver);
return ArrayForEachLoopContinuation(
context, jsreceiver, callback, thisArg, Undefined, jsreceiver, initialK,
length, Undefined);
@@ -59,22 +60,22 @@ module array {
to: Object): Object {
try {
const callbackfn: Callable =
- cast<Callable>(callback) otherwise Unexpected;
- const k: Smi = cast<Smi>(initialK) otherwise Unexpected;
- const number_length: Number = cast<Number>(length) otherwise Unexpected;
+ Cast<Callable>(callback) otherwise Unexpected;
+ const k: Number = Cast<Number>(initialK) otherwise Unexpected;
+ const numberLength: Number = Cast<Number>(length) otherwise Unexpected;
return ArrayForEachTorqueContinuation(
- context, receiver, number_length, callbackfn, thisArg, k);
+ context, receiver, numberLength, callbackfn, thisArg, k);
}
- label Unexpected {
+ label Unexpected deferred {
unreachable;
}
}
- macro VisitAllElements<FixedArrayType : type>(
+ macro VisitAllElements<FixedArrayType: type>(
context: Context, a: JSArray, len: Smi, callbackfn: Callable,
- thisArg: Object): void labels
- Bailout(Smi) {
+ thisArg: Object): void
+ labels Bailout(Smi) {
let k: Smi = 0;
const map: Map = a.map;
@@ -100,19 +101,19 @@ module array {
}
}
}
- label Slow {
+ label Slow deferred {
goto Bailout(k);
}
}
macro FastArrayForEach(
context: Context, o: JSReceiver, len: Number, callbackfn: Callable,
- thisArg: Object): Object labels
- Bailout(Smi) {
+ thisArg: Object): Object
+ labels Bailout(Smi) {
let k: Smi = 0;
try {
- const smi_len: Smi = cast<Smi>(len) otherwise Slow;
- const a: JSArray = cast<JSArray>(o) otherwise Slow;
+ const smiLen: Smi = Cast<Smi>(len) otherwise Slow;
+ const a: JSArray = Cast<JSArray>(o) otherwise Slow;
const map: Map = a.map;
if (!IsPrototypeInitialArrayPrototype(context, map)) goto Slow;
@@ -121,14 +122,14 @@ module array {
if (IsElementsKindGreaterThan(elementsKind, HOLEY_ELEMENTS)) {
VisitAllElements<FixedDoubleArray>(
- context, a, smi_len, callbackfn, thisArg)
- otherwise Bailout;
+ context, a, smiLen, callbackfn, thisArg)
+ otherwise Bailout;
} else {
- VisitAllElements<FixedArray>(context, a, smi_len, callbackfn, thisArg)
- otherwise Bailout;
+ VisitAllElements<FixedArray>(context, a, smiLen, callbackfn, thisArg)
+ otherwise Bailout;
}
}
- label Slow {
+ label Slow deferred {
goto Bailout(k);
}
return Undefined;
@@ -153,28 +154,28 @@ module array {
goto TypeError;
}
const callbackfn: Callable =
- cast<Callable>(arguments[0]) otherwise TypeError;
+ Cast<Callable>(arguments[0]) otherwise TypeError;
// 4. If thisArg is present, let T be thisArg; else let T be undefined.
const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
// Special cases.
- let k: Smi = 0;
+ let k: Number = 0;
try {
return FastArrayForEach(context, o, len, callbackfn, thisArg)
- otherwise Bailout;
+ otherwise Bailout;
}
- label Bailout(k_value: Smi) {
- k = k_value;
+ label Bailout(kValue: Smi) deferred {
+ k = kValue;
}
return ArrayForEachTorqueContinuation(
context, o, len, callbackfn, thisArg, k);
}
- label TypeError {
+ label TypeError deferred {
ThrowTypeError(context, kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError {
+ label NullOrUndefinedError deferred {
ThrowTypeError(
context, kCalledOnNullOrUndefined, 'Array.prototype.forEach');
}
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq
new file mode 100644
index 0000000000..056220092e
--- /dev/null
+++ b/deps/v8/src/builtins/array-lastindexof.tq
@@ -0,0 +1,159 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module array {
+ macro LoadWithHoleCheck<Elements: type>(
+ elements: FixedArrayBase, index: Smi): Object
+ labels IfHole;
+
+ LoadWithHoleCheck<FixedArray>(elements: FixedArrayBase, index: Smi): Object
+ labels IfHole {
+ const elements: FixedArray = UnsafeCast<FixedArray>(elements);
+ const element: Object = elements[index];
+ if (element == Hole) goto IfHole;
+ return element;
+ }
+
+ LoadWithHoleCheck<FixedDoubleArray>(elements: FixedArrayBase, index: Smi):
+ Object
+ labels IfHole {
+ const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
+ const element: float64 = LoadDoubleWithHoleCheck(elements, index)
+ otherwise IfHole;
+ return AllocateHeapNumberWithValue(element);
+ }
+
+ macro FastArrayLastIndexOf<Elements: type>(
+ context: Context, array: JSArray, from: Smi, searchElement: Object): Smi {
+ const elements: FixedArrayBase = array.elements;
+ let k: Smi = from;
+
+ // Bug(898785): Due to side-effects in the evaluation of `fromIndex`
+ // the {from} can be out-of-bounds here, so we need to clamp {k} to
+ // the {elements} length. We might be reading holes / hole NaNs still
+ // due to that, but those will be ignored below.
+ if (k >= elements.length) {
+ k = elements.length - 1;
+ }
+
+ while (k >= 0) {
+ try {
+ const element: Object = LoadWithHoleCheck<Elements>(elements, k)
+ otherwise Hole;
+
+ const same: Boolean = StrictEqual(searchElement, element);
+ if (same == True) {
+ assert(IsFastJSArray(array, context));
+ return k;
+ }
+ }
+ label Hole {} // Do nothing for holes.
+
+ --k;
+ }
+
+ assert(IsFastJSArray(array, context));
+ return -1;
+ }
+
+ macro GetFromIndex(
+ context: Context, length: Number,
+ arguments: constexpr Arguments): Number {
+ // 4. If fromIndex is present, let n be ? ToInteger(fromIndex);
+ // else let n be len - 1.
+ const n: Number = arguments.length < 2 ?
+ length - 1 :
+ ToInteger_Inline(context, arguments[1], kTruncateMinusZero);
+
+ // 5. If n >= 0, then.
+ let k: Number = SmiConstant(0);
+ if (n >= 0) {
+ // a. If n is -0, let k be +0; else let k be min(n, len - 1).
+ // If n was -0 it got truncated to 0.0, so taking the minimum is fine.
+ k = Min(n, length - 1);
+ } else {
+ // a. Let k be len + n.
+ k = length + n;
+ }
+ return k;
+ }
+
+ macro TryFastArrayLastIndexOf(
+ context: Context, receiver: JSReceiver, searchElement: Object,
+ from: Number): Object
+ labels Slow {
+ EnsureFastJSArray(context, receiver) otherwise Slow;
+ const array: JSArray = UnsafeCast<JSArray>(receiver);
+
+ const length: Smi = array.length_fast;
+ if (length == 0) return SmiConstant(-1);
+
+ const fromSmi: Smi = Cast<Smi>(from) otherwise Slow;
+ const kind: ElementsKind = array.map.elements_kind;
+ if (IsFastSmiOrTaggedElementsKind(kind)) {
+ return FastArrayLastIndexOf<FixedArray>(
+ context, array, fromSmi, searchElement);
+ }
+ assert(IsDoubleElementsKind(kind));
+ return FastArrayLastIndexOf<FixedDoubleArray>(
+ context, array, fromSmi, searchElement);
+ }
+
+ macro GenericArrayLastIndexOf(
+ context: Context, object: JSReceiver, searchElement: Object,
+ from: Number): Object {
+ let k: Number = from;
+
+ // 7. Repeat, while k >= 0.
+ while (k >= 0) {
+ // a. Let kPresent be ? HasProperty(O, ! ToString(k)).
+ const kPresent: Boolean = HasProperty(context, object, k);
+
+ // b. If kPresent is true, then.
+ if (kPresent == True) {
+ // i. Let elementK be ? Get(O, ! ToString(k)).
+ const element: Object = GetProperty(context, object, k);
+
+ // ii. Let same be the result of performing Strict Equality Comparison
+ // searchElement === elementK.
+ const same: Boolean = StrictEqual(searchElement, element);
+
+ // iii. If same is true, return k.
+ if (same == True) return k;
+ }
+
+ // c. Decrease k by 1.
+ --k;
+ }
+
+ // 8. Return -1.
+ return SmiConstant(-1);
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf
+ javascript builtin ArrayPrototypeLastIndexOf(
+ context: Context, receiver: Object, ...arguments): Object {
+ // 1. Let O be ? ToObject(this value).
+ const object: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const length: Number = GetLengthProperty(context, object);
+
+ // 3. If len is 0, return -1.
+ if (length == SmiConstant(0)) return SmiConstant(-1);
+
+ // Step 4 - 6.
+ const from: Number = GetFromIndex(context, length, arguments);
+
+ const searchElement: Object = arguments[0];
+
+ try {
+ return TryFastArrayLastIndexOf(context, object, searchElement, from)
+ otherwise Baseline;
+ }
+ label Baseline {
+ return GenericArrayLastIndexOf(context, object, searchElement, from);
+ }
+ }
+}
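
GetFromIndex above is the spec's steps 4-5 for fromIndex. A small plain-C++ sketch with worked values (double stands in for Number; -0 is assumed already truncated to +0 by kTruncateMinusZero):

#include <algorithm>
#include <cstdio>

double GetFromIndexSketch(double n, double length, bool from_present) {
  if (!from_present) n = length - 1;       // step 4, default: len - 1
  return n >= 0 ? std::min(n, length - 1)  // step 5a
                : length + n;              // step 5b; may stay negative
}

int main() {
  std::printf("%g %g %g\n",
              GetFromIndexSketch(10, 5, true),   // 4 (clamped to len - 1)
              GetFromIndexSketch(-2, 5, true),   // 3 (len + n)
              GetFromIndexSketch(0, 5, false));  // 4 (fromIndex absent)
  return 0;
}
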
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index 8db542ddef..327ef12402 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -3,25 +3,25 @@
// found in the LICENSE file.
module array {
- macro LoadElement<ElementsAccessor : type, T : type>(
+ macro LoadElement<ElementsAccessor: type, T: type>(
elements: FixedArrayBase, index: Smi): T;
LoadElement<FastPackedSmiElements, Smi>(
elements: FixedArrayBase, index: Smi): Smi {
- const elems: FixedArray = unsafe_cast<FixedArray>(elements);
- return unsafe_cast<Smi>(elems[index]);
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
+ return UnsafeCast<Smi>(elems[index]);
}
LoadElement<FastPackedObjectElements, Object>(
elements: FixedArrayBase, index: Smi): Object {
- const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
return elems[index];
}
LoadElement<FastPackedDoubleElements, float64>(
elements: FixedArrayBase, index: Smi): float64 {
try {
- const elems: FixedDoubleArray = unsafe_cast<FixedDoubleArray>(elements);
+ const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
return LoadDoubleWithHoleCheck(elems, index) otherwise Hole;
}
label Hole {
@@ -31,24 +31,24 @@ module array {
}
}
- macro StoreElement<ElementsAccessor : type, T : type>(
+ macro StoreElement<ElementsAccessor: type, T: type>(
elements: FixedArrayBase, index: Smi, value: T);
StoreElement<FastPackedSmiElements, Smi>(
elements: FixedArrayBase, index: Smi, value: Smi) {
- const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
StoreFixedArrayElementSmi(elems, index, value, SKIP_WRITE_BARRIER);
}
StoreElement<FastPackedObjectElements, Object>(
elements: FixedArrayBase, index: Smi, value: Object) {
- const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
elems[index] = value;
}
StoreElement<FastPackedDoubleElements, float64>(
elements: FixedArrayBase, index: Smi, value: float64) {
- const elems: FixedDoubleArray = unsafe_cast<FixedDoubleArray>(elements);
+ const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
assert(value == Float64SilenceNaN(value));
StoreFixedDoubleArrayElementWithSmiIndex(elems, index, value);
@@ -57,16 +57,16 @@ module array {
// Fast-path for all PACKED_* elements kinds. These do not need to check
// whether a property is present, so we can simply swap them using fast
// FixedArray loads/stores.
- macro FastPackedArrayReverse<Accessor : type, T : type>(
+ macro FastPackedArrayReverse<Accessor: type, T: type>(
elements: FixedArrayBase, length: Smi) {
let lower: Smi = 0;
let upper: Smi = length - 1;
while (lower < upper) {
- const lower_value: T = LoadElement<Accessor, T>(elements, lower);
- const upper_value: T = LoadElement<Accessor, T>(elements, upper);
- StoreElement<Accessor, T>(elements, lower, upper_value);
- StoreElement<Accessor, T>(elements, upper, lower_value);
+ const lowerValue: T = LoadElement<Accessor, T>(elements, lower);
+ const upperValue: T = LoadElement<Accessor, T>(elements, upper);
+ StoreElement<Accessor, T>(elements, lower, upperValue);
+ StoreElement<Accessor, T>(elements, upper, lowerValue);
++lower;
--upper;
}
@@ -90,48 +90,48 @@ module array {
let upper: Number = length - 1;
while (lower < upper) {
- let lower_value: Object = Undefined;
- let upper_value: Object = Undefined;
+ let lowerValue: Object = Undefined;
+ let upperValue: Object = Undefined;
// b. Let upperP be ! ToString(upper).
// c. Let lowerP be ! ToString(lower).
// d. Let lowerExists be ? HasProperty(O, lowerP).
- const lower_exists: Boolean = HasProperty(context, object, lower);
+ const lowerExists: Boolean = HasProperty(context, object, lower);
// e. If lowerExists is true, then.
- if (lower_exists == True) {
+ if (lowerExists == True) {
// i. Let lowerValue be ? Get(O, lowerP).
- lower_value = GetProperty(context, object, lower);
+ lowerValue = GetProperty(context, object, lower);
}
// f. Let upperExists be ? HasProperty(O, upperP).
- const upper_exists: Boolean = HasProperty(context, object, upper);
+ const upperExists: Boolean = HasProperty(context, object, upper);
// g. If upperExists is true, then.
- if (upper_exists == True) {
+ if (upperExists == True) {
// i. Let upperValue be ? Get(O, upperP).
- upper_value = GetProperty(context, object, upper);
+ upperValue = GetProperty(context, object, upper);
}
// h. If lowerExists is true and upperExists is true, then
- if (lower_exists == True && upper_exists == True) {
+ if (lowerExists == True && upperExists == True) {
// i. Perform ? Set(O, lowerP, upperValue, true).
- SetProperty(context, object, lower, upper_value);
+ SetProperty(context, object, lower, upperValue);
// ii. Perform ? Set(O, upperP, lowerValue, true).
- SetProperty(context, object, upper, lower_value);
- } else if (lower_exists == False && upper_exists == True) {
+ SetProperty(context, object, upper, lowerValue);
+ } else if (lowerExists == False && upperExists == True) {
// i. Perform ? Set(O, lowerP, upperValue, true).
- SetProperty(context, object, lower, upper_value);
+ SetProperty(context, object, lower, upperValue);
// ii. Perform ? DeletePropertyOrThrow(O, upperP).
DeleteProperty(context, object, upper, kStrict);
- } else if (lower_exists == True && upper_exists == False) {
+ } else if (lowerExists == True && upperExists == False) {
// i. Perform ? DeletePropertyOrThrow(O, lowerP).
DeleteProperty(context, object, lower, kStrict);
// ii. Perform ? Set(O, upperP, lowerValue, true).
- SetProperty(context, object, upper, lower_value);
+ SetProperty(context, object, upper, lowerValue);
}
// l. Increase lower by 1.
@@ -143,29 +143,16 @@ module array {
return object;
}
- macro EnsureWriteableFastElements(array: JSArray) {
- const elements: FixedArrayBase = array.elements;
- if (elements.map != kCOWMap) return;
-
- // There are no COW *_DOUBLE_ELEMENTS arrays, so we are allowed to always
- // extract FixedArrays and don't have to worry about FixedDoubleArrays.
- assert(IsFastSmiOrTaggedElementsKind(array.map.elements_kind));
-
- const length: Smi = array.length_fast;
- array.elements = ExtractFixedArray(
- unsafe_cast<FixedArray>(elements), 0, length, length, kFixedArrays);
- }
-
macro TryFastPackedArrayReverse(receiver: Object) labels Slow {
- const array: JSArray = cast<JSArray>(receiver) otherwise Slow;
- EnsureWriteableFastElements(array);
- assert(array.elements.map != kCOWMap);
+ const array: JSArray = Cast<JSArray>(receiver) otherwise Slow;
const kind: ElementsKind = array.map.elements_kind;
if (kind == PACKED_SMI_ELEMENTS) {
+ EnsureWriteableFastElements(array);
FastPackedArrayReverse<FastPackedSmiElements, Smi>(
array.elements, array.length_fast);
} else if (kind == PACKED_ELEMENTS) {
+ EnsureWriteableFastElements(array);
FastPackedArrayReverse<FastPackedObjectElements, Object>(
array.elements, array.length_fast);
} else if (kind == PACKED_DOUBLE_ELEMENTS) {
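
Packed element kinds cannot contain holes, so FastPackedArrayReverse above is a plain two-pointer swap over the backing store. The same shape on a std::vector (toy sketch):

#include <cstddef>
#include <utility>
#include <vector>

void FastPackedReverse(std::vector<int>& elements) {
  std::size_t lower = 0;
  std::size_t upper = elements.size();
  // Swap inward from both ends; no per-slot presence checks are needed.
  while (upper > lower + 1) {
    --upper;
    std::swap(elements[lower], elements[upper]);
    ++lower;
  }
}
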
diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq
new file mode 100644
index 0000000000..16a192d2c0
--- /dev/null
+++ b/deps/v8/src/builtins/array-splice.tq
@@ -0,0 +1,395 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module array {
+ // Given {elements}, we want to create a non-zero length array of type
+ // FixedArrayType. Most of this behavior is outsourced to ExtractFixedArray(),
+ // but the special case of wanting to have a FixedDoubleArray when given a
+ // zero-length input FixedArray is handled here.
+ macro Extract<FixedArrayType: type>(
+ elements: FixedArrayBase, first: Smi, count: Smi,
+ capacity: Smi): FixedArrayType {
+ return UnsafeCast<FixedArrayType>(
+ ExtractFixedArray(elements, first, count, capacity));
+ }
+
+ Extract<FixedDoubleArray>(
+ elements: FixedArrayBase, first: Smi, count: Smi,
+ capacity: Smi): FixedDoubleArray {
+ if (elements == kEmptyFixedArray) {
+ return AllocateZeroedFixedDoubleArray(Convert<intptr>(capacity));
+ }
+ return UnsafeCast<FixedDoubleArray>(
+ ExtractFixedArray(elements, first, count, capacity));
+ }
+
+ macro FastSplice<FixedArrayType: type, ElementType: type>(
+ args: constexpr Arguments, a: JSArray, length: Smi, newLength: Smi,
+ lengthDelta: Smi, actualStart: Smi, insertCount: Smi,
+ actualDeleteCount: Smi): void
+ labels Bailout {
+ const elements: FixedArrayBase = a.elements;
+ const elementsMap: Map = elements.map;
+
+ // If the spliced array is larger than the
+ // source array, then allocate a new FixedArrayType to hold the result.
+ let newElements: FixedArrayBase = elements;
+ if (elementsMap == kCOWMap || lengthDelta > 0) {
+ newElements =
+ Extract<FixedArrayType>(elements, 0, actualStart, newLength);
+ if (elementsMap == kCOWMap) {
+ newElements.map = elementsMap;
+ }
+ a.elements = newElements;
+ }
+
+ // Copy over inserted elements.
+ let k: Smi = actualStart;
+ if (insertCount > 0) {
+ const typedNewElements: FixedArrayType =
+ UnsafeCast<FixedArrayType>(newElements);
+ for (let e: Object of args [2: ]) {
+ // The argument elements were already validated to be an appropriate
+ // {ElementType} to store in {FixedArrayType}.
+ typedNewElements[k++] = UnsafeCast<ElementType>(e);
+ }
+ }
+
+ // Copy over elements after deleted elements.
+ let count: Smi = length - actualStart - actualDeleteCount;
+ while (count > 0) {
+ const typedElements: FixedArrayType =
+ UnsafeCast<FixedArrayType>(elements);
+ const typedNewElements: FixedArrayType =
+ UnsafeCast<FixedArrayType>(newElements);
+ CopyArrayElement(typedElements, typedNewElements, k - lengthDelta, k);
+ k++;
+ count--;
+ }
+
+    // Fill the rest of the spliced FixedArray with holes, but only if the
+    // destination FixedArray is the original array's backing store; a freshly
+    // extracted FixedArray is already pre-filled with holes.
+ if (elements == newElements) {
+ const typedNewElements: FixedArrayType =
+ UnsafeCast<FixedArrayType>(newElements);
+ const limit: Smi = elements.length;
+ while (k < limit) {
+ StoreArrayHole(typedNewElements, k);
+ k++;
+ }
+ }
+
+ // Update the array's length after all the FixedArray shuffling is done.
+ a.length = newLength;
+ }
+
+ macro FastArraySplice(
+ context: Context, args: constexpr Arguments, o: JSReceiver,
+ originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi,
+ actualDeleteCountNumber: Number): Object
+ labels Bailout {
+ const originalLength: Smi =
+ Cast<Smi>(originalLengthNumber) otherwise Bailout;
+ const actualStart: Smi = Cast<Smi>(actualStartNumber) otherwise Bailout;
+ const actualDeleteCount: Smi =
+ Cast<Smi>(actualDeleteCountNumber) otherwise Bailout;
+ const lengthDelta: Smi = insertCount - actualDeleteCount;
+ const newLength: Smi = originalLength + lengthDelta;
+
+ const a: JSArray = Cast<JSArray>(o) otherwise Bailout;
+
+ const map: Map = a.map;
+ if (!IsPrototypeInitialArrayPrototype(context, map)) goto Bailout;
+ if (IsNoElementsProtectorCellInvalid()) goto Bailout;
+ if (IsArraySpeciesProtectorCellInvalid()) goto Bailout;
+
+ // Fast path only works on fast elements kind and with writable length.
+ let elementsKind: ElementsKind = EnsureArrayPushable(map) otherwise Bailout;
+ if (!IsFastElementsKind(elementsKind)) goto Bailout;
+
+ const oldElementsKind: ElementsKind = elementsKind;
+ for (let e: Object of args [2: ]) {
+ if (IsFastSmiElementsKind(elementsKind)) {
+ if (TaggedIsNotSmi(e)) {
+ const heapObject: HeapObject = UnsafeCast<HeapObject>(e);
+ elementsKind = IsHeapNumber(heapObject) ?
+ AllowDoubleElements(elementsKind) :
+ AllowNonNumberElements(elementsKind);
+ }
+ } else if (IsDoubleElementsKind(elementsKind)) {
+ if (!IsNumber(e)) {
+ elementsKind = AllowNonNumberElements(elementsKind);
+ }
+ }
+ }
+
+ if (elementsKind != oldElementsKind) {
+ const smiElementsKind: Smi = Convert<Smi>(Convert<int32>(elementsKind));
+ TransitionElementsKindWithKind(context, a, smiElementsKind);
+ }
+
+ // Make sure that the length hasn't been changed by side-effect.
+ const length: Smi = Cast<Smi>(a.length) otherwise Bailout;
+ if (originalLength != length) goto Bailout;
+
+ const deletedResult: JSArray =
+ ExtractFastJSArray(context, a, actualStart, actualDeleteCount);
+
+ if (newLength == 0) {
+ a.elements = kEmptyFixedArray;
+ a.length = 0;
+ return deletedResult;
+ }
+
+ if (IsFastSmiOrTaggedElementsKind(elementsKind)) {
+ FastSplice<FixedArray, Object>(
+ args, a, length, newLength, lengthDelta, actualStart, insertCount,
+ actualDeleteCount) otherwise Bailout;
+ } else {
+ FastSplice<FixedDoubleArray, Number>(
+ args, a, length, newLength, lengthDelta, actualStart, insertCount,
+ actualDeleteCount) otherwise Bailout;
+ }
+
+ return deletedResult;
+ }
+
+ macro FillDeletedElementsArray(
+ context: Context, o: JSReceiver, actualStart: Number,
+ actualDeleteCount: Number, a: JSReceiver): Object {
+ // 10. Let k be 0.
+ let k: Number = 0;
+
+ // 11. Repeat, while k < actualDeleteCount
+ while (k < actualDeleteCount) {
+ // a. Let from be ! ToString(actualStart + k).
+ const from: Number = actualStart + k;
+
+ // b. Let fromPresent be ? HasProperty(O, from).
+ const fromPresent: Boolean = HasProperty(context, o, from);
+
+ // c. If fromPresent is true, then
+ if (fromPresent == True) {
+ // i. Let fromValue be ? Get(O, from).
+ const fromValue: Object = GetProperty(context, o, from);
+
+ // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(k), fromValue).
+ CreateDataProperty(context, a, k, fromValue);
+ }
+
+ // d. Increment k by 1.
+ k++;
+ }
+ // 12. Perform ? Set(A, "length", actualDeleteCount, true).
+ SetProperty(context, a, kLengthString, actualDeleteCount);
+ return a;
+ }
+
+  // HandleForwardCase implements step 15. "If itemCount < actualDeleteCount,
+  // then..."
+ macro HandleForwardCase(
+ context: Context, o: JSReceiver, len: Number, itemCount: Number,
+ actualStart: Number, actualDeleteCount: Number): void {
+ // 15. If itemCount < actualDeleteCount, then
+ // a. Let k be actualStart.
+ let k: Number = actualStart;
+
+ // b. Repeat, while k < (len - actualDeleteCount)
+ while (k < (len - actualDeleteCount)) {
+ // i. Let from be ! ToString(k + actualDeleteCount).
+ const from: Number = k + actualDeleteCount;
+ // ii. Let to be ! ToString(k + itemCount).
+ const to: Number = k + itemCount;
+
+ // iii. Let fromPresent be ? HasProperty(O, from).
+ const fromPresent: Boolean = HasProperty(context, o, from);
+
+ // iv. If fromPresent is true, then
+ if (fromPresent == True) {
+ // 1. Let fromValue be ? Get(O, from).
+ const fromValue: Object = GetProperty(context, o, from);
+
+ // 2. Perform ? Set(O, to, fromValue, true).
+ SetProperty(context, o, to, fromValue);
+
+ // v. Else fromPresent is false,
+ } else {
+ // 1. Perform ? DeletePropertyOrThrow(O, to).
+ DeleteProperty(context, o, to, kStrict);
+ }
+ // vi. Increase k by 1.
+ k++;
+ }
+
+ // c. Let k be len.
+ k = len;
+
+ // d. Repeat, while k > (len - actualDeleteCount + itemCount)
+ while (k > (len - actualDeleteCount + itemCount)) {
+ // i. Perform ? DeletePropertyOrThrow(O, ! ToString(k - 1)).
+ DeleteProperty(context, o, k - 1, kStrict);
+ // ii. Decrease k by 1.
+ k--;
+ }
+ }
+
+ // HandleBackwardCase implements step 16. "Else if itemCount >
+ // actualDeleteCount, then..."
+ macro HandleBackwardCase(
+ context: Context, o: JSReceiver, len: Number, itemCount: Number,
+ actualStart: Number, actualDeleteCount: Number): void {
+ // 16. Else if itemCount > actualDeleteCount, then
+ // a. Let k be (len - actualDeleteCount).
+ let k: Number = len - actualDeleteCount;
+
+ // b. Repeat, while k > actualStart
+ while (k > actualStart) {
+ // i. Let from be ! ToString(k + actualDeleteCount - 1).
+ const from: Number = k + actualDeleteCount - 1;
+
+ // ii. Let to be ! ToString(k + itemCount - 1).
+ const to: Number = k + itemCount - 1;
+
+ // iii. Let fromPresent be ? HasProperty(O, from).
+ const fromPresent: Boolean = HasProperty(context, o, from);
+
+ // iv. If fromPresent is true, then
+ if (fromPresent == True) {
+ // 1. Let fromValue be ? Get(O, from).
+ const fromValue: Object = GetProperty(context, o, from);
+
+ // 2. Perform ? Set(O, to, fromValue, true).
+ SetProperty(context, o, to, fromValue);
+
+ // v. Else fromPresent is false,
+ } else {
+ // 1. Perform ? DeletePropertyOrThrow(O, to).
+ DeleteProperty(context, o, to, kStrict);
+ }
+
+ // vi. Decrease k by 1.
+ k--;
+ }
+ }
+
+ macro SlowSplice(
+ context: Context, arguments: constexpr Arguments, o: JSReceiver,
+ len: Number, actualStart: Number, insertCount: Smi,
+ actualDeleteCount: Number): Object {
+ const affected: Number = len - actualStart - actualDeleteCount;
+
+ // 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount).
+ const a: JSReceiver = ArraySpeciesCreate(context, o, actualDeleteCount);
+ const itemCount: Number = insertCount;
+
+    // Steps 10 through 12: creating the array of deleted elements.
+ FillDeletedElementsArray(context, o, actualStart, actualDeleteCount, a);
+
+ // 13. Let items be a List whose elements are, in left-to-right order,
+ // the portion of the actual argument list starting with the third
+ // argument. The list is empty if fewer than three arguments were
+ // passed.
+ // 14. Let itemCount be the Number of elements in items.
+ // (done above).
+
+ // 15. If itemCount < actualDeleteCount, then
+ if (itemCount < actualDeleteCount) {
+ HandleForwardCase(
+ context, o, len, itemCount, actualStart, actualDeleteCount);
+ // 16. Else if itemCount > actualDeleteCount, then
+ } else if (itemCount > actualDeleteCount) {
+ HandleBackwardCase(
+ context, o, len, itemCount, actualStart, actualDeleteCount);
+ }
+
+ // 17. Let k be actualStart.
+ let k: Number = actualStart;
+
+ // 18. Repeat, while items is not empty
+ // a. Remove the first element from items and let E be the value of that
+ // element.
+ if (arguments.length > 2) {
+ for (let e: Object of arguments [2: ]) {
+ // b. Perform ? Set(O, ! ToString(k), E, true).
+ SetProperty(context, o, k, e);
+
+ // c. Increase k by 1.
+ k = k + 1;
+ }
+ }
+
+ // 19. Perform ? Set(O, "length", len - actualDeleteCount + itemCount,
+ // true).
+ SetProperty(context, o, kLengthString, len - actualDeleteCount + itemCount);
+
+ return a;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.splice
+ javascript builtin ArraySplice(
+ context: Context, receiver: Object, ...arguments): Object {
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(context, o);
+
+ // 3. Let relativeStart be ? ToInteger(start).
+ const start: Object = arguments[0];
+ const relativeStart: Number = ToInteger_Inline(context, start);
+
+ // 4. If relativeStart < 0, let actualStart be max((len + relativeStart),
+ // 0);
+ // else let actualStart be min(relativeStart, len).
+ const actualStart: Number = relativeStart < 0 ?
+ Max((len + relativeStart), 0) :
+ Min(relativeStart, len);
+
+ let insertCount: Smi;
+ let actualDeleteCount: Number;
+ // 5. If the Number of actual arguments is 0, then
+ if (arguments.length == 0) {
+ // a. Let insertCount be 0.
+ insertCount = 0;
+ // b. Let actualDeleteCount be 0.
+ actualDeleteCount = 0;
+ // 6. Else if the Number of actual arguments is 1, then
+ } else if (arguments.length == 1) {
+ // a. Let insertCount be 0.
+ insertCount = 0;
+ // b. Let actualDeleteCount be len - actualStart.
+ actualDeleteCount = len - actualStart;
+ // 7. Else,
+ } else {
+ // a. Let insertCount be the Number of actual arguments minus 2.
+ insertCount = Convert<Smi>(arguments.length) - 2;
+ // b. Let dc be ? ToInteger(deleteCount).
+ const deleteCount: Object = arguments[1];
+ const dc: Number = ToInteger_Inline(context, deleteCount);
+ // c. Let actualDeleteCount be min(max(dc, 0), len - actualStart).
+ actualDeleteCount = Min(Max(dc, 0), len - actualStart);
+ }
+
+    // 8. If len + insertCount - actualDeleteCount > 2^53-1, throw a
+    // TypeError exception.
+ const newLength: Number = len + insertCount - actualDeleteCount;
+ if (newLength > kMaxSafeInteger) {
+ ThrowTypeError(context, kInvalidArrayLength, start);
+ }
+
+ try {
+ return FastArraySplice(
+ context, arguments, o, len, actualStart, insertCount,
+ actualDeleteCount) otherwise Bailout;
+ }
+ label Bailout {}
+
+ // If the fast case fails, just continue with the slow, correct,
+ // spec-compliant case.
+ return SlowSplice(
+ context, arguments, o, len, actualStart, insertCount,
+ actualDeleteCount);
+ }
+}
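
The clamping that ArraySplice performs in steps 3-8, and that both FastArraySplice and SlowSplice then rely on, is easy to state outside of Torque. Below is a minimal TypeScript sketch of just that arithmetic; spliceBounds is a hypothetical helper (not a V8 API), and it assumes ToInteger has already been applied to start and deleteCount:

    // Index arithmetic of Array.prototype.splice, spec steps 3-8.
    function spliceBounds(len: number, argCount: number, start: number,
                          deleteCount: number) {
      // Step 4: a negative start counts back from the end, clamped to [0, len].
      const actualStart = start < 0 ? Math.max(len + start, 0)
                                    : Math.min(start, len);
      let insertCount = 0;        // Step 5: splice() deletes and inserts nothing.
      let actualDeleteCount = 0;
      if (argCount === 1) {
        // Step 6: splice(start) deletes everything from actualStart onward.
        actualDeleteCount = len - actualStart;
      } else if (argCount >= 2) {
        // Step 7: items begin at argument index 2; clamp deleteCount.
        insertCount = argCount - 2;
        actualDeleteCount = Math.min(Math.max(deleteCount, 0), len - actualStart);
      }
      // Step 8: the spliced length may not exceed 2**53 - 1.
      const newLength = len + insertCount - actualDeleteCount;
      if (newLength > Number.MAX_SAFE_INTEGER) {
        throw new TypeError('Invalid array length');
      }
      return { actualStart, actualDeleteCount, insertCount, newLength };
    }

    // spliceBounds(5, 3, -2, 10) ===
    //   { actualStart: 3, actualDeleteCount: 2, insertCount: 1, newLength: 4 }

The copy directions in the slow path follow from the same arithmetic: HandleForwardCase shrinks the array and walks front to back, while HandleBackwardCase grows it and walks back to front, so in both cases every element is read before its slot can be overwritten.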
diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq
new file mode 100644
index 0000000000..3595b139a4
--- /dev/null
+++ b/deps/v8/src/builtins/array-unshift.tq
@@ -0,0 +1,107 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module array {
+ extern builtin ArrayUnshift(Context, JSFunction, Object, int32);
+
+ macro TryFastArrayUnshift(
+ context: Context, receiver: Object, arguments: constexpr Arguments):
+ never
+ labels Slow {
+ EnsureFastJSArray(context, receiver) otherwise Slow;
+ const array: JSArray = UnsafeCast<JSArray>(receiver);
+ EnsureWriteableFastElements(array);
+
+ const map: Map = array.map;
+ if (!IsExtensibleMap(map)) goto Slow;
+ EnsureArrayLengthWritable(map) otherwise Slow;
+
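+    // All fast-path preconditions hold; tail-call the extern ArrayUnshift
+    // builtin, forwarding the JS target and argument count from the frame.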
+ tail ArrayUnshift(
+ context, LoadTargetFromFrame(), Undefined,
+ Convert<int32>(arguments.length));
+ }
+
+ macro GenericArrayUnshift(
+ context: Context, receiver: Object,
+ arguments: constexpr Arguments): Number {
+ // 1. Let O be ? ToObject(this value).
+ const object: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const length: Number = GetLengthProperty(context, object);
+
+ // 3. Let argCount be the number of actual arguments.
+ const argCount: Smi = Convert<Smi>(arguments.length);
+
+ // 4. If argCount > 0, then.
+ if (argCount > 0) {
+ // a. If len + argCount > 2**53 - 1, throw a TypeError exception.
+ if (length + argCount > kMaxSafeInteger) {
+ ThrowTypeError(context, kInvalidArrayLength);
+ }
+
+ // b. Let k be len.
+ let k: Number = length;
+
+ // c. Repeat, while k > 0.
+ while (k > 0) {
+ // i. Let from be ! ToString(k - 1).
+ const from: Number = k - 1;
+
+ // ii. Let to be ! ToString(k + argCount - 1).
+ const to: Number = k + argCount - 1;
+
+ // iii. Let fromPresent be ? HasProperty(O, from).
+ const fromPresent: Boolean = HasProperty(context, object, from);
+
+ // iv. If fromPresent is true, then
+ if (fromPresent == True) {
+ // 1. Let fromValue be ? Get(O, from).
+ const fromValue: Object = GetProperty(context, object, from);
+
+ // 2. Perform ? Set(O, to, fromValue, true).
+ SetProperty(context, object, to, fromValue);
+ } else {
+ // 1. Perform ? DeletePropertyOrThrow(O, to).
+ DeleteProperty(context, object, to, kStrict);
+ }
+
+ // vi. Decrease k by 1.
+ --k;
+ }
+
+ // d. Let j be 0.
+ let j: Smi = 0;
+
+ // e. Let items be a List whose elements are, in left to right order,
+ // the arguments that were passed to this function invocation.
+ // f. Repeat, while items is not empty
+ while (j < argCount) {
+        // ii. Perform ? Set(O, ! ToString(j), E, true).
+ SetProperty(context, object, j, arguments[Convert<intptr>(j)]);
+
+ // iii. Increase j by 1.
+ ++j;
+ }
+ }
+
+ // 5. Perform ? Set(O, "length", len + argCount, true).
+ const newLength: Number = length + argCount;
+ SetProperty(context, object, kLengthString, newLength);
+
+ // 6. Return length + argCount.
+ return newLength;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.unshift
+ javascript builtin ArrayPrototypeUnshift(
+ context: Context, receiver: Object, ...arguments): Object {
+ try {
+ TryFastArrayUnshift(context, receiver, arguments) otherwise Baseline;
+ }
+ label Baseline {
+ return GenericArrayUnshift(context, receiver, arguments);
+ }
+ }
+}
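
The back-to-front walk in GenericArrayUnshift is what keeps the shift correct without temporary storage: slot k + argCount - 1 is written only after slot k - 1 has been read. Here is a sketch of the same loops over a plain dense array; genericUnshift is a hypothetical model assuming no holes, whereas the real builtin goes through HasProperty/Get/Set/DeleteProperty on an arbitrary JSReceiver:

    // Dense-array model of GenericArrayUnshift's shifting loops.
    function genericUnshift<T>(o: T[], ...items: T[]): number {
      const len = o.length;
      const argCount = items.length;
      if (argCount > 0) {
        // Steps 4b-4c: shift existing elements up by argCount, back to front,
        // so each source slot is read before anything overwrites it.
        for (let k = len; k > 0; k--) {
          o[k + argCount - 1] = o[k - 1];
        }
        // Steps 4d-4f: drop the new items into the vacated prefix.
        for (let j = 0; j < argCount; j++) {
          o[j] = items[j];
        }
      }
      // Steps 5-6: set and return the new length.
      o.length = len + argCount;
      return len + argCount;
    }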
diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq
index 590947dd44..df4387878d 100644
--- a/deps/v8/src/builtins/array.tq
+++ b/deps/v8/src/builtins/array.tq
@@ -14,303 +14,56 @@ module array {
type FastDoubleElements;
type DictionaryElements;
- macro GetLengthProperty(context: Context, o: Object): Number {
- if (BranchIfFastJSArray(o, context)) {
- let a: JSArray = unsafe_cast<JSArray>(o);
- return a.length_fast;
- } else
- deferred {
- return ToLength_Inline(context, GetProperty(context, o, 'length'));
- }
- }
-
- macro FastArraySplice(
- context: Context, args: constexpr Arguments, o: Object,
- originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi,
- actualDeleteCountNumber: Number): Object
- labels Bailout {
- let originalLength: Smi = cast<Smi>(originalLengthNumber) otherwise Bailout;
- let actualStart: Smi = cast<Smi>(actualStartNumber) otherwise Bailout;
- let actualDeleteCount: Smi =
- cast<Smi>(actualDeleteCountNumber) otherwise Bailout;
- let lengthDelta: Smi = insertCount - actualDeleteCount;
- let newLength: Smi = originalLength + lengthDelta;
-
- let a: JSArray = cast<JSArray>(o) otherwise Bailout;
-
- let map: Map = a.map;
- if (!IsPrototypeInitialArrayPrototype(context, map)) goto Bailout;
- if (IsNoElementsProtectorCellInvalid()) goto Bailout;
- if (IsArraySpeciesProtectorCellInvalid()) goto Bailout;
-
- // Fast path only works on fast elements kind and with writable length.
- let elementsKind: ElementsKind = EnsureArrayPushable(map) otherwise Bailout;
- if (!IsFastElementsKind(elementsKind)) goto Bailout;
-
- // For now, only support non-double fast elements
- if (!IsFastSmiOrTaggedElementsKind(elementsKind)) goto Bailout;
-
- if (IsFastSmiElementsKind(elementsKind)) {
- for (let e: Object of args [2: ]) {
- if (TaggedIsNotSmi(e)) goto Bailout;
- }
- }
-
- // Make sure that the length hasn't been changed by side-effect.
- let length: Smi = cast<Smi>(a.length) otherwise Bailout;
- if (originalLength != length) goto Bailout;
-
- let deletedResult: JSArray =
- ExtractFastJSArray(context, a, actualStart, actualDeleteCount);
-
- if (newLength == 0) {
- a.elements = kEmptyFixedArray;
- a.length = 0;
- return deletedResult;
- }
-
- let elements: FixedArray = cast<FixedArray>(a.elements) otherwise Bailout;
- let elementsMap: Map = elements.map;
-
- // If the source is a COW array or the spliced array is larger then the
- // source array, then allocate a new FixedArray to hold the result.
- let newElements: FixedArray = elements;
- if ((elementsMap == kCOWMap) || (lengthDelta > 0)) {
- newElements = ExtractFixedArray(
- elements, 0, actualStart, newLength, kAllFixedArrays);
- newElements.map = elementsMap;
- a.elements = newElements;
- }
+ macro EnsureWriteableFastElements(array: JSArray) {
+ assert(IsFastElementsKind(array.map.elements_kind));
- // Double check that the array is still in fast elements mode
- assert(IsFastSmiElementsKind(a.map.elements_kind));
+ const elements: FixedArrayBase = array.elements;
+ if (elements.map != kCOWMap) return;
- // Copy over inserted elements.
- let k: Smi = actualStart;
- if (insertCount > 0) {
- for (let e: Object of args [2: ]) {
- newElements[k++] = e;
- }
- }
-
- // Copy over elements after deleted elements.
- let count: Smi = length - actualStart - actualDeleteCount;
- while (count > 0) {
- let e: Object = elements[k - lengthDelta];
- newElements[k++] = e;
- count--;
- }
-
- // Fill rest of spliced FixedArray with the hole, but only if the
- // destination FixedArray is the original array's, since otherwise the array
- // is pre-filled with holes.
- if (elements == newElements) {
- let limit: Smi = elements.length;
- while (k < limit) {
- newElements[k++] = Hole;
- }
- }
-
- // Update the array's length after all the FixedArray shuffling is done.
- a.length = newLength;
+    // There are no COW *_DOUBLE_ELEMENTS arrays, so we can always extract
+    // FixedArrays here and never need to handle FixedDoubleArrays.
+ assert(IsFastSmiOrTaggedElementsKind(array.map.elements_kind));
- return deletedResult;
+ const length: Smi = array.length_fast;
+ array.elements =
+ ExtractFixedArray(elements, 0, length, length, kFixedArrays);
+ assert(array.elements.map != kCOWMap);
}
- // https://tc39.github.io/ecma262/#sec-array.prototype.splice
- javascript builtin ArraySpliceTorque(
- context: Context, receiver: Object, ...arguments): Object {
- // 1. Let O be ? ToObject(this value).
- let o: JSReceiver = ToObject(context, receiver);
-
- // 2. Let len be ? ToLength(? Get(O, "length")).
- let len: Number = GetLengthProperty(context, o);
-
- // 3. Let relativeStart be ? ToInteger(start).
- let start: Object = arguments[0];
- let relativeStart: Number = ToInteger_Inline(context, start);
-
- // 4. If relativeStart < 0, let actualStart be max((len + relativeStart),
- // 0);
- // else let actualStart be min(relativeStart, len).
- let actualStart: Number = relativeStart < 0 ?
- max((len + relativeStart), 0) :
- min(relativeStart, len);
-
- let insertCount: Smi;
- let actualDeleteCount: Number;
- // 5. If the Number of actual arguments is 0, then
- if (arguments.length == 0) {
- // a. Let insertCount be 0.
- insertCount = 0;
- // b. Let actualDeleteCount be 0.
- actualDeleteCount = 0;
- // 6. Else if the Number of actual arguments is 1, then
- } else if (arguments.length == 1) {
- // a. Let insertCount be 0.
- insertCount = 0;
- // b. Let actualDeleteCount be len - actualStart.
- actualDeleteCount = len - actualStart;
- // 7. Else,
- } else {
- // a. Let insertCount be the Number of actual arguments minus 2.
- insertCount = convert<Smi>(arguments.length) - 2;
- // b. Let dc be ? ToInteger(deleteCount).
- let deleteCount: Object = arguments[1];
- let dc: Number = ToInteger_Inline(context, deleteCount);
- // c. Let actualDeleteCount be min(max(dc, 0), len - actualStart).
- actualDeleteCount = min(max(dc, 0), len - actualStart);
- }
-
- // 8. If len + insertCount - actualDeleteCount > 2^53-1, throw a
- // Bailout exception.
- if (len + insertCount - actualDeleteCount > kMaxSafeInteger) {
- ThrowRangeError(context, kInvalidArrayLength);
- }
-
+ macro IsJSArray(o: Object): bool {
try {
- return FastArraySplice(
- context, arguments, o, len, actualStart, insertCount,
- actualDeleteCount) otherwise Bailout;
+ const array: JSArray = Cast<JSArray>(o) otherwise NotArray;
+ return true;
}
- label Bailout {}
- // If the fast case fails, just continue with the slow, correct,
- // spec-compliant case.
-
- // 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount).
- let a: Object = ArraySpeciesCreate(context, o, actualDeleteCount);
-
- // 10. Let k be 0.
- let k: Number = 0;
-
- // 11. Repeat, while k < actualDeleteCount
- while (k < actualDeleteCount) {
- // a. Let from be ! ToString(actualStart + k).
- let from: String = ToString_Inline(context, actualStart + k);
-
- // b. Let fromPresent be ? HasProperty(O, from).
- let fromPresent: Oddball = HasProperty(context, o, from);
-
- // c. If fromPresent is true, then
- if (fromPresent == True) {
- // i. Let fromValue be ? Get(O, from).
- let fromValue: Object = GetProperty(context, o, from);
-
- // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(k), fromValue).
- CreateDataProperty(context, a, ToString_Inline(context, k), fromValue);
- }
-
- // d. Increment k by 1.
- k = k + 1;
+ label NotArray {
+ return false;
}
+ }
- // 12. Perform ? Set(A, "length", actualDeleteCount, true).
- SetProperty(context, a, 'length', actualDeleteCount);
-
- // 13. Let items be a List whose elements are, in left-to-right order,
- // the portion of the actual argument list starting with the third
- // argument. The list is empty if fewer than three arguments were
- // passed.
- // 14. Let itemCount be the Number of elements in items.
- let itemCount: Number = insertCount;
-
- // 15. If itemCount < actualDeleteCount, then
- if (itemCount < actualDeleteCount) {
- // a. Let k be actualStart.
- let k: Number = actualStart;
-
- // b. Repeat, while k < (len - actualDeleteCount)
- while (k < (len - actualDeleteCount)) {
- // i. Let from be ! ToString(k + actualDeleteCount).
- let from: String = ToString_Inline(context, k + actualDeleteCount);
- // ii. Let to be ! ToString(k + itemCount).
- let to: String = ToString_Inline(context, k + itemCount);
-
- // iii. Let fromPresent be ? HasProperty(O, from).
- let fromPresent: Oddball = HasProperty(context, o, from);
-
- // iv. If fromPresent is true, then
- if (fromPresent == True) {
- // 1. Let fromValue be ? Get(O, from).
- let fromValue: Object = GetProperty(context, o, from);
-
- // 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(context, o, to, fromValue);
-
- // v. Else fromPresent is false,
- } else {
- // 1. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(context, o, to, kStrict);
- }
- // vi. Increase k by 1.
- k = k + 1;
- }
-
- // c. Let k be len.
- k = len;
- // d. Repeat, while k > (len - actualDeleteCount + itemCount)
- while (k > (len - actualDeleteCount + itemCount)) {
- // i. Perform ? DeletePropertyOrThrow(O, ! ToString(k - 1)).
- DeleteProperty(context, o, ToString_Inline(context, k - 1), kStrict);
-
- // ii. Decrease k by 1.
- k = k - 1;
- }
- // 16. Else if itemCount > actualDeleteCount, then
- } else if (itemCount > actualDeleteCount) {
- // a. Let k be (len - actualDeleteCount).
- let k: Number = len - actualDeleteCount;
-
- // b. Repeat, while k > actualStart
- while (k > actualStart) {
- // i. Let from be ! ToString(k + actualDeleteCount - 1).
- let from: String = ToString_Inline(context, k + actualDeleteCount - 1);
-
- // ii. Let to be ! ToString(k + itemCount - 1).
- let to: String = ToString_Inline(context, k + itemCount - 1);
-
- // iii. Let fromPresent be ? HasProperty(O, from).
- let fromPresent: Oddball = HasProperty(context, o, from);
-
- // iv. If fromPresent is true, then
- if (fromPresent == True) {
- // 1. Let fromValue be ? Get(O, from).
- let fromValue: Object = GetProperty(context, o, from);
+ macro StoreArrayHole(elements: FixedDoubleArray, k: Smi): void {
+ StoreFixedDoubleArrayHoleSmi(elements, k);
+ }
- // 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(context, o, to, fromValue);
+ macro StoreArrayHole(elements: FixedArray, k: Smi): void {
+ elements[k] = Hole;
+ }
- // v. Else fromPresent is false,
- } else {
- // 1. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(context, o, to, kStrict);
- }
+ macro CopyArrayElement(
+ elements: FixedArray, newElements: FixedArray, from: Smi, to: Smi): void {
+ const e: Object = elements[from];
+ newElements[to] = e;
+ }
- // vi. Decrease k by 1.
- k = k - 1;
- }
+ macro CopyArrayElement(
+ elements: FixedDoubleArray, newElements: FixedDoubleArray, from: Smi,
+ to: Smi): void {
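+    // In a FixedDoubleArray the hole is a distinguished NaN bit pattern, so
+    // it must be detected on load and re-stored as a hole rather than copied
+    // through as an ordinary float64.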
+ try {
+ const floatValue: float64 = LoadDoubleWithHoleCheck(elements, from)
+ otherwise FoundHole;
+ newElements[to] = floatValue;
}
-
- // 17. Let k be actualStart.
- k = actualStart;
-
- // 18. Repeat, while items is not empty
- // a. Remove the first element from items and let E be the value of that
- // element.
- if (arguments.length > 2) {
- for (let e: Object of arguments [2: ]) {
- // b. Perform ? Set(O, ! ToString(k), E, true).
- SetProperty(context, o, ToString_Inline(context, k), e);
-
- // c. Increase k by 1.
- k = k + 1;
- }
+ label FoundHole {
+ StoreArrayHole(newElements, to);
}
-
- // 19. Perform ? Set(O, "length", len - actualDeleteCount + itemCount,
- // true).
- SetProperty(context, o, 'length', len - actualDeleteCount + itemCount);
-
- return a;
}
}
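
EnsureWriteableFastElements is the crux of this refactor: arrays created from the same literal can share one copy-on-write FixedArray, so every in-place fast path must first swap a COW backing store for a private copy. A minimal TypeScript sketch of that discipline follows; Backing and ensureWriteable are hypothetical stand-ins, with a boolean cow flag in place of the real comparison of the elements map against kCOWMap:

    // Copy-on-write discipline behind EnsureWriteableFastElements.
    interface Backing<T> { cow: boolean; data: T[]; }

    function ensureWriteable<T>(arr: { elements: Backing<T> }): void {
      if (!arr.elements.cow) return;  // already a private backing store
      // Extract a same-length private copy; only then may callers write.
      arr.elements = { cow: false, data: arr.elements.data.slice() };
    }

    // Two arrays sharing one COW store: the writer copies first, so the
    // write cannot leak into the other array.
    const shared: Backing<number> = { cow: true, data: [1, 2, 3] };
    const a = { elements: shared };
    const b = { elements: shared };
    ensureWriteable(a);
    a.elements.data[0] = 99;
    // b.elements.data[0] is still 1.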
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 1b9d577f10..20c4f4b9e0 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -26,6 +26,7 @@ type AbstractCode extends HeapObject generates 'TNode<AbstractCode>';
type Code extends AbstractCode generates 'TNode<Code>';
type JSReceiver extends HeapObject generates 'TNode<JSReceiver>';
type Context extends HeapObject generates 'TNode<Context>';
+type NativeContext extends Context generates 'TNode<Context>';
type String extends HeapObject generates 'TNode<String>';
type Oddball extends HeapObject generates 'TNode<Oddball>';
type HeapNumber extends HeapObject generates 'TNode<HeapNumber>';
@@ -35,34 +36,56 @@ type Numeric = Number|BigInt;
type Boolean extends Oddball generates 'TNode<Oddball>';
type JSProxy extends JSReceiver generates 'TNode<JSProxy>';
type JSObject extends JSReceiver generates 'TNode<JSObject>';
-type JSArray extends JSObject generates 'TNode<JSArray>';
+type JSArgumentsObjectWithLength extends JSObject
+ generates 'TNode<JSArgumentsObjectWithLength>';
+type JSArray extends JSArgumentsObjectWithLength
+ generates 'TNode<JSArray>';
type JSFunction extends JSObject generates 'TNode<JSFunction>';
type JSBoundFunction extends JSObject generates 'TNode<JSBoundFunction>';
type Callable = JSFunction|JSBoundFunction|JSProxy;
type Map extends HeapObject generates 'TNode<Map>';
type FixedArrayBase extends HeapObject generates 'TNode<FixedArrayBase>';
type FixedArray extends FixedArrayBase generates 'TNode<FixedArray>';
-type FixedDoubleArray extends FixedArrayBase generates
-'TNode<FixedDoubleArray>';
-type FixedTypedArrayBase extends FixedArrayBase generates
-'TNode<FixedTypedArrayBase>';
-type FixedTypedArray extends FixedTypedArrayBase generates
-'TNode<FixedTypedArray>';
-type NumberDictionary extends HeapObject generates 'TNode<NumberDictionary>';
+type FixedDoubleArray extends FixedArrayBase
+ generates 'TNode<FixedDoubleArray>';
+type FixedTypedArrayBase extends FixedArrayBase
+ generates 'TNode<FixedTypedArrayBase>';
+type FixedTypedArray extends FixedTypedArrayBase
+ generates 'TNode<FixedTypedArray>';
+type NumberDictionary extends HeapObject
+ generates 'TNode<NumberDictionary>';
+
+type NativeContextSlot generates 'TNode<IntPtrT>' constexpr 'int32_t';
+const FAST_ALIASED_ARGUMENTS_MAP_INDEX: constexpr NativeContextSlot
+ generates 'Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX';
+const SLOW_ALIASED_ARGUMENTS_MAP_INDEX: constexpr NativeContextSlot
+ generates 'Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX';
+const STRICT_ARGUMENTS_MAP_INDEX: constexpr NativeContextSlot
+ generates 'Context::STRICT_ARGUMENTS_MAP_INDEX';
+const SLOPPY_ARGUMENTS_MAP_INDEX: constexpr NativeContextSlot
+ generates 'Context::SLOPPY_ARGUMENTS_MAP_INDEX';
+extern operator '[]' macro LoadContextElement(
+ NativeContext, NativeContextSlot): Object;
+
+extern operator '[]' macro LoadContextElement(Context, intptr): Object;
+extern operator '[]' macro LoadContextElement(Context, Smi): Object;
type JSArrayBuffer extends JSObject generates 'TNode<JSArrayBuffer>';
-type JSArrayBufferView extends JSObject generates 'TNode<JSArrayBufferView>';
-type JSTypedArray extends JSArrayBufferView generates 'TNode<JSTypedArray>';
+type JSArrayBufferView extends JSObject
+ generates 'TNode<JSArrayBufferView>';
+type JSTypedArray extends JSArrayBufferView
+ generates 'TNode<JSTypedArray>';
type JSDataView extends JSArrayBufferView generates 'TNode<JSDataView>';
type InstanceType generates 'TNode<Int32T>' constexpr 'InstanceType';
type ElementsKind generates 'TNode<Int32T>' constexpr 'ElementsKind';
type LanguageMode generates 'TNode<Smi>' constexpr 'LanguageMode';
-type ExtractFixedArrayFlags generates
-'TNode<Smi>' constexpr 'ExtractFixedArrayFlags';
+type ExtractFixedArrayFlags
+ generates 'TNode<Smi>' constexpr 'ExtractFixedArrayFlags';
type ParameterMode generates 'TNode<Int32T>' constexpr 'ParameterMode';
-type RootListIndex generates 'TNode<Int32T>' constexpr 'Heap::RootListIndex';
-type WriteBarrierMode generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
+type RootIndex generates 'TNode<Int32T>' constexpr 'RootIndex';
+type WriteBarrierMode
+ generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
type MessageTemplate constexpr 'MessageTemplate::Template';
@@ -70,30 +93,38 @@ type ToIntegerTruncationMode constexpr 'ToIntegerTruncationMode';
const NO_ELEMENTS: constexpr ElementsKind generates 'NO_ELEMENTS';
-const PACKED_SMI_ELEMENTS: constexpr ElementsKind generates
-'PACKED_SMI_ELEMENTS';
-const HOLEY_SMI_ELEMENTS: constexpr ElementsKind generates 'HOLEY_SMI_ELEMENTS';
-const PACKED_ELEMENTS: constexpr ElementsKind generates 'PACKED_ELEMENTS';
+const PACKED_SMI_ELEMENTS:
+ constexpr ElementsKind generates 'PACKED_SMI_ELEMENTS';
+const HOLEY_SMI_ELEMENTS:
+ constexpr ElementsKind generates 'HOLEY_SMI_ELEMENTS';
+const PACKED_ELEMENTS:
+ constexpr ElementsKind generates 'PACKED_ELEMENTS';
const HOLEY_ELEMENTS: constexpr ElementsKind generates 'HOLEY_ELEMENTS';
-const PACKED_DOUBLE_ELEMENTS: constexpr ElementsKind generates
-'PACKED_DOUBLE_ELEMENTS';
-const HOLEY_DOUBLE_ELEMENTS: constexpr ElementsKind generates
-'HOLEY_DOUBLE_ELEMENTS';
-const DICTIONARY_ELEMENTS: constexpr ElementsKind generates
-'DICTIONARY_ELEMENTS';
+const PACKED_DOUBLE_ELEMENTS:
+ constexpr ElementsKind generates 'PACKED_DOUBLE_ELEMENTS';
+const HOLEY_DOUBLE_ELEMENTS:
+ constexpr ElementsKind generates 'HOLEY_DOUBLE_ELEMENTS';
+const DICTIONARY_ELEMENTS:
+ constexpr ElementsKind generates 'DICTIONARY_ELEMENTS';
const UINT8_ELEMENTS: constexpr ElementsKind generates 'UINT8_ELEMENTS';
const INT8_ELEMENTS: constexpr ElementsKind generates 'INT8_ELEMENTS';
-const UINT16_ELEMENTS: constexpr ElementsKind generates 'UINT16_ELEMENTS';
+const UINT16_ELEMENTS:
+ constexpr ElementsKind generates 'UINT16_ELEMENTS';
const INT16_ELEMENTS: constexpr ElementsKind generates 'INT16_ELEMENTS';
-const UINT32_ELEMENTS: constexpr ElementsKind generates 'UINT32_ELEMENTS';
+const UINT32_ELEMENTS:
+ constexpr ElementsKind generates 'UINT32_ELEMENTS';
const INT32_ELEMENTS: constexpr ElementsKind generates 'INT32_ELEMENTS';
-const FLOAT32_ELEMENTS: constexpr ElementsKind generates 'FLOAT32_ELEMENTS';
-const FLOAT64_ELEMENTS: constexpr ElementsKind generates 'FLOAT64_ELEMENTS';
-const UINT8_CLAMPED_ELEMENTS: constexpr ElementsKind generates
-'UINT8_CLAMPED_ELEMENTS';
-const BIGUINT64_ELEMENTS: constexpr ElementsKind generates 'BIGUINT64_ELEMENTS';
-const BIGINT64_ELEMENTS: constexpr ElementsKind generates 'BIGINT64_ELEMENTS';
+const FLOAT32_ELEMENTS:
+ constexpr ElementsKind generates 'FLOAT32_ELEMENTS';
+const FLOAT64_ELEMENTS:
+ constexpr ElementsKind generates 'FLOAT64_ELEMENTS';
+const UINT8_CLAMPED_ELEMENTS:
+ constexpr ElementsKind generates 'UINT8_CLAMPED_ELEMENTS';
+const BIGUINT64_ELEMENTS:
+ constexpr ElementsKind generates 'BIGUINT64_ELEMENTS';
+const BIGINT64_ELEMENTS:
+ constexpr ElementsKind generates 'BIGINT64_ELEMENTS';
type FixedUint8Array extends FixedTypedArray;
type FixedInt8Array extends FixedTypedArray;
@@ -107,40 +138,42 @@ type FixedUint8ClampedArray extends FixedTypedArray;
type FixedBigUint64Array extends FixedTypedArray;
type FixedBigInt64Array extends FixedTypedArray;
-const kAllFixedArrays: constexpr ExtractFixedArrayFlags generates
-'ExtractFixedArrayFlag::kAllFixedArrays';
-const kFixedArrays: constexpr ExtractFixedArrayFlags generates
-'ExtractFixedArrayFlag::kFixedArrays';
-
-const kFixedCOWArrayMapRootIndex: constexpr RootListIndex generates
-'Heap::kFixedCOWArrayMapRootIndex';
-const kEmptyFixedArrayRootIndex: constexpr RootListIndex generates
-'Heap::kEmptyFixedArrayRootIndex';
-
-const kInvalidArrayLength: constexpr MessageTemplate generates
-'MessageTemplate::kInvalidArrayLength';
-const kCalledNonCallable: constexpr MessageTemplate generates
-'MessageTemplate::kCalledNonCallable';
-const kCalledOnNullOrUndefined: constexpr MessageTemplate generates
-'MessageTemplate::kCalledOnNullOrUndefined';
+const kFixedDoubleArrays: constexpr ExtractFixedArrayFlags
+ generates 'ExtractFixedArrayFlag::kFixedDoubleArrays';
+const kAllFixedArrays: constexpr ExtractFixedArrayFlags
+ generates 'ExtractFixedArrayFlag::kAllFixedArrays';
+const kFixedArrays: constexpr ExtractFixedArrayFlags
+ generates 'ExtractFixedArrayFlag::kFixedArrays';
+
+const kFixedCOWArrayMapRootIndex:
+ constexpr RootIndex generates 'RootIndex::kFixedCOWArrayMap';
+const kEmptyFixedArrayRootIndex:
+ constexpr RootIndex generates 'RootIndex::kEmptyFixedArray';
+
+const kInvalidArrayLength: constexpr MessageTemplate
+ generates 'MessageTemplate::kInvalidArrayLength';
+const kCalledNonCallable: constexpr MessageTemplate
+ generates 'MessageTemplate::kCalledNonCallable';
+const kCalledOnNullOrUndefined: constexpr MessageTemplate
+ generates 'MessageTemplate::kCalledOnNullOrUndefined';
const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger';
-const kTruncateMinusZero: constexpr ToIntegerTruncationMode generates
-'ToIntegerTruncationMode::kTruncateMinusZero';
-
-const kNotTypedArray: constexpr MessageTemplate generates
-'MessageTemplate::kNotTypedArray';
-const kDetachedOperation: constexpr MessageTemplate generates
-'MessageTemplate::kDetachedOperation';
-const kBadSortComparisonFunction: constexpr MessageTemplate generates
-'MessageTemplate::kBadSortComparisonFunction';
-const kIncompatibleMethodReceiver: constexpr MessageTemplate generates
-'MessageTemplate::kIncompatibleMethodReceiver';
-const kInvalidDataViewAccessorOffset: constexpr MessageTemplate generates
-'MessageTemplate::kInvalidDataViewAccessorOffset';
-const kStrictReadOnlyProperty: constexpr MessageTemplate generates
-'MessageTemplate::kStrictReadOnlyProperty';
+const kTruncateMinusZero: constexpr ToIntegerTruncationMode
+ generates 'ToIntegerTruncationMode::kTruncateMinusZero';
+
+const kNotTypedArray: constexpr MessageTemplate
+ generates 'MessageTemplate::kNotTypedArray';
+const kDetachedOperation: constexpr MessageTemplate
+ generates 'MessageTemplate::kDetachedOperation';
+const kBadSortComparisonFunction: constexpr MessageTemplate
+ generates 'MessageTemplate::kBadSortComparisonFunction';
+const kIncompatibleMethodReceiver: constexpr MessageTemplate
+ generates 'MessageTemplate::kIncompatibleMethodReceiver';
+const kInvalidDataViewAccessorOffset: constexpr MessageTemplate
+ generates 'MessageTemplate::kInvalidDataViewAccessorOffset';
+const kStrictReadOnlyProperty: constexpr MessageTemplate
+ generates 'MessageTemplate::kStrictReadOnlyProperty';
extern macro TheHoleConstant(): Oddball;
extern macro NullConstant(): Oddball;
@@ -149,12 +182,14 @@ extern macro TrueConstant(): Boolean;
extern macro FalseConstant(): Boolean;
extern macro Int32TrueConstant(): bool;
extern macro Int32FalseConstant(): bool;
+extern macro LengthStringConstant(): String;
const Hole: Oddball = TheHoleConstant();
const Null: Oddball = NullConstant();
const Undefined: Oddball = UndefinedConstant();
const True: Boolean = TrueConstant();
const False: Boolean = FalseConstant();
+const kLengthString: String = LengthStringConstant();
const true: constexpr bool generates 'true';
const false: constexpr bool generates 'false';
@@ -163,11 +198,11 @@ const kStrict: constexpr LanguageMode generates 'LanguageMode::kStrict';
const kSloppy: constexpr LanguageMode generates 'LanguageMode::kSloppy';
const SMI_PARAMETERS: constexpr ParameterMode generates 'SMI_PARAMETERS';
-const INTPTR_PARAMETERS: constexpr ParameterMode generates 'INTPTR_PARAMETERS';
-
+const INTPTR_PARAMETERS:
+ constexpr ParameterMode generates 'INTPTR_PARAMETERS';
-const SKIP_WRITE_BARRIER: constexpr WriteBarrierMode
- generates 'SKIP_WRITE_BARRIER';
+const SKIP_WRITE_BARRIER:
+ constexpr WriteBarrierMode generates 'SKIP_WRITE_BARRIER';
extern macro Is64(): constexpr bool;
@@ -175,6 +210,7 @@ extern macro SelectBooleanConstant(bool): Boolean;
extern macro Print(constexpr string);
extern macro Print(constexpr string, Object);
+extern macro Comment(constexpr string);
extern macro Print(Object);
extern macro DebugBreak();
extern macro ToInteger_Inline(Context, Object): Number;
@@ -187,14 +223,18 @@ extern macro GetProperty(Context, Object, Object): Object;
extern builtin SetProperty(Context, Object, Object, Object);
extern builtin DeleteProperty(Context, Object, Object, LanguageMode);
extern builtin HasProperty(Context, JSReceiver, Object): Boolean;
+extern macro HasProperty_Inline(Context, JSReceiver, Object): Boolean;
extern macro ThrowRangeError(Context, constexpr MessageTemplate): never;
extern macro ThrowTypeError(Context, constexpr MessageTemplate): never;
extern macro ThrowTypeError(Context, constexpr MessageTemplate, Object): never;
extern macro ThrowTypeError(
Context, constexpr MessageTemplate, Object, Object, Object): never;
-extern macro ArraySpeciesCreate(Context, Object, Number): Object;
-extern macro EnsureArrayPushable(Map): ElementsKind labels Bailout;
+extern macro ArraySpeciesCreate(Context, Object, Number): JSReceiver;
+extern macro InternalArrayCreate(Context, Number): JSArray;
+extern macro EnsureArrayPushable(Map): ElementsKind
+ labels Bailout;
+extern macro EnsureArrayLengthWritable(Map) labels Bailout;
extern builtin ToObject(Context, Object): JSReceiver;
extern macro ToObject_Inline(Context, Object): JSReceiver;
@@ -203,17 +243,19 @@ extern macro IsTheHole(Object): bool;
extern macro IsString(HeapObject): bool;
extern builtin ToString(Context, Object): String;
-extern runtime CreateDataProperty(Context, Object, Object, Object);
+extern runtime NormalizeElements(Context, JSObject);
+extern runtime TransitionElementsKindWithKind(Context, JSObject, Smi);
+extern runtime CreateDataProperty(Context, JSReceiver, Object, Object);
-extern macro LoadRoot(constexpr RootListIndex): Object;
-extern macro StoreRoot(constexpr RootListIndex, Object): Object;
-extern macro LoadAndUntagToWord32Root(constexpr RootListIndex): int32;
+extern macro LoadRoot(constexpr RootIndex): Object;
+extern macro StoreRoot(constexpr RootIndex, Object): Object;
+extern macro LoadAndUntagToWord32Root(constexpr RootIndex): int32;
extern runtime StringEqual(Context, String, String): Oddball;
extern builtin StringLessThan(Context, String, String): Boolean;
extern macro StrictEqual(Object, Object): Boolean;
-extern runtime SmiLexicographicCompare(Context, Object, Object): Number;
+extern macro SmiLexicographicCompare(Smi, Smi): Smi;
extern operator '<' macro Int32LessThan(int32, int32): bool;
extern operator '>' macro Int32GreaterThan(int32, int32): bool;
@@ -230,6 +272,10 @@ extern operator '>=' macro SmiGreaterThanOrEqual(Smi, Smi): bool;
extern operator '==' macro ElementsKindEqual(
constexpr ElementsKind, constexpr ElementsKind): constexpr bool;
extern operator '==' macro ElementsKindEqual(ElementsKind, ElementsKind): bool;
+operator '!=' macro ElementsKindNotEqual(
+ k1: ElementsKind, k2: ElementsKind): bool {
+ return !ElementsKindEqual(k1, k2);
+}
extern macro IsFastElementsKind(constexpr ElementsKind): constexpr bool;
extern macro IsDoubleElementsKind(constexpr ElementsKind): constexpr bool;
@@ -243,22 +289,25 @@ extern operator '<' macro IntPtrLessThan(intptr, intptr): bool;
extern operator '>' macro IntPtrGreaterThan(intptr, intptr): bool;
extern operator '<=' macro IntPtrLessThanOrEqual(intptr, intptr): bool;
extern operator '>=' macro IntPtrGreaterThanOrEqual(intptr, intptr): bool;
+extern operator '>' macro UintPtrGreaterThan(uintptr, uintptr): bool;
extern operator '>=' macro UintPtrGreaterThanOrEqual(uintptr, uintptr): bool;
extern operator '==' macro Float64Equal(float64, float64): bool;
extern operator '!=' macro Float64NotEqual(float64, float64): bool;
+extern operator '>' macro Float64GreaterThan(float64, float64): bool;
+
+extern operator '==' macro BranchIfNumberEqual(Number, Number): never
+ labels Taken, NotTaken;
+extern operator '<' macro BranchIfNumberLessThan(Number, Number): never
+ labels Taken, NotTaken;
+extern operator '<=' macro BranchIfNumberLessThanOrEqual(Number, Number): never
+ labels Taken, NotTaken;
-extern operator
-'<' macro BranchIfNumberLessThan(Number, Number): never labels Taken, NotTaken;
-extern operator
-'<=' macro BranchIfNumberLessThanOrEqual(Number, Number): never labels Taken,
- NotTaken;
-extern operator
-'>' macro BranchIfNumberGreaterThan(Number, Number): never labels Taken,
- NotTaken;
+extern operator '>' macro BranchIfNumberGreaterThan(Number, Number): never
+ labels Taken, NotTaken;
extern operator '>=' macro BranchIfNumberGreaterThanOrEqual(Number, Number):
- never labels Taken,
- NotTaken;
+ never
+ labels Taken, NotTaken;
extern operator '==' macro WordEqual(Object, Object): bool;
extern operator '!=' macro WordNotEqual(Object, Object): bool;
@@ -271,6 +320,7 @@ extern operator '>>>' macro SmiShr(Smi, constexpr int31): Smi;
extern operator '<<' macro SmiShl(Smi, constexpr int31): Smi;
extern operator '+' macro IntPtrAdd(intptr, intptr): intptr;
+extern operator '+' macro UintPtrAdd(uintptr, uintptr): uintptr;
extern operator '-' macro IntPtrSub(intptr, intptr): intptr;
extern operator '>>>' macro WordShr(uintptr, uintptr): uintptr;
extern operator '<<' macro WordShl(intptr, intptr): intptr;
@@ -296,14 +346,16 @@ extern operator '<<' macro Word32Shl(uint32, uint32): uint32;
extern operator '|' macro Word32Or(int32, int32): int32;
extern operator '|' macro Word32Or(uint32, uint32): uint32;
+extern operator '+' macro Float64Add(float64, float64): float64;
+
extern operator '+' macro NumberAdd(Number, Number): Number;
extern operator '-' macro NumberSub(Number, Number): Number;
extern macro NumberMin(Number, Number): Number;
extern macro NumberMax(Number, Number): Number;
-macro min(x: Number, y: Number): Number {
+macro Min(x: Number, y: Number): Number {
return NumberMin(x, y);
}
-macro max(x: Number, y: Number): Number {
+macro Max(x: Number, y: Number): Number {
return NumberMax(x, y);
}
@@ -316,68 +368,89 @@ extern operator '!' macro IsFalse(Boolean): bool;
extern operator '.map' macro LoadMap(HeapObject): Map;
extern operator '.map=' macro StoreMap(HeapObject, Map);
-extern operator
-'.instanceType' macro LoadInstanceType(HeapObject): InstanceType;
+extern operator '.instanceType' macro LoadInstanceType(HeapObject):
+ InstanceType;
extern operator '.length' macro LoadStringLengthAsWord(String): intptr;
extern operator '.length' macro GetArgumentsLength(constexpr Arguments): intptr;
-extern operator
-'[]' macro GetArgumentValue(constexpr Arguments, intptr): Object;
+extern operator '[]' macro GetArgumentValue(
+ constexpr Arguments, intptr): Object;
-extern operator 'is<Smi>' macro TaggedIsSmi(Object): bool;
-extern operator 'isnt<Smi>' macro TaggedIsNotSmi(Object): bool;
+extern macro TaggedIsSmi(Object): bool;
+extern macro TaggedIsNotSmi(Object): bool;
extern macro TaggedIsPositiveSmi(Object): bool;
-extern macro HeapObjectToJSDataView(HeapObject): JSDataView labels CastError;
-extern macro TaggedToHeapObject(Object): HeapObject labels CastError;
-extern macro TaggedToSmi(Object): Smi labels CastError;
-extern macro HeapObjectToJSArray(HeapObject): JSArray labels CastError;
-extern macro HeapObjectToCallable(HeapObject): Callable labels CastError;
-extern macro HeapObjectToFixedArray(HeapObject):
- FixedArray labels CastError;
-extern macro HeapObjectToFixedDoubleArray(HeapObject):
- FixedDoubleArray labels CastError;
-extern macro TaggedToNumber(Object): Number labels CastError;
-
-macro cast_HeapObject<A : type>(o : HeapObject) : A labels CastError;
-cast_HeapObject<HeapObject>(o : HeapObject) : HeapObject labels CastError { return o; }
-cast_HeapObject<FixedArray>(o: HeapObject): FixedArray labels CastError {
+extern macro HeapObjectToJSDataView(HeapObject): JSDataView
+ labels CastError;
+extern macro TaggedToHeapObject(Object): HeapObject
+ labels CastError;
+extern macro TaggedToSmi(Object): Smi
+ labels CastError;
+extern macro HeapObjectToJSArray(HeapObject): JSArray
+ labels CastError;
+extern macro HeapObjectToCallable(HeapObject): Callable
+ labels CastError;
+extern macro HeapObjectToFixedArray(HeapObject): FixedArray
+ labels CastError;
+extern macro HeapObjectToFixedDoubleArray(HeapObject): FixedDoubleArray
+ labels CastError;
+extern macro TaggedToNumber(Object): Number
+ labels CastError;
+
+macro CastHeapObject<A: type>(o: HeapObject): A
+ labels CastError;
+CastHeapObject<HeapObject>(o: HeapObject): HeapObject
+ labels CastError {
+ return o;
+}
+CastHeapObject<FixedArray>(o: HeapObject): FixedArray
+ labels CastError {
return HeapObjectToFixedArray(o) otherwise CastError;
}
-cast_HeapObject<FixedDoubleArray>(o: HeapObject): FixedDoubleArray labels CastError {
+CastHeapObject<FixedDoubleArray>(o: HeapObject): FixedDoubleArray
+ labels CastError {
return HeapObjectToFixedDoubleArray(o) otherwise CastError;
}
-cast_HeapObject<JSDataView>(o: HeapObject): JSDataView labels CastError {
+CastHeapObject<JSDataView>(o: HeapObject): JSDataView
+ labels CastError {
return HeapObjectToJSDataView(o) otherwise CastError;
}
-cast_HeapObject<Callable>(o: HeapObject): Callable labels CastError {
+CastHeapObject<Callable>(o: HeapObject): Callable
+ labels CastError {
return HeapObjectToCallable(o) otherwise CastError;
}
-cast_HeapObject<JSArray>(o: HeapObject): JSArray labels CastError {
+CastHeapObject<JSArray>(o: HeapObject): JSArray
+ labels CastError {
return HeapObjectToJSArray(o) otherwise CastError;
}
-macro cast<A : type>(o: HeapObject): A labels CastError {
- return cast_HeapObject<A>(o) otherwise CastError;
+macro Cast<A: type>(o: HeapObject): A
+ labels CastError {
+ return CastHeapObject<A>(o) otherwise CastError;
}
-// cast_HeapObject allows this default-implementation to be non-recursive.
+// CastHeapObject allows this default-implementation to be non-recursive.
// Otherwise the generated CSA code might run into infinite recursion.
-macro cast<A : type>(o: Object): A labels CastError {
- return cast_HeapObject<A>(
- TaggedToHeapObject(o) otherwise CastError) otherwise CastError;
+macro Cast<A: type>(o: Object): A
+ labels CastError {
+ return CastHeapObject<A>(TaggedToHeapObject(o) otherwise CastError)
+ otherwise CastError;
}
-cast<Smi>(o: Object): Smi labels CastError {
+Cast<Smi>(o: Object): Smi
+ labels CastError {
return TaggedToSmi(o) otherwise CastError;
}
-cast<Number>(o: Object): Number labels CastError {
+Cast<Number>(o: Object): Number
+ labels CastError {
return TaggedToNumber(o) otherwise CastError;
}
extern macro AllocateHeapNumberWithValue(float64): HeapNumber;
extern macro ChangeInt32ToTagged(int32): Number;
extern macro ChangeUint32ToTagged(uint32): Number;
+extern macro ChangeUintPtrToFloat64(uintptr): float64;
+extern macro ChangeUintPtrToTagged(uintptr): Number;
extern macro Unsigned(int32): uint32;
extern macro Unsigned(intptr): uintptr;
extern macro Unsigned(RawPtr): uintptr;
@@ -396,6 +469,8 @@ extern macro ChangeNumberToFloat64(Number): float64;
extern macro ChangeFloat64ToUintPtr(float64): uintptr;
extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
+extern macro LoadNativeContext(Context): NativeContext;
+extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map;
extern macro NumberConstant(constexpr float64): Number;
extern macro NumberConstant(constexpr int32): Number;
@@ -409,137 +484,157 @@ extern macro BoolConstant(constexpr bool): bool;
extern macro StringConstant(constexpr string): String;
extern macro LanguageModeConstant(constexpr LanguageMode): LanguageMode;
extern macro Int32Constant(constexpr ElementsKind): ElementsKind;
+extern macro IntPtrConstant(constexpr NativeContextSlot): NativeContextSlot;
-macro from_constexpr<A : type>(o: constexpr int31): A;
-from_constexpr<intptr>(i: constexpr int31): intptr {
+macro FromConstexpr<A: type>(o: constexpr int31): A;
+FromConstexpr<intptr>(i: constexpr int31): intptr {
return IntPtrConstant(i);
}
-from_constexpr<int31>(i: constexpr int31): int31 {
+FromConstexpr<int31>(i: constexpr int31): int31 {
return Int32Constant(i);
}
-from_constexpr<int32>(i: constexpr int31): int32 {
+FromConstexpr<int32>(i: constexpr int31): int32 {
return Int32Constant(i);
}
-from_constexpr<uint32>(i: constexpr int31): uint32 {
+FromConstexpr<uint32>(i: constexpr int31): uint32 {
return Unsigned(Int32Constant(i));
}
-from_constexpr<uintptr>(i: constexpr int31): uintptr {
+FromConstexpr<uintptr>(i: constexpr int31): uintptr {
return ChangeUint32ToWord(i);
}
-from_constexpr<Smi>(i: constexpr int31): Smi {
+FromConstexpr<Smi>(i: constexpr int31): Smi {
return SmiConstant(i);
}
-from_constexpr<Number>(i: constexpr int31): Number {
+FromConstexpr<Number>(i: constexpr int31): Number {
return SmiConstant(i);
}
-from_constexpr<float64>(i: constexpr int31): float64 {
+FromConstexpr<float64>(i: constexpr int31): float64 {
return Float64Constant(i);
}
-macro from_constexpr<A : type>(o: constexpr int32): A;
-from_constexpr<intptr>(i: constexpr int32): intptr {
+macro FromConstexpr<A: type>(o: constexpr int32): A;
+FromConstexpr<intptr>(i: constexpr int32): intptr {
return IntPtrConstant(i);
}
-from_constexpr<int32>(i: constexpr int32): int32 {
+FromConstexpr<int32>(i: constexpr int32): int32 {
return Int32Constant(i);
}
-from_constexpr<Number>(i: constexpr int32): Number {
+FromConstexpr<Number>(i: constexpr int32): Number {
return NumberConstant(i);
}
-macro from_constexpr<A : type>(o: constexpr float64): A;
-from_constexpr<Number>(f: constexpr float64): Number {
+macro FromConstexpr<A: type>(o: constexpr float64): A;
+FromConstexpr<Number>(f: constexpr float64): Number {
return NumberConstant(f);
}
-macro from_constexpr<A : type>(b: constexpr bool): A;
-from_constexpr<bool>(b: constexpr bool): bool {
+macro FromConstexpr<A: type>(b: constexpr bool): A;
+FromConstexpr<bool>(b: constexpr bool): bool {
return BoolConstant(b);
}
-macro from_constexpr<A : type>(l: constexpr LanguageMode): A;
-from_constexpr<LanguageMode>(b: constexpr LanguageMode): LanguageMode {
+macro FromConstexpr<A: type>(l: constexpr LanguageMode): A;
+FromConstexpr<LanguageMode>(b: constexpr LanguageMode): LanguageMode {
return LanguageModeConstant(b);
}
-macro from_constexpr<A : type>(e: constexpr ElementsKind): A;
-from_constexpr<ElementsKind>(e: constexpr ElementsKind): ElementsKind {
+macro FromConstexpr<A: type>(e: constexpr ElementsKind): A;
+FromConstexpr<ElementsKind>(e: constexpr ElementsKind): ElementsKind {
return Int32Constant(e);
}
-macro from_constexpr<A : type>(s: constexpr string): A;
-from_constexpr<String>(s: constexpr string): String {
+macro FromConstexpr<A: type>(s: constexpr string): A;
+FromConstexpr<String>(s: constexpr string): String {
return StringConstant(s);
}
-from_constexpr<Object>(s: constexpr string): Object {
+FromConstexpr<Object>(s: constexpr string): Object {
return StringConstant(s);
}
+macro FromConstexpr<A: type>(e: constexpr NativeContextSlot): A;
+FromConstexpr<NativeContextSlot>(c: constexpr NativeContextSlot):
+ NativeContextSlot {
+ return IntPtrConstant(c);
+}
-macro convert<A : type>(i: constexpr int31): A {
+macro Convert<A: type>(i: constexpr int31): A {
return i;
}
-macro convert<A : type>(i: int32): A;
-convert<Number>(i: int32): Number {
+extern macro ConvertElementsKindToInt(ElementsKind): int32;
+
+macro Convert<A: type>(elementsKind: ElementsKind): A;
+Convert<int32>(elementsKind: ElementsKind): int32 {
+ return ConvertElementsKindToInt(elementsKind);
+}
+
+macro Convert<A: type>(i: int32): A;
+Convert<Number>(i: int32): Number {
return ChangeInt32ToTagged(i);
}
-convert<intptr>(i: int32): intptr {
+Convert<intptr>(i: int32): intptr {
return ChangeInt32ToIntPtr(i);
}
-convert<Smi>(i: int32): Smi {
+Convert<Smi>(i: int32): Smi {
return SmiFromInt32(i);
}
-macro convert<A : type>(ui: uint32): A;
-convert<Number>(ui: uint32): Number {
+macro Convert<A: type>(ui: uint32): A;
+Convert<Number>(ui: uint32): Number {
return ChangeUint32ToTagged(ui);
}
-convert<Smi>(ui: uint32): Smi {
+Convert<Smi>(ui: uint32): Smi {
return SmiFromInt32(Signed(ui));
}
-convert<uintptr>(ui: uint32): uintptr {
+Convert<uintptr>(ui: uint32): uintptr {
return ChangeUint32ToWord(ui);
}
-macro convert<A : type>(i: intptr): A;
-convert<int32>(i: intptr): int32 {
+macro Convert<A: type>(i: intptr): A;
+Convert<int32>(i: intptr): int32 {
return TruncateIntPtrToInt32(i);
}
-convert<Smi>(i: intptr): Smi {
+Convert<Smi>(i: intptr): Smi {
return SmiTag(i);
}
-macro convert<A : type>(ui: uintptr): A;
-convert<uint32>(ui: uintptr): uint32 {
+macro Convert<A: type>(ui: uintptr): A;
+Convert<uint32>(ui: uintptr): uint32 {
return Unsigned(TruncateIntPtrToInt32(Signed(ui)));
}
-macro convert<A : type>(s: Smi): A;
-convert<intptr>(s: Smi): intptr {
+macro Convert<A: type>(s: Smi): A;
+Convert<intptr>(s: Smi): intptr {
return SmiUntag(s);
}
-convert<int32>(s: Smi): int32 {
+Convert<int32>(s: Smi): int32 {
return SmiToInt32(s);
}
-macro convert<A : type>(h: HeapNumber): A;
-convert<float64>(h: HeapNumber): float64 {
+macro Convert<A: type>(h: HeapNumber): A;
+Convert<float64>(h: HeapNumber): float64 {
return LoadHeapNumberValue(h);
}
-macro convert<A : type>(n: Number): A;
-convert<float64>(n: Number): float64 {
+macro Convert<A: type>(n: Number): A;
+Convert<float64>(n: Number): float64 {
return ChangeNumberToFloat64(n);
}
-macro convert<A : type>(f: float32): A;
-convert<float64>(f: float32): float64 {
+macro Convert<A: type>(f: float32): A;
+Convert<float64>(f: float32): float64 {
return ChangeFloat32ToFloat64(f);
}
-macro convert<A : type>(d: float64): A;
-convert<Number>(d: float64): Number {
+macro Convert<A: type>(d: float64): A;
+Convert<Number>(d: float64): Number {
return AllocateHeapNumberWithValue(d);
}
-convert<uintptr>(d: float64): uintptr {
+Convert<float64>(ui: uintptr): float64 {
+ return ChangeUintPtrToFloat64(ui);
+}
+Convert<Number>(ui: uintptr): Number {
+ return ChangeUintPtrToTagged(ui);
+}
+Convert<uintptr>(d: float64): uintptr {
return ChangeFloat64ToUintPtr(d);
}
-macro convert<A : type>(r: RawPtr): A;
-convert<uintptr>(r: RawPtr): uintptr {
+macro Convert<A: type>(r: RawPtr): A;
+Convert<uintptr>(r: RawPtr): uintptr {
return Unsigned(r);
}
-convert<intptr>(r: RawPtr): intptr {
+Convert<intptr>(r: RawPtr): intptr {
return Signed(r);
}
extern macro UnsafeCastNumberToHeapNumber(Number): HeapNumber;
extern macro UnsafeCastObjectToFixedArrayBase(Object): FixedArrayBase;
extern macro UnsafeCastObjectToFixedArray(Object): FixedArray;
+extern macro UnsafeCastObjectToContext(Object): Context;
extern macro UnsafeCastObjectToFixedDoubleArray(Object): FixedDoubleArray;
extern macro UnsafeCastObjectToHeapNumber(Object): HeapNumber;
extern macro UnsafeCastObjectToCallable(Object): Callable;
@@ -553,61 +648,109 @@ extern macro UnsafeCastObjectToJSReceiver(Object): JSReceiver;
extern macro UnsafeCastObjectToJSObject(Object): JSObject;
extern macro UnsafeCastObjectToMap(Object): Map;
-macro unsafe_cast<A : type>(n: Number): A;
-unsafe_cast<HeapNumber>(n: Number): HeapNumber {
+macro UnsafeCast<A: type>(n: Number): A;
+UnsafeCast<HeapNumber>(n: Number): HeapNumber {
return UnsafeCastNumberToHeapNumber(n);
}
-macro unsafe_cast<A : type>(o: Object): A;
-unsafe_cast<FixedArray>(o: Object): FixedArray {
+macro UnsafeCast<A: type>(o: Object): A;
+UnsafeCast<Object>(o: Object): Object {
+ return o;
+}
+UnsafeCast<FixedArray>(o: Object): FixedArray {
return UnsafeCastObjectToFixedArray(o);
}
-unsafe_cast<FixedDoubleArray>(o: Object): FixedDoubleArray {
+UnsafeCast<FixedDoubleArray>(o: Object): FixedDoubleArray {
return UnsafeCastObjectToFixedDoubleArray(o);
}
-unsafe_cast<HeapNumber>(o: Object): HeapNumber {
+UnsafeCast<HeapNumber>(o: Object): HeapNumber {
return UnsafeCastObjectToHeapNumber(o);
}
-unsafe_cast<Callable>(o: Object): Callable {
+UnsafeCast<Callable>(o: Object): Callable {
return UnsafeCastObjectToCallable(o);
}
-unsafe_cast<Smi>(o: Object): Smi {
+UnsafeCast<Smi>(o: Object): Smi {
return UnsafeCastObjectToSmi(o);
}
-unsafe_cast<Number>(o: Object): Number {
+UnsafeCast<Number>(o: Object): Number {
return UnsafeCastObjectToNumber(o);
}
-unsafe_cast<HeapObject>(o: Object): HeapObject {
+UnsafeCast<HeapObject>(o: Object): HeapObject {
return UnsafeCastObjectToHeapObject(o);
}
-unsafe_cast<JSArray>(o: Object): JSArray {
+UnsafeCast<JSArray>(o: Object): JSArray {
return UnsafeCastObjectToJSArray(o);
}
-unsafe_cast<FixedTypedArrayBase>(o: Object): FixedTypedArrayBase {
+UnsafeCast<FixedTypedArrayBase>(o: Object): FixedTypedArrayBase {
return UnsafeCastObjectToFixedTypedArrayBase(o);
}
-unsafe_cast<NumberDictionary>(o: Object): NumberDictionary {
+UnsafeCast<NumberDictionary>(o: Object): NumberDictionary {
return UnsafeCastObjectToNumberDictionary(o);
}
-unsafe_cast<JSReceiver>(o: Object): JSReceiver {
+UnsafeCast<JSReceiver>(o: Object): JSReceiver {
return UnsafeCastObjectToJSReceiver(o);
}
-unsafe_cast<JSObject>(o: Object): JSObject {
+UnsafeCast<JSObject>(o: Object): JSObject {
return UnsafeCastObjectToJSObject(o);
}
-unsafe_cast<Map>(o: Object): Map {
+UnsafeCast<Map>(o: Object): Map {
return UnsafeCastObjectToMap(o);
}
-unsafe_cast<FixedArrayBase>(o: Object): FixedArrayBase {
+UnsafeCast<FixedArrayBase>(o: Object): FixedArrayBase {
return UnsafeCastObjectToFixedArrayBase(o);
}
+UnsafeCast<Context>(o: Object): Context {
+ return UnsafeCastObjectToContext(o);
+}
+
+// RawCasts should *never* be used anywhere in Torque code except in
+// Torque-based UnsafeCast operators that are preceded by an appropriate
+// type check.
+extern macro RawCastObjectToJSArgumentsObjectWithLength(Object):
+ JSArgumentsObjectWithLength;
+
+macro BranchIfJSArgumentsObjectWithLength(context: Context, o: Object): never
+ labels True, False {
+ const heapObject: HeapObject = Cast<HeapObject>(o) otherwise False;
+ const map: Map = heapObject.map;
+ const nativeContext: NativeContext = LoadNativeContext(context);
+ if (map == nativeContext[FAST_ALIASED_ARGUMENTS_MAP_INDEX]) goto True;
+ if (map == nativeContext[SLOW_ALIASED_ARGUMENTS_MAP_INDEX]) goto True;
+ if (map == nativeContext[STRICT_ARGUMENTS_MAP_INDEX]) goto True;
+ if (map != nativeContext[SLOPPY_ARGUMENTS_MAP_INDEX]) goto False;
+ goto True;
+}
+
+macro UnsafeCast<A: type>(context: Context, o: Object): A;
+UnsafeCast<JSArgumentsObjectWithLength>(
+ context: Context, o: Object): JSArgumentsObjectWithLength {
+ assert(BranchIfJSArgumentsObjectWithLength(context, o));
+ return RawCastObjectToJSArgumentsObjectWithLength(o);
+}
+
+macro Cast<A: type>(context: Context, o: Object): A
+ labels CastError;
+Cast<JSArgumentsObjectWithLength>(context: Context, o: Object):
+ JSArgumentsObjectWithLength
+ labels CastError {
+ if (BranchIfJSArgumentsObjectWithLength(context, o)) {
+ return UnsafeCast<JSArgumentsObjectWithLength>(context, o);
+ } else {
+ goto CastError;
+ }
+}
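The split above is deliberate: `Cast` performs the type check and reports failure through the `CastError` label, while `UnsafeCast` assumes the check has already happened and only guards it with a debug `assert`. A minimal TypeScript sketch of that contract (hypothetical helpers, not V8 API):

```ts
class CastError extends Error {}

// Rough stand-in for BranchIfJSArgumentsObjectWithLength's map checks.
function isArgumentsObject(o: unknown): o is IArguments {
  return Object.prototype.toString.call(o) === "[object Arguments]";
}

// Checked cast: failure goes back to the caller, like `otherwise CastError`.
function cast(o: unknown): IArguments {
  if (!isArgumentsObject(o)) throw new CastError();
  return o;
}

// Unchecked cast: the caller guarantees the check already ran; only a
// debug-mode assertion (omitted here) would catch a violation.
function unsafeCast(o: unknown): IArguments {
  return o as IArguments;
}
```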
-const kCOWMap: Map = unsafe_cast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
+const kCOWMap: Map = UnsafeCast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
const kEmptyFixedArray: FixedArrayBase =
- unsafe_cast<FixedArrayBase>(LoadRoot(kEmptyFixedArrayRootIndex));
+ UnsafeCast<FixedArrayBase>(LoadRoot(kEmptyFixedArrayRootIndex));
-extern macro BranchIfFastJSArray(Object, Context): never labels Taken, NotTaken;
-extern macro BranchIfNotFastJSArray(Object, Context): never labels Taken,
- NotTaken;
+extern macro BranchIfFastJSArray(Object, Context): never
+ labels Taken, NotTaken;
+extern macro BranchIfNotFastJSArray(Object, Context): never
+ labels Taken, NotTaken;
+
+macro EnsureFastJSArray(context: Context, object: Object) labels Bailout {
+ if (BranchIfNotFastJSArray(object, context)) goto Bailout;
+}
extern macro IsPrototypeInitialArrayPrototype(Context, Map): bool;
extern macro IsNoElementsProtectorCellInvalid(): bool;
@@ -615,38 +758,43 @@ extern macro IsArraySpeciesProtectorCellInvalid(): bool;
extern macro IsTypedArraySpeciesProtectorCellInvalid(): bool;
extern macro IsPromiseSpeciesProtectorCellInvalid(): bool;
-extern operator
-'.buffer' macro LoadTypedArrayBuffer(JSTypedArray): JSArrayBuffer;
+extern operator '.buffer' macro LoadTypedArrayBuffer(JSTypedArray):
+ JSArrayBuffer;
extern operator '.data_ptr' macro LoadDataPtr(JSTypedArray): RawPtr;
extern operator '.elements_kind' macro LoadMapElementsKind(Map): ElementsKind;
-extern operator
-'.elements_kind' macro LoadElementsKind(JSTypedArray): ElementsKind;
+extern operator '.elements_kind' macro LoadElementsKind(JSTypedArray):
+ ElementsKind;
extern operator '.elements' macro LoadElements(JSObject): FixedArrayBase;
extern operator '.elements=' macro StoreElements(JSObject, FixedArrayBase);
-extern operator '.length' macro LoadTypedArrayLength(JSTypedArray): Smi;
+extern operator '.length' macro LoadJSTypedArrayLength(JSTypedArray): Smi;
extern operator '.length' macro LoadJSArrayLength(JSArray): Number;
+extern operator '.length' macro LoadJSArgumentsObjectWithLength(
+ JSArgumentsObjectWithLength): Object;
extern operator '.length_fast' macro LoadFastJSArrayLength(JSArray): Smi;
extern operator '.length=' macro StoreJSArrayLength(JSArray, Smi);
extern operator '.length' macro LoadFixedArrayBaseLength(FixedArrayBase): Smi;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, intptr): Object;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, Smi): Object;
-extern operator
-'[]' macro LoadFixedArrayElement(FixedArray, constexpr int31): Object;
-extern operator
-'[]=' macro StoreFixedArrayElement(FixedArray, intptr, Object): void;
-extern operator
-'[]=' macro StoreFixedArrayElement(
+extern operator '[]' macro LoadFixedArrayElement(
+ FixedArray, constexpr int31): Object;
+extern operator '[]=' macro StoreFixedArrayElement(
+ FixedArray, intptr, Object): void;
+extern operator '[]=' macro StoreFixedArrayElement(
FixedArray, constexpr int31, Object): void;
-extern operator
-'[]=' macro StoreFixedArrayElementSmi(FixedArray, Smi, Object): void;
+extern operator '[]=' macro StoreFixedArrayElementSmi(
+ FixedArray, Smi, Object): void;
+operator '[]=' macro StoreFixedDoubleArrayNumber(
+ a: FixedDoubleArray, index: Smi, value: Number): void {
+ a[index] = Convert<float64>(value);
+}
-extern macro StoreFixedArrayElementSmi(FixedArray, Smi, Object,
- constexpr WriteBarrierMode): void;
+extern macro StoreFixedArrayElementSmi(
+ FixedArray, Smi, Object, constexpr WriteBarrierMode): void;
extern operator '.instance_type' macro LoadMapInstanceType(Map): int32;
@@ -660,9 +808,8 @@ macro StoreFixedDoubleArrayElementWithSmiIndex(
StoreFixedDoubleArrayElement(array, index, value, SMI_PARAMETERS);
}
-extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr):
- Object labels NotData,
- IfHole;
+extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): Object
+ labels NotData, IfHole;
extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, Object)
labels NotData, IfHole, ReadOnly;
@@ -672,6 +819,28 @@ extern macro IsFastSmiOrTaggedElementsKind(ElementsKind): bool;
extern macro IsFastSmiElementsKind(ElementsKind): bool;
extern macro IsHoleyFastElementsKind(ElementsKind): bool;
+macro AllowDoubleElements(kind: ElementsKind): ElementsKind {
+ if (kind == PACKED_SMI_ELEMENTS) {
+ return PACKED_DOUBLE_ELEMENTS;
+ } else if (kind == HOLEY_SMI_ELEMENTS) {
+ return HOLEY_DOUBLE_ELEMENTS;
+ }
+ return kind;
+}
+
+macro AllowNonNumberElements(kind: ElementsKind): ElementsKind {
+ if (kind == PACKED_SMI_ELEMENTS) {
+ return PACKED_ELEMENTS;
+ } else if (kind == HOLEY_SMI_ELEMENTS) {
+ return HOLEY_ELEMENTS;
+ } else if (kind == PACKED_DOUBLE_ELEMENTS) {
+ return PACKED_ELEMENTS;
+ } else if (kind == HOLEY_DOUBLE_ELEMENTS) {
+ return HOLEY_ELEMENTS;
+ }
+ return kind;
+}
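Both macros walk the ElementsKind lattice in one direction: SMI kinds generalize to DOUBLE kinds when a double is about to be stored, and all number-specific kinds generalize to the plain object kinds when a non-number is, with the packed/holey bit preserved either way. A sketch of that lattice (assumed enum values; the real constants live in V8's elements-kind definitions):

```ts
// Sketch only: mirrors AllowDoubleElements / AllowNonNumberElements above.
enum Kind { PackedSmi, HoleySmi, PackedDouble, HoleyDouble, Packed, Holey }

function allowDoubleElements(k: Kind): Kind {
  if (k === Kind.PackedSmi) return Kind.PackedDouble;
  if (k === Kind.HoleySmi) return Kind.HoleyDouble;
  return k;  // already general enough
}

function allowNonNumberElements(k: Kind): Kind {
  if (k === Kind.PackedSmi || k === Kind.PackedDouble) return Kind.Packed;
  if (k === Kind.HoleySmi || k === Kind.HoleyDouble) return Kind.Holey;
  return k;
}
```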
+
extern macro AllocateZeroedFixedArray(intptr): FixedArray;
extern macro AllocateZeroedFixedDoubleArray(intptr): FixedDoubleArray;
@@ -687,8 +856,12 @@ extern macro AllocateJSArray(constexpr ElementsKind, Map, Smi, Smi): JSArray;
extern macro IsElementsKindGreaterThan(
ElementsKind, constexpr ElementsKind): bool;
+extern operator '[]=' macro StoreFixedDoubleArrayElementSmi(
+ FixedDoubleArray, Smi, float64): void;
+
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64
-labels IfHole;
+ labels IfHole;
+extern macro StoreFixedDoubleArrayHoleSmi(FixedDoubleArray, Smi): void;
extern macro Call(Context, Callable, Object): Object;
extern macro Call(Context, Callable, Object, Object): Object;
@@ -699,18 +872,21 @@ extern macro Call(
extern macro Call(
Context, Callable, Object, Object, Object, Object, Object, Object): Object;
+extern macro ExtractFixedArray(FixedArrayBase, Smi, Smi, Smi): FixedArrayBase;
extern macro ExtractFixedArray(
- FixedArray, Smi, Smi, Smi, constexpr ExtractFixedArrayFlags): FixedArray;
+ FixedArrayBase, Smi, Smi, Smi,
+ constexpr ExtractFixedArrayFlags): FixedArrayBase;
extern builtin ExtractFastJSArray(Context, JSArray, Smi, Smi): JSArray;
-macro LoadElementNoHole<T : type>(a: JSArray, index: Smi): Object labels IfHole;
+macro LoadElementNoHole<T: type>(a: JSArray, index: Smi): Object
+ labels IfHole;
LoadElementNoHole<FixedArray>(a: JSArray, index: Smi): Object
-labels IfHole {
+ labels IfHole {
try {
let elements: FixedArray =
- cast<FixedArray>(a.elements) otherwise Unexpected;
+ Cast<FixedArray>(a.elements) otherwise Unexpected;
let e: Object = elements[index];
if (e == Hole) {
goto IfHole;
@@ -723,10 +899,10 @@ labels IfHole {
}
LoadElementNoHole<FixedDoubleArray>(a: JSArray, index: Smi): Object
-labels IfHole {
+ labels IfHole {
try {
let elements: FixedDoubleArray =
- cast<FixedDoubleArray>(a.elements) otherwise Unexpected;
+ Cast<FixedDoubleArray>(a.elements) otherwise Unexpected;
let e: float64 = LoadDoubleWithHoleCheck(elements, index) otherwise IfHole;
return AllocateHeapNumberWithValue(e);
}
@@ -735,29 +911,39 @@ labels IfHole {
}
}
+extern macro TransitionElementsKind(JSObject, Map, ElementsKind, ElementsKind):
+ void
+ labels Bailout;
+
extern macro IsCallable(HeapObject): bool;
extern macro IsJSArray(HeapObject): bool;
+extern macro IsJSReceiver(HeapObject): bool;
extern macro TaggedIsCallable(Object): bool;
extern macro IsDetachedBuffer(JSArrayBuffer): bool;
extern macro IsHeapNumber(HeapObject): bool;
extern macro IsFixedArray(HeapObject): bool;
+extern macro IsNumber(Object): bool;
extern macro IsExtensibleMap(Map): bool;
extern macro IsCustomElementsReceiverInstanceType(int32): bool;
+extern macro IsFastJSArray(Object, Context): bool;
extern macro Typeof(Object): Object;
+extern macro LoadTargetFromFrame(): JSFunction;
// Return true iff number is NaN.
macro NumberIsNaN(number: Number): bool {
- typeswitch(number) {
- case (Smi) {
+ typeswitch (number) {
+ case (Smi): {
return false;
- } case (hn : HeapNumber) {
- let value: float64 = convert<float64>(hn);
+ }
+ case (hn: HeapNumber): {
+ let value: float64 = Convert<float64>(hn);
return value != value;
}
}
}
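The HeapNumber arm relies on NaN being the only IEEE-754 value that compares unequal to itself; Smis are small integers and can never be NaN, so that case returns false without loading anything. The same trick, observable from plain TypeScript:

```ts
// NaN is the only value for which x !== x holds.
function numberIsNaN(n: number): boolean {
  return n !== n;
}
console.log(numberIsNaN(0 / 0));     // true
console.log(numberIsNaN(Infinity));  // false
```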
-extern macro BranchIfToBooleanIsTrue(Object): never labels Taken, NotTaken;
+extern macro BranchIfToBooleanIsTrue(Object): never
+ labels Taken, NotTaken;
macro ToBoolean(obj: Object): bool {
if (BranchIfToBooleanIsTrue(obj)) {
@@ -767,7 +953,8 @@ macro ToBoolean(obj: Object): bool {
}
}
-macro ToIndex(input: Object, context: Context): Number labels RangeError {
+macro ToIndex(input: Object, context: Context): Number
+ labels RangeError {
if (input == Undefined) {
return 0;
}
@@ -779,3 +966,21 @@ macro ToIndex(input: Object, context: Context): Number labels RangeError {
return value;
}
+
+macro GetLengthProperty(context: Context, o: Object): Number {
+ try {
+ return (Cast<JSArray>(o) otherwise CheckArgs).length;
+ }
+ label CheckArgs {
+ const a: JSArgumentsObjectWithLength =
+ Cast<JSArgumentsObjectWithLength>(context, o) otherwise Slow;
+ const length: Object = a.length;
+ return Cast<Smi>(length) otherwise goto ToLength(length);
+ }
+ label Slow deferred {
+ goto ToLength(GetProperty(context, o, kLengthString));
+ }
+ label ToLength(length: Object) deferred {
+ return ToLength_Inline(context, length);
+ }
+}
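`GetLengthProperty` is a three-tier lookup: trust a JSArray's own `length`, use an arguments object's `length` if it is already a Smi, and otherwise fall back to a generic property load followed by ToLength clamping. A rough JS-level sketch of the same order (the first two tiers collapse outside the VM; ToLength per ES2018 7.1.15):

```ts
const MAX_LENGTH = Number.MAX_SAFE_INTEGER;  // 2**53 - 1

function getLengthProperty(o: any): number {
  if (Array.isArray(o)) return o.length;  // fast JSArray tier
  const n = Number(o.length);             // arguments / generic tier
  if (Number.isNaN(n) || n <= 0) return 0;
  return Math.min(Math.floor(n), MAX_LENGTH);  // ToLength clamp
}

console.log(getLengthProperty([1, 2, 3]));       // 3
console.log(getLengthProperty({ length: "2" })); // 2
```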
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index 95f5f2ebbd..8bc5c0b5ac 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -149,7 +149,7 @@ class RelocatableArguments : public BuiltinArguments, public Relocatable {
RelocatableArguments(Isolate* isolate, int length, Object** arguments)
: BuiltinArguments(length, arguments), Relocatable(isolate) {}
- virtual inline void IterateInstance(RootVisitor* v) {
+ inline void IterateInstance(RootVisitor* v) override {
if (length() == 0) return;
v->VisitRootPointers(Root::kRelocatable, nullptr, lowest_address(),
highest_address() + 1);
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index c82cef3919..0e22db2598 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -89,7 +89,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
Node* result = Allocate(size);
Comment("Initialize arguments object");
StoreMapNoWriteBarrier(result, map);
- Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+ Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray);
StoreObjectField(result, JSArray::kPropertiesOrHashOffset, empty_fixed_array);
Node* smi_arguments_count = ParameterToTagged(arguments_count, mode);
StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset,
@@ -99,7 +99,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
arguments = InnerAllocate(result, elements_offset);
StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset,
smi_arguments_count);
- Node* fixed_array_map = LoadRoot(Heap::kFixedArrayMapRootIndex);
+ Node* fixed_array_map = LoadRoot(RootIndex::kFixedArrayMap);
StoreMapNoWriteBarrier(arguments, fixed_array_map);
}
Node* parameter_map = nullptr;
@@ -110,7 +110,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
parameter_map);
Node* sloppy_elements_map =
- LoadRoot(Heap::kSloppyArgumentsElementsMapRootIndex);
+ LoadRoot(RootIndex::kSloppyArgumentsElementsMap);
StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map);
parameter_map_count = ParameterToTagged(parameter_map_count, mode);
StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset,
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index d61a516422..4aebe2e02b 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -212,7 +212,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
context(), original_array, length, method_name);
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
- CSA_ASSERT(this, SmiLessThanOrEqual(CAST(len_), LoadTypedArrayLength(a)));
+ CSA_ASSERT(this, SmiLessThanOrEqual(CAST(len_), LoadJSTypedArrayLength(a)));
fast_typed_array_target_ =
Word32Equal(LoadInstanceType(LoadElements(original_array)),
LoadInstanceType(LoadElements(a)));
@@ -530,10 +530,11 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
TNode<JSTypedArray> typed_array = CAST(receiver_);
o_ = typed_array;
- TNode<JSArrayBuffer> array_buffer = LoadArrayBufferViewBuffer(typed_array);
+ TNode<JSArrayBuffer> array_buffer =
+ LoadJSArrayBufferViewBuffer(typed_array);
ThrowIfArrayBufferIsDetached(context_, array_buffer, name_);
- len_ = LoadTypedArrayLength(typed_array);
+ len_ = LoadJSTypedArrayLength(typed_array);
Label throw_not_callable(this, Label::kDeferred);
Label distinguish_types(this);
@@ -964,8 +965,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
// 3) Check that the elements backing store isn't copy-on-write.
Node* elements = LoadElements(array_receiver);
- GotoIf(WordEqual(LoadMap(elements),
- LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
+ GotoIf(WordEqual(LoadMap(elements), LoadRoot(RootIndex::kFixedCOWArrayMap)),
&runtime);
Node* new_length = IntPtrSub(length, IntPtrConstant(1));
@@ -1378,8 +1378,8 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
Goto(&generic_length);
BIND(&load_arguments_length);
- Node* arguments_length =
- LoadObjectField(array_receiver, JSArgumentsObject::kLengthOffset);
+ Node* arguments_length = LoadObjectField(
+ array_receiver, JSArgumentsObjectWithLength::kLengthOffset);
GotoIf(TaggedIsNotSmi(arguments_length), &generic_length);
o = CAST(receiver);
len.Bind(arguments_length);
@@ -1524,19 +1524,23 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
{
TNode<JSArray> array_receiver = CAST(receiver);
CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
+
+ // 2) Ensure that the length is writable.
+ // This check needs to happen before the check for length zero.
+ // The spec requires a "SetProperty(array, 'length', 0)" call when
+ // the length is zero. This must throw an exception in the case of a
+ // read-only length.
+ EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);
+
Node* length =
LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements_tagged(this),
fast_elements_smi(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
- // 2) Ensure that the length is writable.
- EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);
-
// 3) Check that the elements backing store isn't copy-on-write.
Node* elements = LoadElements(array_receiver);
- GotoIf(WordEqual(LoadMap(elements),
- LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
+ GotoIf(WordEqual(LoadMap(elements), LoadRoot(RootIndex::kFixedCOWArrayMap)),
&runtime);
Node* new_length = IntPtrSub(length, IntPtrConstant(1));
@@ -1679,15 +1683,38 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* array = Parameter(Descriptor::kSource);
+ TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
- CSA_ASSERT(this, IsJSArray(array));
- CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
+ CSA_ASSERT(this,
+ Word32Or(Word32BinaryNot(IsHoleyFastElementsKind(
+ LoadMapElementsKind(LoadMap(array)))),
+ Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
ParameterMode mode = OptimalParameterMode();
Return(CloneFastJSArray(context, array, mode));
}
+// This builtin copies the backing store of fast arrays, while converting any
+// holes to undefined.
+// - If there are no holes in the source, its ElementsKind will be preserved. In
+// that case, this builtin should perform as fast as CloneFastJSArray. (In fact,
+// for fast packed arrays, the behavior is equivalent to CloneFastJSArray.)
+// - If there are holes in the source, the ElementsKind of the "copy" will be
+// PACKED_ELEMENTS (such that undefined can be stored).
+TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
+
+ CSA_ASSERT(this,
+ Word32Or(Word32BinaryNot(IsHoleyFastElementsKind(
+ LoadMapElementsKind(LoadMap(array)))),
+ Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
+
+ ParameterMode mode = OptimalParameterMode();
+ Return(CloneFastJSArray(context, array, mode, nullptr,
+ HoleConversionMode::kConvertToUndefined));
+}
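The packed/holey distinction the two clone builtins preserve or erase is observable from plain JavaScript: a hole is an absent property, while the converted copy stores a real `undefined`. For example (ordinary JS, not V8 internals):

```ts
const holey = [1, , 3];    // index 1 is a hole
const copy = [...holey];   // iteration converts holes to undefined
console.log(1 in holey);   // false - nothing stored there
console.log(1 in copy);    // true  - an actual undefined element
```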
+
TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
@@ -3582,6 +3609,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&if_hole);
{
GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
+ GotoIfNot(IsPrototypeInitialArrayPrototype(context, array_map),
+ &if_generic);
var_value.Bind(UndefinedConstant());
Goto(&allocate_entry_if_needed);
}
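The added prototype check is needed because a hole is not a stored `undefined`: reading it falls through to the prototype chain, so the iterator may only substitute `undefined` when the array still sits on the untouched initial Array.prototype. Plain JS shows why:

```ts
// With a patched prototype, iterating a holey array must observe the
// inherited element instead of undefined.
(Array.prototype as any)[1] = "from proto";
console.log([...[0, , 2]]);  // [0, "from proto", 2]
delete (Array.prototype as any)[1];
```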
@@ -3654,7 +3683,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
// [[ArrayIteratorNextIndex]] anymore, since a JSTypedArray's
// length cannot change anymore, so this {iterator} will never
// produce values again anyways.
- TNode<Smi> length = LoadTypedArrayLength(CAST(array));
+ TNode<Smi> length = LoadJSTypedArrayLength(CAST(array));
GotoIfNot(SmiBelow(CAST(index), length), &allocate_iterator_result);
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
SmiInc(CAST(index)));
@@ -3701,8 +3730,6 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
}
}
-namespace {
-
class ArrayFlattenAssembler : public CodeStubAssembler {
public:
explicit ArrayFlattenAssembler(compiler::CodeAssemblerState* state)
@@ -3843,8 +3870,6 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
}
};
-} // namespace
-
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
Node* const context = Parameter(Descriptor::kContext);
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index ceeee5f37d..1e9de3dbe3 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -23,34 +23,6 @@ namespace internal {
namespace {
-inline bool ClampedToInteger(Isolate* isolate, Object* object, int* out) {
- // This is an extended version of ECMA-262 7.1.11 handling signed values
- // Try to convert object to a number and clamp values to [kMinInt, kMaxInt]
- if (object->IsSmi()) {
- *out = Smi::ToInt(object);
- return true;
- } else if (object->IsHeapNumber()) {
- double value = HeapNumber::cast(object)->value();
- if (std::isnan(value)) {
- *out = 0;
- } else if (value > kMaxInt) {
- *out = kMaxInt;
- } else if (value < kMinInt) {
- *out = kMinInt;
- } else {
- *out = static_cast<int>(value);
- }
- return true;
- } else if (object->IsNullOrUndefined(isolate)) {
- *out = 0;
- return true;
- } else if (object->IsBoolean()) {
- *out = object->IsTrue(isolate);
- return true;
- }
- return false;
-}
-
inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
JSArray* receiver) {
return JSObject::PrototypeHasNoElements(isolate, receiver);
@@ -79,36 +51,20 @@ inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
return true;
}
-// Returns |false| if not applicable.
-// TODO(szuend): Refactor this function because it is getting hard to
-// understand what each call-site actually checks.
-V8_WARN_UNUSED_RESULT
-inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
- Handle<Object> receiver,
- BuiltinArguments* args,
- int first_arg_index,
- int num_arguments) {
- if (!receiver->IsJSArray()) return false;
- Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- ElementsKind origin_kind = array->GetElementsKind();
- if (IsDictionaryElementsKind(origin_kind)) return false;
- if (!array->map()->is_extensible()) return false;
- if (args == nullptr) return true;
-
- // If there may be elements accessors in the prototype chain, the fast path
- // cannot be used if there arguments to add to the array.
- if (!IsJSArrayFastElementMovingAllowed(isolate, *array)) return false;
+// This method may transition the elements kind of the JSArray once, to make
+// sure that all elements provided as arguments in the specified range can be
+// added without further elements kind transitions.
+void MatchArrayElementsKindToArguments(Isolate* isolate, Handle<JSArray> array,
+ BuiltinArguments* args,
+ int first_arg_index, int num_arguments) {
+ int args_length = args->length();
+ if (first_arg_index >= args_length) return;
- // Adding elements to the array prototype would break code that makes sure
- // it has no elements. Handle that elsewhere.
- if (isolate->IsAnyInitialArrayPrototype(array)) return false;
+ ElementsKind origin_kind = array->GetElementsKind();
- // Need to ensure that the arguments passed in args can be contained in
- // the array.
- int args_length = args->length();
- if (first_arg_index >= args_length) return true;
+ // We do not need to transition for PACKED/HOLEY_ELEMENTS.
+ if (IsObjectElementsKind(origin_kind)) return;
- if (IsObjectElementsKind(origin_kind)) return true;
ElementsKind target_kind = origin_kind;
{
DisallowHeapAllocation no_gc;
@@ -131,20 +87,37 @@ inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
HandleScope scope(isolate);
JSObject::TransitionElementsKind(array, target_kind);
}
- return true;
}
-V8_WARN_UNUSED_RESULT static Object* CallJsIntrinsic(
- Isolate* isolate, Handle<JSFunction> function, BuiltinArguments args) {
- HandleScope handleScope(isolate);
- int argc = args.length() - 1;
- ScopedVector<Handle<Object>> argv(argc);
- for (int i = 0; i < argc; ++i) {
- argv[i] = args.at(i + 1);
- }
- RETURN_RESULT_OR_FAILURE(
- isolate,
- Execution::Call(isolate, function, args.receiver(), argc, argv.start()));
+// Returns |false| if not applicable.
+// TODO(szuend): Refactor this function because it is getting hard to
+// understand what each call-site actually checks.
+V8_WARN_UNUSED_RESULT
+inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
+ Handle<Object> receiver,
+ BuiltinArguments* args,
+ int first_arg_index,
+ int num_arguments) {
+ if (!receiver->IsJSArray()) return false;
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ ElementsKind origin_kind = array->GetElementsKind();
+ if (IsDictionaryElementsKind(origin_kind)) return false;
+ if (!array->map()->is_extensible()) return false;
+ if (args == nullptr) return true;
+
+ // If there may be elements accessors in the prototype chain, the fast path
+ // cannot be used if there are arguments to add to the array.
+ if (!IsJSArrayFastElementMovingAllowed(isolate, *array)) return false;
+
+ // Adding elements to the array prototype would break code that makes sure
+ // it has no elements. Handle that elsewhere.
+ if (isolate->IsAnyInitialArrayPrototype(array)) return false;
+
+ // Need to ensure that the arguments passed in args can be contained in
+ // the array.
+ MatchArrayElementsKindToArguments(isolate, array, args, first_arg_index,
+ num_arguments);
+ return true;
}
// If |index| is Undefined, returns init_if_undefined.
@@ -189,6 +162,24 @@ V8_WARN_UNUSED_RESULT Maybe<double> GetLengthProperty(
return Just(raw_length_number->Number());
}
+// Set "length" property, has "fast-path" for JSArrays.
+// Returns Nothing if something went wrong.
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> SetLengthProperty(
+ Isolate* isolate, Handle<JSReceiver> receiver, double length) {
+ if (receiver->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ if (!JSArray::HasReadOnlyLength(array)) {
+ DCHECK_LE(length, kMaxUInt32);
+ JSArray::SetLength(array, static_cast<uint32_t>(length));
+ return receiver;
+ }
+ }
+
+ return Object::SetProperty(
+ isolate, receiver, isolate->factory()->length_string(),
+ isolate->factory()->NewNumber(length), LanguageMode::kStrict);
+}
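The fast path matters because `JSArray::SetLength` can truncate in place, which is also the user-visible behavior:

```ts
// Assigning a smaller length drops trailing elements immediately.
const a = [1, 2, 3, 4];
a.length = 2;
console.log(a);  // [1, 2]
```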
+
V8_WARN_UNUSED_RESULT Object* GenericArrayFill(Isolate* isolate,
Handle<JSReceiver> receiver,
Handle<Object> value,
@@ -350,7 +341,7 @@ V8_WARN_UNUSED_RESULT Object* GenericArrayPush(Isolate* isolate,
// Must succeed since we always pass a valid key.
DCHECK(success);
MAYBE_RETURN(Object::SetProperty(&it, element, LanguageMode::kStrict,
- Object::MAY_BE_STORE_FROM_KEYED),
+ StoreOrigin::kMaybeKeyed),
ReadOnlyRoots(isolate).exception());
}
@@ -485,110 +476,141 @@ BUILTIN(ArrayPop) {
return *result;
}
-BUILTIN(ArrayShift) {
- HandleScope scope(isolate);
- Heap* heap = isolate->heap();
- Handle<Object> receiver = args.receiver();
+namespace {
+
+// Returns true iff we can use the ElementsAccessor for shifting.
+V8_WARN_UNUSED_RESULT bool CanUseFastArrayShift(Isolate* isolate,
+ Handle<JSReceiver> receiver) {
if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0,
0) ||
!IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
- return CallJsIntrinsic(isolate, isolate->array_shift(), args);
+ return false;
}
+
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ return !JSArray::HasReadOnlyLength(array);
+}
- int len = Smi::ToInt(array->length());
- if (len == 0) return ReadOnlyRoots(heap).undefined_value();
+V8_WARN_UNUSED_RESULT Object* GenericArrayShift(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ double length) {
+ // 4. Let first be ? Get(O, "0").
+ Handle<Object> first;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, first,
+ Object::GetElement(isolate, receiver, 0));
+
+ // 5. Let k be 1.
+ double k = 1;
+
+ // 6. Repeat, while k < len.
+ while (k < length) {
+ // a. Let from be ! ToString(k).
+ Handle<String> from =
+ isolate->factory()->NumberToString(isolate->factory()->NewNumber(k));
+
+ // b. Let to be ! ToString(k-1).
+ Handle<String> to = isolate->factory()->NumberToString(
+ isolate->factory()->NewNumber(k - 1));
+
+ // c. Let fromPresent be ? HasProperty(O, from).
+ bool from_present;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, from_present, JSReceiver::HasProperty(receiver, from));
+
+ // d. If fromPresent is true, then.
+ if (from_present) {
+ // i. Let fromVal be ? Get(O, from).
+ Handle<Object> from_val;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, from_val,
+ Object::GetPropertyOrElement(isolate, receiver, from));
+
+ // ii. Perform ? Set(O, to, fromVal, true).
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, Object::SetPropertyOrElement(isolate, receiver, to, from_val,
+ LanguageMode::kStrict));
+ } else { // e. Else fromPresent is false,
+ // i. Perform ? DeletePropertyOrThrow(O, to).
+ MAYBE_RETURN(JSReceiver::DeletePropertyOrElement(receiver, to,
+ LanguageMode::kStrict),
+ ReadOnlyRoots(isolate).exception());
+ }
- if (JSArray::HasReadOnlyLength(array)) {
- return CallJsIntrinsic(isolate, isolate->array_shift(), args);
+ // f. Increase k by 1.
+ ++k;
}
- Handle<Object> first = array->GetElementsAccessor()->Shift(array);
+ // 7. Perform ? DeletePropertyOrThrow(O, ! ToString(len-1)).
+ Handle<String> new_length = isolate->factory()->NumberToString(
+ isolate->factory()->NewNumber(length - 1));
+ MAYBE_RETURN(JSReceiver::DeletePropertyOrElement(receiver, new_length,
+ LanguageMode::kStrict),
+ ReadOnlyRoots(isolate).exception());
+
+ // 8. Perform ? Set(O, "length", len-1, true).
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ SetLengthProperty(isolate, receiver, length - 1));
+
+ // 9. Return first.
return *first;
}
+} // namespace
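`GenericArrayShift` transcribes ES2018 22.1.3.21 steps 4-9 for arbitrary receivers. A hedged JS sketch of the same loop (plain objects instead of Handles, `in` standing in for HasProperty):

```ts
function genericShift(o: any, len: number): any {
  const first = o[0];                // 4. Let first be ? Get(O, "0").
  for (let k = 1; k < len; k++) {    // 5./6. walk k upward
    if (k in o) o[k - 1] = o[k];     //    move the element down one slot
    else delete o[k - 1];            //    or propagate the absence
  }
  delete o[len - 1];                 // 7. DeletePropertyOrThrow(len-1)
  o.length = len - 1;                // 8. Set "length"
  return first;                      // 9. Return first.
}

console.log(genericShift({ 0: "a", 1: "b", length: 2 }, 2));  // "a"
```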
-BUILTIN(ArrayUnshift) {
+BUILTIN(ArrayShift) {
HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1,
- args.length() - 1)) {
- return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
- }
- Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- int to_add = args.length() - 1;
- if (to_add == 0) return array->length();
- // Currently fixed arrays cannot grow too big, so we should never hit this.
- DCHECK_LE(to_add, Smi::kMaxValue - Smi::ToInt(array->length()));
+ // 1. Let O be ? ToObject(this value).
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, receiver, Object::ToObject(isolate, args.receiver()));
- if (JSArray::HasReadOnlyLength(array)) {
- return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ double length;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, length, GetLengthProperty(isolate, receiver));
+
+ // 3. If len is zero, then.
+ if (length == 0) {
+ // a. Perform ? Set(O, "length", 0, true).
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ SetLengthProperty(isolate, receiver, length));
+
+ // b. Return undefined.
+ return ReadOnlyRoots(isolate).undefined_value();
}
- ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = accessor->Unshift(array, &args, to_add);
- return Smi::FromInt(new_length);
+ if (CanUseFastArrayShift(isolate, receiver)) {
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ return *array->GetElementsAccessor()->Shift(array);
+ }
+
+ return GenericArrayShift(isolate, receiver, length);
}
-BUILTIN(ArraySplice) {
+BUILTIN(ArrayUnshift) {
HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- if (V8_UNLIKELY(
- !EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3,
- args.length() - 3) ||
- // If this is a subclass of Array, then call out to JS.
- !Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
- // If anything with @@species has been messed with, call out to JS.
- !isolate->IsArraySpeciesLookupChainIntact())) {
- return CallJsIntrinsic(isolate, isolate->array_splice(), args);
- }
- Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ DCHECK(args.receiver()->IsJSArray());
+ Handle<JSArray> array = Handle<JSArray>::cast(args.receiver());
- int argument_count = args.length() - 1;
- int relative_start = 0;
- if (argument_count > 0) {
- DisallowHeapAllocation no_gc;
- if (!ClampedToInteger(isolate, args[1], &relative_start)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_splice(), args);
- }
- }
- int len = Smi::ToInt(array->length());
- // clip relative start to [0, len]
- int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
- : Min(relative_start, len);
-
- int actual_delete_count;
- if (argument_count == 1) {
- // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
- // given as a request to delete all the elements from the start.
- // And it differs from the case of undefined delete count.
- // This does not follow ECMA-262, but we do the same for compatibility.
- DCHECK_GE(len - actual_start, 0);
- actual_delete_count = len - actual_start;
- } else {
- int delete_count = 0;
- DisallowHeapAllocation no_gc;
- if (argument_count > 1) {
- if (!ClampedToInteger(isolate, args[2], &delete_count)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_splice(), args);
- }
- }
- actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
- }
+ // These are checked in the Torque builtin.
+ DCHECK(array->map()->is_extensible());
+ DCHECK(!IsDictionaryElementsKind(array->GetElementsKind()));
+ DCHECK(IsJSArrayFastElementMovingAllowed(isolate, *array));
+ DCHECK(!isolate->IsAnyInitialArrayPrototype(array));
- int add_count = (argument_count > 1) ? (argument_count - 2) : 0;
- int new_length = len - actual_delete_count + add_count;
+ MatchArrayElementsKindToArguments(isolate, array, &args, 1,
+ args.length() - 1);
+
+ int to_add = args.length() - 1;
+ if (to_add == 0) return array->length();
+
+ // Currently fixed arrays cannot grow too big, so we should never hit this.
+ DCHECK_LE(to_add, Smi::kMaxValue - Smi::ToInt(array->length()));
+ DCHECK(!JSArray::HasReadOnlyLength(array));
- if (new_length != len && JSArray::HasReadOnlyLength(array)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_splice(), args);
- }
ElementsAccessor* accessor = array->GetElementsAccessor();
- Handle<JSArray> result_array = accessor->Splice(
- array, actual_start, actual_delete_count, &args, add_count);
- return *result_array;
+ int new_length = accessor->Unshift(array, &args, to_add);
+ return Smi::FromInt(new_length);
}
// Array Concat -------------------------------------------------------------
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 808c34e43b..a4de98eb97 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -31,10 +31,12 @@ Object* ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
Handle<JSReceiver> new_target, Handle<Object> length,
bool initialize) {
Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
size_t byte_length;
- if (!TryNumberToSize(*length, &byte_length)) {
+ if (!TryNumberToSize(*length, &byte_length) ||
+ byte_length > JSArrayBuffer::kMaxByteLength) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
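The new guard is observable from script: a byte length that converts to a size but exceeds the limit now takes the same RangeError path as a non-convertible one (the exact kMaxByteLength value is platform-dependent and assumed here):

```ts
try {
  new ArrayBuffer(2 ** 53);  // far beyond any supported byte length
} catch (e) {
  console.log(e instanceof RangeError);  // true
}
```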
@@ -98,7 +100,7 @@ BUILTIN(ArrayBufferPrototypeGetByteLength) {
CHECK_SHARED(false, array_buffer, kMethodName);
// TODO(franzih): According to the ES6 spec, we should throw a TypeError
// here if the JSArrayBuffer is detached.
- return array_buffer->byte_length();
+ return *isolate->factory()->NewNumberFromSize(array_buffer->byte_length());
}
// ES7 sharedmem 6.3.4.1 get SharedArrayBuffer.prototype.byteLength
@@ -108,7 +110,7 @@ BUILTIN(SharedArrayBufferPrototypeGetByteLength) {
CHECK_RECEIVER(JSArrayBuffer, array_buffer,
"get SharedArrayBuffer.prototype.byteLength");
CHECK_SHARED(true, array_buffer, kMethodName);
- return array_buffer->byte_length();
+ return *isolate->factory()->NewNumberFromSize(array_buffer->byte_length());
}
// ES6 section 24.1.3.1 ArrayBuffer.isView ( arg )
@@ -143,7 +145,7 @@ static Object* SliceHelper(BuiltinArguments args, Isolate* isolate,
// * [AB] Let len be O.[[ArrayBufferByteLength]].
// * [SAB] Let len be O.[[ArrayBufferByteLength]].
- double const len = array_buffer->byte_length()->Number();
+ double const len = array_buffer->byte_length();
// * Let relativeStart be ? ToInteger(start).
Handle<Object> relative_start;
@@ -242,7 +244,7 @@ static Object* SliceHelper(BuiltinArguments args, Isolate* isolate,
}
// * If new.[[ArrayBufferByteLength]] < newLen, throw a TypeError exception.
- if (new_array_buffer->byte_length()->Number() < new_len) {
+ if (new_array_buffer->byte_length() < new_len) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(is_shared ? MessageTemplate::kSharedArrayBufferTooShort
@@ -264,10 +266,10 @@ static Object* SliceHelper(BuiltinArguments args, Isolate* isolate,
size_t first_size = 0, new_len_size = 0;
CHECK(TryNumberToSize(*first_obj, &first_size));
CHECK(TryNumberToSize(*new_len_obj, &new_len_size));
- DCHECK(NumberToSize(new_array_buffer->byte_length()) >= new_len_size);
+ DCHECK(new_array_buffer->byte_length() >= new_len_size);
if (new_len_size != 0) {
- size_t from_byte_length = NumberToSize(array_buffer->byte_length());
+ size_t from_byte_length = array_buffer->byte_length();
USE(from_byte_length);
DCHECK(first_size <= from_byte_length);
DCHECK(from_byte_length - first_size >= new_len_size);
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index cf5e18f6a0..74d6077764 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -21,36 +21,19 @@ class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler {
void AsyncFunctionAwait(Node* const context, Node* const generator,
Node* const awaited, Node* const outer_promise,
const bool is_predicted_as_caught);
- void AsyncFunctionAwaitOptimized(Node* const context, Node* const generator,
- Node* const awaited,
- Node* const outer_promise,
- const bool is_predicted_as_caught);
void AsyncFunctionAwaitResumeClosure(
Node* const context, Node* const sent_value,
JSGeneratorObject::ResumeMode resume_mode);
};
-namespace {
-
-// Describe fields of Context associated with AsyncFunctionAwait resume
-// closures.
-// TODO(jgruber): Refactor to reuse code for upcoming async-generators.
-class AwaitContext {
- public:
- enum Fields { kGeneratorSlot = Context::MIN_CONTEXT_SLOTS, kLength };
-};
-
-} // anonymous namespace
-
void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
Node* context, Node* sent_value,
JSGeneratorObject::ResumeMode resume_mode) {
DCHECK(resume_mode == JSGeneratorObject::kNext ||
resume_mode == JSGeneratorObject::kThrow);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
+ Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
// Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
@@ -113,11 +96,6 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
// TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
// the awaited promise if it is already a promise. Reuse is non-spec compliant
// but part of our old behavior gives us a couple of percent
@@ -130,8 +108,8 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
Goto(&after_debug_hook);
BIND(&after_debug_hook);
- Await(context, generator, awaited, outer_promise, AwaitContext::kLength,
- init_closure_context, Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
+ Await(context, generator, awaited, outer_promise,
+ Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN,
is_predicted_as_caught);
@@ -144,43 +122,6 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
Goto(&after_debug_hook);
}
-void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitOptimized(
- Node* const context, Node* const generator, Node* const awaited,
- Node* const outer_promise, const bool is_predicted_as_caught) {
- CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
- CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
-
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
- // TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
- // the awaited promise if it is already a promise. Reuse is non-spec compliant
- // but part of our old behavior gives us a couple of percent
- // performance boost.
- // TODO(jgruber): Use a faster specialized version of
- // InternalPerformPromiseThen.
-
- Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred);
- GotoIf(HasAsyncEventDelegate(), &call_debug_hook);
- Goto(&after_debug_hook);
- BIND(&after_debug_hook);
-
- AwaitOptimized(
- context, generator, awaited, outer_promise, AwaitContext::kLength,
- init_closure_context, Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
- Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, is_predicted_as_caught);
-
- // Return outer promise to avoid adding an load of the outer promise before
- // suspending in BytecodeGenerator.
- Return(outer_promise);
-
- BIND(&call_debug_hook);
- CallRuntime(Runtime::kDebugAsyncFunctionSuspended, context, outer_promise);
- Goto(&after_debug_hook);
-}
-
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates that there is a locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
@@ -196,19 +137,6 @@ TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
kIsPredictedAsCaught);
}
-TF_BUILTIN(AsyncFunctionAwaitCaughtOptimized, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
- Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const awaited = Parameter(Descriptor::kAwaited);
- Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
- Node* const context = Parameter(Descriptor::kContext);
-
- static const bool kIsPredictedAsCaught = true;
-
- AsyncFunctionAwaitOptimized(context, generator, awaited, outer_promise,
- kIsPredictedAsCaught);
-}
-
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates no locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
@@ -224,20 +152,6 @@ TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
kIsPredictedAsCaught);
}
-TF_BUILTIN(AsyncFunctionAwaitUncaughtOptimized,
- AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
- Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const awaited = Parameter(Descriptor::kAwaited);
- Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
- Node* const context = Parameter(Descriptor::kContext);
-
- static const bool kIsPredictedAsCaught = false;
-
- AsyncFunctionAwaitOptimized(context, generator, awaited, outer_promise,
- kIsPredictedAsCaught);
-}
-
TF_BUILTIN(AsyncFunctionPromiseCreate, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 0);
Node* const context = Parameter(Descriptor::kContext);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 4568507a9f..dda781b1a8 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -23,16 +23,15 @@ class ValueUnwrapContext {
} // namespace
-Node* AsyncBuiltinsAssembler::Await(
- Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length, const ContextInitializer& init_closure_context,
- Node* on_resolve_context_index, Node* on_reject_context_index,
- Node* is_predicted_as_caught) {
- DCHECK_GE(context_length, Context::MIN_CONTEXT_SLOTS);
-
+Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
+ Node* value, Node* outer_promise,
+ Node* on_resolve_context_index,
+ Node* on_reject_context_index,
+ Node* is_predicted_as_caught) {
Node* const native_context = LoadNativeContext(context);
- static const int kWrappedPromiseOffset = FixedArray::SizeFor(context_length);
+ static const int kWrappedPromiseOffset =
+ FixedArray::SizeFor(Context::MIN_CONTEXT_SLOTS);
static const int kThrowawayPromiseOffset =
kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
static const int kResolveClosureOffset =
@@ -45,9 +44,20 @@ Node* AsyncBuiltinsAssembler::Await(
Node* const base = AllocateInNewSpace(kTotalSize);
Node* const closure_context = base;
{
- // Initialize closure context
- InitializeFunctionContext(native_context, closure_context, context_length);
- init_closure_context(closure_context);
+ // Initialize the await context, storing the {generator} as extension.
+ StoreMapNoWriteBarrier(closure_context, RootIndex::kAwaitContextMap);
+ StoreObjectFieldNoWriteBarrier(closure_context, Context::kLengthOffset,
+ SmiConstant(Context::MIN_CONTEXT_SLOTS));
+ Node* const empty_scope_info =
+ LoadContextElement(native_context, Context::SCOPE_INFO_INDEX);
+ StoreContextElementNoWriteBarrier(
+ closure_context, Context::SCOPE_INFO_INDEX, empty_scope_info);
+ StoreContextElementNoWriteBarrier(closure_context, Context::PREVIOUS_INDEX,
+ native_context);
+ StoreContextElementNoWriteBarrier(closure_context, Context::EXTENSION_INDEX,
+ generator);
+ StoreContextElementNoWriteBarrier(
+ closure_context, Context::NATIVE_CONTEXT_INDEX, native_context);
}
// Let promiseCapability be ! NewPromiseCapability(%Promise%).
@@ -149,11 +159,8 @@ Node* AsyncBuiltinsAssembler::Await(
Node* AsyncBuiltinsAssembler::AwaitOptimized(
Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length, const ContextInitializer& init_closure_context,
Node* on_resolve_context_index, Node* on_reject_context_index,
Node* is_predicted_as_caught) {
- DCHECK_GE(context_length, Context::MIN_CONTEXT_SLOTS);
-
Node* const native_context = LoadNativeContext(context);
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
@@ -161,7 +168,7 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(
CSA_ASSERT(this, IsConstructor(promise_fun));
static const int kThrowawayPromiseOffset =
- FixedArray::SizeFor(context_length);
+ FixedArray::SizeFor(Context::MIN_CONTEXT_SLOTS);
static const int kResolveClosureOffset =
kThrowawayPromiseOffset + JSPromise::kSizeWithEmbedderFields;
static const int kRejectClosureOffset =
@@ -176,9 +183,20 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(
Node* const base = AllocateInNewSpace(kTotalSize);
Node* const closure_context = base;
{
- // Initialize closure context
- InitializeFunctionContext(native_context, closure_context, context_length);
- init_closure_context(closure_context);
+ // Initialize the await context, storing the {generator} as extension.
+ StoreMapNoWriteBarrier(closure_context, RootIndex::kAwaitContextMap);
+ StoreObjectFieldNoWriteBarrier(closure_context, Context::kLengthOffset,
+ SmiConstant(Context::MIN_CONTEXT_SLOTS));
+ Node* const empty_scope_info =
+ LoadContextElement(native_context, Context::SCOPE_INFO_INDEX);
+ StoreContextElementNoWriteBarrier(
+ closure_context, Context::SCOPE_INFO_INDEX, empty_scope_info);
+ StoreContextElementNoWriteBarrier(closure_context, Context::PREVIOUS_INDEX,
+ native_context);
+ StoreContextElementNoWriteBarrier(closure_context, Context::EXTENSION_INDEX,
+ generator);
+ StoreContextElementNoWriteBarrier(
+ closure_context, Context::NATIVE_CONTEXT_INDEX, native_context);
}
Node* const promise_map =
@@ -261,6 +279,39 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(
on_resolve, on_reject, throwaway);
}
+Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
+ Node* outer_promise,
+ Node* on_resolve_context_index,
+ Node* on_reject_context_index,
+ Node* is_predicted_as_caught) {
+ VARIABLE(result, MachineRepresentation::kTagged);
+ Label if_old(this), if_new(this), done(this);
+
+ STATIC_ASSERT(sizeof(FLAG_harmony_await_optimization) == 1);
+
+ TNode<Word32T> flag_value = UncheckedCast<Word32T>(Load(
+ MachineType::Uint8(),
+ ExternalConstant(
+ ExternalReference::address_of_harmony_await_optimization_flag())));
+
+ Branch(Word32Equal(flag_value, Int32Constant(0)), &if_old, &if_new);
+
+ BIND(&if_old);
+ result.Bind(AwaitOld(context, generator, value, outer_promise,
+ on_resolve_context_index, on_reject_context_index,
+ is_predicted_as_caught));
+ Goto(&done);
+
+ BIND(&if_new);
+ result.Bind(AwaitOptimized(context, generator, value, outer_promise,
+ on_resolve_context_index, on_reject_context_index,
+ is_predicted_as_caught));
+ Goto(&done);
+
+ BIND(&done);
+ return result.value();
+}
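`Await` now picks an implementation at runtime by loading the byte-sized `--harmony-await-optimization` flag, so one compiled builtin serves both configurations. A hedged TypeScript sketch of the shape of that dispatch (invented names; the real work is in the two assembler paths above):

```ts
// Sketch only: flag-dispatched selection between two await strategies.
const FLAG_harmony_await_optimization = false;  // assumed default

function awaitValue(v: unknown): Promise<unknown> {
  return FLAG_harmony_await_optimization ? awaitOptimized(v) : awaitOld(v);
}
function awaitOld(v: unknown) {
  return Promise.resolve(v);  // always allocates a wrapper promise
}
function awaitOptimized(v: unknown) {
  // Assumed optimization: skip the wrapper when given a native promise.
  return v instanceof Promise ? v : Promise.resolve(v);
}
```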
+
void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
Node* native_context,
Node* function,
@@ -275,11 +326,11 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
StoreMapNoWriteBarrier(function, function_map);
StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(function, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(function, JSFunction::kFeedbackCellOffset,
- Heap::kManyClosuresCellRootIndex);
+ RootIndex::kManyClosuresCell);
Node* shared_info = LoadContextElement(native_context, context_index);
CSA_ASSERT(this, IsSharedFunctionInfo(shared_info));
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index e5f487d8cc..9dafddef21 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -16,66 +16,29 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
: PromiseBuiltinsAssembler(state) {}
protected:
- typedef std::function<void(Node*)> ContextInitializer;
-
// Perform steps to resume generator after `value` is resolved.
// `on_reject_context_index` is an index into the Native Context, which should
// point to a SharedFunctioninfo instance used to create the closure. The
// value following the reject index should be a similar value for the resolve
// closure. Returns the Promise-wrapped `value`.
Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
Node* on_resolve_context_index, Node* on_reject_context_index,
Node* is_predicted_as_caught);
- Node* AwaitOptimized(Node* context, Node* generator, Node* value,
- Node* outer_promise, int context_length,
- const ContextInitializer& init_closure_context,
- Node* on_resolve_context_index,
- Node* on_reject_context_index,
- Node* is_predicted_as_caught);
Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
int on_resolve_context_index, int on_reject_context_index,
Node* is_predicted_as_caught) {
- return Await(context, generator, value, outer_promise, context_length,
- init_closure_context, IntPtrConstant(on_resolve_context_index),
+ return Await(context, generator, value, outer_promise,
+ IntPtrConstant(on_resolve_context_index),
IntPtrConstant(on_reject_context_index),
is_predicted_as_caught);
}
- Node* AwaitOptimized(Node* context, Node* generator, Node* value,
- Node* outer_promise, int context_length,
- const ContextInitializer& init_closure_context,
- int on_resolve_context_index,
- int on_reject_context_index,
- Node* is_predicted_as_caught) {
- return AwaitOptimized(
- context, generator, value, outer_promise, context_length,
- init_closure_context, IntPtrConstant(on_resolve_context_index),
- IntPtrConstant(on_reject_context_index), is_predicted_as_caught);
- }
Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
int on_resolve_context_index, int on_reject_context_index,
bool is_predicted_as_caught) {
- return Await(context, generator, value, outer_promise, context_length,
- init_closure_context, on_resolve_context_index,
- on_reject_context_index,
+ return Await(context, generator, value, outer_promise,
+ on_resolve_context_index, on_reject_context_index,
BooleanConstant(is_predicted_as_caught));
}
- Node* AwaitOptimized(Node* context, Node* generator, Node* value,
- Node* outer_promise, int context_length,
- const ContextInitializer& init_closure_context,
- int on_resolve_context_index,
- int on_reject_context_index,
- bool is_predicted_as_caught) {
- return AwaitOptimized(context, generator, value, outer_promise,
- context_length, init_closure_context,
- on_resolve_context_index, on_reject_context_index,
- BooleanConstant(is_predicted_as_caught));
- }
// Return a new built-in function object as defined in
// Async Iterator Value Unwrap Functions
@@ -86,6 +49,14 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
Node* function, Node* context_index);
Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context,
Node* done);
+
+ Node* AwaitOld(Node* context, Node* generator, Node* value,
+ Node* outer_promise, Node* on_resolve_context_index,
+ Node* on_reject_context_index, Node* is_predicted_as_caught);
+ Node* AwaitOptimized(Node* context, Node* generator, Node* value,
+ Node* outer_promise, Node* on_resolve_context_index,
+ Node* on_reject_context_index,
+ Node* is_predicted_as_caught);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index bbb2571691..bff2de486a 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -17,13 +17,6 @@ using compiler::Node;
namespace {
-// Describe fields of Context associated with AsyncGeneratorAwait resume
-// closures.
-class AwaitContext {
- public:
- enum Fields { kGeneratorSlot = Context::MIN_CONTEXT_SLOTS, kLength };
-};
-
class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
public:
explicit AsyncGeneratorBuiltinsAssembler(CodeAssemblerState* state)
@@ -207,7 +200,7 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
Node* promise) {
CSA_SLOW_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE));
Node* request = Allocate(AsyncGeneratorRequest::kSize);
- StoreMapNoWriteBarrier(request, Heap::kAsyncGeneratorRequestMapRootIndex);
+ StoreMapNoWriteBarrier(request, RootIndex::kAsyncGeneratorRequestMap);
StoreObjectFieldNoWriteBarrier(request, AsyncGeneratorRequest::kNextOffset,
UndefinedConstant());
StoreObjectFieldNoWriteBarrier(request,
@@ -218,15 +211,14 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
StoreObjectFieldNoWriteBarrier(request, AsyncGeneratorRequest::kPromiseOffset,
promise);
StoreObjectFieldRoot(request, AsyncGeneratorRequest::kNextOffset,
- Heap::kUndefinedValueRootIndex);
+ RootIndex::kUndefinedValue);
return request;
}
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
Node* context, Node* value,
JSAsyncGeneratorObject::ResumeMode resume_mode) {
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
+ Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
SetGeneratorNotAwaiting(generator);
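Editor's note: with the bespoke AwaitContext layout deleted above, the resume closures read the generator back out of the closure context's generic extension slot rather than a purpose-built kGeneratorSlot. A toy model of that slot change; the slot names here are hypothetical, the real indices live in V8's Context class:

    #include <array>
    #include <cassert>

    // Hypothetical fixed context layout for illustration only.
    enum ContextSlot { kScopeInfo, kPrevious, kExtension, kMinContextSlots };

    int main() {
      std::array<const void*, kMinContextSlots> closure_context{};
      int generator = 42;                        // stand-in for the generator object
      closure_context[kExtension] = &generator;  // stored when the await is set up
      // AsyncGeneratorAwaitResumeClosure-style load on resumption:
      assert(closure_context[kExtension] == &generator);
    }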
@@ -254,11 +246,6 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
CSA_ASSERT(this, IsNotUndefined(request));
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
Node* outer_promise =
LoadObjectField(request, AsyncGeneratorRequest::kPromiseOffset);
@@ -266,8 +253,8 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
const int reject_index = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
SetGeneratorAwaiting(generator);
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, resolve_index, reject_index, is_catchable);
+ Await(context, generator, value, outer_promise, resolve_index, reject_index,
+ is_catchable);
Return(UndefinedConstant());
}
@@ -519,9 +506,9 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
Context::ITERATOR_RESULT_MAP_INDEX);
StoreMapNoWriteBarrier(iter_result, map);
StoreObjectFieldRoot(iter_result, JSIteratorResult::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(iter_result, JSIteratorResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldNoWriteBarrier(iter_result, JSIteratorResult::kValueOffset,
value);
StoreObjectFieldNoWriteBarrier(iter_result, JSIteratorResult::kDoneOffset,
@@ -585,25 +572,19 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(request);
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
const int on_resolve = Context::ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN;
const int on_reject = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
SetGeneratorAwaiting(generator);
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, on_resolve, on_reject, is_caught);
+ Await(context, generator, value, outer_promise, on_resolve, on_reject,
+ is_caught);
Return(UndefinedConstant());
}
TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
+ Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
SetGeneratorNotAwaiting(generator);
@@ -656,17 +637,11 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
BIND(&perform_await);
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
SetGeneratorAwaiting(generator);
Node* const context = Parameter(Descriptor::kContext);
Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(req);
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, var_on_resolve.value(), var_on_reject.value(),
- is_caught);
+ Await(context, generator, value, outer_promise, var_on_resolve.value(),
+ var_on_reject.value(), is_caught);
Return(UndefinedConstant());
}
@@ -689,8 +664,7 @@ TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
+ Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
SetGeneratorNotAwaiting(generator);
@@ -707,8 +681,7 @@ TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure,
AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
+ Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
SetGeneratorNotAwaiting(generator);
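Editor's note: throughout this file (and the rest of the diff) the per-root constants hanging off Heap, such as Heap::kUndefinedValueRootIndex, are replaced by enumerators of a dedicated RootIndex enum. A sketch of the shape of that change, with an abbreviated, illustrative enumerator list:

    // Old style: loosely grouped enum constants on a large class.
    struct Heap {
      enum RootListIndex { kUndefinedValueRootIndex, kEmptyFixedArrayRootIndex };
    };

    // New style: a scoped enum that can be passed around type-safely.
    enum class RootIndex { kUndefinedValue, kEmptyFixedArray };

    // Callers now write RootIndex::kUndefinedValue instead of
    // Heap::kUndefinedValueRootIndex, and APIs take RootIndex by value.
    int main() { RootIndex r = RootIndex::kUndefinedValue; (void)r; }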
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 82db5fbd5d..3672bda9d1 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -333,20 +333,6 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
"[Async-from-Sync Iterator].prototype.next");
}
-TF_BUILTIN(AsyncFromSyncIteratorPrototypeNextOptimized,
- AsyncFromSyncBuiltinsAssembler) {
- Node* const iterator = Parameter(Descriptor::kReceiver);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
-
- auto get_method = [=](Node* const unused) {
- return LoadObjectField(iterator, JSAsyncFromSyncIterator::kNextOffset);
- };
- Generate_AsyncFromSyncIteratorMethodOptimized(
- context, iterator, value, get_method, UndefinedMethodHandler(),
- "[Async-from-Sync Iterator].prototype.next");
-}
-
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-%asyncfromsynciteratorprototype%.return
TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
@@ -374,31 +360,6 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
"[Async-from-Sync Iterator].prototype.return");
}
-TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturnOptimized,
- AsyncFromSyncBuiltinsAssembler) {
- Node* const iterator = Parameter(Descriptor::kReceiver);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
-
- auto if_return_undefined = [=](Node* const native_context,
- Node* const promise, Label* if_exception) {
- // If return is undefined, then
- // Let iterResult be ! CreateIterResultObject(value, true)
- Node* const iter_result = CallBuiltin(Builtins::kCreateIterResultObject,
- context, value, TrueConstant());
-
- // Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
- // IfAbruptRejectPromise(nextDone, promiseCapability).
- // Return promiseCapability.[[Promise]].
- CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
- Return(promise);
- };
-
- Generate_AsyncFromSyncIteratorMethodOptimized(
- context, iterator, value, factory()->return_string(), if_return_undefined,
- "[Async-from-Sync Iterator].prototype.return");
-}
-
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-%asyncfromsynciteratorprototype%.throw
TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
@@ -416,20 +377,5 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
reason);
}
-TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrowOptimized,
- AsyncFromSyncBuiltinsAssembler) {
- Node* const iterator = Parameter(Descriptor::kReceiver);
- Node* const reason = Parameter(Descriptor::kReason);
- Node* const context = Parameter(Descriptor::kContext);
-
- auto if_throw_undefined = [=](Node* const native_context, Node* const promise,
- Label* if_exception) { Goto(if_exception); };
-
- Generate_AsyncFromSyncIteratorMethodOptimized(
- context, iterator, reason, factory()->throw_string(), if_throw_undefined,
- "[Async-from-Sync Iterator].prototype.throw", Label::kNonDeferred,
- reason);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
index 52645cbaa0..52ed0563c6 100644
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -26,8 +26,9 @@ BUILTIN(BooleanConstructor) {
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
DCHECK(*target == target->native_context()->boolean_function());
Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
Handle<JSValue>::cast(result)->set_value(
isolate->heap()->ToBoolean(value->BooleanValue(isolate)));
return *result;
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 4defe28cb7..e23f13d2b3 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -17,45 +17,75 @@ namespace internal {
void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined(
MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
}
void Builtins::Generate_CallFunction_ReceiverIsNotNullOrUndefined(
MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined);
}
void Builtins::Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_CallFunction(masm, ConvertReceiverMode::kAny);
}
void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_CallBoundFunctionImpl(masm);
}
void Builtins::Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined);
}
void Builtins::Generate_Call_ReceiverIsNotNullOrUndefined(
MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined);
}
void Builtins::Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_Call(masm, ConvertReceiverMode::kAny);
}
void Builtins::Generate_CallVarargs(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_CallOrConstructVarargs(masm, masm->isolate()->builtins()->Call());
}
void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_CallOrConstructForwardVarargs(masm, CallOrConstructMode::kCall,
masm->isolate()->builtins()->Call());
}
void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_CallOrConstructForwardVarargs(
masm, CallOrConstructMode::kCall,
masm->isolate()->builtins()->CallFunction());
@@ -163,9 +193,9 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
BIND(&if_arguments);
{
TNode<JSArgumentsObject> js_arguments = CAST(arguments_list);
- // Try to extract the elements from an JSArgumentsObject.
- TNode<Object> length =
- LoadObjectField(js_arguments, JSArgumentsObject::kLengthOffset);
+ // Try to extract the elements from a JSArgumentsObjectWithLength.
+ TNode<Object> length = LoadObjectField(
+ js_arguments, JSArgumentsObjectWithLength::kLengthOffset);
TNode<FixedArrayBase> elements = LoadElements(js_arguments);
TNode<Smi> elements_length = LoadFixedArrayBaseLength(elements);
GotoIfNot(WordEqual(length, elements_length), &if_runtime);
@@ -211,11 +241,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
{
if (new_target == nullptr) {
Callable callable = CodeFactory::CallVarargs(isolate());
- TailCallStub(callable, context, target, args_count, elements, length);
+ TailCallStub(callable, context, target, args_count, length, elements);
} else {
Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count,
- elements, length);
+ TailCallStub(callable, context, target, new_target, args_count, length,
+ elements);
}
}
@@ -266,11 +296,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
{
if (new_target == nullptr) {
Callable callable = CodeFactory::CallVarargs(isolate());
- TailCallStub(callable, context, target, args_count, new_elements, length);
+ TailCallStub(callable, context, target, args_count, length, new_elements);
} else {
Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count,
- new_elements, length);
+ TailCallStub(callable, context, target, new_target, args_count, length,
+ new_elements);
}
}
}
@@ -299,7 +329,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
// Check that the Array.prototype hasn't been modified in a way that would
// affect iteration.
TNode<PropertyCell> protector_cell =
- CAST(LoadRoot(Heap::kArrayIteratorProtectorRootIndex));
+ CAST(LoadRoot(RootIndex::kArrayIteratorProtector));
GotoIf(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
SmiConstant(Isolate::kProtectorInvalid)),
&if_generic);
@@ -325,8 +355,9 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
TNode<Object> iterator_fn =
GetProperty(context, spread, IteratorSymbolConstant());
GotoIfNot(TaggedIsCallable(iterator_fn), &if_iterator_fn_not_callable);
- TNode<JSArray> list = CAST(
- CallBuiltin(Builtins::kIterableToList, context, spread, iterator_fn));
+ TNode<JSArray> list =
+ CAST(CallBuiltin(Builtins::kIterableToListMayPreserveHoles, context,
+ spread, iterator_fn));
var_length = LoadAndUntagToWord32ObjectField(list, JSArray::kLengthOffset);
var_elements = LoadElements(list);
@@ -346,11 +377,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
if (new_target == nullptr) {
Callable callable = CodeFactory::CallVarargs(isolate());
- TailCallStub(callable, context, target, args_count, elements, length);
+ TailCallStub(callable, context, target, args_count, length, elements);
} else {
Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count, elements,
- length);
+ TailCallStub(callable, context, target, new_target, args_count, length,
+ elements);
}
}
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index c41626dfd4..b580bb181f 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -137,6 +137,14 @@ BUILTIN(CallSitePrototypeGetTypeName) {
return *it.Frame()->GetTypeName();
}
+BUILTIN(CallSitePrototypeIsAsync) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "isAsync");
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return isolate->heap()->ToBoolean(it.Frame()->IsAsync());
+}
+
BUILTIN(CallSitePrototypeIsConstructor) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "isConstructor");
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index aab0475e21..1ff64b0877 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -24,7 +24,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
explicit BaseCollectionsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- virtual ~BaseCollectionsAssembler() {}
+ virtual ~BaseCollectionsAssembler() = default;
protected:
enum Variant { kMap, kSet, kWeakMap, kWeakSet };
@@ -628,7 +628,7 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
Node* AllocateJSCollectionIterator(Node* context, int map_index,
Node* collection);
TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
- TNode<IntPtrT> at_least_space_for);
+ TNode<IntPtrT> at_least_space_for) override;
Node* GetHash(Node* const key);
Node* CallGetHashRaw(Node* const key);
Node* CallGetOrCreateHashRaw(Node* const key);
@@ -731,9 +731,9 @@ Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
Node* const iterator = AllocateInNewSpace(IteratorType::kSize);
StoreMapNoWriteBarrier(iterator, iterator_map);
StoreObjectFieldRoot(iterator, IteratorType::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(iterator, IteratorType::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kTableOffset, table);
StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kIndexOffset,
SmiConstant(0));
@@ -770,7 +770,7 @@ TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
Node* CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(Node* const key) {
Node* const function_addr =
- ExternalConstant(ExternalReference::get_or_create_hash_raw(isolate()));
+ ExternalConstant(ExternalReference::get_or_create_hash_raw());
Node* const isolate_ptr =
ExternalConstant(ExternalReference::isolate_address(isolate()));
@@ -1699,7 +1699,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
BIND(&return_end);
{
StoreObjectFieldRoot(receiver, JSMapIterator::kTableOffset,
- Heap::kEmptyOrderedHashMapRootIndex);
+ RootIndex::kEmptyOrderedHashMap);
Goto(&return_value);
}
}
@@ -1907,7 +1907,7 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
BIND(&return_end);
{
StoreObjectFieldRoot(receiver, JSSetIterator::kTableOffset,
- Heap::kEmptyOrderedHashSetRootIndex);
+ RootIndex::kEmptyOrderedHashSet);
Goto(&return_value);
}
}
@@ -1986,7 +1986,7 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
TNode<IntPtrT> number_of_elements);
TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
- TNode<IntPtrT> at_least_space_for);
+ TNode<IntPtrT> at_least_space_for) override;
// Generates and sets the identity hash for a JSReceiver.
TNode<Smi> CreateIdentityHash(TNode<Object> receiver);
@@ -2062,8 +2062,7 @@ TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
TNode<FixedArray> table = CAST(
AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation));
- Heap::RootListIndex map_root_index = static_cast<Heap::RootListIndex>(
- EphemeronHashTableShape::GetMapRootIndex());
+ RootIndex map_root_index = EphemeronHashTableShape::GetMapRootIndex();
StoreMapNoWriteBarrier(table, map_root_index);
StoreFixedArrayElement(table, EphemeronHashTable::kNumberOfElementsIndex,
SmiConstant(0), SKIP_WRITE_BARRIER);
@@ -2075,14 +2074,14 @@ TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
TNode<IntPtrT> start = KeyIndexFromEntry(IntPtrConstant(0));
FillFixedArrayWithValue(HOLEY_ELEMENTS, table, start, length,
- Heap::kUndefinedValueRootIndex);
+ RootIndex::kUndefinedValue);
return table;
}
TNode<Smi> WeakCollectionsBuiltinsAssembler::CreateIdentityHash(
TNode<Object> key) {
- TNode<ExternalReference> function_addr = ExternalConstant(
- ExternalReference::jsreceiver_create_identity_hash(isolate()));
+ TNode<ExternalReference> function_addr =
+ ExternalConstant(ExternalReference::jsreceiver_create_identity_hash());
TNode<ExternalReference> isolate_ptr =
ExternalConstant(ExternalReference::isolate_address(isolate()));
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index e9f252cb6a..d6819d8f66 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -32,7 +32,8 @@ namespace internal {
V(CountReset, countReset) \
V(Assert, assert) \
V(Profile, profile) \
- V(ProfileEnd, profileEnd)
+ V(ProfileEnd, profileEnd) \
+ V(TimeLog, timeLog)
namespace {
void ConsoleCall(
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 8e54c4c369..26c97ba681 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -19,17 +19,26 @@ namespace v8 {
namespace internal {
void Builtins::Generate_ConstructVarargs(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_CallOrConstructVarargs(masm,
BUILTIN_CODE(masm->isolate(), Construct));
}
void Builtins::Generate_ConstructForwardVarargs(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_CallOrConstructForwardVarargs(
masm, CallOrConstructMode::kConstruct,
BUILTIN_CODE(masm->isolate(), Construct));
}
void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_CallOrConstructForwardVarargs(
masm, CallOrConstructMode::kConstruct,
BUILTIN_CODE(masm->isolate(), ConstructFunction));
@@ -77,11 +86,11 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
Goto(&cell_done);
BIND(&no_closures);
- StoreMapNoWriteBarrier(feedback_cell, Heap::kOneClosureCellMapRootIndex);
+ StoreMapNoWriteBarrier(feedback_cell, RootIndex::kOneClosureCellMap);
Goto(&cell_done);
BIND(&one_closure);
- StoreMapNoWriteBarrier(feedback_cell, Heap::kManyClosuresCellMapRootIndex);
+ StoreMapNoWriteBarrier(feedback_cell, RootIndex::kManyClosuresCellMap);
Goto(&cell_done);
BIND(&cell_done);
@@ -116,9 +125,9 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
// Initialize the rest of the function.
StoreObjectFieldRoot(result, JSObject::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(result, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
{
// Set function prototype if necessary.
Label done(this), init_prototype(this);
@@ -127,7 +136,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
BIND(&init_prototype);
StoreObjectFieldRoot(result, JSFunction::kPrototypeOrInitialMapOffset,
- Heap::kTheHoleValueRootIndex);
+ RootIndex::kTheHoleValue);
Goto(&done);
BIND(&done);
}
@@ -236,13 +245,13 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
TNode<Context> function_context =
UncheckedCast<Context>(AllocateInNewSpace(size));
- Heap::RootListIndex context_type;
+ RootIndex context_type;
switch (scope_type) {
case EVAL_SCOPE:
- context_type = Heap::kEvalContextMapRootIndex;
+ context_type = RootIndex::kEvalContextMap;
break;
case FUNCTION_SCOPE:
- context_type = Heap::kFunctionContextMapRootIndex;
+ context_type = RootIndex::kFunctionContextMap;
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 7bdc2759c4..b898056658 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -48,10 +48,8 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
if_resultisnotprimitive(this, Label::kDeferred);
GotoIf(TaggedIsSmi(result), &if_resultisprimitive);
Node* result_instance_type = LoadInstanceType(result);
- STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
- Branch(Int32LessThanOrEqual(result_instance_type,
- Int32Constant(LAST_PRIMITIVE_TYPE)),
- &if_resultisprimitive, &if_resultisnotprimitive);
+ Branch(IsPrimitiveInstanceType(result_instance_type), &if_resultisprimitive,
+ &if_resultisnotprimitive);
BIND(&if_resultisprimitive);
{
@@ -108,7 +106,62 @@ TF_BUILTIN(ToName, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* input = Parameter(Descriptor::kArgument);
- Return(ToName(context, input));
+ VARIABLE(var_input, MachineRepresentation::kTagged, input);
+ Label loop(this, &var_input);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ // Load the current {input} value.
+ Node* input = var_input.value();
+
+ // Dispatch based on the type of the {input}.
+ Label if_inputisbigint(this), if_inputisname(this), if_inputisnumber(this),
+ if_inputisoddball(this), if_inputisreceiver(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(input), &if_inputisnumber);
+ Node* input_instance_type = LoadInstanceType(input);
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ GotoIf(IsNameInstanceType(input_instance_type), &if_inputisname);
+ GotoIf(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver);
+ GotoIf(IsHeapNumberInstanceType(input_instance_type), &if_inputisnumber);
+ Branch(IsBigIntInstanceType(input_instance_type), &if_inputisbigint,
+ &if_inputisoddball);
+
+ BIND(&if_inputisbigint);
+ {
+ // We don't have a fast-path for BigInt currently, so just
+ // tail call to the %ToString runtime function here for now.
+ TailCallRuntime(Runtime::kToString, context, input);
+ }
+
+ BIND(&if_inputisname);
+ {
+ // The {input} is already a Name.
+ Return(input);
+ }
+
+ BIND(&if_inputisnumber);
+ {
+ // Convert the Number {input} to a String.
+ TailCallBuiltin(Builtins::kNumberToString, context, input);
+ }
+
+ BIND(&if_inputisoddball);
+ {
+ // Just return the {input}'s string representation.
+ CSA_ASSERT(this, IsOddballInstanceType(input_instance_type));
+ Return(LoadObjectField(input, Oddball::kToStringOffset));
+ }
+
+ BIND(&if_inputisreceiver);
+ {
+ // Convert the JSReceiver {input} to a primitive first,
+ // and then run the loop again with the new {input},
+ // which is then a primitive value.
+ var_input.Bind(CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String,
+ context, input));
+ Goto(&loop);
+ }
+ }
}
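Editor's note: the rewritten ToName above replaces a single helper call with an explicit type dispatch that loops after lowering a JSReceiver to a primitive, so the second pass is guaranteed to hit a primitive branch. A toy model of that loop-until-primitive control flow, with std::variant standing in for tagged values:

    #include <cassert>
    #include <string>
    #include <variant>

    struct Receiver { std::string primitive; };  // pretend ToPrimitive result
    using Value = std::variant<std::string, double, Receiver>;

    std::string ToName(Value input) {
      for (;;) {
        // Name case: already done.
        if (auto* name = std::get_if<std::string>(&input)) return *name;
        // Number case: stringify.
        if (auto* num = std::get_if<double>(&input)) return std::to_string(*num);
        // Receiver case: lower to a primitive and loop again.
        input = std::get<Receiver>(input).primitive;
      }
    }

    int main() { assert(ToName(Receiver{"id"}) == "id"); }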
TF_BUILTIN(NonNumberToNumber, CodeStubAssembler) {
@@ -205,10 +258,7 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
// Return the {result} if it is a primitive.
GotoIf(TaggedIsSmi(result), &return_result);
Node* result_instance_type = LoadInstanceType(result);
- STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
- GotoIf(Int32LessThanOrEqual(result_instance_type,
- Int32Constant(LAST_PRIMITIVE_TYPE)),
- &return_result);
+ GotoIf(IsPrimitiveInstanceType(result_instance_type), &return_result);
}
// Just continue with the next {name} if the {method} is not callable.
@@ -384,9 +434,9 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
Node* js_value = Allocate(JSValue::kSize);
StoreMapNoWriteBarrier(js_value, initial_map);
StoreObjectFieldRoot(js_value, JSValue::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectField(js_value, JSValue::kValueOffset, object);
Return(js_value);
diff --git a/deps/v8/src/builtins/builtins-data-view-gen.h b/deps/v8/src/builtins/builtins-data-view-gen.h
index 6c755c4d08..4a55a90eef 100644
--- a/deps/v8/src/builtins/builtins-data-view-gen.h
+++ b/deps/v8/src/builtins/builtins-data-view-gen.h
@@ -17,25 +17,17 @@ class DataViewBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
explicit DataViewBuiltinsAssembler(compiler::CodeAssemblerState* state)
: BaseBuiltinsFromDSLAssembler(state) {}
- TNode<Number> LoadDataViewByteOffset(TNode<JSDataView> data_view) {
- return CAST(LoadObjectField(data_view, JSDataView::kByteOffsetOffset));
- }
-
- TNode<Number> LoadDataViewByteLength(TNode<JSDataView> data_view) {
- return CAST(LoadObjectField(data_view, JSDataView::kByteLengthOffset));
- }
-
- TNode<Int32T> LoadUint8(TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset) {
+ TNode<Int32T> LoadUint8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
return UncheckedCast<Int32T>(
Load(MachineType::Uint8(), data_pointer, offset));
}
- TNode<Int32T> LoadInt8(TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset) {
+ TNode<Int32T> LoadInt8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
return UncheckedCast<Int32T>(
Load(MachineType::Int8(), data_pointer, offset));
}
- void StoreWord8(TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset,
+ void StoreWord8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset,
TNode<Word32T> value) {
StoreNoWriteBarrier(MachineRepresentation::kWord8, data_pointer, offset,
value);
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 72ea685982..f40cd0f68e 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -43,51 +43,52 @@ BUILTIN(DataViewConstructor) {
Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
// 4. Let offset be ? ToIndex(byteOffset).
- Handle<Object> offset;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, offset,
+ isolate, byte_offset,
Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
+ size_t view_byte_offset = byte_offset->Number();
// 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
// We currently violate the specification at this point. TODO: Fix that.
// 6. Let bufferByteLength be the value of buffer's
// [[ArrayBufferByteLength]] internal slot.
- double const buffer_byte_length = array_buffer->byte_length()->Number();
+ size_t const buffer_byte_length = array_buffer->byte_length();
// 7. If offset > bufferByteLength, throw a RangeError exception.
- if (offset->Number() > buffer_byte_length) {
+ if (view_byte_offset > buffer_byte_length) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset));
+ isolate, NewRangeError(MessageTemplate::kInvalidOffset, byte_offset));
}
- Handle<Object> view_byte_length;
+ size_t view_byte_length;
if (byte_length->IsUndefined(isolate)) {
// 8. If byteLength is either not present or undefined, then
// a. Let viewByteLength be bufferByteLength - offset.
- view_byte_length =
- isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
+ view_byte_length = buffer_byte_length - view_byte_offset;
} else {
// 9. Else,
// a. Let viewByteLength be ? ToIndex(byteLength).
// b. If offset+viewByteLength > bufferByteLength, throw a
// RangeError exception.
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, view_byte_length,
+ isolate, byte_length,
Object::ToIndex(isolate, byte_length,
MessageTemplate::kInvalidDataViewLength));
- if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
+ if (view_byte_offset + byte_length->Number() > buffer_byte_length) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
}
+ view_byte_length = byte_length->Number();
}
// 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
// "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
// [[ByteLength]], [[ByteOffset]]»).
Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::kZero);
}
@@ -96,10 +97,10 @@ BUILTIN(DataViewConstructor) {
Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
// 12. Set O's [[ByteLength]] internal slot to viewByteLength.
- Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
+ Handle<JSDataView>::cast(result)->set_byte_length(view_byte_length);
// 13. Set O's [[ByteOffset]] internal slot to offset.
- Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
+ Handle<JSDataView>::cast(result)->set_byte_offset(view_byte_offset);
// 14. Return O.
return *result;
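Editor's note: the constructor now tracks offset and length as raw size_t values instead of boxed Numbers. A minimal sketch of the bounds logic it implements (spec steps 7-9), under the assumption that ToIndex has already produced non-negative integers, and ignoring the overflow headroom the real code gets from double arithmetic:

    #include <cassert>
    #include <cstddef>
    #include <optional>
    #include <stdexcept>

    size_t ViewByteLength(size_t buffer_byte_length, size_t view_byte_offset,
                          std::optional<size_t> requested_length) {
      if (view_byte_offset > buffer_byte_length)       // step 7
        throw std::range_error("invalid offset");
      if (!requested_length)                           // step 8: take the remainder
        return buffer_byte_length - view_byte_offset;
      if (view_byte_offset + *requested_length > buffer_byte_length)  // step 9b
        throw std::range_error("invalid DataView length");
      return *requested_length;                        // step 9a
    }

    int main() {
      assert(ViewByteLength(16, 4, std::nullopt) == 12);
      assert(ViewByteLength(16, 4, 8) == 8);
    }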
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index 4d3c7faa53..e0cb199920 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -193,11 +193,11 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
hint_is_invalid(this, Label::kDeferred);
// Fast cases for internalized strings.
- Node* number_string = LoadRoot(Heap::knumber_stringRootIndex);
+ Node* number_string = LoadRoot(RootIndex::knumber_string);
GotoIf(WordEqual(hint, number_string), &hint_is_number);
- Node* default_string = LoadRoot(Heap::kdefault_stringRootIndex);
+ Node* default_string = LoadRoot(RootIndex::kdefault_string);
GotoIf(WordEqual(hint, default_string), &hint_is_string);
- Node* string_string = LoadRoot(Heap::kstring_stringRootIndex);
+ Node* string_string = LoadRoot(RootIndex::kstring_string);
GotoIf(WordEqual(hint, string_string), &hint_is_string);
// Slow-case with actual string comparisons.
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 569a5807e2..91da4d6d7d 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -10,6 +10,10 @@
#include "src/counters.h"
#include "src/dateparser-inl.h"
#include "src/objects-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/intl-objects.h"
+#include "src/objects/js-date-time-format.h"
+#endif
namespace v8 {
namespace internal {
@@ -835,6 +839,65 @@ BUILTIN(DatePrototypeToTimeString) {
isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
}
+#ifdef V8_INTL_SUPPORT
+// ecma402 #sup-date.prototype.tolocaledatestring
+BUILTIN(DatePrototypeToLocaleDateString) {
+ HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleDateString);
+
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleDateString");
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSDateTimeFormat::ToLocaleDateTime(
+ isolate,
+ date, // date
+ args.atOrUndefined(isolate, 1), // locales
+ args.atOrUndefined(isolate, 2), // options
+ JSDateTimeFormat::RequiredOption::kDate, // required
+ JSDateTimeFormat::DefaultsOption::kDate, // defaults
+ "dateformatdate")); // service
+}
+
+// ecma402 #sup-date.prototype.tolocalestring
+BUILTIN(DatePrototypeToLocaleString) {
+ HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleString);
+
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleString");
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSDateTimeFormat::ToLocaleDateTime(
+ isolate,
+ date, // date
+ args.atOrUndefined(isolate, 1), // locales
+ args.atOrUndefined(isolate, 2), // options
+ JSDateTimeFormat::RequiredOption::kAny, // required
+ JSDateTimeFormat::DefaultsOption::kAll, // defaults
+ "dateformatall")); // service
+}
+
+// ecma402 #sup-date.prototype.tolocaletimestring
+BUILTIN(DatePrototypeToLocaleTimeString) {
+ HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleTimeString);
+
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleTimeString");
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSDateTimeFormat::ToLocaleDateTime(
+ isolate,
+ date, // date
+ args.atOrUndefined(isolate, 1), // locales
+ args.atOrUndefined(isolate, 2), // options
+ JSDateTimeFormat::RequiredOption::kTime, // required
+ JSDateTimeFormat::DefaultsOption::kTime, // defaults
+ "dateformattime")); // service
+}
+#endif // V8_INTL_SUPPORT
+
// ES6 section 20.3.4.43 Date.prototype.toUTCString ( )
BUILTIN(DatePrototypeToUTCString) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 62765b802f..c47fa7b19b 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_DEFINITIONS_H_
#define V8_BUILTINS_BUILTINS_DEFINITIONS_H_
-#include "src/interpreter/bytecodes.h"
+#include "builtins-generated/bytecodes-builtins-list.h"
// include generated header
#include "torque-generated/builtin-definitions-from-dsl.h"
@@ -26,11 +26,13 @@ namespace internal {
// TFH: Handlers in Turbofan, with CodeStub linkage.
// Args: name, interface descriptor
// BCH: Bytecode Handlers, with bytecode dispatch linkage.
-// Args: name
+// Args: name, OperandScale, Bytecode
+// DLH: Deserialize Lazy Handlers, with bytecode dispatch linkage.
+// Args: name, OperandScale
// ASM: Builtin in platform-dependent assembly.
// Args: name
-#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
+#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, DLH, ASM) \
/* GC write barrier */ \
TFC(RecordWrite, RecordWrite, 1) \
\
@@ -127,6 +129,10 @@ namespace internal {
TFC(CompileLazy, JSTrampoline, 1) \
TFC(CompileLazyDeoptimizedCode, JSTrampoline, 1) \
TFC(DeserializeLazy, JSTrampoline, 1) \
+ /* The three lazy bytecode handlers do not declare a bytecode. */ \
+ DLH(DeserializeLazyHandler, interpreter::OperandScale::kSingle) \
+ DLH(DeserializeLazyWideHandler, interpreter::OperandScale::kDouble) \
+ DLH(DeserializeLazyExtraWideHandler, interpreter::OperandScale::kQuadruple) \
ASM(InstantiateAsmJs) \
ASM(NotifyDeoptimized) \
\
@@ -155,8 +161,6 @@ namespace internal {
ASM(ContinueToJavaScriptBuiltin) \
ASM(ContinueToJavaScriptBuiltinWithResult) \
\
- ASM(OnStackReplacement) \
- \
/* API callback handling */ \
API(HandleApiCall) \
API(HandleApiCallAsFunction) \
@@ -204,7 +208,6 @@ namespace internal {
TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter, 1) \
\
/* Handlers */ \
- TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \
TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \
TFH(KeyedLoadIC_Slow, LoadWithVector) \
TFH(KeyedStoreIC_Megamorphic, StoreWithVector) \
@@ -319,12 +322,11 @@ namespace internal {
TFJ(ArrayPrototypeShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.slice */ \
TFJ(ArrayPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-array.prototype.splice */ \
- CPP(ArraySplice) \
/* ES6 #sec-array.prototype.unshift */ \
CPP(ArrayUnshift) \
/* Support for Array.from and other array-copying idioms */ \
TFS(CloneFastJSArray, kSource) \
+ TFS(CloneFastJSArrayFillingHoles, kSource) \
TFS(ExtractFastJSArray, kSource, kBegin, kCount) \
/* ES6 #sec-array.prototype.every */ \
TFS(ArrayEveryLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
@@ -427,12 +429,8 @@ namespace internal {
/* AsyncFunction */ \
TFJ(AsyncFunctionAwaitCaught, 3, kReceiver, kGenerator, kAwaited, \
kOuterPromise) \
- TFJ(AsyncFunctionAwaitCaughtOptimized, 3, kReceiver, kGenerator, kAwaited, \
- kOuterPromise) \
TFJ(AsyncFunctionAwaitUncaught, 3, kReceiver, kGenerator, kAwaited, \
kOuterPromise) \
- TFJ(AsyncFunctionAwaitUncaughtOptimized, 3, kReceiver, kGenerator, kAwaited, \
- kOuterPromise) \
TFJ(AsyncFunctionAwaitRejectClosure, 1, kReceiver, kSentError) \
TFJ(AsyncFunctionAwaitResolveClosure, 1, kReceiver, kSentValue) \
TFJ(AsyncFunctionPromiseCreate, 0, kReceiver) \
@@ -466,6 +464,7 @@ namespace internal {
CPP(CallSitePrototypeGetScriptNameOrSourceURL) \
CPP(CallSitePrototypeGetThis) \
CPP(CallSitePrototypeGetTypeName) \
+ CPP(CallSitePrototypeIsAsync) \
CPP(CallSitePrototypeIsConstructor) \
CPP(CallSitePrototypeIsEval) \
CPP(CallSitePrototypeIsNative) \
@@ -493,6 +492,7 @@ namespace internal {
CPP(ConsoleProfile) \
CPP(ConsoleProfileEnd) \
CPP(ConsoleTime) \
+ CPP(ConsoleTimeLog) \
CPP(ConsoleTimeEnd) \
CPP(ConsoleTimeStamp) \
CPP(ConsoleContext) \
@@ -629,10 +629,14 @@ namespace internal {
\
/* ICs */ \
TFH(LoadIC, LoadWithVector) \
+ TFH(LoadIC_Megamorphic, LoadWithVector) \
TFH(LoadIC_Noninlined, LoadWithVector) \
TFH(LoadICTrampoline, Load) \
+ TFH(LoadICTrampoline_Megamorphic, Load) \
TFH(KeyedLoadIC, LoadWithVector) \
+ TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \
TFH(KeyedLoadICTrampoline, Load) \
+ TFH(KeyedLoadICTrampoline_Megamorphic, Load) \
TFH(StoreGlobalIC, StoreGlobalWithVector) \
TFH(StoreGlobalICTrampoline, StoreGlobal) \
TFH(StoreIC, StoreWithVector) \
@@ -645,6 +649,13 @@ namespace internal {
TFH(LoadGlobalICTrampoline, LoadGlobal) \
TFH(LoadGlobalICInsideTypeofTrampoline, LoadGlobal) \
TFH(CloneObjectIC, CloneObjectWithVector) \
+ TFH(CloneObjectIC_Slow, CloneObjectWithVector) \
+ \
+ /* IterableToList */ \
+ /* ES #sec-iterabletolist */ \
+ TFS(IterableToList, kIterable, kIteratorFn) \
+ TFS(IterableToListWithSymbolLookup, kIterable) \
+ TFS(IterableToListMayPreserveHoles, kIterable, kIteratorFn) \
\
/* Map */ \
TFS(FindOrderedHashMapEntry, kTable, kKey) \
@@ -1013,6 +1024,7 @@ namespace internal {
TFJ(AtomicsAnd, 3, kReceiver, kArray, kIndex, kValue) \
TFJ(AtomicsOr, 3, kReceiver, kArray, kIndex, kValue) \
TFJ(AtomicsXor, 3, kReceiver, kArray, kIndex, kValue) \
+ CPP(AtomicsNotify) \
CPP(AtomicsIsLockFree) \
CPP(AtomicsWait) \
CPP(AtomicsWake) \
@@ -1111,6 +1123,7 @@ namespace internal {
/* StringIterator */ \
/* ES6 #sec-%stringiteratorprototype%.next */ \
TFJ(StringIteratorPrototypeNext, 0, kReceiver) \
+ TFS(StringToList, kSource) \
\
/* Symbol */ \
/* ES #sec-symbol-constructor */ \
@@ -1129,7 +1142,6 @@ namespace internal {
TFJ(SymbolPrototypeValueOf, 0, kReceiver) \
\
/* TypedArray */ \
- TFS(IterableToList, kIterable, kIteratorFn) \
TFS(TypedArrayInitialize, kHolder, kLength, kElementSize, kInitialize, \
kBufferConstructor) \
TFS(TypedArrayInitializeWithBuffer, kHolder, kLength, kBuffer, kElementSize, \
@@ -1208,11 +1220,11 @@ namespace internal {
/* Wasm */ \
ASM(WasmCompileLazy) \
TFC(WasmAllocateHeapNumber, AllocateHeapNumber, 1) \
- TFC(WasmArgumentsAdaptor, ArgumentAdaptor, 1) \
TFC(WasmCallJavaScript, CallTrampoline, 1) \
TFC(WasmGrowMemory, WasmGrowMemory, 1) \
TFC(WasmStackGuard, NoContext, 1) \
TFC(WasmToNumber, TypeConversion, 1) \
+ TFC(WasmThrow, WasmThrow, 1) \
TFS(ThrowWasmTrapUnreachable) \
TFS(ThrowWasmTrapMemOutOfBounds) \
TFS(ThrowWasmTrapUnalignedAccess) \
@@ -1282,13 +1294,10 @@ namespace internal {
/* See tc39.github.io/proposal-async-iteration/ */ \
/* #sec-%asyncfromsynciteratorprototype%-object) */ \
TFJ(AsyncFromSyncIteratorPrototypeNext, 1, kReceiver, kValue) \
- TFJ(AsyncFromSyncIteratorPrototypeNextOptimized, 1, kReceiver, kValue) \
/* #sec-%asyncfromsynciteratorprototype%.throw */ \
TFJ(AsyncFromSyncIteratorPrototypeThrow, 1, kReceiver, kReason) \
- TFJ(AsyncFromSyncIteratorPrototypeThrowOptimized, 1, kReceiver, kReason) \
/* #sec-%asyncfromsynciteratorprototype%.return */ \
TFJ(AsyncFromSyncIteratorPrototypeReturn, 1, kReceiver, kValue) \
- TFJ(AsyncFromSyncIteratorPrototypeReturnOptimized, 1, kReceiver, kValue) \
/* #sec-async-iterator-value-unwrap-functions */ \
TFJ(AsyncIteratorValueUnwrap, 1, kReceiver, kValue) \
\
@@ -1305,10 +1314,9 @@ namespace internal {
ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit) \
\
/* String helpers */ \
- TFS(StringAdd_CheckNone_NotTenured, kLeft, kRight) \
- TFS(StringAdd_CheckNone_Tenured, kLeft, kRight) \
- TFS(StringAdd_ConvertLeft_NotTenured, kLeft, kRight) \
- TFS(StringAdd_ConvertRight_NotTenured, kLeft, kRight) \
+ TFS(StringAdd_CheckNone, kLeft, kRight) \
+ TFS(StringAdd_ConvertLeft, kLeft, kRight) \
+ TFS(StringAdd_ConvertRight, kLeft, kRight) \
TFS(SubString, kString, kFrom, kTo) \
\
/* Miscellaneous */ \
@@ -1328,73 +1336,119 @@ namespace internal {
#define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
/* ecma402 #sec-intl.collator */ \
CPP(CollatorConstructor) \
- TFS(StringToLowerCaseIntl, kString) \
- /* ES #sec-string.prototype.tolowercase */ \
- TFJ(StringPrototypeToLowerCaseIntl, 0, kReceiver) \
- /* ES #sec-string.prototype.touppercase */ \
- CPP(StringPrototypeToUpperCaseIntl) \
- /* ES #sec-string.prototype.normalize */ \
- CPP(StringPrototypeNormalizeIntl) \
- /* ecma402 #sec-intl.numberformat.prototype.formattoparts */ \
- CPP(NumberFormatPrototypeFormatToParts) \
+ /* ecma402 #sec-collator-compare-functions */ \
+ CPP(CollatorInternalCompare) \
+ /* ecma402 #sec-intl.collator.prototype.compare */ \
+ CPP(CollatorPrototypeCompare) \
+ /* ecma402 #sec-intl.collator.supportedlocalesof */ \
+ CPP(CollatorSupportedLocalesOf) \
+ CPP(CollatorPrototypeResolvedOptions) \
+ /* ecma402 #sup-date.prototype.tolocaledatestring */ \
+ CPP(DatePrototypeToLocaleDateString) \
+ /* ecma402 #sup-date.prototype.tolocalestring */ \
+ CPP(DatePrototypeToLocaleString) \
+ /* ecma402 #sup-date.prototype.tolocaletimestring */ \
+ CPP(DatePrototypeToLocaleTimeString) \
+ /* ecma402 #sec-intl.datetimeformat */ \
+ CPP(DateTimeFormatConstructor) \
+ /* ecma402 #sec-datetime-format-functions */ \
+ CPP(DateTimeFormatInternalFormat) \
+ /* ecma402 #sec-intl.datetimeformat.prototype.format */ \
+ CPP(DateTimeFormatPrototypeFormat) \
/* ecma402 #sec-intl.datetimeformat.prototype.formattoparts */ \
CPP(DateTimeFormatPrototypeFormatToParts) \
- /* ecma402 #new proposal */ \
+ /* ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions */ \
+ CPP(DateTimeFormatPrototypeResolvedOptions) \
+ /* ecma402 #sec-intl.datetimeformat.supportedlocalesof */ \
+ CPP(DateTimeFormatSupportedLocalesOf) \
/* ecma402 #sec-intl-listformat-constructor */ \
CPP(ListFormatConstructor) \
- /* ecma402 #sec-intl.listformat.prototype.resolvedoptions */ \
- CPP(ListFormatPrototypeResolvedOptions) \
/* ecma402 #sec-intl-list-format.prototype.format */ \
TFJ(ListFormatPrototypeFormat, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ecma402 #sec-intl-list-format.prototype.formattoparts */ \
TFJ(ListFormatPrototypeFormatToParts, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ecma402 #sec-intl.listformat.prototype.resolvedoptions */ \
+ CPP(ListFormatPrototypeResolvedOptions) \
+ /* ecma402 #sec-intl.ListFormat.supportedlocalesof */ \
+ CPP(ListFormatSupportedLocalesOf) \
/* ecma402 #sec-intl-locale-constructor */ \
CPP(LocaleConstructor) \
- CPP(LocalePrototypeLanguage) \
- CPP(LocalePrototypeScript) \
- CPP(LocalePrototypeRegion) \
CPP(LocalePrototypeBaseName) \
CPP(LocalePrototypeCalendar) \
CPP(LocalePrototypeCaseFirst) \
CPP(LocalePrototypeCollation) \
CPP(LocalePrototypeHourCycle) \
- CPP(LocalePrototypeNumeric) \
- CPP(LocalePrototypeNumberingSystem) \
- CPP(LocalePrototypeToString) \
+ CPP(LocalePrototypeLanguage) \
/* ecma402 #sec-Intl.Locale.prototype.maximize */ \
CPP(LocalePrototypeMaximize) \
/* ecma402 #sec-Intl.Locale.prototype.minimize */ \
CPP(LocalePrototypeMinimize) \
+ CPP(LocalePrototypeNumeric) \
+ CPP(LocalePrototypeNumberingSystem) \
+ CPP(LocalePrototypeRegion) \
+ CPP(LocalePrototypeScript) \
+ CPP(LocalePrototypeToString) \
+ /* ecma402 #sec-intl.numberformat */ \
+ CPP(NumberFormatConstructor) \
/* ecma402 #sec-number-format-functions */ \
CPP(NumberFormatInternalFormatNumber) \
/* ecma402 #sec-intl.numberformat.prototype.format */ \
CPP(NumberFormatPrototypeFormatNumber) \
- /* ecma402 #sec-datetime-format-functions */ \
- CPP(DateTimeFormatInternalFormat) \
- /* ecma402 #sec-intl.datetimeformat.prototype.format */ \
- CPP(DateTimeFormatPrototypeFormat) \
+ /* ecma402 #sec-intl.numberformat.prototype.formattoparts */ \
+ CPP(NumberFormatPrototypeFormatToParts) \
+ /* ecma402 #sec-intl.numberformat.prototype.resolvedoptions */ \
+ CPP(NumberFormatPrototypeResolvedOptions) \
+ /* ecma402 #sec-intl.numberformat.supportedlocalesof */ \
+ CPP(NumberFormatSupportedLocalesOf) \
/* ecma402 #sec-intl.pluralrules */ \
CPP(PluralRulesConstructor) \
+ CPP(PluralRulesPrototypeResolvedOptions) \
+ /* ecma402 #sec-intl.pluralrules.prototype.select */ \
+ CPP(PluralRulesPrototypeSelect) \
+ /* ecma402 #sec-intl.pluralrules.supportedlocalesof */ \
+ CPP(PluralRulesSupportedLocalesOf) \
/* ecma402 #sec-intl.RelativeTimeFormat.constructor */ \
CPP(RelativeTimeFormatConstructor) \
- /* ecma402 #sec-intl.RelativeTimeFormat.prototype.resolvedOptions */ \
- CPP(RelativeTimeFormatPrototypeResolvedOptions) \
/* ecma402 #sec-intl.RelativeTimeFormat.prototype.format */ \
CPP(RelativeTimeFormatPrototypeFormat) \
/* ecma402 #sec-intl.RelativeTimeFormat.prototype.formatToParts */ \
CPP(RelativeTimeFormatPrototypeFormatToParts) \
+ /* ecma402 #sec-intl.RelativeTimeFormat.prototype.resolvedOptions */ \
+ CPP(RelativeTimeFormatPrototypeResolvedOptions) \
+ /* ecma402 #sec-intl.RelativeTimeFormat.supportedlocalesof */ \
+ CPP(RelativeTimeFormatSupportedLocalesOf) \
+ /* ES #sec-string.prototype.normalize */ \
+ CPP(StringPrototypeNormalizeIntl) \
/* ecma402 #sup-string.prototype.tolocalelowercase */ \
CPP(StringPrototypeToLocaleLowerCase) \
/* ecma402 #sup-string.prototype.tolocaleuppercase */ \
CPP(StringPrototypeToLocaleUpperCase) \
- /* ecma402 #sec-intl.collator.prototype.compare */ \
- CPP(CollatorPrototypeCompare) \
- /* ecma 402 #sec-collator-compare-functions*/ \
- CPP(CollatorInternalCompare) \
- CPP(BreakIteratorInternalAdoptText) \
- CPP(BreakIteratorPrototypeAdoptText)
+ /* ES #sec-string.prototype.tolowercase */ \
+ TFJ(StringPrototypeToLowerCaseIntl, 0, kReceiver) \
+ /* ES #sec-string.prototype.touppercase */ \
+ CPP(StringPrototypeToUpperCaseIntl) \
+ TFS(StringToLowerCaseIntl, kString) \
+ /* ecma402 #sec-Intl.Segmenter */ \
+ CPP(SegmenterConstructor) \
+ /* ecma402 #sec-Intl.Segmenter.prototype.resolvedOptions */ \
+ CPP(SegmenterPrototypeResolvedOptions) \
+ /* ecma402 #sec-Intl.Segmenter.supportedLocalesOf */ \
+ CPP(SegmenterSupportedLocalesOf) \
+ CPP(V8BreakIteratorConstructor) \
+ CPP(V8BreakIteratorInternalAdoptText) \
+ CPP(V8BreakIteratorInternalBreakType) \
+ CPP(V8BreakIteratorInternalCurrent) \
+ CPP(V8BreakIteratorInternalFirst) \
+ CPP(V8BreakIteratorInternalNext) \
+ CPP(V8BreakIteratorPrototypeAdoptText) \
+ CPP(V8BreakIteratorPrototypeBreakType) \
+ CPP(V8BreakIteratorPrototypeCurrent) \
+ CPP(V8BreakIteratorPrototypeFirst) \
+ CPP(V8BreakIteratorPrototypeNext) \
+ CPP(V8BreakIteratorPrototypeResolvedOptions) \
+ CPP(V8BreakIteratorSupportedLocalesOf)
#else
#define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
/* no-op fallback version */ \
@@ -1409,16 +1463,10 @@ namespace internal {
CPP(StringPrototypeToUpperCase)
#endif // V8_INTL_SUPPORT
-#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
-#define BUILTIN_LIST_BYTECODE_HANDLERS(BCH) BYTECODE_LIST(BCH)
-#else
-#define BUILTIN_LIST_BYTECODE_HANDLERS(BCH)
-#endif // V8_EMBEDDED_BYTECODE_HANDLERS
-
-#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM) \
- BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
+#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, BCH, DLH, ASM) \
+ BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, DLH, ASM) \
+ BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
BUILTIN_LIST_BYTECODE_HANDLERS(BCH)
// The exceptions thrown in the following builtins are caught
@@ -1426,14 +1474,9 @@ namespace internal {
#define BUILTIN_PROMISE_REJECTION_PREDICTION_LIST(V) \
V(AsyncFromSyncIteratorPrototypeNext) \
V(AsyncFromSyncIteratorPrototypeReturn) \
- V(AsyncFromSyncIteratorPrototypeNextOptimized) \
- V(AsyncFromSyncIteratorPrototypeThrowOptimized) \
- V(AsyncFromSyncIteratorPrototypeReturnOptimized) \
V(AsyncFromSyncIteratorPrototypeThrow) \
V(AsyncFunctionAwaitCaught) \
- V(AsyncFunctionAwaitCaughtOptimized) \
V(AsyncFunctionAwaitUncaught) \
- V(AsyncFunctionAwaitUncaughtOptimized) \
V(AsyncGeneratorResolve) \
V(AsyncGeneratorAwaitCaught) \
V(AsyncGeneratorAwaitUncaught) \
@@ -1449,11 +1492,11 @@ namespace internal {
#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \
FOREACH_WASM_TRAPREASON(VTRAP) \
V(WasmAllocateHeapNumber) \
- V(WasmArgumentsAdaptor) \
V(WasmCallJavaScript) \
V(WasmGrowMemory) \
V(WasmStackGuard) \
V(WasmToNumber) \
+ V(WasmThrow) \
V(DoubleToI)
// The exceptions thrown in the following builtins are caught internally and will
@@ -1464,23 +1507,27 @@ namespace internal {
#define BUILTIN_LIST_C(V) \
BUILTIN_LIST(V, V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_A(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V)
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+ V)
#define BUILTIN_LIST_TFS(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN)
-#define BUILTIN_LIST_TFJ(V) \
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+#define BUILTIN_LIST_TFJ(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN)
-#define BUILTIN_LIST_TFC(V) \
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+#define BUILTIN_LIST_TFC(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN)
} // namespace internal
} // namespace v8
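Editor's note: the macro churn above is mechanical. BUILTIN_LIST takes one callback per builtin kind, and each BUILTIN_LIST_* projection passes a real macro for the kind it cares about and IGNORE_BUILTIN for everything else, so adding the DLH kind forces an extra IGNORE_BUILTIN argument at every call site. A toy version of the X-macro selection pattern:

    #include <cstdio>

    #define IGNORE_BUILTIN(...)                  // swallows entries of other kinds
    #define MY_LIST(CPP, ASM) CPP(Foo) ASM(Bar)  // two builtins of two kinds

    #define PRINT_CPP(name) std::printf("CPP builtin: %s\n", #name);

    int main() {
      MY_LIST(PRINT_CPP, IGNORE_BUILTIN)  // expands only the CPP entries
    }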
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index 97b85bc295..2961a61f63 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -43,7 +43,8 @@ namespace internal {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DEFINE_TFJ_INTERFACE_DESCRIPTOR,
DEFINE_TFC_INTERFACE_DESCRIPTOR, DEFINE_TFS_INTERFACE_DESCRIPTOR,
- DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN, IGNORE_BUILTIN,
+ IGNORE_BUILTIN)
#undef DEFINE_TFJ_INTERFACE_DESCRIPTOR
#undef DEFINE_TFC_INTERFACE_DESCRIPTOR
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index eb4ace31e4..2f3c876852 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -62,7 +62,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
const int length_index = JSFunction::kLengthDescriptorIndex;
TNode<Name> maybe_length = CAST(LoadWeakFixedArrayElement(
descriptors, DescriptorArray::ToKeyIndex(length_index)));
- GotoIf(WordNotEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex)),
+ GotoIf(WordNotEqual(maybe_length, LoadRoot(RootIndex::klength_string)),
&slow);
TNode<Object> maybe_length_accessor = CAST(LoadWeakFixedArrayElement(
@@ -74,8 +74,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
const int name_index = JSFunction::kNameDescriptorIndex;
TNode<Name> maybe_name = CAST(LoadWeakFixedArrayElement(
descriptors, DescriptorArray::ToKeyIndex(name_index)));
- GotoIf(WordNotEqual(maybe_name, LoadRoot(Heap::kname_stringRootIndex)),
- &slow);
+ GotoIf(WordNotEqual(maybe_name, LoadRoot(RootIndex::kname_string)), &slow);
TNode<Object> maybe_name_accessor = CAST(LoadWeakFixedArrayElement(
descriptors, DescriptorArray::ToValueIndex(name_index)));
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index c97bd8a587..43a3853715 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -41,11 +41,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
IncrementalStringBuilder builder(isolate);
builder.AppendCharacter('(');
builder.AppendCString(token);
- if (FLAG_harmony_function_tostring) {
- builder.AppendCString(" anonymous(");
- } else {
- builder.AppendCharacter('(');
- }
+ builder.AppendCString(" anonymous(");
bool parenthesis_in_arg_string = false;
if (argc > 1) {
for (int i = 1; i < argc; ++i) {
@@ -55,31 +51,10 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
isolate, param, Object::ToString(isolate, args.at(i)), Object);
param = String::Flatten(isolate, param);
builder.AppendString(param);
- if (!FLAG_harmony_function_tostring) {
- // If the formal parameters string include ) - an illegal
- // character - it may make the combined function expression
- // compile. We avoid this problem by checking for this early on.
- DisallowHeapAllocation no_gc; // Ensure vectors stay valid.
- String::FlatContent param_content = param->GetFlatContent();
- for (int i = 0, length = param->length(); i < length; ++i) {
- if (param_content.Get(i) == ')') {
- parenthesis_in_arg_string = true;
- break;
- }
- }
- }
}
- if (!FLAG_harmony_function_tostring) {
- // If the formal parameters include an unbalanced block comment, the
- // function must be rejected. Since JavaScript does not allow nested
- // comments we can include a trailing block comment to catch this.
- builder.AppendCString("\n/*``*/");
- }
- }
- if (FLAG_harmony_function_tostring) {
- builder.AppendCharacter('\n');
- parameters_end_pos = builder.Length();
}
+ builder.AppendCharacter('\n');
+ parameters_end_pos = builder.Length();
builder.AppendCString(") {\n");
if (argc > 0) {
Handle<String> body;
@@ -303,7 +278,7 @@ BUILTIN(FunctionPrototypeToString) {
}
// With the revised toString behavior, all callable objects are valid
// receivers for this method.
- if (FLAG_harmony_function_tostring && receiver->IsJSReceiver() &&
+ if (receiver->IsJSReceiver() &&
JSReceiver::cast(*receiver)->map()->is_callable()) {
return ReadOnlyRoots(isolate).function_native_code_string();
}
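
With FLAG_harmony_function_tostring retired above, CreateDynamicFunction unconditionally emits the spec-mandated source layout for dynamic functions. A minimal plain-C++ sketch of the string it assembles (illustrative only, outside the builtins machinery; the comma separator and the trailing "\n})" are not visible in this hunk and are assumptions based on the surrounding builder calls):

#include <string>
#include <vector>

// new Function("a", "b", "return a + b") is assumed to yield
//   "(function anonymous(a,b\n) {\nreturn a + b\n})"
// with parameters_end_pos recorded just after the '\n' that closes the
// parameter list (builder.Length() above).
std::string BuildDynamicFunctionSource(const std::string& token,
                                       const std::vector<std::string>& params,
                                       const std::string& body,
                                       size_t* parameters_end_pos) {
  std::string source = "(" + token + " anonymous(";
  for (size_t i = 0; i < params.size(); ++i) {
    if (i > 0) source += ",";
    source += params[i];
  }
  source += "\n";
  *parameters_end_pos = source.size();
  source += ") {\n" + body + "\n})";
  return source;
}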
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 29422ab72c..8b5dc182cf 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -28,7 +28,7 @@ TF_BUILTIN(KeyedLoadIC_Slow, CodeStubAssembler) {
Node* name = Parameter(Descriptor::kName);
Node* context = Parameter(Descriptor::kContext);
- TailCallRuntime(Runtime::kKeyedGetProperty, context, receiver, name);
+ TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
}
void Builtins::Generate_KeyedStoreIC_Megamorphic(
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index bbfabc7a0d..94d75a8f32 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -21,13 +21,16 @@ namespace internal {
}
IC_BUILTIN(LoadIC)
+IC_BUILTIN(LoadIC_Megamorphic)
IC_BUILTIN(LoadIC_Noninlined)
IC_BUILTIN(LoadIC_Uninitialized)
-IC_BUILTIN(KeyedLoadIC)
IC_BUILTIN(LoadICTrampoline)
-IC_BUILTIN(KeyedLoadICTrampoline)
+IC_BUILTIN(LoadICTrampoline_Megamorphic)
+IC_BUILTIN(KeyedLoadIC)
IC_BUILTIN(KeyedLoadIC_Megamorphic)
IC_BUILTIN(KeyedLoadIC_PolymorphicName)
+IC_BUILTIN(KeyedLoadICTrampoline)
+IC_BUILTIN(KeyedLoadICTrampoline_Megamorphic)
IC_BUILTIN(StoreGlobalIC)
IC_BUILTIN(StoreGlobalICTrampoline)
IC_BUILTIN(StoreIC)
@@ -36,6 +39,7 @@ IC_BUILTIN(KeyedStoreIC)
IC_BUILTIN(KeyedStoreICTrampoline)
IC_BUILTIN(StoreInArrayLiteralIC)
IC_BUILTIN(CloneObjectIC)
+IC_BUILTIN(CloneObjectIC_Slow)
IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF)
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 7ff88c5a53..44a18099bf 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -24,10 +24,16 @@ using TNode = compiler::TNode<T>;
// Interrupt and stack checks.
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
masm->TailCallRuntime(Runtime::kInterrupt);
}
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
masm->TailCallRuntime(Runtime::kStackGuard);
}
@@ -350,18 +356,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
};
TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
- Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
- Node* slot = Parameter(Descriptor::kSlot);
- Node* isolate = Parameter(Descriptor::kIsolate);
- Node* remembered_set = Parameter(Descriptor::kRememberedSet);
- Node* fp_mode = Parameter(Descriptor::kFPMode);
-
- Node* value = Load(MachineType::Pointer(), slot);
-
Label generational_wb(this);
Label incremental_wb(this);
Label exit(this);
+ Node* remembered_set = Parameter(Descriptor::kRememberedSet);
Branch(ShouldEmitRememberSet(remembered_set), &generational_wb,
&incremental_wb);
@@ -369,40 +368,58 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
{
Label test_old_to_new_flags(this);
Label store_buffer_exit(this), store_buffer_incremental_wb(this);
+
// When incremental marking is not on, we skip cross generation pointer
// checking here, because there are checks for
// `kPointersFromHereAreInterestingMask` and
// `kPointersToHereAreInterestingMask` in
// `src/compiler/<arch>/code-generator-<arch>.cc` before calling this stub,
// which serves as the cross generation checking.
+ Node* slot = Parameter(Descriptor::kSlot);
Branch(IsMarking(), &test_old_to_new_flags, &store_buffer_exit);
BIND(&test_old_to_new_flags);
{
+ Node* value = Load(MachineType::Pointer(), slot);
+
// TODO(albertnetymk): Try to cache the page flag for value and object,
// instead of calling IsPageFlagSet each time.
Node* value_in_new_space =
IsPageFlagSet(value, MemoryChunk::kIsInNewSpaceMask);
GotoIfNot(value_in_new_space, &incremental_wb);
+ Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
Node* object_in_new_space =
IsPageFlagSet(object, MemoryChunk::kIsInNewSpaceMask);
- GotoIf(object_in_new_space, &incremental_wb);
-
- Goto(&store_buffer_incremental_wb);
+ Branch(object_in_new_space, &incremental_wb,
+ &store_buffer_incremental_wb);
}
BIND(&store_buffer_exit);
- { InsertToStoreBufferAndGoto(isolate, slot, fp_mode, &exit); }
+ {
+ Node* isolate_constant =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ Node* fp_mode = Parameter(Descriptor::kFPMode);
+ InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, &exit);
+ }
BIND(&store_buffer_incremental_wb);
- { InsertToStoreBufferAndGoto(isolate, slot, fp_mode, &incremental_wb); }
+ {
+ Node* isolate_constant =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ Node* fp_mode = Parameter(Descriptor::kFPMode);
+ InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode,
+ &incremental_wb);
+ }
}
BIND(&incremental_wb);
{
Label call_incremental_wb(this);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* value = Load(MachineType::Pointer(), slot);
+
// There are two cases we need to call incremental write barrier.
// 1) value_is_white
GotoIf(IsWhite(value), &call_incremental_wb);
@@ -411,20 +428,23 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
// is_compacting = true when is_marking = true
GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
&exit);
- GotoIf(
- IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
- &exit);
- Goto(&call_incremental_wb);
+ Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ Branch(
+ IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
+ &exit, &call_incremental_wb);
BIND(&call_incremental_wb);
{
Node* function = ExternalConstant(
ExternalReference::incremental_marking_record_write_function());
+ Node* isolate_constant =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ Node* fp_mode = Parameter(Descriptor::kFPMode);
CallCFunction3WithCallerSavedRegistersMode(
MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Pointer(), function, object, slot, isolate, fp_mode,
- &exit);
+ MachineType::Pointer(), function, object, slot, isolate_constant,
+ fp_mode, &exit);
}
}
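
The restructured RecordWrite above defers each parameter load into the branch that uses it. A hedged C++ sketch of the control flow the stub builds (stand-in helpers, not the CSA graph):

bool ShouldEmitRememberSetSketch(int remembered_set);
bool IsMarkingSketch();
bool InNewSpace(const void*);
bool IsWhiteSketch(const void*);
bool OnEvacuationCandidatePage(const void*);
bool SkipsEvacuationSlotRecording(const void*);
void InsertIntoStoreBuffer(void** slot);
void IncrementalMarkingRecordWriteCall(void* object, void** slot);

void RecordWriteSketch(void* object, void** slot, int remembered_set) {
  if (ShouldEmitRememberSetSketch(remembered_set)) {  // generational_wb
    if (!IsMarkingSketch()) {                         // store_buffer_exit
      InsertIntoStoreBuffer(slot);
      return;
    }
    void* value = *slot;                              // test_old_to_new_flags
    if (InNewSpace(value) && !InNewSpace(object)) {
      InsertIntoStoreBuffer(slot);     // store_buffer_incremental_wb
    }
  }
  void* value = *slot;                                // incremental_wb
  if (!IsWhiteSketch(value)) {
    if (!OnEvacuationCandidatePage(value)) return;     // exit
    if (SkipsEvacuationSlotRecording(object)) return;  // exit
  }
  IncrementalMarkingRecordWriteCall(object, slot);    // call_incremental_wb
}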
@@ -454,7 +474,7 @@ class DeletePropertyBaseAssembler : public AccessorAssembler {
dont_delete);
// Overwrite the entry itself (see NameDictionary::SetEntry).
TNode<HeapObject> filler = TheHoleConstant();
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kTheHoleValueRootIndex));
+ DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kTheHoleValue));
StoreFixedArrayElement(properties, key_index, filler, SKIP_WRITE_BARRIER);
StoreValueByKeyIndex<NameDictionary>(properties, key_index, filler,
SKIP_WRITE_BARRIER);
@@ -609,11 +629,14 @@ class InternalBuiltinsAssembler : public CodeStubAssembler {
explicit InternalBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- TNode<IntPtrT> GetPendingMicrotaskCount();
- void SetPendingMicrotaskCount(TNode<IntPtrT> count);
-
- TNode<FixedArray> GetMicrotaskQueue();
- void SetMicrotaskQueue(TNode<FixedArray> queue);
+ TNode<MicrotaskQueue> GetDefaultMicrotaskQueue();
+ TNode<IntPtrT> GetPendingMicrotaskCount(
+ TNode<MicrotaskQueue> microtask_queue);
+ void SetPendingMicrotaskCount(TNode<MicrotaskQueue> microtask_queue,
+ TNode<IntPtrT> new_num_tasks);
+ TNode<FixedArray> GetQueuedMicrotasks(TNode<MicrotaskQueue> microtask_queue);
+ void SetQueuedMicrotasks(TNode<MicrotaskQueue> microtask_queue,
+ TNode<FixedArray> new_queue);
TNode<Context> GetCurrentContext();
void SetCurrentContext(TNode<Context> context);
@@ -700,37 +723,34 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, InternalBuiltinsAssembler) {
GenerateAdaptorWithExitFrameType<Descriptor>(Builtins::BUILTIN_EXIT);
}
-TNode<IntPtrT> InternalBuiltinsAssembler::GetPendingMicrotaskCount() {
- auto ref = ExternalReference::pending_microtask_count_address(isolate());
- if (kIntSize == 8) {
- return TNode<IntPtrT>::UncheckedCast(
- Load(MachineType::Int64(), ExternalConstant(ref)));
- } else {
- Node* const value = Load(MachineType::Int32(), ExternalConstant(ref));
- return ChangeInt32ToIntPtr(value);
- }
+TNode<MicrotaskQueue> InternalBuiltinsAssembler::GetDefaultMicrotaskQueue() {
+ return TNode<MicrotaskQueue>::UncheckedCast(
+ LoadRoot(RootIndex::kDefaultMicrotaskQueue));
}
-void InternalBuiltinsAssembler::SetPendingMicrotaskCount(TNode<IntPtrT> count) {
- auto ref = ExternalReference::pending_microtask_count_address(isolate());
- auto rep = kIntSize == 8 ? MachineRepresentation::kWord64
- : MachineRepresentation::kWord32;
- if (kIntSize == 4 && kPointerSize == 8) {
- Node* const truncated_count =
- TruncateInt64ToInt32(TNode<Int64T>::UncheckedCast(count));
- StoreNoWriteBarrier(rep, ExternalConstant(ref), truncated_count);
- } else {
- StoreNoWriteBarrier(rep, ExternalConstant(ref), count);
- }
+TNode<IntPtrT> InternalBuiltinsAssembler::GetPendingMicrotaskCount(
+ TNode<MicrotaskQueue> microtask_queue) {
+ TNode<IntPtrT> result = LoadAndUntagObjectField(
+ microtask_queue, MicrotaskQueue::kPendingMicrotaskCountOffset);
+ return result;
}
-TNode<FixedArray> InternalBuiltinsAssembler::GetMicrotaskQueue() {
- return TNode<FixedArray>::UncheckedCast(
- LoadRoot(Heap::kMicrotaskQueueRootIndex));
+void InternalBuiltinsAssembler::SetPendingMicrotaskCount(
+ TNode<MicrotaskQueue> microtask_queue, TNode<IntPtrT> new_num_tasks) {
+ StoreObjectField(microtask_queue,
+ MicrotaskQueue::kPendingMicrotaskCountOffset,
+ SmiFromIntPtr(new_num_tasks));
}
-void InternalBuiltinsAssembler::SetMicrotaskQueue(TNode<FixedArray> queue) {
- StoreRoot(Heap::kMicrotaskQueueRootIndex, queue);
+TNode<FixedArray> InternalBuiltinsAssembler::GetQueuedMicrotasks(
+ TNode<MicrotaskQueue> microtask_queue) {
+ return LoadObjectField<FixedArray>(microtask_queue,
+ MicrotaskQueue::kQueueOffset);
+}
+
+void InternalBuiltinsAssembler::SetQueuedMicrotasks(
+ TNode<MicrotaskQueue> microtask_queue, TNode<FixedArray> new_queue) {
+ StoreObjectField(microtask_queue, MicrotaskQueue::kQueueOffset, new_queue);
}
TNode<Context> InternalBuiltinsAssembler::GetCurrentContext() {
@@ -819,9 +839,10 @@ void InternalBuiltinsAssembler::RunPromiseHook(
TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
Node* microtask = Parameter(Descriptor::kMicrotask);
- TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount();
+ TNode<MicrotaskQueue> microtask_queue = GetDefaultMicrotaskQueue();
+ TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount(microtask_queue);
TNode<IntPtrT> new_num_tasks = IntPtrAdd(num_tasks, IntPtrConstant(1));
- TNode<FixedArray> queue = GetMicrotaskQueue();
+ TNode<FixedArray> queue = GetQueuedMicrotasks(microtask_queue);
TNode<IntPtrT> queue_length = LoadAndUntagFixedArrayBaseLength(queue);
Label if_append(this), if_grow(this), done(this);
@@ -851,8 +872,8 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
StoreFixedArrayElement(new_queue, num_tasks, microtask,
SKIP_WRITE_BARRIER);
FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
- new_queue_length, Heap::kUndefinedValueRootIndex);
- SetMicrotaskQueue(new_queue);
+ new_queue_length, RootIndex::kUndefinedValue);
+ SetQueuedMicrotasks(microtask_queue, new_queue);
Goto(&done);
}
@@ -865,8 +886,8 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks);
StoreFixedArrayElement(new_queue, num_tasks, microtask);
FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
- new_queue_length, Heap::kUndefinedValueRootIndex);
- SetMicrotaskQueue(new_queue);
+ new_queue_length, RootIndex::kUndefinedValue);
+ SetQueuedMicrotasks(microtask_queue, new_queue);
Goto(&done);
}
}
@@ -878,13 +899,14 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
}
BIND(&done);
- SetPendingMicrotaskCount(new_num_tasks);
+ SetPendingMicrotaskCount(microtask_queue, new_num_tasks);
Return(UndefinedConstant());
}
TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
// Load the current context from the isolate.
TNode<Context> current_context = GetCurrentContext();
+ TNode<MicrotaskQueue> microtask_queue = GetDefaultMicrotaskQueue();
Label init_queue_loop(this);
Goto(&init_queue_loop);
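
The microtask plumbing above moves from isolate-global external references to fields on a MicrotaskQueue heap object. A rough C++ analogue of the EnqueueMicrotask fast/grow paths (types and growth factor are illustrative, not V8's capacity policy):

#include <cstddef>
#include <vector>

struct MicrotaskQueueSketch {
  std::vector<void*> queue;  // FixedArray backing store (kQueueOffset)
  size_t pending = 0;        // kPendingMicrotaskCountOffset

  void Enqueue(void* microtask) {
    if (pending == queue.size()) {
      // if_grow: allocate a larger array, copy the old tasks across, and
      // pad the tail with "undefined" (nullptr here).
      queue.resize(queue.empty() ? 8 : queue.size() * 2, nullptr);
    }
    queue[pending++] = microtask;  // if_append + SetPendingMicrotaskCount
  }
};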
@@ -893,17 +915,17 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
TVARIABLE(IntPtrT, index, IntPtrConstant(0));
Label loop(this, &index), loop_next(this);
- TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount();
+ TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount(microtask_queue);
ReturnIf(IntPtrEqual(num_tasks, IntPtrConstant(0)), UndefinedConstant());
- TNode<FixedArray> queue = GetMicrotaskQueue();
+ TNode<FixedArray> queue = GetQueuedMicrotasks(microtask_queue);
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
LoadAndUntagFixedArrayBaseLength(queue), num_tasks));
CSA_ASSERT(this, IntPtrGreaterThan(num_tasks, IntPtrConstant(0)));
- SetPendingMicrotaskCount(IntPtrConstant(0));
- SetMicrotaskQueue(EmptyFixedArrayConstant());
+ SetQueuedMicrotasks(microtask_queue, EmptyFixedArrayConstant());
+ SetPendingMicrotaskCount(microtask_queue, IntPtrConstant(0));
Goto(&loop);
BIND(&loop);
@@ -1099,20 +1121,20 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
}
TF_BUILTIN(AllocateInNewSpace, CodeStubAssembler) {
- TNode<Int32T> requested_size =
- UncheckedCast<Int32T>(Parameter(Descriptor::kRequestedSize));
+ TNode<IntPtrT> requested_size =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
TailCallRuntime(Runtime::kAllocateInNewSpace, NoContextConstant(),
- SmiFromInt32(requested_size));
+ SmiFromIntPtr(requested_size));
}
TF_BUILTIN(AllocateInOldSpace, CodeStubAssembler) {
- TNode<Int32T> requested_size =
- UncheckedCast<Int32T>(Parameter(Descriptor::kRequestedSize));
+ TNode<IntPtrT> requested_size =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
int flags = AllocateTargetSpace::encode(OLD_SPACE);
TailCallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
- SmiFromInt32(requested_size), SmiConstant(flags));
+ SmiFromIntPtr(requested_size), SmiConstant(flags));
}
TF_BUILTIN(Abort, CodeStubAssembler) {
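
AllocateInNewSpace and AllocateInOldSpace now take the requested size as a pointer-width IntPtrT and tag it with SmiFromIntPtr rather than SmiFromInt32. For context, a hedged sketch of the two classic Smi shapes (the general scheme, not V8's exact helpers):

#include <cstdint>

intptr_t SmiFromIntPtrSketch(intptr_t value) {
#if INTPTR_MAX == INT64_MAX
  return value << 32;  // 64-bit: 32-bit payload lives in the upper half
#else
  return value << 1;   // 32-bit: payload shifted past the low tag bit
#endif
}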
@@ -1178,6 +1200,9 @@ void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
}
void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
// CallApiGetterStub only exists as a stub to avoid duplicating code between
// here and code-stubs-<arch>.cc. For example, see CallApiFunctionAndReturn.
// Here we abuse the instantiated stub to generate code.
@@ -1186,6 +1211,9 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
}
void Builtins::Generate_CallApiCallback_Argc0(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
// The common variants of CallApiCallbackStub (i.e. all that are embedded into
// the snapshot) are generated as builtins. The rest remain available as code
// stubs. Here we abuse the instantiated stub to generate code and avoid
@@ -1196,6 +1224,9 @@ void Builtins::Generate_CallApiCallback_Argc0(MacroAssembler* masm) {
}
void Builtins::Generate_CallApiCallback_Argc1(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
// The common variants of CallApiCallbackStub (i.e. all that are embedded into
// the snapshot) are generated as builtins. The rest remain available as code
// stubs. Here we abuse the instantiated stub to generate code and avoid
@@ -1207,27 +1238,23 @@ void Builtins::Generate_CallApiCallback_Argc1(MacroAssembler* masm) {
// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
- Label call_runtime(this, Label::kDeferred), return_undefined(this), end(this);
-
Node* object = Parameter(Descriptor::kObject);
Node* key = Parameter(Descriptor::kKey);
Node* context = Parameter(Descriptor::kContext);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label if_notfound(this), if_proxy(this, Label::kDeferred),
+ if_slow(this, Label::kDeferred);
CodeStubAssembler::LookupInHolder lookup_property_in_holder =
- [=, &var_result, &end](Node* receiver, Node* holder, Node* holder_map,
- Node* holder_instance_type, Node* unique_name,
- Label* next_holder, Label* if_bailout) {
+ [=](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* unique_name, Label* next_holder,
+ Label* if_bailout) {
VARIABLE(var_value, MachineRepresentation::kTagged);
Label if_found(this);
TryGetOwnProperty(context, receiver, holder, holder_map,
holder_instance_type, unique_name, &if_found,
&var_value, next_holder, if_bailout);
BIND(&if_found);
- {
- var_result.Bind(var_value.value());
- Goto(&end);
- }
+ Return(var_value.value());
};
CodeStubAssembler::LookupInHolder lookup_element_in_holder =
@@ -1240,23 +1267,26 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
};
TryPrototypeChainLookup(object, key, lookup_property_in_holder,
- lookup_element_in_holder, &return_undefined,
- &call_runtime);
+ lookup_element_in_holder, &if_notfound, &if_slow,
+ &if_proxy);
- BIND(&return_undefined);
- {
- var_result.Bind(UndefinedConstant());
- Goto(&end);
- }
+ BIND(&if_notfound);
+ Return(UndefinedConstant());
+
+ BIND(&if_slow);
+ TailCallRuntime(Runtime::kGetProperty, context, object, key);
- BIND(&call_runtime);
+ BIND(&if_proxy);
{
- var_result.Bind(CallRuntime(Runtime::kGetProperty, context, object, key));
- Goto(&end);
+ // Convert the {key} to a Name first.
+ Node* name = CallBuiltin(Builtins::kToName, context, key);
+
+ // The {object} is a JSProxy instance, look up the {name} on it, passing
+ // {object} both as receiver and holder. If {name} is absent we can safely
+ // return undefined from here.
+ TailCallBuiltin(Builtins::kProxyGetProperty, context, object, name, object,
+ SmiConstant(OnNonExistent::kReturnUndefined));
}
-
- BIND(&end);
- Return(var_result.value());
}
// ES6 [[Set]] operation.
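
The GetProperty rewrite above drops the result variable in favor of early returns and tail calls, and routes proxies through ProxyGetProperty with the key first converted to a Name. A hedged sketch of the resulting dispatch (the enum, helpers, and Object type are stand-ins; the real code tail-calls rather than returning through a switch):

struct Obj;
enum class Lookup { kFound, kNotFound, kProxy, kSlow };
enum class OnNonExistentSketch { kReturnUndefined };

Lookup TryPrototypeChainLookupSketch(Obj* object, Obj* key, Obj** value);
Obj* UndefinedSketch();
Obj* ToNameSketch(Obj* key);
Obj* ProxyGetPropertySketch(Obj* holder, Obj* name, Obj* receiver,
                            OnNonExistentSketch mode);
Obj* RuntimeGetPropertySketch(Obj* object, Obj* key);

Obj* GetPropertySketch(Obj* object, Obj* key) {
  Obj* value = nullptr;
  switch (TryPrototypeChainLookupSketch(object, key, &value)) {
    case Lookup::kFound:    return value;
    case Lookup::kNotFound: return UndefinedSketch();         // if_notfound
    case Lookup::kProxy:                                      // if_proxy
      return ProxyGetPropertySketch(object, ToNameSketch(key), object,
                                    OnNonExistentSketch::kReturnUndefined);
    case Lookup::kSlow:                                       // if_slow
      return RuntimeGetPropertySketch(object, key);
  }
  return nullptr;  // unreachable
}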
diff --git a/deps/v8/src/builtins/builtins-interpreter-gen.cc b/deps/v8/src/builtins/builtins-interpreter-gen.cc
index f0d5160330..fa1684c54b 100644
--- a/deps/v8/src/builtins/builtins-interpreter-gen.cc
+++ b/deps/v8/src/builtins/builtins-interpreter-gen.cc
@@ -10,12 +10,18 @@ namespace v8 {
namespace internal {
void Builtins::Generate_InterpreterPushArgsThenCall(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
return Generate_InterpreterPushArgsThenCallImpl(
masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
return Generate_InterpreterPushArgsThenCallImpl(
masm, ConvertReceiverMode::kNullOrUndefined,
InterpreterPushArgsMode::kOther);
@@ -23,24 +29,36 @@ void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
void Builtins::Generate_InterpreterPushArgsThenCallWithFinalSpread(
MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
return Generate_InterpreterPushArgsThenCallImpl(
masm, ConvertReceiverMode::kAny,
InterpreterPushArgsMode::kWithFinalSpread);
}
void Builtins::Generate_InterpreterPushArgsThenConstruct(MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushArgsThenConstructWithFinalSpread(
MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kWithFinalSpread);
}
void Builtins::Generate_InterpreterPushArgsThenConstructArrayFunction(
MacroAssembler* masm) {
+#ifdef V8_TARGET_ARCH_IA32
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kArrayFunction);
}
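
The repeated #ifdef V8_TARGET_ARCH_IA32 guards above wrap code generation in an Assembler::SupportsRootRegisterScope. A hypothetical sketch of such an RAII assertion scope (AssemblerState and the flag are stand-ins, not V8's definition): while alive it records that root-register-relative addressing is allowed, restoring the previous state on destruction.

struct AssemblerState {
  bool supports_root_register = false;
};

class SupportsRootRegisterScopeSketch {
 public:
  explicit SupportsRootRegisterScopeSketch(AssemblerState* state)
      : state_(state), previous_(state->supports_root_register) {
    state_->supports_root_register = true;
  }
  ~SupportsRootRegisterScopeSketch() {
    state_->supports_root_register = previous_;
  }

 private:
  AssemblerState* state_;
  bool previous_;
};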
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 77e2e81a6c..49405141c1 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -41,8 +41,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
Label call_c(this), return_string(this), runtime(this, Label::kDeferred);
// Early exit on empty strings.
- TNode<Smi> const length = LoadStringLengthAsSmi(string);
- GotoIf(SmiEqual(length, SmiConstant(0)), &return_string);
+ TNode<Uint32T> const length = LoadStringLengthAsWord32(string);
+ GotoIf(Word32Equal(length, Uint32Constant(0)), &return_string);
// Unpack strings if possible, and bail to runtime unless we get a one-byte
// flat string.
@@ -60,7 +60,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
Node* const dst = AllocateSeqOneByteString(context, length);
const int kMaxShortStringLength = 24; // Determined empirically.
- GotoIf(SmiGreaterThan(length, SmiConstant(kMaxShortStringLength)), &call_c);
+ GotoIf(Uint32GreaterThan(length, Uint32Constant(kMaxShortStringLength)),
+ &call_c);
{
Node* const dst_ptr = PointerToSeqStringData(dst);
@@ -69,7 +70,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
Node* const start_address = to_direct.PointerToData(&call_c);
TNode<IntPtrT> const end_address =
- Signed(IntPtrAdd(start_address, SmiUntag(length)));
+ Signed(IntPtrAdd(start_address, ChangeUint32ToWord(length)));
Node* const to_lower_table_addr =
ExternalConstant(ExternalReference::intl_to_latin1_lower_table());
@@ -177,10 +178,8 @@ void IntlBuiltinsAssembler::ListFormatCommon(TNode<Context> context,
BIND(&has_list);
{
// 5. Let x be ? IterableToList(list).
- IteratorBuiltinsAssembler iterator_assembler(state());
- // TODO(adamk): Consider exposing IterableToList as a buitin and calling
- // it from here instead of inlining the operation.
- TNode<JSArray> x = iterator_assembler.IterableToList(context, list);
+ TNode<Object> x =
+ CallBuiltin(Builtins::kIterableToListWithSymbolLookup, context, list);
// 6. Return ? FormatList(lf, x).
args.PopAndReturn(CallRuntime(format_func_id, context, list_format, x));
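
The StringToLowerCaseIntl change above keeps the short-string fast path while switching the length plumbing from Smi to raw Word32. A hedged sketch of that fast path: one-byte strings of at most kMaxShortStringLength (24) characters are lowered via a Latin-1 lookup table, and anything longer, non-flat, or needing special casing bails to the C call or runtime. The table argument stands in for the intl_to_latin1_lower_table external reference.

#include <cstddef>
#include <cstdint>

void ToLowerShortLatin1(const uint8_t* src, uint8_t* dst, size_t length,
                        const uint8_t to_lower_table[256]) {
  for (size_t i = 0; i < length; ++i) dst[i] = to_lower_table[src[i]];
}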
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index 1d54d0da80..01c8a9ddcd 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -10,7 +10,6 @@
#include <list>
#include <memory>
-#include "src/builtins/builtins-intl.h"
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/date.h"
@@ -19,11 +18,16 @@
#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/js-break-iterator-inl.h"
#include "src/objects/js-collator-inl.h"
+#include "src/objects/js-date-time-format-inl.h"
#include "src/objects/js-list-format-inl.h"
#include "src/objects/js-locale-inl.h"
+#include "src/objects/js-number-format-inl.h"
#include "src/objects/js-plural-rules-inl.h"
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/js-segmenter-inl.h"
+#include "src/property-descriptor.h"
#include "unicode/datefmt.h"
#include "unicode/decimfmt.h"
@@ -32,12 +36,10 @@
#include "unicode/listformatter.h"
#include "unicode/normalizer2.h"
#include "unicode/numfmt.h"
-#include "unicode/reldatefmt.h"
#include "unicode/smpdtfmt.h"
#include "unicode/udat.h"
#include "unicode/ufieldpositer.h"
#include "unicode/unistr.h"
-#include "unicode/ureldatefmt.h"
#include "unicode/ustring.h"
namespace v8 {
@@ -128,329 +130,30 @@ BUILTIN(StringPrototypeNormalizeIntl) {
result.length())));
}
-namespace {
-
-// The list comes from third_party/icu/source/i18n/unicode/unum.h.
-// They're mapped to NumberFormat part types mentioned throughout
-// https://tc39.github.io/ecma402/#sec-partitionnumberpattern .
-Handle<String> IcuNumberFieldIdToNumberType(int32_t field_id, double number,
- Isolate* isolate) {
- switch (static_cast<UNumberFormatFields>(field_id)) {
- case UNUM_INTEGER_FIELD:
- if (std::isfinite(number)) return isolate->factory()->integer_string();
- if (std::isnan(number)) return isolate->factory()->nan_string();
- return isolate->factory()->infinity_string();
- case UNUM_FRACTION_FIELD:
- return isolate->factory()->fraction_string();
- case UNUM_DECIMAL_SEPARATOR_FIELD:
- return isolate->factory()->decimal_string();
- case UNUM_GROUPING_SEPARATOR_FIELD:
- return isolate->factory()->group_string();
- case UNUM_CURRENCY_FIELD:
- return isolate->factory()->currency_string();
- case UNUM_PERCENT_FIELD:
- return isolate->factory()->percentSign_string();
- case UNUM_SIGN_FIELD:
- return number < 0 ? isolate->factory()->minusSign_string()
- : isolate->factory()->plusSign_string();
-
- case UNUM_EXPONENT_SYMBOL_FIELD:
- case UNUM_EXPONENT_SIGN_FIELD:
- case UNUM_EXPONENT_FIELD:
- // We should never get these because we're not using any scientific
- // formatter.
- UNREACHABLE();
- return Handle<String>();
-
- case UNUM_PERMILL_FIELD:
- // We're not creating any permill formatter, and it's not even clear how
- // that would be possible with the ICU API.
- UNREACHABLE();
- return Handle<String>();
-
- default:
- UNREACHABLE();
- return Handle<String>();
- }
-}
-
-// The list comes from third_party/icu/source/i18n/unicode/udat.h.
-// They're mapped to DateTimeFormat components listed at
-// https://tc39.github.io/ecma402/#sec-datetimeformat-abstracts .
-
-Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
- switch (field_id) {
- case -1:
- return isolate->factory()->literal_string();
- case UDAT_YEAR_FIELD:
- case UDAT_EXTENDED_YEAR_FIELD:
- case UDAT_YEAR_NAME_FIELD:
- return isolate->factory()->year_string();
- case UDAT_MONTH_FIELD:
- case UDAT_STANDALONE_MONTH_FIELD:
- return isolate->factory()->month_string();
- case UDAT_DATE_FIELD:
- return isolate->factory()->day_string();
- case UDAT_HOUR_OF_DAY1_FIELD:
- case UDAT_HOUR_OF_DAY0_FIELD:
- case UDAT_HOUR1_FIELD:
- case UDAT_HOUR0_FIELD:
- return isolate->factory()->hour_string();
- case UDAT_MINUTE_FIELD:
- return isolate->factory()->minute_string();
- case UDAT_SECOND_FIELD:
- return isolate->factory()->second_string();
- case UDAT_DAY_OF_WEEK_FIELD:
- case UDAT_DOW_LOCAL_FIELD:
- case UDAT_STANDALONE_DAY_FIELD:
- return isolate->factory()->weekday_string();
- case UDAT_AM_PM_FIELD:
- return isolate->factory()->dayperiod_string();
- case UDAT_TIMEZONE_FIELD:
- case UDAT_TIMEZONE_RFC_FIELD:
- case UDAT_TIMEZONE_GENERIC_FIELD:
- case UDAT_TIMEZONE_SPECIAL_FIELD:
- case UDAT_TIMEZONE_LOCALIZED_GMT_OFFSET_FIELD:
- case UDAT_TIMEZONE_ISO_FIELD:
- case UDAT_TIMEZONE_ISO_LOCAL_FIELD:
- return isolate->factory()->timeZoneName_string();
- case UDAT_ERA_FIELD:
- return isolate->factory()->era_string();
- default:
- // Other UDAT_*_FIELD's cannot show up because there is no way to specify
- // them via options of Intl.DateTimeFormat.
- UNREACHABLE();
- // To prevent MSVC from issuing C4715 warning.
- return Handle<String>();
- }
-}
-
-bool cmp_NumberFormatSpan(const NumberFormatSpan& a,
- const NumberFormatSpan& b) {
- // Regions that start earlier should be encountered earlier.
- if (a.begin_pos < b.begin_pos) return true;
- if (a.begin_pos > b.begin_pos) return false;
- // For regions that start in the same place, regions that last longer should
- // be encountered earlier.
- if (a.end_pos < b.end_pos) return false;
- if (a.end_pos > b.end_pos) return true;
- // For regions that are exactly the same, one of them must be the "literal"
- // backdrop we added, which has a field_id of -1, so consider higher field_ids
- // to be later.
- return a.field_id < b.field_id;
-}
-
-MaybeHandle<Object> FormatNumberToParts(Isolate* isolate,
- icu::NumberFormat* fmt, double number) {
- Factory* factory = isolate->factory();
-
- icu::UnicodeString formatted;
- icu::FieldPositionIterator fp_iter;
- UErrorCode status = U_ZERO_ERROR;
- fmt->format(number, formatted, &fp_iter, status);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object);
- }
-
- Handle<JSArray> result = factory->NewJSArray(0);
- int32_t length = formatted.length();
- if (length == 0) return result;
-
- std::vector<NumberFormatSpan> regions;
- // Add a "literal" backdrop for the entire string. This will be used if no
- // other region covers some part of the formatted string. It's possible
- // there's another field with exactly the same begin and end as this backdrop,
- // in which case the backdrop's field_id of -1 will give it lower priority.
- regions.push_back(NumberFormatSpan(-1, 0, formatted.length()));
-
- {
- icu::FieldPosition fp;
- while (fp_iter.next(fp)) {
- regions.push_back(NumberFormatSpan(fp.getField(), fp.getBeginIndex(),
- fp.getEndIndex()));
- }
- }
-
- std::vector<NumberFormatSpan> parts = FlattenRegionsToParts(&regions);
-
- int index = 0;
- for (auto it = parts.begin(); it < parts.end(); it++) {
- NumberFormatSpan part = *it;
- Handle<String> field_type_string =
- part.field_id == -1
- ? isolate->factory()->literal_string()
- : IcuNumberFieldIdToNumberType(part.field_id, number, isolate);
- Handle<String> substring;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, substring,
- Intl::ToString(isolate, formatted, part.begin_pos, part.end_pos),
- Object);
- Intl::AddElement(isolate, result, index, field_type_string, substring);
- ++index;
- }
- JSObject::ValidateElements(*result);
-
- return result;
-}
-
-MaybeHandle<Object> FormatDateToParts(Isolate* isolate, icu::DateFormat* format,
- double date_value) {
- Factory* factory = isolate->factory();
-
- icu::UnicodeString formatted;
- icu::FieldPositionIterator fp_iter;
- icu::FieldPosition fp;
- UErrorCode status = U_ZERO_ERROR;
- format->format(date_value, formatted, &fp_iter, status);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object);
- }
+BUILTIN(V8BreakIteratorSupportedLocalesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
- Handle<JSArray> result = factory->NewJSArray(0);
- int32_t length = formatted.length();
- if (length == 0) return result;
-
- int index = 0;
- int32_t previous_end_pos = 0;
- Handle<String> substring;
- while (fp_iter.next(fp)) {
- int32_t begin_pos = fp.getBeginIndex();
- int32_t end_pos = fp.getEndIndex();
-
- if (previous_end_pos < begin_pos) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, substring,
- Intl::ToString(isolate, formatted, previous_end_pos, begin_pos),
- Object);
- Intl::AddElement(isolate, result, index,
- IcuDateFieldIdToDateType(-1, isolate), substring);
- ++index;
- }
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, substring,
- Intl::ToString(isolate, formatted, begin_pos, end_pos), Object);
- Intl::AddElement(isolate, result, index,
- IcuDateFieldIdToDateType(fp.getField(), isolate),
- substring);
- previous_end_pos = end_pos;
- ++index;
- }
- if (previous_end_pos < length) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, substring,
- Intl::ToString(isolate, formatted, previous_end_pos, length), Object);
- Intl::AddElement(isolate, result, index,
- IcuDateFieldIdToDateType(-1, isolate), substring);
- }
- JSObject::ValidateElements(*result);
- return result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::SupportedLocalesOf(isolate, ICUService::kBreakIterator,
+ locales, options));
}
-} // namespace
-
-// Flattens a list of possibly-overlapping "regions" to a list of
-// non-overlapping "parts". At least one of the input regions must span the
-// entire space of possible indexes. The regions parameter will sorted in-place
-// according to some criteria; this is done for performance to avoid copying the
-// input.
-std::vector<NumberFormatSpan> FlattenRegionsToParts(
- std::vector<NumberFormatSpan>* regions) {
- // The intention of this algorithm is that it's used to translate ICU "fields"
- // to JavaScript "parts" of a formatted string. Each ICU field and JavaScript
- // part has an integer field_id, which corresponds to something like "grouping
- // separator", "fraction", or "percent sign", and has a begin and end
- // position. Here's a diagram of:
-
- // var nf = new Intl.NumberFormat(['de'], {style:'currency',currency:'EUR'});
- // nf.formatToParts(123456.78);
-
- // : 6
- // input regions: 0000000211 7
- // ('-' means -1): ------------
- // formatted string: "123.456,78 €"
- // output parts: 0006000211-7
-
- // To illustrate the requirements of this algorithm, here's a contrived and
- // convoluted example of inputs and expected outputs:
-
- // : 4
- // : 22 33 3
- // : 11111 22
- // input regions: 0000000 111
- // : ------------
- // formatted string: "abcdefghijkl"
- // output parts: 0221340--231
- // (The characters in the formatted string are irrelevant to this function.)
-
- // We arrange the overlapping input regions like a mountain range where
- // smaller regions are "on top" of larger regions, and we output a birds-eye
- // view of the mountains, so that smaller regions take priority over larger
- // regions.
- std::sort(regions->begin(), regions->end(), cmp_NumberFormatSpan);
- std::vector<size_t> overlapping_region_index_stack;
- // At least one item in regions must be a region spanning the entire string.
- // Due to the sorting above, the first item in the vector will be one of them.
- overlapping_region_index_stack.push_back(0);
- NumberFormatSpan top_region = regions->at(0);
- size_t region_iterator = 1;
- int32_t entire_size = top_region.end_pos;
-
- std::vector<NumberFormatSpan> out_parts;
-
- // The "climber" is a cursor that advances from left to right climbing "up"
- // and "down" the mountains. Whenever the climber moves to the right, that
- // represents an item of output.
- int32_t climber = 0;
- while (climber < entire_size) {
- int32_t next_region_begin_pos;
- if (region_iterator < regions->size()) {
- next_region_begin_pos = regions->at(region_iterator).begin_pos;
- } else {
- // finish off the rest of the input by proceeding to the end.
- next_region_begin_pos = entire_size;
- }
+BUILTIN(NumberFormatSupportedLocalesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
- if (climber < next_region_begin_pos) {
- while (top_region.end_pos < next_region_begin_pos) {
- if (climber < top_region.end_pos) {
- // step down
- out_parts.push_back(NumberFormatSpan(top_region.field_id, climber,
- top_region.end_pos));
- climber = top_region.end_pos;
- } else {
- // drop down
- }
- overlapping_region_index_stack.pop_back();
- top_region = regions->at(overlapping_region_index_stack.back());
- }
- if (climber < next_region_begin_pos) {
- // cross a plateau/mesa/valley
- out_parts.push_back(NumberFormatSpan(top_region.field_id, climber,
- next_region_begin_pos));
- climber = next_region_begin_pos;
- }
- }
- if (region_iterator < regions->size()) {
- overlapping_region_index_stack.push_back(region_iterator++);
- top_region = regions->at(overlapping_region_index_stack.back());
- }
- }
- return out_parts;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::SupportedLocalesOf(isolate, ICUService::kNumberFormat,
+ locales, options));
}
BUILTIN(NumberFormatPrototypeFormatToParts) {
const char* const method = "Intl.NumberFormat.prototype.formatToParts";
HandleScope handle_scope(isolate);
- CHECK_RECEIVER(JSObject, number_format_holder, method);
-
- if (!Intl::IsObjectOfType(isolate, number_format_holder,
- Intl::Type::kNumberFormat)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- isolate->factory()->NewStringFromAsciiChecked(method),
- number_format_holder));
- }
+ CHECK_RECEIVER(JSNumberFormat, number_format, method);
Handle<Object> x;
if (args.length() >= 2) {
@@ -460,12 +163,33 @@ BUILTIN(NumberFormatPrototypeFormatToParts) {
x = isolate->factory()->nan_value();
}
- icu::DecimalFormat* number_format =
- NumberFormat::UnpackNumberFormat(number_format_holder);
- CHECK_NOT_NULL(number_format);
+ RETURN_RESULT_OR_FAILURE(isolate, JSNumberFormat::FormatToParts(
+ isolate, number_format, x->Number()));
+}
+
+BUILTIN(DateTimeFormatPrototypeResolvedOptions) {
+ const char* const method = "Intl.DateTimeFormat.prototype.resolvedOptions";
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSReceiver, format_holder, method);
+
+ // 3. Let dtf be ? UnwrapDateTimeFormat(dtf).
+ Handle<JSDateTimeFormat> date_time_format;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, date_time_format,
+ JSDateTimeFormat::UnwrapDateTimeFormat(isolate, format_holder));
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSDateTimeFormat::ResolvedOptions(isolate, date_time_format));
+}
+
+BUILTIN(DateTimeFormatSupportedLocalesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
RETURN_RESULT_OR_FAILURE(
- isolate, FormatNumberToParts(isolate, number_format, x->Number()));
+ isolate, Intl::SupportedLocalesOf(isolate, ICUService::kDateFormat,
+ locales, options));
}
BUILTIN(DateTimeFormatPrototypeFormatToParts) {
@@ -474,13 +198,14 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) {
CHECK_RECEIVER(JSObject, date_format_holder, method);
Factory* factory = isolate->factory();
- if (!Intl::IsObjectOfType(isolate, date_format_holder,
- Intl::Type::kDateTimeFormat)) {
+ if (!date_format_holder->IsJSDateTimeFormat()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
factory->NewStringFromAsciiChecked(method),
date_format_holder));
}
+ Handle<JSDateTimeFormat> dtf =
+ Handle<JSDateTimeFormat>::cast(date_format_holder);
Handle<Object> x = args.atOrUndefined(isolate, 1);
if (x->IsUndefined(isolate)) {
@@ -496,12 +221,142 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) {
isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
}
- icu::SimpleDateFormat* date_format =
- DateFormat::UnpackDateFormat(date_format_holder);
- CHECK_NOT_NULL(date_format);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSDateTimeFormat::FormatToParts(isolate, dtf, date_value));
+}
+
+namespace {
+Handle<JSFunction> CreateBoundFunction(Isolate* isolate,
+ Handle<JSObject> object,
+ Builtins::Name builtin_id, int len) {
+ Handle<NativeContext> native_context(isolate->context()->native_context(),
+ isolate);
+ Handle<Context> context = isolate->factory()->NewBuiltinContext(
+ native_context,
+ static_cast<int>(Intl::BoundFunctionContextSlot::kLength));
+
+ context->set(static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction),
+ *object);
+
+ Handle<SharedFunctionInfo> info =
+ isolate->factory()->NewSharedFunctionInfoForBuiltin(
+ isolate->factory()->empty_string(), builtin_id, kNormalFunction);
+ info->set_internal_formal_parameter_count(len);
+ info->set_length(len);
+
+ Handle<Map> map = isolate->strict_function_without_prototype_map();
+
+ Handle<JSFunction> new_bound_function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+ return new_bound_function;
+}
+
+/**
+ * Common code shared between DateTimeFormatConstructor and
+ * NumberFormatConstrutor
+ */
+template <class T>
+Object* FormatConstructor(BuiltinArguments args, Isolate* isolate,
+ Handle<Object> constructor, const char* method) {
+ Handle<JSReceiver> new_target;
+ // 1. If NewTarget is undefined, let newTarget be the active
+ // function object, else let newTarget be NewTarget.
+ if (args.new_target()->IsUndefined(isolate)) {
+ new_target = args.target();
+ } else {
+ new_target = Handle<JSReceiver>::cast(args.new_target());
+ }
+
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ // 2. Let format be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%<T>Prototype%", ...).
+
+ Handle<JSObject> format_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, format_obj,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+ Handle<T> format = Handle<T>::cast(format_obj);
+
+ // 3. Perform ? Initialize<T>(format, locales, options).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, format, T::Initialize(isolate, format, locales, options));
+ // 4. Let this be the this value.
+ Handle<Object> receiver = args.receiver();
+
+ // 5. If NewTarget is undefined and ? InstanceofOperator(this, %<T>%)
+ // is true, then
+ //
+ // Look up the intrinsic value that has been stored on the context.
+ // Call the instanceof function.
+ Handle<Object> is_instance_of_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, is_instance_of_obj,
+ Object::InstanceOf(isolate, receiver, constructor));
+
+ // Get the boolean value of the result.
+ bool is_instance_of = is_instance_of_obj->BooleanValue(isolate);
+
+ if (args.new_target()->IsUndefined(isolate) && is_instance_of) {
+ if (!receiver->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(method),
+ receiver));
+ }
+ Handle<JSReceiver> rec = Handle<JSReceiver>::cast(receiver);
+ // a. Perform ? DefinePropertyOrThrow(this,
+ // %Intl%.[[FallbackSymbol]], PropertyDescriptor{ [[Value]]: format,
+ // [[Writable]]: false, [[Enumerable]]: false, [[Configurable]]: false }).
+ PropertyDescriptor desc;
+ desc.set_value(format);
+ desc.set_writable(false);
+ desc.set_enumerable(false);
+ desc.set_configurable(false);
+ Maybe<bool> success = JSReceiver::DefineOwnProperty(
+ isolate, rec, isolate->factory()->intl_fallback_symbol(), &desc,
+ kThrowOnError);
+ MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception());
+ CHECK(success.FromJust());
+ // b. Return this.
+ return *receiver;
+ }
+ // 6. Return format.
+ return *format;
+}
+
+} // namespace
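
A sketch of the legacy-receiver handling in FormatConstructor above: when the constructor is called without `new` on an object that is already an instance of the Intl constructor, the freshly initialized format is stashed on the receiver under %Intl%.[[FallbackSymbol]] as a non-writable, non-enumerable, non-configurable property, and the receiver itself is returned. All names below are stand-ins for the handle-based code above.

struct IntlObj;
IntlObj* CreateAndInitializeFormat();                        // steps 2-3
bool InstanceOfIntlConstructor(IntlObj* receiver);           // step 5
void DefineFallbackProperty(IntlObj* receiver, IntlObj* format);  // step 5.a

IntlObj* FormatConstructorSketch(IntlObj* receiver, bool called_with_new) {
  IntlObj* format = CreateAndInitializeFormat();
  if (!called_with_new && InstanceOfIntlConstructor(receiver)) {
    DefineFallbackProperty(receiver, format);  // step 5.a
    return receiver;                           // step 5.b: return this
  }
  return format;                               // step 6
}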
+
+BUILTIN(NumberFormatConstructor) {
+ HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kNumberFormat);
+
+ return FormatConstructor<JSNumberFormat>(
+ args, isolate, isolate->intl_number_format_function(),
+ "Intl.NumberFormat");
+}
+
+BUILTIN(NumberFormatPrototypeResolvedOptions) {
+ HandleScope scope(isolate);
+ const char* const method = "Intl.NumberFormat.prototype.resolvedOptions";
+
+ // 1. Let nf be the this value.
+ // 2. If Type(nf) is not Object, throw a TypeError exception.
+ CHECK_RECEIVER(JSReceiver, number_format_holder, method);
+
+ // 3. Let nf be ? UnwrapNumberFormat(nf)
+ Handle<JSNumberFormat> number_format;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, number_format,
+ JSNumberFormat::UnwrapNumberFormat(isolate, number_format_holder));
- RETURN_RESULT_OR_FAILURE(isolate,
- FormatDateToParts(isolate, date_format, date_value));
+ return *JSNumberFormat::ResolvedOptions(isolate, number_format);
}
BUILTIN(NumberFormatPrototypeFormatNumber) {
@@ -513,17 +368,12 @@ BUILTIN(NumberFormatPrototypeFormatNumber) {
CHECK_RECEIVER(JSReceiver, receiver, method);
// 3. Let nf be ? UnwrapNumberFormat(nf).
- Handle<JSObject> number_format_holder;
+ Handle<JSNumberFormat> number_format;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, number_format_holder,
- NumberFormat::Unwrap(isolate, receiver, method));
+ isolate, number_format,
+ JSNumberFormat::UnwrapNumberFormat(isolate, receiver));
- DCHECK(Intl::IsObjectOfType(isolate, number_format_holder,
- Intl::Type::kNumberFormat));
-
- Handle<Object> bound_format = Handle<Object>(
- number_format_holder->GetEmbedderField(NumberFormat::kBoundFormatIndex),
- isolate);
+ Handle<Object> bound_format(number_format->bound_format(), isolate);
// 4. If nf.[[BoundFormat]] is undefined, then
if (!bound_format->IsUndefined(isolate)) {
@@ -532,29 +382,11 @@ BUILTIN(NumberFormatPrototypeFormatNumber) {
return *bound_format;
}
- Handle<NativeContext> native_context(isolate->context()->native_context(),
- isolate);
-
- Handle<Context> context = isolate->factory()->NewBuiltinContext(
- native_context, NumberFormat::ContextSlot::kLength);
-
- // 4. b. Set F.[[NumberFormat]] to nf.
- context->set(NumberFormat::ContextSlot::kNumberFormat, *number_format_holder);
-
- Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
- native_context->number_format_internal_format_number_shared_fun(),
- isolate);
-
- Handle<Map> map = isolate->strict_function_without_prototype_map();
-
- // 4. a. Let F be a new built-in function object as defined in
- // Number Format Functions (11.1.4).
- Handle<JSFunction> new_bound_format_function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+ Handle<JSFunction> new_bound_format_function = CreateBoundFunction(
+ isolate, number_format, Builtins::kNumberFormatInternalFormatNumber, 1);
// 4. c. Set nf.[[BoundFormat]] to F.
- number_format_holder->SetEmbedderField(NumberFormat::kBoundFormatIndex,
- *new_bound_format_function);
+ number_format->set_bound_format(*new_bound_format_function);
// 5. Return nf.[[BoundFormat]].
return *new_bound_format_function;
@@ -566,14 +398,12 @@ BUILTIN(NumberFormatInternalFormatNumber) {
Handle<Context> context = Handle<Context>(isolate->context(), isolate);
// 1. Let nf be F.[[NumberFormat]].
- Handle<JSObject> number_format_holder = Handle<JSObject>(
- JSObject::cast(context->get(NumberFormat::ContextSlot::kNumberFormat)),
- isolate);
-
// 2. Assert: Type(nf) is Object and nf has an
// [[InitializedNumberFormat]] internal slot.
- DCHECK(Intl::IsObjectOfType(isolate, number_format_holder,
- Intl::Type::kNumberFormat));
+ Handle<JSNumberFormat> number_format = Handle<JSNumberFormat>(
+ JSNumberFormat::cast(context->get(
+ static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+ isolate);
// 3. If value is not provided, let value be undefined.
Handle<Object> value = args.atOrUndefined(isolate, 1);
@@ -590,8 +420,18 @@ BUILTIN(NumberFormatInternalFormatNumber) {
double number = number_obj->Number();
// Return FormatNumber(nf, x).
- RETURN_RESULT_OR_FAILURE(isolate, NumberFormat::FormatNumber(
- isolate, number_format_holder, number));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSNumberFormat::FormatNumber(isolate, number_format, number));
+}
+
+BUILTIN(DateTimeFormatConstructor) {
+ HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateTimeFormat);
+
+ return FormatConstructor<JSDateTimeFormat>(
+ args, isolate, isolate->intl_date_time_format_function(),
+ "Intl.DateTimeFormat");
}
BUILTIN(DateTimeFormatPrototypeFormat) {
@@ -603,16 +443,12 @@ BUILTIN(DateTimeFormatPrototypeFormat) {
CHECK_RECEIVER(JSReceiver, receiver, method);
// 3. Let dtf be ? UnwrapDateTimeFormat(dtf).
- Handle<JSObject> date_format_holder;
+ Handle<JSDateTimeFormat> format;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, date_format_holder,
- DateFormat::Unwrap(isolate, receiver, method));
- DCHECK(Intl::IsObjectOfType(isolate, date_format_holder,
- Intl::Type::kDateTimeFormat));
+ isolate, format,
+ JSDateTimeFormat::UnwrapDateTimeFormat(isolate, receiver));
- Handle<Object> bound_format = Handle<Object>(
- date_format_holder->GetEmbedderField(DateFormat::kBoundFormatIndex),
- isolate);
+ Handle<Object> bound_format = Handle<Object>(format->bound_format(), isolate);
// 4. If dtf.[[BoundFormat]] is undefined, then
if (!bound_format->IsUndefined(isolate)) {
@@ -621,26 +457,11 @@ BUILTIN(DateTimeFormatPrototypeFormat) {
return *bound_format;
}
- Handle<NativeContext> native_context(isolate->context()->native_context(),
- isolate);
- Handle<Context> context = isolate->factory()->NewBuiltinContext(
- native_context, DateFormat::ContextSlot::kLength);
-
- // 4.b. Set F.[[DateTimeFormat]] to dtf.
- context->set(DateFormat::ContextSlot::kDateFormat, *date_format_holder);
-
- Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
- native_context->date_format_internal_format_shared_fun(), isolate);
- Handle<Map> map = isolate->strict_function_without_prototype_map();
-
- // 4.a. Let F be a new built-in function object as defined in DateTime Format
- // Functions (12.1.5).
- Handle<JSFunction> new_bound_format_function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+ Handle<JSFunction> new_bound_format_function = CreateBoundFunction(
+ isolate, format, Builtins::kDateTimeFormatInternalFormat, 1);
// 4.c. Set dtf.[[BoundFormat]] to F.
- date_format_holder->SetEmbedderField(DateFormat::kBoundFormatIndex,
- *new_bound_format_function);
+ format->set_bound_format(*new_bound_format_function);
// 5. Return dtf.[[BoundFormat]].
return *new_bound_format_function;
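
DateTimeFormatPrototypeFormat and NumberFormatPrototypeFormatNumber now share CreateBoundFunction, and the [[BoundFormat]] handling in both is plain memoization. A hedged sketch of the pattern (types below are stand-ins for the handle-based code above):

struct BoundFn;
struct FormatSketch {
  BoundFn* bound_format = nullptr;  // undefined until first access
};
BoundFn* CreateBoundFunctionSketch(FormatSketch* format);

BoundFn* GetBoundFormat(FormatSketch* format) {
  if (format->bound_format != nullptr) {  // step 4: already bound?
    return format->bound_format;
  }
  BoundFn* bound = CreateBoundFunctionSketch(format);  // steps 4.a/4.b
  format->bound_format = bound;                        // step 4.c
  return bound;                                        // step 5
}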
@@ -651,23 +472,24 @@ BUILTIN(DateTimeFormatInternalFormat) {
Handle<Context> context = Handle<Context>(isolate->context(), isolate);
// 1. Let dtf be F.[[DateTimeFormat]].
- Handle<JSObject> date_format_holder = Handle<JSObject>(
- JSObject::cast(context->get(DateFormat::ContextSlot::kDateFormat)),
- isolate);
-
// 2. Assert: Type(dtf) is Object and dtf has an [[InitializedDateTimeFormat]]
// internal slot.
- DCHECK(Intl::IsObjectOfType(isolate, date_format_holder,
- Intl::Type::kDateTimeFormat));
+ Handle<JSDateTimeFormat> date_format_holder = Handle<JSDateTimeFormat>(
+ JSDateTimeFormat::cast(context->get(
+ static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+ isolate);
Handle<Object> date = args.atOrUndefined(isolate, 1);
- RETURN_RESULT_OR_FAILURE(
- isolate, DateFormat::DateTimeFormat(isolate, date_format_holder, date));
+ RETURN_RESULT_OR_FAILURE(isolate, JSDateTimeFormat::DateTimeFormat(
+ isolate, date_format_holder, date));
}
BUILTIN(ListFormatConstructor) {
HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kListFormat);
+
// 1. If NewTarget is undefined, throw a TypeError exception.
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -682,8 +504,9 @@ BUILTIN(ListFormatConstructor) {
Handle<JSObject> result;
// 2. Let listFormat be OrdinaryCreateFromConstructor(NewTarget,
// "%ListFormatPrototype%").
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
Handle<JSListFormat> format = Handle<JSListFormat>::cast(result);
format->set_flags(0);
@@ -691,8 +514,8 @@ BUILTIN(ListFormatConstructor) {
Handle<Object> options = args.atOrUndefined(isolate, 2);
// 3. Return InitializeListFormat(listFormat, locales, options).
- RETURN_RESULT_OR_FAILURE(isolate, JSListFormat::InitializeListFormat(
- isolate, format, locales, options));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSListFormat::Initialize(isolate, format, locales, options));
}
BUILTIN(ListFormatPrototypeResolvedOptions) {
@@ -702,42 +525,62 @@ BUILTIN(ListFormatPrototypeResolvedOptions) {
return *JSListFormat::ResolvedOptions(isolate, format_holder);
}
+BUILTIN(ListFormatSupportedLocalesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::SupportedLocalesOf(isolate, ICUService::kListFormatter,
+ locales, options));
+}
+
namespace {
MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
Handle<JSFunction> constructor,
Handle<JSReceiver> new_target,
Handle<Object> tag, Handle<Object> options) {
- Handle<JSObject> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- JSObject::New(constructor, new_target), JSLocale);
-
- // First parameter is a locale, as a string/object. Can't be empty.
+ Handle<JSObject> locale;
+ // 6. Let locale be ? OrdinaryCreateFromConstructor(NewTarget,
+ // %LocalePrototype%, internalSlotsList).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, locale,
+ JSObject::New(constructor, new_target, Handle<AllocationSite>::null()),
+ JSLocale);
+
+ // 7. If Type(tag) is not String or Object, throw a TypeError exception.
if (!tag->IsString() && !tag->IsJSReceiver()) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kLocaleNotEmpty),
JSLocale);
}
Handle<String> locale_string;
+ // 8. If Type(tag) is Object and tag has an [[InitializedLocale]] internal
+ // slot, then
if (tag->IsJSLocale() && Handle<JSLocale>::cast(tag)->locale()->IsString()) {
+ // a. Let tag be tag.[[Locale]].
locale_string =
Handle<String>(Handle<JSLocale>::cast(tag)->locale(), isolate);
- } else {
+ } else { // 9. Else,
+ // a. Let tag be ? ToString(tag).
ASSIGN_RETURN_ON_EXCEPTION(isolate, locale_string,
Object::ToString(isolate, tag), JSLocale);
}
Handle<JSReceiver> options_object;
- if (options->IsNullOrUndefined(isolate)) {
- // Make empty options bag.
+ // 10. If options is undefined, then
+ if (options->IsUndefined(isolate)) {
+ // a. Let options be ! ObjectCreate(null).
options_object = isolate->factory()->NewJSObjectWithNullProto();
- } else {
+ } else { // 11. Else
+ // a. Let options be ? ToObject(options).
ASSIGN_RETURN_ON_EXCEPTION(isolate, options_object,
Object::ToObject(isolate, options), JSLocale);
}
- return JSLocale::InitializeLocale(isolate, Handle<JSLocale>::cast(result),
- locale_string, options_object);
+ return JSLocale::Initialize(isolate, Handle<JSLocale>::cast(locale),
+ locale_string, options_object);
}
} // namespace
@@ -745,6 +588,9 @@ MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
// Intl.Locale implementation
BUILTIN(LocaleConstructor) {
HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kLocale);
+
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
@@ -786,186 +632,17 @@ BUILTIN(LocalePrototypeMinimize) {
isolate->factory()->NewJSObjectWithNullProto()));
}
-namespace {
-
-MaybeHandle<JSArray> GenerateRelativeTimeFormatParts(
- Isolate* isolate, icu::UnicodeString formatted,
- icu::UnicodeString integer_part, Handle<String> unit) {
- Factory* factory = isolate->factory();
- Handle<JSArray> array = factory->NewJSArray(0);
- int32_t found = formatted.indexOf(integer_part);
-
- Handle<String> substring;
- if (found < 0) {
- // Cannot find the integer_part in the formatted.
- // Return [{'type': 'literal', 'value': formatted}]
- ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
- Intl::ToString(isolate, formatted), JSArray);
- Intl::AddElement(isolate, array,
- 0, // index
- factory->literal_string(), // field_type_string
- substring);
- } else {
- // Found the formatted integer in the result.
- int index = 0;
-
- // array.push({
- // 'type': 'literal',
- // 'value': formatted.substring(0, found)})
- if (found > 0) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
- Intl::ToString(isolate, formatted, 0, found),
- JSArray);
- Intl::AddElement(isolate, array, index++,
- factory->literal_string(), // field_type_string
- substring);
- }
-
- // array.push({
- // 'type': 'integer',
- // 'value': formatted.substring(found, found + integer_part.length),
- // 'unit': unit})
- ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
- Intl::ToString(isolate, formatted, found,
- found + integer_part.length()),
- JSArray);
- Intl::AddElement(isolate, array, index++,
- factory->integer_string(), // field_type_string
- substring, factory->unit_string(), unit);
-
- // array.push({
- // 'type': 'literal',
- // 'value': formatted.substring(
- // found + integer_part.length, formatted.length)})
- if (found + integer_part.length() < formatted.length()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, substring,
- Intl::ToString(isolate, formatted, found + integer_part.length(),
- formatted.length()),
- JSArray);
- Intl::AddElement(isolate, array, index,
- factory->literal_string(), // field_type_string
- substring);
- }
- }
- return array;
-}
-
-bool GetURelativeDateTimeUnit(Handle<String> unit,
- URelativeDateTimeUnit* unit_enum) {
- std::unique_ptr<char[]> unit_str = unit->ToCString();
- if ((strcmp("second", unit_str.get()) == 0) ||
- (strcmp("seconds", unit_str.get()) == 0)) {
- *unit_enum = UDAT_REL_UNIT_SECOND;
- } else if ((strcmp("minute", unit_str.get()) == 0) ||
- (strcmp("minutes", unit_str.get()) == 0)) {
- *unit_enum = UDAT_REL_UNIT_MINUTE;
- } else if ((strcmp("hour", unit_str.get()) == 0) ||
- (strcmp("hours", unit_str.get()) == 0)) {
- *unit_enum = UDAT_REL_UNIT_HOUR;
- } else if ((strcmp("day", unit_str.get()) == 0) ||
- (strcmp("days", unit_str.get()) == 0)) {
- *unit_enum = UDAT_REL_UNIT_DAY;
- } else if ((strcmp("week", unit_str.get()) == 0) ||
- (strcmp("weeks", unit_str.get()) == 0)) {
- *unit_enum = UDAT_REL_UNIT_WEEK;
- } else if ((strcmp("month", unit_str.get()) == 0) ||
- (strcmp("months", unit_str.get()) == 0)) {
- *unit_enum = UDAT_REL_UNIT_MONTH;
- } else if ((strcmp("quarter", unit_str.get()) == 0) ||
- (strcmp("quarters", unit_str.get()) == 0)) {
- *unit_enum = UDAT_REL_UNIT_QUARTER;
- } else if ((strcmp("year", unit_str.get()) == 0) ||
- (strcmp("years", unit_str.get()) == 0)) {
- *unit_enum = UDAT_REL_UNIT_YEAR;
- } else {
- return false;
- }
- return true;
-}
-
-MaybeHandle<Object> RelativeTimeFormatPrototypeFormatCommon(
- BuiltinArguments args, Isolate* isolate,
- Handle<JSRelativeTimeFormat> format_holder, const char* func_name,
- bool to_parts) {
- Factory* factory = isolate->factory();
- Handle<Object> value_obj = args.atOrUndefined(isolate, 1);
- Handle<Object> unit_obj = args.atOrUndefined(isolate, 2);
-
- // 3. Let value be ? ToNumber(value).
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
- Object::ToNumber(isolate, value_obj), Object);
- double number = value->Number();
- // 4. Let unit be ? ToString(unit).
- Handle<String> unit;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, unit, Object::ToString(isolate, unit_obj),
- Object);
-
- // 4. If isFinite(value) is false, then throw a RangeError exception.
- if (!std::isfinite(number)) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kNotFiniteNumber,
- isolate->factory()->NewStringFromAsciiChecked(func_name)),
- Object);
- }
-
- icu::RelativeDateTimeFormatter* formatter =
- JSRelativeTimeFormat::UnpackFormatter(format_holder);
- CHECK_NOT_NULL(formatter);
-
- URelativeDateTimeUnit unit_enum;
- if (!GetURelativeDateTimeUnit(unit, &unit_enum)) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kInvalidUnit,
- isolate->factory()->NewStringFromAsciiChecked(func_name),
- unit),
- Object);
- }
-
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString formatted;
- if (unit_enum == UDAT_REL_UNIT_QUARTER) {
-    // ICU has not yet implemented UDAT_REL_UNIT_QUARTER.
- } else {
- if (format_holder->numeric() == JSRelativeTimeFormat::Numeric::ALWAYS) {
- formatter->formatNumeric(number, unit_enum, formatted, status);
- } else {
- DCHECK_EQ(JSRelativeTimeFormat::Numeric::AUTO, format_holder->numeric());
- formatter->format(number, unit_enum, formatted, status);
- }
- }
-
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object);
- }
-
- if (to_parts) {
- icu::UnicodeString integer;
- icu::FieldPosition pos;
- formatter->getNumberFormat().format(std::abs(number), integer, pos, status);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError),
- Object);
- }
-
- Handle<JSArray> elements;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, elements,
- GenerateRelativeTimeFormatParts(isolate, formatted, integer, unit),
- Object);
- return elements;
- }
+BUILTIN(RelativeTimeFormatSupportedLocalesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
- return factory->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(formatted.getBuffer()),
- formatted.length()));
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ Intl::SupportedLocalesOf(isolate, ICUService::kRelativeDateTimeFormatter,
+ locales, options));
}
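
A sketch of the JS surface wired up here; the result depends on the ICU locale data compiled into the binary:

    // supportedLocalesOf filters the requested list down to supported tags.
    Intl.RelativeTimeFormat.supportedLocalesOf(["en-US", "xx-ZZ"]);
    // likely ["en-US"]; the unknown "xx-ZZ" tag is dropped
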
-} // namespace
-
BUILTIN(RelativeTimeFormatPrototypeFormat) {
HandleScope scope(isolate);
// 1. Let relativeTimeFormat be the this value.
@@ -974,9 +651,12 @@ BUILTIN(RelativeTimeFormatPrototypeFormat) {
// true, throw a TypeError exception.
CHECK_RECEIVER(JSRelativeTimeFormat, format_holder,
"Intl.RelativeTimeFormat.prototype.format");
- RETURN_RESULT_OR_FAILURE(isolate,
- RelativeTimeFormatPrototypeFormatCommon(
- args, isolate, format_holder, "format", false));
+ Handle<Object> value_obj = args.atOrUndefined(isolate, 1);
+ Handle<Object> unit_obj = args.atOrUndefined(isolate, 2);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSRelativeTimeFormat::Format(isolate, value_obj, unit_obj,
+ format_holder, "format", false));
}
BUILTIN(RelativeTimeFormatPrototypeFormatToParts) {
@@ -987,9 +667,11 @@ BUILTIN(RelativeTimeFormatPrototypeFormatToParts) {
// true, throw a TypeError exception.
CHECK_RECEIVER(JSRelativeTimeFormat, format_holder,
"Intl.RelativeTimeFormat.prototype.formatToParts");
- RETURN_RESULT_OR_FAILURE(
- isolate, RelativeTimeFormatPrototypeFormatCommon(
- args, isolate, format_holder, "formatToParts", true));
+ Handle<Object> value_obj = args.atOrUndefined(isolate, 1);
+ Handle<Object> unit_obj = args.atOrUndefined(isolate, 2);
+ RETURN_RESULT_OR_FAILURE(isolate, JSRelativeTimeFormat::Format(
+ isolate, value_obj, unit_obj,
+ format_holder, "formatToParts", true));
}
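
Both entry points now funnel into JSRelativeTimeFormat::Format, differing only in the to_parts flag. A usage sketch; the exact strings come from ICU, and the parts shape matches what the removed GenerateRelativeTimeFormatParts used to build:

    const rtf = new Intl.RelativeTimeFormat("en", { numeric: "auto" });
    rtf.format(-1, "day");    // e.g. "yesterday" (numeric: "auto" allows idioms)
    rtf.format(3, "day");     // e.g. "in 3 days"
    rtf.formatToParts(3, "day");
    // e.g. [{ type: "literal", value: "in " },
    //       { type: "integer", value: "3", unit: "day" },
    //       { type: "literal", value: " days" }]
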
// Locale getters.
@@ -1033,7 +715,7 @@ BUILTIN(LocalePrototypeCaseFirst) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.caseFirst");
- return locale_holder->case_first();
+ return *(locale_holder->CaseFirstAsString());
}
BUILTIN(LocalePrototypeCollation) {
@@ -1047,14 +729,23 @@ BUILTIN(LocalePrototypeHourCycle) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.hourCycle");
- return locale_holder->hour_cycle();
+ return *(locale_holder->HourCycleAsString());
}
BUILTIN(LocalePrototypeNumeric) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.numeric");
- return locale_holder->numeric();
+ switch (locale_holder->numeric()) {
+ case JSLocale::Numeric::TRUE_VALUE:
+ return *(isolate->factory()->true_value());
+ case JSLocale::Numeric::FALSE_VALUE:
+ return *(isolate->factory()->false_value());
+ case JSLocale::Numeric::NOTSET:
+ return *(isolate->factory()->undefined_value());
+ case JSLocale::Numeric::COUNT:
+ UNREACHABLE();
+ }
}
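
The switch makes the getter's tri-state explicit; under this implementation an unset keyword reads back as undefined rather than a default. A sketch:

    new Intl.Locale("en", { numeric: true }).numeric;   // true
    new Intl.Locale("en", { numeric: false }).numeric;  // false
    new Intl.Locale("en").numeric;                      // undefined (NOTSET)
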
BUILTIN(LocalePrototypeNumberingSystem) {
@@ -1074,6 +765,9 @@ BUILTIN(LocalePrototypeToString) {
BUILTIN(RelativeTimeFormatConstructor) {
HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kRelativeTimeFormat);
+
// 1. If NewTarget is undefined, throw a TypeError exception.
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -1089,8 +783,9 @@ BUILTIN(RelativeTimeFormatConstructor) {
// 2. Let relativeTimeFormat be
// ! OrdinaryCreateFromConstructor(NewTarget,
// "%RelativeTimeFormatPrototype%").
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
Handle<JSRelativeTimeFormat> format =
Handle<JSRelativeTimeFormat>::cast(result);
format->set_flags(0);
@@ -1100,9 +795,8 @@ BUILTIN(RelativeTimeFormatConstructor) {
// 3. Return ? InitializeRelativeTimeFormat(relativeTimeFormat, locales,
// options).
- RETURN_RESULT_OR_FAILURE(isolate,
- JSRelativeTimeFormat::InitializeRelativeTimeFormat(
- isolate, format, locales, options));
+ RETURN_RESULT_OR_FAILURE(isolate, JSRelativeTimeFormat::Initialize(
+ isolate, format, locales, options));
}
BUILTIN(RelativeTimeFormatPrototypeResolvedOptions) {
@@ -1114,7 +808,11 @@ BUILTIN(RelativeTimeFormatPrototypeResolvedOptions) {
BUILTIN(StringPrototypeToLocaleLowerCase) {
HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringToLocaleLowerCase);
+
TO_THIS_STRING(string, "String.prototype.toLocaleLowerCase");
+
RETURN_RESULT_OR_FAILURE(
isolate, Intl::StringLocaleConvertCase(isolate, string, false,
args.atOrUndefined(isolate, 1)));
@@ -1122,7 +820,11 @@ BUILTIN(StringPrototypeToLocaleLowerCase) {
BUILTIN(StringPrototypeToLocaleUpperCase) {
HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringToLocaleUpperCase);
+
TO_THIS_STRING(string, "String.prototype.toLocaleUpperCase");
+
RETURN_RESULT_OR_FAILURE(
isolate, Intl::StringLocaleConvertCase(isolate, string, true,
args.atOrUndefined(isolate, 1)));
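
Both builtins route through Intl::StringLocaleConvertCase with an optional locale argument; the canonical locale-sensitive case is the Turkish dotted/dotless I:

    "I".toLocaleLowerCase("en");  // "i"
    "I".toLocaleLowerCase("tr");  // "ı" (dotless lowercase i)
    "i".toLocaleUpperCase("tr");  // "İ" (dotted uppercase I)
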
@@ -1131,6 +833,8 @@ BUILTIN(StringPrototypeToLocaleUpperCase) {
BUILTIN(PluralRulesConstructor) {
HandleScope scope(isolate);
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kPluralRules);
+
// 1. If NewTarget is undefined, throw a TypeError exception.
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -1152,19 +856,61 @@ BUILTIN(PluralRulesConstructor) {
// [[MinimumFractionDigits]], [[MaximumFractionDigits]],
// [[MinimumSignificantDigits]], [[MaximumSignificantDigits]] »).
Handle<JSObject> plural_rules_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, plural_rules_obj,
- JSObject::New(target, new_target));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, plural_rules_obj,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
Handle<JSPluralRules> plural_rules =
Handle<JSPluralRules>::cast(plural_rules_obj);
// 3. Return ? InitializePluralRules(pluralRules, locales, options).
RETURN_RESULT_OR_FAILURE(
- isolate, JSPluralRules::InitializePluralRules(isolate, plural_rules,
- locales, options));
+ isolate,
+ JSPluralRules::Initialize(isolate, plural_rules, locales, options));
+}
+
+BUILTIN(PluralRulesPrototypeResolvedOptions) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSPluralRules, plural_rules_holder,
+ "Intl.PluralRules.prototype.resolvedOptions");
+ return *JSPluralRules::ResolvedOptions(isolate, plural_rules_holder);
+}
+
+BUILTIN(PluralRulesPrototypeSelect) {
+ HandleScope scope(isolate);
+
+ // 1. Let pr be the this value.
+ // 2. If Type(pr) is not Object, throw a TypeError exception.
+ // 3. If pr does not have an [[InitializedPluralRules]] internal slot, throw a
+ // TypeError exception.
+ CHECK_RECEIVER(JSPluralRules, plural_rules,
+ "Intl.PluralRules.prototype.select");
+
+ // 4. Let n be ? ToNumber(value).
+ Handle<Object> number = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number,
+ Object::ToNumber(isolate, number));
+ double number_double = number->Number();
+
+ // 5. Return ? ResolvePlural(pr, n).
+ RETURN_RESULT_OR_FAILURE(isolate, JSPluralRules::ResolvePlural(
+ isolate, plural_rules, number_double));
+}
+
+BUILTIN(PluralRulesSupportedLocalesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::SupportedLocalesOf(isolate, ICUService::kPluralRules,
+ locales, options));
}
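
A short sketch of the three builtins above; English cardinal rules only distinguish "one" and "other":

    const pr = new Intl.PluralRules("en");
    pr.select(1);                            // "one"
    pr.select(0.5);                          // "other"
    pr.resolvedOptions().pluralCategories;   // e.g. ["one", "other"]
    Intl.PluralRules.supportedLocalesOf(["en", "xx"]);  // likely ["en"]
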
BUILTIN(CollatorConstructor) {
HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kCollator);
+
Handle<JSReceiver> new_target;
// 1. If NewTarget is undefined, let newTarget be the active
// function object, else let newTarget be NewTarget.
@@ -1183,14 +929,31 @@ BUILTIN(CollatorConstructor) {
// 5. Let collator be ? OrdinaryCreateFromConstructor(newTarget,
// "%CollatorPrototype%", internalSlotsList).
Handle<JSObject> collator_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, collator_obj,
- JSObject::New(target, new_target));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, collator_obj,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
Handle<JSCollator> collator = Handle<JSCollator>::cast(collator_obj);
- collator->set_flags(0);
// 6. Return ? InitializeCollator(collator, locales, options).
- RETURN_RESULT_OR_FAILURE(isolate, JSCollator::InitializeCollator(
- isolate, collator, locales, options));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSCollator::Initialize(isolate, collator, locales, options));
+}
+
+BUILTIN(CollatorPrototypeResolvedOptions) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSCollator, collator_holder,
+ "Intl.Collator.prototype.resolvedOptions");
+ return *JSCollator::ResolvedOptions(isolate, collator_holder);
+}
+
+BUILTIN(CollatorSupportedLocalesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::SupportedLocalesOf(isolate, ICUService::kCollator, locales,
+ options));
}
BUILTIN(CollatorPrototypeCompare) {
@@ -1211,21 +974,8 @@ BUILTIN(CollatorPrototypeCompare) {
return *bound_compare;
}
- Handle<NativeContext> native_context(isolate->context()->native_context(),
- isolate);
- Handle<Context> context = isolate->factory()->NewBuiltinContext(
- native_context, JSCollator::ContextSlot::kLength);
-
- // 4.b. Set F.[[Collator]] to collator.
- context->set(JSCollator::ContextSlot::kCollator, *collator);
-
- Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
- native_context->collator_internal_compare_shared_fun(), isolate);
- Handle<Map> map = isolate->strict_function_without_prototype_map();
-
- // 4.a. Let F be a new built-in function object as defined in 10.3.3.1.
- Handle<JSFunction> new_bound_compare_function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+ Handle<JSFunction> new_bound_compare_function = CreateBoundFunction(
+ isolate, collator, Builtins::kCollatorInternalCompare, 2);
// 4.c. Set collator.[[BoundCompare]] to F.
collator->set_bound_compare(*new_bound_compare_function);
@@ -1242,7 +992,8 @@ BUILTIN(CollatorInternalCompare) {
// 2. Assert: Type(collator) is Object and collator has an
// [[InitializedCollator]] internal slot.
Handle<JSCollator> collator_holder = Handle<JSCollator>(
- JSCollator::cast(context->get(JSCollator::ContextSlot::kCollator)),
+ JSCollator::cast(context->get(
+ static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
isolate);
// 3. If x is not provided, let x be undefined.
@@ -1263,71 +1014,280 @@ BUILTIN(CollatorInternalCompare) {
return *Intl::CompareStrings(isolate, collator_holder, string_x, string_y);
}
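
Step 4.c caches the bound function on the collator, so repeated reads of the getter return the same function, and the bound compare needs no receiver. A sketch (the ordering itself comes from ICU's German collation data):

    const collator = new Intl.Collator("de");
    const compare = collator.compare;      // creates and caches [[BoundCompare]]
    ["b", "ä", "a"].sort(compare);         // e.g. ["a", "ä", "b"]
    collator.compare === compare;          // true: the cached function is reused
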
-BUILTIN(BreakIteratorPrototypeAdoptText) {
- const char* const method = "get Intl.v8BreakIterator.prototype.adoptText";
+BUILTIN(SegmenterConstructor) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSObject, break_iterator_holder, method);
- if (!Intl::IsObjectOfType(isolate, break_iterator_holder,
- Intl::Type::kBreakIterator)) {
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kSegmenter);
+
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- isolate->factory()->NewStringFromAsciiChecked(method),
- break_iterator_holder));
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromStaticChars(
+ "Intl.Segmenter")));
}
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<Object> bound_adopt_text =
- Handle<Object>(break_iterator_holder->GetEmbedderField(
- V8BreakIterator::kBoundAdoptTextIndex),
- isolate);
+ Handle<JSObject> result;
+ // 2. Let segmenter be OrdinaryCreateFromConstructor(NewTarget,
+ // "%SegmenterPrototype%").
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+ Handle<JSSegmenter> segmenter = Handle<JSSegmenter>::cast(result);
+ segmenter->set_flags(0);
- if (!bound_adopt_text->IsUndefined(isolate)) {
- DCHECK(bound_adopt_text->IsJSFunction());
- return *bound_adopt_text;
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSSegmenter::Initialize(isolate, segmenter, locales, options));
+}
+
+BUILTIN(SegmenterSupportedLocalesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::SupportedLocalesOf(isolate, ICUService::kSegmenter,
+ locales, options));
+}
+
+BUILTIN(SegmenterPrototypeResolvedOptions) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSSegmenter, segmenter_holder,
+ "Intl.Segmenter.prototype.resolvedOptions");
+ return *JSSegmenter::ResolvedOptions(isolate, segmenter_holder);
+}
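
Intl.Segmenter was a stage-3 proposal at this point; the builtins above cover only the constructor, supportedLocalesOf, and resolvedOptions. A sketch, where the granularity option is the proposal's (assumed here, not shown in this hunk):

    const seg = new Intl.Segmenter("en", { granularity: "word" });
    seg.resolvedOptions();                      // e.g. { locale: "en", granularity: "word" }
    Intl.Segmenter.supportedLocalesOf(["en"]);  // likely ["en"]
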
+
+BUILTIN(V8BreakIteratorConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSReceiver> new_target;
+
+ if (args.new_target()->IsUndefined(isolate)) {
+ new_target = args.target();
+ } else {
+ new_target = Handle<JSReceiver>::cast(args.new_target());
}
- Handle<NativeContext> native_context(isolate->context()->native_context(),
- isolate);
- Handle<Context> context = isolate->factory()->NewBuiltinContext(
- native_context, static_cast<int>(V8BreakIterator::ContextSlot::kLength));
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
- context->set(static_cast<int>(V8BreakIterator::ContextSlot::kV8BreakIterator),
- *break_iterator_holder);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
- Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
- native_context->break_iterator_internal_adopt_text_shared_fun(), isolate);
- Handle<Map> map = isolate->strict_function_without_prototype_map();
+ Handle<JSObject> break_iterator_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, break_iterator_obj,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+ Handle<JSV8BreakIterator> break_iterator =
+ Handle<JSV8BreakIterator>::cast(break_iterator_obj);
- Handle<JSFunction> new_bound_adopt_text_function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ JSV8BreakIterator::Initialize(isolate, break_iterator, locales, options));
+}
- break_iterator_holder->SetEmbedderField(V8BreakIterator::kBoundAdoptTextIndex,
- *new_bound_adopt_text_function);
+BUILTIN(V8BreakIteratorPrototypeResolvedOptions) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator,
+ "Intl.v8BreakIterator.prototype.resolvedOptions");
+ return *JSV8BreakIterator::ResolvedOptions(isolate, break_iterator);
+}
+
+BUILTIN(V8BreakIteratorPrototypeAdoptText) {
+ const char* const method = "get Intl.v8BreakIterator.prototype.adoptText";
+ HandleScope scope(isolate);
+
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+
+ Handle<Object> bound_adopt_text(break_iterator->bound_adopt_text(), isolate);
+ if (!bound_adopt_text->IsUndefined(isolate)) {
+ DCHECK(bound_adopt_text->IsJSFunction());
+ return *bound_adopt_text;
+ }
+ Handle<JSFunction> new_bound_adopt_text_function = CreateBoundFunction(
+ isolate, break_iterator, Builtins::kV8BreakIteratorInternalAdoptText, 1);
+ break_iterator->set_bound_adopt_text(*new_bound_adopt_text_function);
return *new_bound_adopt_text_function;
}
-BUILTIN(BreakIteratorInternalAdoptText) {
+BUILTIN(V8BreakIteratorInternalAdoptText) {
HandleScope scope(isolate);
Handle<Context> context = Handle<Context>(isolate->context(), isolate);
- Handle<JSObject> break_iterator_holder = Handle<JSObject>(
- JSObject::cast(context->get(
- static_cast<int>(V8BreakIterator::ContextSlot::kV8BreakIterator))),
+ Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+ JSV8BreakIterator::cast(context->get(
+ static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
isolate);
- DCHECK(Intl::IsObjectOfType(isolate, break_iterator_holder,
- Intl::Type::kBreakIterator));
-
Handle<Object> input_text = args.atOrUndefined(isolate, 1);
Handle<String> text;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, text,
Object::ToString(isolate, input_text));
- V8BreakIterator::AdoptText(isolate, break_iterator_holder, text);
+ JSV8BreakIterator::AdoptText(isolate, break_iterator_holder, text);
return ReadOnlyRoots(isolate).undefined_value();
}
+BUILTIN(V8BreakIteratorPrototypeFirst) {
+ const char* const method = "get Intl.v8BreakIterator.prototype.first";
+ HandleScope scope(isolate);
+
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+
+ Handle<Object> bound_first(break_iterator_holder->bound_first(), isolate);
+ if (!bound_first->IsUndefined(isolate)) {
+ DCHECK(bound_first->IsJSFunction());
+ return *bound_first;
+ }
+
+ Handle<JSFunction> new_bound_first_function =
+ CreateBoundFunction(isolate, break_iterator_holder,
+ Builtins::kV8BreakIteratorInternalFirst, 0);
+ break_iterator_holder->set_bound_first(*new_bound_first_function);
+ return *new_bound_first_function;
+}
+
+BUILTIN(V8BreakIteratorInternalFirst) {
+ HandleScope scope(isolate);
+ Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+ Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+ JSV8BreakIterator::cast(context->get(
+ static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+ isolate);
+
+ icu::BreakIterator* break_iterator =
+ break_iterator_holder->break_iterator()->raw();
+ CHECK_NOT_NULL(break_iterator);
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->first());
+}
+
+BUILTIN(V8BreakIteratorPrototypeNext) {
+ const char* const method = "get Intl.v8BreakIterator.prototype.next";
+ HandleScope scope(isolate);
+
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+
+ Handle<Object> bound_next(break_iterator_holder->bound_next(), isolate);
+ if (!bound_next->IsUndefined(isolate)) {
+ DCHECK(bound_next->IsJSFunction());
+ return *bound_next;
+ }
+
+ Handle<JSFunction> new_bound_next_function =
+ CreateBoundFunction(isolate, break_iterator_holder,
+ Builtins::kV8BreakIteratorInternalNext, 0);
+ break_iterator_holder->set_bound_next(*new_bound_next_function);
+ return *new_bound_next_function;
+}
+
+BUILTIN(V8BreakIteratorInternalNext) {
+ HandleScope scope(isolate);
+ Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+ Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+ JSV8BreakIterator::cast(context->get(
+ static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+ isolate);
+
+ icu::BreakIterator* break_iterator =
+ break_iterator_holder->break_iterator()->raw();
+ CHECK_NOT_NULL(break_iterator);
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->next());
+}
+
+BUILTIN(V8BreakIteratorPrototypeCurrent) {
+ const char* const method = "get Intl.v8BreakIterator.prototype.current";
+ HandleScope scope(isolate);
+
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+
+ Handle<Object> bound_current(break_iterator_holder->bound_current(), isolate);
+ if (!bound_current->IsUndefined(isolate)) {
+ DCHECK(bound_current->IsJSFunction());
+ return *bound_current;
+ }
+
+ Handle<JSFunction> new_bound_current_function =
+ CreateBoundFunction(isolate, break_iterator_holder,
+ Builtins::kV8BreakIteratorInternalCurrent, 0);
+ break_iterator_holder->set_bound_current(*new_bound_current_function);
+ return *new_bound_current_function;
+}
+
+BUILTIN(V8BreakIteratorInternalCurrent) {
+ HandleScope scope(isolate);
+ Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+ Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+ JSV8BreakIterator::cast(context->get(
+ static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+ isolate);
+
+ icu::BreakIterator* break_iterator =
+ break_iterator_holder->break_iterator()->raw();
+ CHECK_NOT_NULL(break_iterator);
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->current());
+}
+
+BUILTIN(V8BreakIteratorPrototypeBreakType) {
+ const char* const method = "get Intl.v8BreakIterator.prototype.breakType";
+ HandleScope scope(isolate);
+
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+
+ Handle<Object> bound_break_type(break_iterator_holder->bound_break_type(),
+ isolate);
+ if (!bound_break_type->IsUndefined(isolate)) {
+ DCHECK(bound_break_type->IsJSFunction());
+ return *bound_break_type;
+ }
+
+ Handle<JSFunction> new_bound_break_type_function =
+ CreateBoundFunction(isolate, break_iterator_holder,
+ Builtins::kV8BreakIteratorInternalBreakType, 0);
+ break_iterator_holder->set_bound_break_type(*new_bound_break_type_function);
+ return *new_bound_break_type_function;
+}
+
+BUILTIN(V8BreakIteratorInternalBreakType) {
+ HandleScope scope(isolate);
+ Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+ Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+ JSV8BreakIterator::cast(context->get(
+ static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+ isolate);
+
+ icu::BreakIterator* break_iterator =
+ break_iterator_holder->break_iterator()->raw();
+ CHECK_NOT_NULL(break_iterator);
+
+ int32_t status = break_iterator->getRuleStatus();
+ // Keep return values in sync with JavaScript BreakType enum.
+ if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
+ return *isolate->factory()->NewStringFromStaticChars("none");
+ } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
+ return ReadOnlyRoots(isolate).number_string();
+ } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
+ return *isolate->factory()->NewStringFromStaticChars("letter");
+ } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
+ return *isolate->factory()->NewStringFromStaticChars("kana");
+ } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
+ return *isolate->factory()->NewStringFromStaticChars("ideo");
+ } else {
+ return *isolate->factory()->NewStringFromStaticChars("unknown");
+ }
+}
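
Intl.v8BreakIterator is a non-standard V8 extension; each prototype getter above returns a cached bound function, mirroring the collator pattern. A sketch of the surface (positions and break types come from ICU, so treat the outputs as illustrative):

    // @ts-ignore: non-standard, V8-only API
    const it = new Intl.v8BreakIterator(["en"], { type: "word" });
    it.adoptText("Hello world");
    it.first();       // 0
    it.next();        // e.g. 5, the boundary after "Hello"
    it.current();     // e.g. 5, the most recent boundary
    it.breakType();   // e.g. "letter"; one of none/number/letter/kana/ideo/unknown
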
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-intl.h b/deps/v8/src/builtins/builtins-intl.h
deleted file mode 100644
index 419ff14db1..0000000000
--- a/deps/v8/src/builtins/builtins-intl.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BUILTINS_BUILTINS_INTL_H_
-#define V8_BUILTINS_BUILTINS_INTL_H_
-
-#include <stdint.h>
-#include <vector>
-
-namespace v8 {
-namespace internal {
-
-struct NumberFormatSpan {
- int32_t field_id;
- int32_t begin_pos;
- int32_t end_pos;
-
- NumberFormatSpan() {}
- NumberFormatSpan(int32_t field_id, int32_t begin_pos, int32_t end_pos)
- : field_id(field_id), begin_pos(begin_pos), end_pos(end_pos) {}
-};
-
-std::vector<NumberFormatSpan> FlattenRegionsToParts(
- std::vector<NumberFormatSpan>* regions);
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_BUILTINS_BUILTINS_INTL_H_
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 1e16a6b1de..802ed2edb2 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -5,6 +5,10 @@
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/growable-fixed-array-gen.h"
+#include "src/builtins/builtins-string-gen.h"
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
#include "src/heap/factory-inl.h"
namespace v8 {
@@ -38,8 +42,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
BIND(&if_not_callable);
{
- Node* ret = CallRuntime(Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kNotIterable), object);
+ Node* ret = CallRuntime(Runtime::kThrowIteratorError, context, object);
GotoIfException(ret, if_exception, exception);
Unreachable();
}
@@ -197,62 +200,104 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(
TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
TNode<Context> context, TNode<Object> iterable, TNode<Object> iterator_fn) {
- Label fast_path(this), slow_path(this), done(this);
+ // 1. Let iteratorRecord be ? GetIterator(items, method).
+ IteratorRecord iterator_record = GetIterator(context, iterable, iterator_fn);
+
+ // 2. Let values be a new empty List.
+ GrowableFixedArray values(state());
+
+ Variable* vars[] = {values.var_array(), values.var_length(),
+ values.var_capacity()};
+ Label loop_start(this, 3, vars), done(this);
+ Goto(&loop_start);
+ // 3. Let next be true.
+ // 4. Repeat, while next is not false
+ BIND(&loop_start);
+ {
+ // a. Set next to ? IteratorStep(iteratorRecord).
+ TNode<Object> next = CAST(IteratorStep(context, iterator_record, &done));
+ // b. If next is not false, then
+ // i. Let nextValue be ? IteratorValue(next).
+ TNode<Object> next_value = CAST(IteratorValue(context, next));
+ // ii. Append nextValue to the end of the List values.
+ values.Push(next_value);
+ Goto(&loop_start);
+ }
- TVARIABLE(JSArray, created_list);
+ BIND(&done);
+ return values.ToJSArray(context);
+}
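
For orientation, a minimal TypeScript equivalent of the spec steps the CSA loop implements (simplified: for-of performs the Symbol.iterator lookup that the builtin receives explicitly as iterator_fn):

    function iterableToList<T>(items: Iterable<T>): T[] {
      const values: T[] = [];            // 2. a new empty List
      for (const nextValue of items) {   // 4.a/4.b.i: IteratorStep + IteratorValue
        values.push(nextValue);          // 4.b.ii: append nextValue to values
      }
      return values;
    }
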
- Branch(IsFastJSArrayWithNoCustomIteration(iterable, context), &fast_path,
- &slow_path);
+TF_BUILTIN(IterableToList, IteratorBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
- // This is a fast-path for ignoring the iterator.
- BIND(&fast_path);
- {
- TNode<JSArray> input_array = CAST(iterable);
- created_list = CAST(CloneFastJSArray(context, input_array));
- Goto(&done);
- }
+ Return(IterableToList(context, iterable, iterator_fn));
+}
- BIND(&slow_path);
- {
- // 1. Let iteratorRecord be ? GetIterator(items, method).
- IteratorRecord iterator_record =
- GetIterator(context, iterable, iterator_fn);
+// This builtin always returns a new JSArray and is thus safe to use even in the
+// presence of code that may call back into user-JS. This builtin will take the
+// fast path if the iterable is a fast array and the Array prototype and
+// Symbol.iterator are untouched. The fast path skips the iterator and copies the
+// backing store to the new array. Note that if the array has holes, the holes
+// will be copied to the new array, which is inconsistent with the behavior of
+// an actual iteration, where holes should be replaced with undefined (if the
+// prototype has no elements). To maintain the correct behavior for holey
+// arrays, use the builtins IterableToList or IterableToListWithSymbolLookup.
+TF_BUILTIN(IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
- // 2. Let values be a new empty List.
- GrowableFixedArray values(state());
+ Label slow_path(this);
- Variable* vars[] = {values.var_array(), values.var_length(),
- values.var_capacity()};
- Label loop_start(this, 3, vars), loop_end(this);
- Goto(&loop_start);
- // 3. Let next be true.
- // 4. Repeat, while next is not false
- BIND(&loop_start);
- {
- // a. Set next to ? IteratorStep(iteratorRecord).
- TNode<Object> next =
- CAST(IteratorStep(context, iterator_record, &loop_end));
- // b. If next is not false, then
- // i. Let nextValue be ? IteratorValue(next).
- TNode<Object> next_value = CAST(IteratorValue(context, next));
- // ii. Append nextValue to the end of the List values.
- values.Push(next_value);
- Goto(&loop_start);
- }
- BIND(&loop_end);
+ GotoIfNot(IsFastJSArrayWithNoCustomIteration(iterable, context), &slow_path);
- created_list = values.ToJSArray(context);
- Goto(&done);
- }
+ // The fast path will copy holes to the new array.
+ TailCallBuiltin(Builtins::kCloneFastJSArray, context, iterable);
- BIND(&done);
- return created_list.value();
+ BIND(&slow_path);
+ TailCallBuiltin(Builtins::kIterableToList, context, iterable, iterator_fn);
}
-TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
- TNode<Context> context, TNode<Object> iterable) {
- TNode<Object> method = GetIteratorMethod(context, iterable);
- return IterableToList(context, iterable, method);
+// This builtin loads the property Symbol.iterator as the iterator, and has a
+// fast path for fast arrays and another one for strings. These fast paths will
+// only be taken if Symbol.iterator and the Iterator prototype are not modified
+// in a way that changes the original iteration behavior.
+// * In case of fast holey arrays, holes will be converted to undefined to
+// reflect iteration semantics. Note that replacement by undefined is only
+// correct when the NoElements protector is valid.
+TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+
+ Label slow_path(this), check_string(this);
+
+ GotoIfForceSlowPath(&slow_path);
+
+ GotoIfNot(IsFastJSArrayWithNoCustomIteration(iterable, context),
+ &check_string);
+
+ // Fast path for fast JSArray.
+ TailCallBuiltin(Builtins::kCloneFastJSArrayFillingHoles, context, iterable);
+
+ BIND(&check_string);
+ {
+ StringBuiltinsAssembler string_assembler(state());
+ GotoIfNot(string_assembler.IsStringPrimitiveWithNoCustomIteration(iterable,
+ context),
+ &slow_path);
+
+ // Fast path for strings.
+ TailCallBuiltin(Builtins::kStringToList, context, iterable);
+ }
+
+ BIND(&slow_path);
+ {
+ TNode<Object> iterator_fn = GetIteratorMethod(context, iterable);
+ TailCallBuiltin(Builtins::kIterableToList, context, iterable, iterator_fn);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 71d4b9753c..f61f7f52c0 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -54,10 +54,11 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
Variable* exception);
- // /#sec-iterabletolist
+ // #sec-iterabletolist
+ // Build a JSArray by iterating over {iterable} using {iterator_fn},
+  // following the ECMAScript operation with the same name.
TNode<JSArray> IterableToList(TNode<Context> context, TNode<Object> iterable,
TNode<Object> iterator_fn);
- TNode<JSArray> IterableToList(TNode<Context> context, TNode<Object> iterable);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index d1314733c7..c11722ec6b 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -86,7 +86,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
{
// Optimized code slot is a weak reference.
TNode<Code> optimized_code =
- CAST(ToWeakHeapObject(maybe_optimized_code_entry, &fallthrough));
+ CAST(GetHeapObjectAssumeWeak(maybe_optimized_code_entry, &fallthrough));
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index 952bdda5de..36b0d30939 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -413,7 +413,16 @@ TF_BUILTIN(MathRandom, CodeStubAssembler) {
GotoIf(SmiAbove(smi_index.value(), SmiConstant(0)), &if_cached);
// Cache exhausted, populate the cache. Return value is the new index.
- smi_index = CAST(CallRuntime(Runtime::kGenerateRandomNumbers, context));
+ Node* const refill_math_random =
+ ExternalConstant(ExternalReference::refill_math_random());
+ Node* const isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ MachineType type_tagged = MachineType::AnyTagged();
+ MachineType type_ptr = MachineType::Pointer();
+
+ smi_index =
+ CAST(CallCFunction2(type_tagged, type_ptr, type_tagged,
+ refill_math_random, isolate_ptr, native_context));
Goto(&if_cached);
// Compute next index by decrement.
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index cfc81612f2..582f6242ad 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -525,17 +525,15 @@ TF_BUILTIN(Add, AddStubAssembler) {
BIND(&string_add_convert_left);
{
// Convert {left} to a String and concatenate it with the String {right}.
- Callable callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
- Return(CallStub(callable, context, var_left.value(), var_right.value()));
+ TailCallBuiltin(Builtins::kStringAdd_ConvertLeft, context, var_left.value(),
+ var_right.value());
}
BIND(&string_add_convert_right);
{
// Convert {right} to a String and concatenate it with the String {left}.
- Callable callable = CodeFactory::StringAdd(
- isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
- Return(CallStub(callable, context, var_left.value(), var_right.value()));
+ TailCallBuiltin(Builtins::kStringAdd_ConvertRight, context,
+ var_left.value(), var_right.value());
}
BIND(&do_bigint_add);
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 2f21c2d4b1..d15c41105d 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -111,6 +111,9 @@ BUILTIN(NumberPrototypeToFixed) {
// ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ r1 [ , r2 ] ] )
BUILTIN(NumberPrototypeToLocaleString) {
HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kNumberToLocaleString);
+
Handle<Object> value = args.at(0);
// Unwrap the receiver {value}.
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index a8d83e641f..fbac2e1abc 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -46,11 +46,6 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
- // Checks that |map| has only simple properties, returns bitfield3.
- TNode<Uint32T> EnsureOnlyHasSimpleProperties(TNode<Map> map,
- TNode<Int32T> instance_type,
- Label* bailout);
-
void ObjectAssignFast(TNode<Context> context, TNode<JSReceiver> to,
TNode<Object> from, Label* slow);
};
@@ -96,8 +91,7 @@ void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
Node* lhs = StringConstant("[object ");
Node* rhs = StringConstant("]");
- Callable callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE);
Return(CallStub(callable, context, CallStub(callable, context, lhs, string),
rhs));
@@ -304,7 +298,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
// So the array filled by the-hole even if enum_cache exists.
FillFixedArrayWithValue(PACKED_ELEMENTS, values_or_entries,
IntPtrConstant(0), object_enum_length,
- Heap::kTheHoleValueRootIndex);
+ RootIndex::kTheHoleValue);
TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0));
TVARIABLE(IntPtrT, var_descriptor_number, IntPtrConstant(0));
@@ -524,18 +518,6 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
args.PopAndReturn(to);
}
-TNode<Uint32T> ObjectBuiltinsAssembler::EnsureOnlyHasSimpleProperties(
- TNode<Map> map, TNode<Int32T> instance_type, Label* bailout) {
- GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout);
-
- TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
- GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask |
- Map::HasHiddenPrototypeBit::kMask),
- bailout);
-
- return bit_field3;
-}
-
// This function mimics what FastAssign() function does for C++ implementation.
void ObjectBuiltinsAssembler::ObjectAssignFast(TNode<Context> context,
TNode<JSReceiver> to,
@@ -553,8 +535,9 @@ void ObjectBuiltinsAssembler::ObjectAssignFast(TNode<Context> context,
GotoIf(IsJSReceiverInstanceType(from_instance_type), &cont);
GotoIfNot(IsStringInstanceType(from_instance_type), &done);
{
- Branch(SmiEqual(LoadStringLengthAsSmi(CAST(from)), SmiConstant(0)), &done,
- slow);
+ Branch(
+ Word32Equal(LoadStringLengthAsWord32(CAST(from)), Int32Constant(0)),
+ &done, slow);
}
BIND(&cont);
}
@@ -567,132 +550,18 @@ void ObjectBuiltinsAssembler::ObjectAssignFast(TNode<Context> context,
TNode<BoolT> to_is_simple_receiver = IsSimpleObjectMap(to_map);
GotoIfNot(IsJSObjectInstanceType(from_instance_type), slow);
- TNode<Uint32T> from_bit_field3 =
- EnsureOnlyHasSimpleProperties(from_map, from_instance_type, slow);
-
GotoIfNot(IsEmptyFixedArray(LoadElements(CAST(from))), slow);
- TNode<DescriptorArray> from_descriptors = LoadMapDescriptors(from_map);
- TNode<Uint32T> nof_descriptors =
- DecodeWord32<Map::NumberOfOwnDescriptorsBits>(from_bit_field3);
-
- TVARIABLE(BoolT, var_stable, Int32TrueConstant());
- VariableList list({&var_stable}, zone());
-
- DescriptorArrayForEach(
- list, Unsigned(Int32Constant(0)), nof_descriptors,
- [=, &var_stable](TNode<UintPtrT> descriptor_key_index) {
- TNode<Name> next_key = CAST(
- LoadWeakFixedArrayElement(from_descriptors, descriptor_key_index));
-
- TVARIABLE(Object, var_value, SmiConstant(0));
- Label do_store(this), next_iteration(this);
-
- {
- TVARIABLE(Map, var_from_map);
- TVARIABLE(HeapObject, var_meta_storage);
- TVARIABLE(IntPtrT, var_entry);
- TVARIABLE(Uint32T, var_details);
- Label if_found(this);
-
- Label if_found_fast(this), if_found_dict(this);
-
- Label if_stable(this), if_not_stable(this);
- Branch(var_stable.value(), &if_stable, &if_not_stable);
- BIND(&if_stable);
- {
- // Directly decode from the descriptor array if |from| did not
- // change shape.
- var_from_map = from_map;
- var_meta_storage = from_descriptors;
- var_entry = Signed(descriptor_key_index);
- Goto(&if_found_fast);
- }
- BIND(&if_not_stable);
- {
- // If the map did change, do a slower lookup. We are still
- // guaranteed that the object has a simple shape, and that the key
- // is a name.
- var_from_map = LoadMap(CAST(from));
- TryLookupPropertyInSimpleObject(
- CAST(from), var_from_map.value(), next_key, &if_found_fast,
- &if_found_dict, &var_meta_storage, &var_entry, &next_iteration);
- }
-
- BIND(&if_found_fast);
- {
- TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value());
- TNode<IntPtrT> name_index = var_entry.value();
-
- // Skip non-enumerable properties.
- var_details = LoadDetailsByKeyIndex(descriptors, name_index);
- GotoIf(IsSetWord32(var_details.value(),
- PropertyDetails::kAttributesDontEnumMask),
- &next_iteration);
-
- LoadPropertyFromFastObject(from, var_from_map.value(), descriptors,
- name_index, var_details.value(),
- &var_value);
- Goto(&if_found);
- }
- BIND(&if_found_dict);
- {
- Node* dictionary = var_meta_storage.value();
- Node* entry = var_entry.value();
-
- TNode<Uint32T> details =
- LoadDetailsByKeyIndex<NameDictionary>(dictionary, entry);
- // Skip non-enumerable properties.
- GotoIf(
- IsSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
- &next_iteration);
-
- var_details = details;
- var_value = LoadValueByKeyIndex<NameDictionary>(dictionary, entry);
- Goto(&if_found);
- }
-
- // Here we have details and value which could be an accessor.
- BIND(&if_found);
- {
- Label slow_load(this, Label::kDeferred);
-
- var_value =
- CallGetterIfAccessor(var_value.value(), var_details.value(),
- context, from, &slow_load, kCallJSGetter);
- Goto(&do_store);
-
- BIND(&slow_load);
- {
- var_value =
- CallRuntime(Runtime::kGetProperty, context, from, next_key);
- Goto(&do_store);
- }
- }
- }
-
- // Store property to target object.
- BIND(&do_store);
- {
- KeyedStoreGenericGenerator::SetProperty(
- state(), context, to, to_is_simple_receiver, next_key,
- var_value.value(), LanguageMode::kStrict);
-
- // Check if the |from| object is still stable, i.e. we can proceed
- // using property details from preloaded |from_descriptors|.
- var_stable = Select<BoolT>(
- var_stable.value(),
- [=] { return WordEqual(LoadMap(CAST(from)), from_map); },
- [=] { return Int32FalseConstant(); });
-
- Goto(&next_iteration);
- }
-
- BIND(&next_iteration);
- });
+ ForEachEnumerableOwnProperty(context, from_map, CAST(from),
+ [=](TNode<Name> key, TNode<Object> value) {
+ KeyedStoreGenericGenerator::SetProperty(
+ state(), context, to,
+ to_is_simple_receiver, key, value,
+ LanguageMode::kStrict);
+ },
+ slow);
Goto(&done);
-
BIND(&done);
}
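
The rewrite delegates the descriptor walk to ForEachEnumerableOwnProperty, but the JS-visible contract of the fast path is unchanged: only enumerable own properties are copied, and source getters are invoked (the removed CallGetterIfAccessor path). A sketch:

    const from = {
      get computed() { return 42; },   // the getter runs; its value is copied
    };
    Object.defineProperty(from, "hidden", { value: 1, enumerable: false });
    const to = Object.assign({}, from);
    to.computed;                       // 42, now a plain data property
    "hidden" in to;                    // false: non-enumerables are skipped
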
@@ -1008,13 +877,13 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
BIND(&if_arguments);
{
- var_default.Bind(LoadRoot(Heap::karguments_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::karguments_to_string));
Goto(&checkstringtag);
}
BIND(&if_array);
{
- var_default.Bind(LoadRoot(Heap::karray_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::karray_to_string));
Goto(&checkstringtag);
}
@@ -1027,26 +896,26 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
boolean_constructor, JSFunction::kPrototypeOrInitialMapOffset);
Node* boolean_prototype =
LoadObjectField(boolean_initial_map, Map::kPrototypeOffset);
- var_default.Bind(LoadRoot(Heap::kboolean_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::kboolean_to_string));
var_holder.Bind(boolean_prototype);
Goto(&checkstringtag);
}
BIND(&if_date);
{
- var_default.Bind(LoadRoot(Heap::kdate_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::kdate_to_string));
Goto(&checkstringtag);
}
BIND(&if_error);
{
- var_default.Bind(LoadRoot(Heap::kerror_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::kerror_to_string));
Goto(&checkstringtag);
}
BIND(&if_function);
{
- var_default.Bind(LoadRoot(Heap::kfunction_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::kfunction_to_string));
Goto(&checkstringtag);
}
@@ -1059,7 +928,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
number_constructor, JSFunction::kPrototypeOrInitialMapOffset);
Node* number_prototype =
LoadObjectField(number_initial_map, Map::kPrototypeOffset);
- var_default.Bind(LoadRoot(Heap::knumber_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::knumber_to_string));
var_holder.Bind(number_prototype);
Goto(&checkstringtag);
}
@@ -1067,7 +936,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
BIND(&if_object);
{
CSA_ASSERT(this, IsJSReceiver(receiver));
- var_default.Bind(LoadRoot(Heap::kobject_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
Goto(&checkstringtag);
}
@@ -1082,10 +951,10 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
GotoIf(IsSymbolMap(receiver_map), &if_symbol);
GotoIf(IsUndefined(receiver), &return_undefined);
CSA_ASSERT(this, IsNull(receiver));
- Return(LoadRoot(Heap::knull_to_stringRootIndex));
+ Return(LoadRoot(RootIndex::knull_to_string));
BIND(&return_undefined);
- Return(LoadRoot(Heap::kundefined_to_stringRootIndex));
+ Return(LoadRoot(RootIndex::kundefined_to_string));
}
BIND(&if_proxy);
@@ -1099,12 +968,12 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
CallRuntime(Runtime::kArrayIsArray, context, receiver);
TNode<String> builtin_tag = Select<String>(
IsTrue(receiver_is_array),
- [=] { return CAST(LoadRoot(Heap::kArray_stringRootIndex)); },
+ [=] { return CAST(LoadRoot(RootIndex::kArray_string)); },
[=] {
return Select<String>(
IsCallableMap(receiver_map),
- [=] { return CAST(LoadRoot(Heap::kFunction_stringRootIndex)); },
- [=] { return CAST(LoadRoot(Heap::kObject_stringRootIndex)); });
+ [=] { return CAST(LoadRoot(RootIndex::kFunction_string)); },
+ [=] { return CAST(LoadRoot(RootIndex::kObject_string)); });
});
// Lookup the @@toStringTag property on the {receiver}.
@@ -1125,7 +994,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
BIND(&if_regexp);
{
- var_default.Bind(LoadRoot(Heap::kregexp_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::kregexp_to_string));
Goto(&checkstringtag);
}
@@ -1138,7 +1007,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
string_constructor, JSFunction::kPrototypeOrInitialMapOffset);
Node* string_prototype =
LoadObjectField(string_initial_map, Map::kPrototypeOffset);
- var_default.Bind(LoadRoot(Heap::kstring_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::kstring_to_string));
var_holder.Bind(string_prototype);
Goto(&checkstringtag);
}
@@ -1152,7 +1021,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
symbol_constructor, JSFunction::kPrototypeOrInitialMapOffset);
Node* symbol_prototype =
LoadObjectField(symbol_initial_map, Map::kPrototypeOffset);
- var_default.Bind(LoadRoot(Heap::kobject_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
var_holder.Bind(symbol_prototype);
Goto(&checkstringtag);
}
@@ -1166,7 +1035,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset);
Node* bigint_prototype =
LoadObjectField(bigint_initial_map, Map::kPrototypeOffset);
- var_default.Bind(LoadRoot(Heap::kobject_to_stringRootIndex));
+ var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
var_holder.Bind(bigint_prototype);
Goto(&checkstringtag);
}
@@ -1209,7 +1078,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
BIND(&return_generic);
{
Node* tag = GetProperty(context, ToObject(context, receiver),
- LoadRoot(Heap::kto_string_tag_symbolRootIndex));
+ LoadRoot(RootIndex::kto_string_tag_symbol));
GotoIf(TaggedIsSmi(tag), &return_default);
GotoIfNot(IsString(tag), &return_default);
ReturnToStringFormat(context, tag);
@@ -1271,7 +1140,7 @@ TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) {
TNode<MaybeObject> maybe_map = LoadMaybeWeakObjectField(
prototype_info, PrototypeInfo::kObjectCreateMapOffset);
GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()), &call_runtime);
- map.Bind(ToWeakHeapObject(maybe_map, &call_runtime));
+ map.Bind(GetHeapObjectAssumeWeak(maybe_map, &call_runtime));
Goto(&instantiate_map);
}
@@ -1323,7 +1192,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
GotoIf(IsSpecialReceiverMap(properties_map), &call_runtime);
// Stay on the fast path only if there are no elements.
GotoIfNot(WordEqual(LoadElements(properties),
- LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
+ LoadRoot(RootIndex::kEmptyFixedArray)),
&call_runtime);
// Handle dictionary objects or fast objects with properties in runtime.
Node* bit_field3 = LoadMapBitField3(properties_map);
@@ -1367,7 +1236,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
prototype_info, PrototypeInfo::kObjectCreateMapOffset);
GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()),
&call_runtime);
- map.Bind(ToWeakHeapObject(maybe_map, &call_runtime));
+ map.Bind(GetHeapObjectAssumeWeak(maybe_map, &call_runtime));
Goto(&instantiate_map);
}
@@ -1476,8 +1345,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
formal_parameter_count);
Node* parameters_and_registers = AllocateFixedArray(HOLEY_ELEMENTS, size);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
- IntPtrConstant(0), size,
- Heap::kUndefinedValueRootIndex);
+ IntPtrConstant(0), size, RootIndex::kUndefinedValue);
// TODO(cbruni): support start_offset to avoid double initialization.
Node* result = AllocateJSObjectFromMap(maybe_map, nullptr, nullptr, kNone,
kWithSlackTracking);
@@ -1522,7 +1390,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
object = ToObject_Inline(CAST(context), CAST(object));
// 2. Let key be ? ToPropertyKey(P).
- key = ToName(context, key);
+ key = CallBuiltin(Builtins::kToName, context, key);
// 3. Let desc be ? obj.[[GetOwnProperty]](key).
Label if_keyisindex(this), if_iskeyunique(this),
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 241a2041bd..1d43217999 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -28,9 +28,9 @@ Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
Node* const promise = Allocate(JSPromise::kSizeWithEmbedderFields);
StoreMapNoWriteBarrier(promise, promise_map);
StoreObjectFieldRoot(promise, JSPromise::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(promise, JSPromise::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
return promise;
}
@@ -137,7 +137,7 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
CreatePromiseResolvingFunctions(promise, debug_event, native_context);
Node* capability = Allocate(PromiseCapability::kSize);
- StoreMapNoWriteBarrier(capability, Heap::kPromiseCapabilityMapRootIndex);
+ StoreMapNoWriteBarrier(capability, RootIndex::kPromiseCapabilityMap);
StoreObjectFieldNoWriteBarrier(capability,
PromiseCapability::kPromiseOffset, promise);
StoreObjectFieldNoWriteBarrier(capability,
@@ -150,13 +150,13 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
BIND(&if_slow_promise_capability);
{
Node* capability = Allocate(PromiseCapability::kSize);
- StoreMapNoWriteBarrier(capability, Heap::kPromiseCapabilityMapRootIndex);
+ StoreMapNoWriteBarrier(capability, RootIndex::kPromiseCapabilityMap);
StoreObjectFieldRoot(capability, PromiseCapability::kPromiseOffset,
- Heap::kUndefinedValueRootIndex);
+ RootIndex::kUndefinedValue);
StoreObjectFieldRoot(capability, PromiseCapability::kResolveOffset,
- Heap::kUndefinedValueRootIndex);
+ RootIndex::kUndefinedValue);
StoreObjectFieldRoot(capability, PromiseCapability::kRejectOffset,
- Heap::kUndefinedValueRootIndex);
+ RootIndex::kUndefinedValue);
Node* executor_context =
CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
@@ -352,7 +352,7 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
BIND(&if_fulfilled);
{
- var_map.Bind(LoadRoot(Heap::kPromiseFulfillReactionJobTaskMapRootIndex));
+ var_map.Bind(LoadRoot(RootIndex::kPromiseFulfillReactionJobTaskMap));
var_handler.Bind(on_fulfilled);
Goto(&enqueue);
}
@@ -360,7 +360,7 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
BIND(&if_rejected);
{
CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected));
- var_map.Bind(LoadRoot(Heap::kPromiseRejectReactionJobTaskMapRootIndex));
+ var_map.Bind(LoadRoot(RootIndex::kPromiseRejectReactionJobTaskMap));
var_handler.Bind(on_rejected);
GotoIf(PromiseHasHandler(promise), &enqueue);
CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
@@ -401,7 +401,7 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReaction(
Node* next, Node* promise_or_capability, Node* fulfill_handler,
Node* reject_handler) {
Node* const reaction = Allocate(PromiseReaction::kSize);
- StoreMapNoWriteBarrier(reaction, Heap::kPromiseReactionMapRootIndex);
+ StoreMapNoWriteBarrier(reaction, RootIndex::kPromiseReactionMap);
StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kNextOffset, next);
StoreObjectFieldNoWriteBarrier(reaction,
PromiseReaction::kPromiseOrCapabilityOffset,
@@ -431,10 +431,10 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
}
Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
- Heap::RootListIndex map_root_index, Node* context, Node* argument,
- Node* handler, Node* promise_or_capability) {
- DCHECK(map_root_index == Heap::kPromiseFulfillReactionJobTaskMapRootIndex ||
- map_root_index == Heap::kPromiseRejectReactionJobTaskMapRootIndex);
+ RootIndex map_root_index, Node* context, Node* argument, Node* handler,
+ Node* promise_or_capability) {
+ DCHECK(map_root_index == RootIndex::kPromiseFulfillReactionJobTaskMap ||
+ map_root_index == RootIndex::kPromiseRejectReactionJobTaskMap);
Node* const map = LoadRoot(map_root_index);
return AllocatePromiseReactionJobTask(map, context, argument, handler,
promise_or_capability);
@@ -444,7 +444,7 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask(
Node* promise_to_resolve, Node* then, Node* thenable, Node* context) {
Node* const microtask = Allocate(PromiseResolveThenableJobTask::kSize);
StoreMapNoWriteBarrier(microtask,
- Heap::kPromiseResolveThenableJobTaskMapRootIndex);
+ RootIndex::kPromiseResolveThenableJobTaskMap);
StoreObjectFieldNoWriteBarrier(
microtask, PromiseResolveThenableJobTask::kContextOffset, context);
StoreObjectFieldNoWriteBarrier(
@@ -502,8 +502,8 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
// of stores here to avoid screwing up the store buffer.
STATIC_ASSERT(PromiseReaction::kSize == PromiseReactionJobTask::kSize);
if (type == PromiseReaction::kFulfill) {
- StoreMapNoWriteBarrier(
- current, Heap::kPromiseFulfillReactionJobTaskMapRootIndex);
+ StoreMapNoWriteBarrier(current,
+ RootIndex::kPromiseFulfillReactionJobTaskMap);
StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
argument);
StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
@@ -516,7 +516,7 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
Node* handler =
LoadObjectField(current, PromiseReaction::kRejectHandlerOffset);
StoreMapNoWriteBarrier(current,
- Heap::kPromiseRejectReactionJobTaskMapRootIndex);
+ RootIndex::kPromiseRejectReactionJobTaskMap);
StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
argument);
StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
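[Note: the promise-builtins changes above mechanically replace the unscoped Heap::k*RootIndex enumerators with the scoped RootIndex enum. A minimal sketch of why the scoped form is preferable; all names below (OldHeap, StoreObjectFieldRoot) are stand-ins, not V8's actual declarations:

    #include <cstdint>

    // Old style: unscoped enumerators nested on a class leak into the
    // enclosing scope and convert silently to int.
    struct OldHeap {
      enum RootListIndex { kEmptyFixedArrayRootIndex, kUndefinedValueRootIndex };
    };

    // New style: a scoped enum. Enumerators require qualification and do
    // not convert implicitly, so passing a stray integer where a root
    // index is expected becomes a compile-time error.
    enum class RootIndex : uint16_t { kEmptyFixedArray, kUndefinedValue };

    void StoreObjectFieldRoot(RootIndex index) {}  // stand-in signature

    int main() {
      StoreObjectFieldRoot(RootIndex::kUndefinedValue);  // OK
      // StoreObjectFieldRoot(1);  // error: no implicit int -> RootIndex
    }

This is also why the AllocatePromiseReactionJobTask overload above can keep both a RootIndex and a Node* map parameter without ambiguity.]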
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 4954b383fe..39b2a24683 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -89,9 +89,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* AllocatePromiseReaction(Node* next, Node* promise_or_capability,
Node* fulfill_handler, Node* reject_handler);
- Node* AllocatePromiseReactionJobTask(Heap::RootListIndex map_root_index,
- Node* context, Node* argument,
- Node* handler,
+ Node* AllocatePromiseReactionJobTask(RootIndex map_root_index, Node* context,
+ Node* argument, Node* handler,
Node* promise_or_capability);
Node* AllocatePromiseReactionJobTask(Node* map, Node* context, Node* argument,
Node* handler,
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 34caf58688..f0d891910a 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -58,7 +58,7 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
Node* proxy = Allocate(JSProxy::kSize);
StoreMapNoWriteBarrier(proxy, map.value());
StoreObjectFieldRoot(proxy, JSProxy::kPropertiesOrHashOffset,
- Heap::kEmptyPropertyDictionaryRootIndex);
+ RootIndex::kEmptyPropertyDictionary);
StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kTargetOffset, target);
StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHandlerOffset, handler);
@@ -124,7 +124,7 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext(
Node* proxy, Node* native_context) {
Node* const context = Allocate(FixedArray::SizeFor(kProxyContextLength));
- StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
+ StoreMapNoWriteBarrier(context, RootIndex::kFunctionContextMap);
InitializeFunctionContext(native_context, context, kProxyContextLength);
StoreContextElementNoWriteBarrier(context, kProxySlot, proxy);
return context;
@@ -230,9 +230,9 @@ TF_BUILTIN(ProxyRevocable, ProxiesCodeStubAssembler) {
native_context, Context::PROXY_REVOCABLE_RESULT_MAP_INDEX);
StoreMapNoWriteBarrier(result, result_map);
StoreObjectFieldRoot(result, JSProxyRevocableResult::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(result, JSProxyRevocableResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldNoWriteBarrier(result, JSProxyRevocableResult::kProxyOffset,
proxy);
StoreObjectFieldNoWriteBarrier(result, JSProxyRevocableResult::kRevokeOffset,
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index 3dd07a796a..cd3f2b4bed 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -218,7 +218,7 @@ BUILTIN(ReflectSet) {
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, receiver, name, Handle<JSReceiver>::cast(target));
Maybe<bool> result = Object::SetSuperProperty(
- &it, value, LanguageMode::kSloppy, Object::MAY_BE_STORE_FROM_KEYED);
+ &it, value, LanguageMode::kSloppy, StoreOrigin::kMaybeKeyed);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 206602aaa7..30717f41de 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -81,13 +81,13 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
// Initialize the elements.
DCHECK(!IsDoubleElementsKind(elements_kind));
- const Heap::RootListIndex map_index = Heap::kFixedArrayMapRootIndex;
+ const RootIndex map_index = RootIndex::kFixedArrayMap;
DCHECK(Heap::RootIsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(elements, map_index);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
FillFixedArrayWithValue(elements_kind, elements, IntPtrZero(), length_intptr,
- Heap::kUndefinedValueRootIndex);
+ RootIndex::kUndefinedValue);
return CAST(result);
}
@@ -862,7 +862,7 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec(
TVARIABLE(BoolT, var_result);
#ifdef V8_ENABLE_FORCE_SLOW_PATH
- var_result = BoolConstant(0);
+ var_result = BoolConstant(false);
GotoIfForceSlowPath(&out);
#endif
@@ -1103,7 +1103,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Isolate* isolate = this->isolate();
TNode<IntPtrT> const int_one = IntPtrConstant(1);
- TVARIABLE(Smi, var_length, SmiZero());
+ TVARIABLE(Uint32T, var_length, Uint32Constant(0));
TVARIABLE(IntPtrT, var_flags);
// First, count the number of characters we will need and check which flags
@@ -1115,13 +1115,13 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
var_flags = SmiUntag(flags_smi);
-#define CASE_FOR_FLAG(FLAG) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
- var_length = SmiAdd(var_length.value(), SmiConstant(1)); \
- Goto(&next); \
- BIND(&next); \
+#define CASE_FOR_FLAG(FLAG) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
+ var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \
+ Goto(&next); \
+ BIND(&next); \
} while (false)
CASE_FOR_FLAG(JSRegExp::kGlobal);
@@ -1145,7 +1145,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Label if_isflagset(this); \
BranchIfToBooleanIsTrue(flag, &if_isflagset, &next); \
BIND(&if_isflagset); \
- var_length = SmiAdd(var_length.value(), SmiConstant(1)); \
+ var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \
var_flags = Signed(WordOr(var_flags.value(), IntPtrConstant(FLAG))); \
Goto(&next); \
BIND(&next); \
@@ -2109,9 +2109,9 @@ TNode<Object> RegExpBuiltinsAssembler::MatchAllIterator(
StoreMapNoWriteBarrier(iterator, map);
StoreObjectFieldRoot(iterator,
JSRegExpStringIterator::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(iterator, JSRegExpStringIterator::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
// 5. Set iterator.[[IteratingRegExp]] to R.
StoreObjectFieldNoWriteBarrier(
@@ -2903,14 +2903,13 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
TNode<String> first_part =
CAST(CallBuiltin(Builtins::kSubString, context, string,
var_last_match_end.value(), match_start));
- var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured,
- context, var_result.value(), first_part));
+ var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ var_result.value(), first_part));
GotoIf(SmiEqual(replace_length, SmiZero()), &loop_end);
- var_result =
- CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured, context,
- var_result.value(), replace_string));
+ var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ var_result.value(), replace_string));
Goto(&loop_end);
BIND(&loop_end);
@@ -2936,8 +2935,8 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
TNode<String> last_part =
CAST(CallBuiltin(Builtins::kSubString, context, string,
var_last_match_end.value(), string_length));
- var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured,
- context, var_result.value(), last_part));
+ var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ var_result.value(), last_part));
Goto(&out);
}
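[Note: FlagsGetter above now accumulates the flag-string length in a raw Uint32 instead of a Smi, avoiding tagged arithmetic in the counting loop. A rough plain-C++ model of the flag scan; the flag bits and letters here are illustrative, not V8's actual encoding:

    #include <cstdint>
    #include <string>

    // Hypothetical flag bits standing in for JSRegExp::kGlobal etc.
    constexpr uint32_t kGlobal = 1 << 0;
    constexpr uint32_t kIgnoreCase = 1 << 1;
    constexpr uint32_t kMultiline = 1 << 2;

    std::string FlagsToString(uint32_t flags) {
      // The builtin first counts characters with plain uint32 adds, then
      // fills the string; here we simply build it in one pass.
      std::string result;
      if (flags & kGlobal) result += 'g';
      if (flags & kIgnoreCase) result += 'i';
      if (flags & kMultiline) result += 'm';
      return result;
    }
]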
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 52673bfd36..4befb13d7c 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -11,6 +11,8 @@ namespace v8 {
namespace internal {
using compiler::Node;
+template <typename T>
+using TNode = compiler::TNode<T>;
class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
public:
@@ -21,7 +23,8 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
protected:
typedef Node* (CodeAssembler::*AssemblerFunction)(MachineType type,
Node* base, Node* offset,
- Node* value);
+ Node* value,
+ Node* value_high);
void ValidateSharedTypedArray(Node* tagged, Node* context,
Node** out_instance_type,
Node** out_backing_store);
@@ -35,6 +38,11 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
void AtomicBinopBuiltinCommon(Node* array, Node* index, Node* value,
Node* context, AssemblerFunction function,
Runtime::FunctionId runtime_function);
+
+ // Create a BigInt from the result of a 64-bit atomic operation, using
+ // projections on 32-bit platforms.
+ TNode<BigInt> BigIntFromSigned64(Node* signed64);
+ TNode<BigInt> BigIntFromUnsigned64(Node* unsigned64);
};
void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
@@ -50,10 +58,9 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
&invalid);
// Fail if the array's JSArrayBuffer is not shared.
- Node* array_buffer = LoadObjectField(tagged, JSTypedArray::kBufferOffset);
- Node* bitfield = LoadObjectField(array_buffer, JSArrayBuffer::kBitFieldOffset,
- MachineType::Uint32());
- GotoIfNot(IsSetWord32<JSArrayBuffer::IsShared>(bitfield), &invalid);
+ TNode<JSArrayBuffer> array_buffer = LoadJSArrayBufferViewBuffer(CAST(tagged));
+ TNode<Uint32T> bitfield = LoadJSArrayBufferBitField(array_buffer);
+ GotoIfNot(IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield), &invalid);
// Fail if the array's element type is float32, float64 or clamped.
Node* elements_instance_type = LoadInstanceType(LoadElements(tagged));
@@ -63,8 +70,13 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
- Branch(Int32LessThan(elements_instance_type,
+ GotoIf(Int32LessThan(elements_instance_type,
Int32Constant(FIXED_FLOAT32_ARRAY_TYPE)),
+ &not_float_or_clamped);
+ STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT8_CLAMPED_ARRAY_TYPE);
+ STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT8_CLAMPED_ARRAY_TYPE);
+ Branch(Int32GreaterThan(elements_instance_type,
+ Int32Constant(FIXED_UINT8_CLAMPED_ARRAY_TYPE)),
&not_float_or_clamped, &invalid);
BIND(&invalid);
@@ -76,15 +88,12 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
BIND(&not_float_or_clamped);
*out_instance_type = elements_instance_type;
- Node* backing_store =
- LoadObjectField(array_buffer, JSArrayBuffer::kBackingStoreOffset);
- Node* byte_offset = ChangeUint32ToWord(TruncateTaggedToWord32(
- context, LoadObjectField(tagged, JSArrayBufferView::kByteOffsetOffset)));
- *out_backing_store =
- IntPtrAdd(BitcastTaggedToWord(backing_store), byte_offset);
+ TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(array_buffer);
+ TNode<UintPtrT> byte_offset = LoadJSArrayBufferViewByteOffset(CAST(tagged));
+ *out_backing_store = IntPtrAdd(backing_store, byte_offset);
}
-// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
+// https://tc39.github.io/ecma262/#sec-validateatomicaccess
Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32(
Node* tagged, Node* context, Node** number_index) {
VARIABLE(var_result, MachineRepresentation::kWord32);
@@ -112,7 +121,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array,
// Check if the index is in bounds. If not, throw RangeError.
Label check_passed(this);
Node* array_length_word32 =
- TruncateTaggedToWord32(context, LoadTypedArrayLength(CAST(array)));
+ TruncateTaggedToWord32(context, LoadJSTypedArrayLength(CAST(array)));
GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed);
ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex);
@@ -130,10 +139,32 @@ void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex(
CSA_ASSERT(this,
Uint32LessThan(index_word,
TruncateTaggedToWord32(
- context, LoadTypedArrayLength(CAST(array)))));
+ context, LoadJSTypedArrayLength(CAST(array)))));
}
#endif
+TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromSigned64(
+ Node* signed64) {
+ if (Is64()) {
+ return BigIntFromInt64(UncheckedCast<IntPtrT>(signed64));
+ } else {
+ TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Projection(0, signed64));
+ TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Projection(1, signed64));
+ return BigIntFromInt32Pair(low, high);
+ }
+}
+
+TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromUnsigned64(
+ Node* unsigned64) {
+ if (Is64()) {
+ return BigIntFromUint64(UncheckedCast<UintPtrT>(unsigned64));
+ } else {
+ TNode<UintPtrT> low = UncheckedCast<UintPtrT>(Projection(0, unsigned64));
+ TNode<UintPtrT> high = UncheckedCast<UintPtrT>(Projection(1, unsigned64));
+ return BigIntFromUint32Pair(low, high);
+ }
+}
+
TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
Node* array = Parameter(Descriptor::kArray);
Node* index = Parameter(Descriptor::kIndex);
@@ -150,14 +181,14 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
Node* index_word = ChangeUint32ToWord(index_word32);
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
- other(this);
+ i64(this), u64(this), other(this);
int32_t case_values[] = {
- FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
- FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
- };
- Label* case_labels[] = {
- &i8, &u8, &i16, &u16, &i32, &u32,
+ FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE,
+ FIXED_INT16_ARRAY_TYPE, FIXED_UINT16_ARRAY_TYPE,
+ FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+ FIXED_BIGINT64_ARRAY_TYPE, FIXED_BIGUINT64_ARRAY_TYPE,
};
+ Label* case_labels[] = {&i8, &u8, &i16, &u16, &i32, &u32, &i64, &u64};
Switch(instance_type, &other, case_values, case_labels,
arraysize(case_labels));
@@ -184,7 +215,24 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
BIND(&u32);
Return(ChangeUint32ToTagged(AtomicLoad(MachineType::Uint32(), backing_store,
WordShl(index_word, 2))));
+#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
+ BIND(&i64);
+ Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));
+ BIND(&u64);
+ Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));
+#else
+ BIND(&i64);
+ // This uses Uint64() intentionally: AtomicLoad is not implemented for
+ // Int64(), which is fine because the machine instruction only cares
+ // about words.
+ Return(BigIntFromSigned64(AtomicLoad(MachineType::Uint64(), backing_store,
+ WordShl(index_word, 3))));
+
+ BIND(&u64);
+ Return(BigIntFromUnsigned64(AtomicLoad(MachineType::Uint64(), backing_store,
+ WordShl(index_word, 3))));
+#endif
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
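[Note: the new i64/u64 cases in AtomicsLoad go through a 64-bit atomic load and box the result as a BigInt; pre-R6 32-bit MIPS lacks the instruction, so it calls Runtime::kAtomicsLoad64 instead. A rough std::atomic analogue, assuming a shared uint64_t backing store:

    #include <atomic>
    #include <cstdint>

    // A sequentially consistent 64-bit load, as the non-MIPS path emits.
    // Where the hardware lacks a native 64-bit atomic, the C++ runtime
    // degrades to a lock, much as the builtin degrades to the runtime call.
    uint64_t AtomicLoad64(const std::atomic<uint64_t>* slot) {
      return slot->load(std::memory_order_seq_cst);
    }
]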
@@ -206,6 +254,13 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
ValidateAtomicIndex(array, index_word32, context);
Node* index_word = ChangeUint32ToWord(index_word32);
+ Label u8(this), u16(this), u32(this), u64(this), other(this);
+ STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
+ STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
+ GotoIf(
+ Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
+ &u64);
+
Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
@@ -213,14 +268,11 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
- Label u8(this), u16(this), u32(this), other(this);
int32_t case_values[] = {
FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
};
- Label* case_labels[] = {
- &u8, &u8, &u16, &u16, &u32, &u32,
- };
+ Label* case_labels[] = {&u8, &u8, &u16, &u16, &u32, &u32};
Switch(instance_type, &other, case_values, case_labels,
arraysize(case_labels));
@@ -239,6 +291,24 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
WordShl(index_word, 2), value_word32);
Return(value_integer);
+ BIND(&u64);
+#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
+ Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_integer,
+ value));
+#else
+ TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
+#if DEBUG
+ DebugSanityCheckAtomicIndex(array, index_word32, context);
+#endif
+ TVARIABLE(UintPtrT, var_low);
+ TVARIABLE(UintPtrT, var_high);
+ BigIntToRawBytes(value_bigint, &var_low, &var_high);
+ Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
+ AtomicStore(MachineRepresentation::kWord64, backing_store,
+ WordShl(index_word, 3), var_low.value(), high);
+ Return(value_bigint);
+#endif
+
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
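[Note: in the u64 store path above, BigIntToRawBytes yields a low word plus, on 32-bit targets, a high word that is threaded through as the extra value_high operand (nullptr on 64-bit targets). A sketch of the split and of the inverse reassembly that BigIntFromUint32Pair performs on the two projections of a 64-bit result:

    #include <cstdint>
    #include <utility>

    // Split a 64-bit payload into the (low, high) pair a 32-bit target
    // passes as two machine words.
    std::pair<uint32_t, uint32_t> ToRawWords(uint64_t value) {
      return {static_cast<uint32_t>(value),
              static_cast<uint32_t>(value >> 32)};
    }

    // Inverse: reassemble the two projections into one 64-bit value.
    uint64_t FromUint32Pair(uint32_t low, uint32_t high) {
      return static_cast<uint64_t>(high) << 32 | low;
    }
]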
@@ -259,22 +329,26 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
- Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
-
-#if DEBUG
- DebugSanityCheckAtomicIndex(array, index_word32, context);
-#endif
-
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_integer,
- value_integer));
+ value));
#else
Node* index_word = ChangeUint32ToWord(index_word32);
+ Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
+ i64(this), u64(this), big(this), other(this);
+ STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
+ STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
+ GotoIf(
+ Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
+ &big);
+
+ Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
+#if DEBUG
+ DebugSanityCheckAtomicIndex(array, index_word32, context);
+#endif
Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
- Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
- other(this);
int32_t case_values[] = {
FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
@@ -311,6 +385,34 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
AtomicExchange(MachineType::Uint32(), backing_store,
WordShl(index_word, 2), value_word32)));
+ BIND(&big);
+ TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
+#if DEBUG
+ DebugSanityCheckAtomicIndex(array, index_word32, context);
+#endif
+ TVARIABLE(UintPtrT, var_low);
+ TVARIABLE(UintPtrT, var_high);
+ BigIntToRawBytes(value_bigint, &var_low, &var_high);
+ Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
+ GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
+ &i64);
+ GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
+ &u64);
+ Unreachable();
+
+ BIND(&i64);
+ // This uses Uint64() intentionally: AtomicExchange is not implemented for
+ // Int64(), which is fine because the machine instruction only cares
+ // about words.
+ Return(BigIntFromSigned64(AtomicExchange(MachineType::Uint64(), backing_store,
+ WordShl(index_word, 3),
+ var_low.value(), high)));
+
+ BIND(&u64);
+ Return(BigIntFromUnsigned64(
+ AtomicExchange(MachineType::Uint64(), backing_store,
+ WordShl(index_word, 3), var_low.value(), high)));
+
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
@@ -333,26 +435,29 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
- Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value));
- Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value));
-
-#if DEBUG
- DebugSanityCheckAtomicIndex(array, index_word32, context);
-#endif
-
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
- index_integer, old_value_integer, new_value_integer));
+ index_integer, old_value, new_value));
#else
Node* index_word = ChangeUint32ToWord(index_word32);
- Node* old_value_word32 = TruncateTaggedToWord32(context, old_value_integer);
+ Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
+ i64(this), u64(this), big(this), other(this);
+ STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
+ STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
+ GotoIf(
+ Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
+ &big);
+ Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value));
+ Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value));
+#if DEBUG
+ DebugSanityCheckAtomicIndex(array, index_word32, context);
+#endif
+ Node* old_value_word32 = TruncateTaggedToWord32(context, old_value_integer);
Node* new_value_word32 = TruncateTaggedToWord32(context, new_value_integer);
- Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
- other(this);
int32_t case_values[] = {
FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
@@ -393,6 +498,39 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
MachineType::Uint32(), backing_store, WordShl(index_word, 2),
old_value_word32, new_value_word32)));
+ BIND(&big);
+ TNode<BigInt> old_value_bigint = ToBigInt(CAST(context), CAST(old_value));
+ TNode<BigInt> new_value_bigint = ToBigInt(CAST(context), CAST(new_value));
+#if DEBUG
+ DebugSanityCheckAtomicIndex(array, index_word32, context);
+#endif
+ TVARIABLE(UintPtrT, var_old_low);
+ TVARIABLE(UintPtrT, var_old_high);
+ TVARIABLE(UintPtrT, var_new_low);
+ TVARIABLE(UintPtrT, var_new_high);
+ BigIntToRawBytes(old_value_bigint, &var_old_low, &var_old_high);
+ BigIntToRawBytes(new_value_bigint, &var_new_low, &var_new_high);
+ Node* old_high = Is64() ? nullptr : static_cast<Node*>(var_old_high.value());
+ Node* new_high = Is64() ? nullptr : static_cast<Node*>(var_new_high.value());
+ GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
+ &i64);
+ GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
+ &u64);
+ Unreachable();
+
+ BIND(&i64);
+ // This uses Uint64() intentionally: AtomicCompareExchange is not implemented
+ // for Int64(), which is fine because the machine instruction only cares
+ // about words.
+ Return(BigIntFromSigned64(AtomicCompareExchange(
+ MachineType::Uint64(), backing_store, WordShl(index_word, 3),
+ var_old_low.value(), var_new_low.value(), old_high, new_high)));
+
+ BIND(&u64);
+ Return(BigIntFromUnsigned64(AtomicCompareExchange(
+ MachineType::Uint64(), backing_store, WordShl(index_word, 3),
+ var_old_low.value(), var_new_low.value(), old_high, new_high)));
+
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
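[Note: the 64-bit compare-exchange above carries two (low, high) pairs and returns the value observed before the operation, which is then boxed as a BigInt. A std::atomic analogue of that contract:

    #include <atomic>
    #include <cstdint>

    // Returns the value held in *slot before the operation, whether or
    // not the swap happened -- the same value the builtin hands to
    // BigIntFromSigned64/BigIntFromUnsigned64.
    uint64_t CompareExchange64(std::atomic<uint64_t>* slot,
                               uint64_t expected, uint64_t desired) {
      slot->compare_exchange_strong(expected, desired,
                                    std::memory_order_seq_cst);
      return expected;  // updated to the observed value on failure
    }
]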
@@ -429,27 +567,27 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
- Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
-
-#if DEBUG
- // In Debug mode, we re-validate the index as a sanity check because
- // ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be
- // neutered and the TypedArray length can't change either, so skipping this
- // check in Release mode is safe.
- ValidateAtomicIndex(array, index_word32, context);
-#endif
-
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
- Return(CallRuntime(runtime_function, context, array, index_integer,
- value_integer));
+ Return(CallRuntime(runtime_function, context, array, index_integer, value));
#else
Node* index_word = ChangeUint32ToWord(index_word32);
+ Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
+ i64(this), u64(this), big(this), other(this);
+
+ STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
+ STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
+ GotoIf(
+ Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
+ &big);
+
+ Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
+#if DEBUG
+ DebugSanityCheckAtomicIndex(array, index_word32, context);
+#endif
Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
- Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
- other(this);
int32_t case_values[] = {
FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
@@ -462,29 +600,59 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
BIND(&i8);
Return(SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
- index_word, value_word32)));
+ index_word, value_word32, nullptr)));
BIND(&u8);
Return(SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
- index_word, value_word32)));
+ index_word, value_word32, nullptr)));
BIND(&i16);
Return(SmiFromInt32((this->*function)(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ WordShl(index_word, 1), value_word32,
+ nullptr)));
BIND(&u16);
Return(SmiFromInt32((this->*function)(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ WordShl(index_word, 1), value_word32,
+ nullptr)));
BIND(&i32);
Return(ChangeInt32ToTagged(
(this->*function)(MachineType::Int32(), backing_store,
- WordShl(index_word, 2), value_word32)));
+ WordShl(index_word, 2), value_word32, nullptr)));
BIND(&u32);
Return(ChangeUint32ToTagged(
(this->*function)(MachineType::Uint32(), backing_store,
- WordShl(index_word, 2), value_word32)));
+ WordShl(index_word, 2), value_word32, nullptr)));
+
+ BIND(&big);
+ TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
+#if DEBUG
+ DebugSanityCheckAtomicIndex(array, index_word32, context);
+#endif
+ TVARIABLE(UintPtrT, var_low);
+ TVARIABLE(UintPtrT, var_high);
+ BigIntToRawBytes(value_bigint, &var_low, &var_high);
+ Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
+ GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
+ &i64);
+ GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
+ &u64);
+ Unreachable();
+
+ BIND(&i64);
+ // This uses Uint64() intentionally: Atomic* ops are not implemented for
+ // Int64(), which is fine because the machine instructions only care
+ // about words.
+ Return(BigIntFromSigned64(
+ (this->*function)(MachineType::Uint64(), backing_store,
+ WordShl(index_word, 3), var_low.value(), high)));
+
+ BIND(&u64);
+ Return(BigIntFromUnsigned64(
+ (this->*function)(MachineType::Uint64(), backing_store,
+ WordShl(index_word, 3), var_low.value(), high)));
// This shouldn't happen, we've already validated the type.
BIND(&other);
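[Note: AtomicBinopBuiltinCommon now funnels every lane width through one member-function pointer that takes the extra value_high argument (nullptr for the 8/16/32-bit lanes). A simplified width-generic analogue using a template instead of a type switch:

    #include <atomic>
    #include <cstdint>

    // Width-generic atomic add; the builtin achieves the same effect by
    // switching on the typed array's instance type and selecting the
    // 8/16/32/64-bit machine operation.
    template <typename T>
    T FetchAdd(std::atomic<T>* slot, T value) {
      return slot->fetch_add(value, std::memory_order_seq_cst);
    }

    int main() {
      std::atomic<uint8_t> a8{1};
      std::atomic<uint64_t> a64{1};
      FetchAdd(&a8, uint8_t{2});    // 8-bit lane
      FetchAdd(&a64, uint64_t{2});  // 64-bit lane, new in this patch
    }
]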
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 92c1c65d1f..859d634cc9 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -74,6 +74,7 @@ V8_WARN_UNUSED_RESULT Maybe<size_t> ValidateAtomicAccess(
size_t access_index;
if (!TryNumberToSize(*access_index_obj, &access_index) ||
+ typed_array->WasNeutered() ||
access_index >= typed_array->length_value()) {
isolate->Throw(*isolate->factory()->NewRangeError(
MessageTemplate::kInvalidAtomicAccessIndex));
@@ -82,28 +83,24 @@ V8_WARN_UNUSED_RESULT Maybe<size_t> ValidateAtomicAccess(
return Just<size_t>(access_index);
}
-// ES #sec-atomics.wake
-// Atomics.wake( typedArray, index, count )
-BUILTIN(AtomicsWake) {
- HandleScope scope(isolate);
- Handle<Object> array = args.atOrUndefined(isolate, 1);
- Handle<Object> index = args.atOrUndefined(isolate, 2);
- Handle<Object> count = args.atOrUndefined(isolate, 3);
-
+namespace {
+MaybeHandle<Object> AtomicsWake(Isolate* isolate, Handle<Object> array,
+ Handle<Object> index, Handle<Object> count) {
Handle<JSTypedArray> sta;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true));
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true),
+ Object);
Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
- if (maybe_index.IsNothing()) return ReadOnlyRoots(isolate).exception();
+ MAYBE_RETURN_NULL(maybe_index);
size_t i = maybe_index.FromJust();
uint32_t c;
if (count->IsUndefined(isolate)) {
c = kMaxUInt32;
} else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, count,
- Object::ToInteger(isolate, count));
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, count,
+ Object::ToInteger(isolate, count), Object);
double count_double = count->Number();
if (count_double < 0)
count_double = 0;
@@ -113,9 +110,35 @@ BUILTIN(AtomicsWake) {
}
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = (i << 2) + NumberToSize(sta->byte_offset());
+ size_t addr = (i << 2) + sta->byte_offset();
+
+ return Handle<Object>(FutexEmulation::Wake(array_buffer, addr, c), isolate);
+}
+
+} // namespace
+
+// ES #sec-atomics.wake
+// Atomics.wake( typedArray, index, count )
+BUILTIN(AtomicsWake) {
+ HandleScope scope(isolate);
+ Handle<Object> array = args.atOrUndefined(isolate, 1);
+ Handle<Object> index = args.atOrUndefined(isolate, 2);
+ Handle<Object> count = args.atOrUndefined(isolate, 3);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kAtomicsWake);
+ RETURN_RESULT_OR_FAILURE(isolate, AtomicsWake(isolate, array, index, count));
+}
+
+// ES #sec-atomics.notify
+// Atomics.notify( typedArray, index, count )
+BUILTIN(AtomicsNotify) {
+ HandleScope scope(isolate);
+ Handle<Object> array = args.atOrUndefined(isolate, 1);
+ Handle<Object> index = args.atOrUndefined(isolate, 2);
+ Handle<Object> count = args.atOrUndefined(isolate, 3);
- return FutexEmulation::Wake(array_buffer, addr, c);
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kAtomicsNotify);
+ RETURN_RESULT_OR_FAILURE(isolate, AtomicsWake(isolate, array, index, count));
}
// ES #sec-atomics.wait
@@ -158,7 +181,7 @@ BUILTIN(AtomicsWait) {
}
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = (i << 2) + NumberToSize(sta->byte_offset());
+ size_t addr = (i << 2) + sta->byte_offset();
return FutexEmulation::Wait(isolate, array_buffer, addr, value_int32,
timeout_number);
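[Note: the runtime side above factors Atomics.wake into a shared helper so the new Atomics.notify alias can reuse it under its own use counter, and the futex address is now computed from the raw byte_offset() rather than via NumberToSize. A sketch of that address computation for an Int32Array-backed wait/notify; the helper name is invented:

    #include <cstddef>

    // The futex word for index i of an Int32Array view lives at
    // backing_store + byte_offset + i * sizeof(int32_t); (i << 2) is the
    // scaling used in the builtin.
    size_t FutexAddress(size_t byte_offset, size_t index) {
      return (index << 2) + byte_offset;
    }
]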
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index c46a3fd35d..574f425a0a 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -39,11 +39,11 @@ Node* StringBuiltinsAssembler::DirectStringData(Node* string,
BIND(&if_external);
{
// This is only valid for ExternalStrings where the resource data
- // pointer is cached (i.e. no short external strings).
- CSA_ASSERT(
- this, Word32NotEqual(Word32And(string_instance_type,
- Int32Constant(kShortExternalStringMask)),
- Int32Constant(kShortExternalStringTag)));
+ // pointer is cached (i.e. no uncached external strings).
+ CSA_ASSERT(this, Word32NotEqual(
+ Word32And(string_instance_type,
+ Int32Constant(kUncachedExternalStringMask)),
+ Int32Constant(kUncachedExternalStringTag)));
var_data.Bind(LoadObjectField(string, ExternalString::kResourceDataOffset,
MachineType::Pointer()));
Goto(&if_join);
@@ -191,11 +191,11 @@ void StringBuiltinsAssembler::StringEqual_Core(
// Check if both {lhs} and {rhs} are direct strings, and that in case of
// ExternalStrings the data pointer is cached.
- STATIC_ASSERT(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kUncachedExternalStringTag != 0);
STATIC_ASSERT(kIsIndirectStringTag != 0);
int const kBothDirectStringMask =
- kIsIndirectStringMask | kShortExternalStringMask |
- ((kIsIndirectStringMask | kShortExternalStringMask) << 8);
+ kIsIndirectStringMask | kUncachedExternalStringMask |
+ ((kIsIndirectStringMask | kUncachedExternalStringMask) << 8);
GotoIfNot(Word32Equal(Word32And(both_instance_types,
Int32Constant(kBothDirectStringMask)),
Int32Constant(0)),
@@ -284,67 +284,31 @@ void StringBuiltinsAssembler::StringEqual_Loop(
}
}
-void StringBuiltinsAssembler::Generate_StringAdd(StringAddFlags flags,
- PretenureFlag pretenure_flag,
- Node* context, Node* left,
- Node* right) {
- switch (flags) {
- case STRING_ADD_CONVERT_LEFT: {
- // TODO(danno): The ToString and JSReceiverToPrimitive below could be
- // combined to avoid duplicate smi and instance type checks.
- left = ToString(context, JSReceiverToPrimitive(context, left));
- Callable callable = CodeFactory::StringAdd(
- isolate(), STRING_ADD_CHECK_NONE, pretenure_flag);
- TailCallStub(callable, context, left, right);
- break;
- }
- case STRING_ADD_CONVERT_RIGHT: {
- // TODO(danno): The ToString and JSReceiverToPrimitive below could be
- // combined to avoid duplicate smi and instance type checks.
- right = ToString(context, JSReceiverToPrimitive(context, right));
- Callable callable = CodeFactory::StringAdd(
- isolate(), STRING_ADD_CHECK_NONE, pretenure_flag);
- TailCallStub(callable, context, left, right);
- break;
- }
- case STRING_ADD_CHECK_NONE: {
- CodeStubAssembler::AllocationFlag allocation_flags =
- (pretenure_flag == TENURED) ? CodeStubAssembler::kPretenured
- : CodeStubAssembler::kNone;
- Return(StringAdd(context, CAST(left), CAST(right), allocation_flags));
- break;
- }
- }
-}
-
-TF_BUILTIN(StringAdd_CheckNone_NotTenured, StringBuiltinsAssembler) {
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
+TF_BUILTIN(StringAdd_CheckNone, StringBuiltinsAssembler) {
+ TNode<String> left = CAST(Parameter(Descriptor::kLeft));
+ TNode<String> right = CAST(Parameter(Descriptor::kRight));
Node* context = Parameter(Descriptor::kContext);
- Generate_StringAdd(STRING_ADD_CHECK_NONE, NOT_TENURED, context, left, right);
+ Return(StringAdd(context, left, right));
}
-TF_BUILTIN(StringAdd_CheckNone_Tenured, StringBuiltinsAssembler) {
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
+TF_BUILTIN(StringAdd_ConvertLeft, StringBuiltinsAssembler) {
+ TNode<Object> left = CAST(Parameter(Descriptor::kLeft));
+ TNode<String> right = CAST(Parameter(Descriptor::kRight));
Node* context = Parameter(Descriptor::kContext);
- Generate_StringAdd(STRING_ADD_CHECK_NONE, TENURED, context, left, right);
+ // TODO(danno): The ToString and JSReceiverToPrimitive below could be
+ // combined to avoid duplicate smi and instance type checks.
+ left = ToString(context, JSReceiverToPrimitive(context, left));
+ TailCallBuiltin(Builtins::kStringAdd_CheckNone, context, left, right);
}
-TF_BUILTIN(StringAdd_ConvertLeft_NotTenured, StringBuiltinsAssembler) {
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
+TF_BUILTIN(StringAdd_ConvertRight, StringBuiltinsAssembler) {
+ TNode<String> left = CAST(Parameter(Descriptor::kLeft));
+ TNode<Object> right = CAST(Parameter(Descriptor::kRight));
Node* context = Parameter(Descriptor::kContext);
- Generate_StringAdd(STRING_ADD_CONVERT_LEFT, NOT_TENURED, context, left,
- right);
-}
-
-TF_BUILTIN(StringAdd_ConvertRight_NotTenured, StringBuiltinsAssembler) {
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* context = Parameter(Descriptor::kContext);
- Generate_StringAdd(STRING_ADD_CONVERT_RIGHT, NOT_TENURED, context, left,
- right);
+ // TODO(danno): The ToString and JSReceiverToPrimitive below could be
+ // combined to avoid duplicate smi and instance type checks.
+ right = ToString(context, JSReceiverToPrimitive(context, right));
+ TailCallBuiltin(Builtins::kStringAdd_CheckNone, context, left, right);
}
TF_BUILTIN(SubString, StringBuiltinsAssembler) {
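[Note: with pretenuring gone, the four StringAdd stub flavors collapse into a single StringAdd_CheckNone core plus two thin wrappers that coerce one operand to a string and tail-call the core. The shape, in ordinary C++ with ToString/AddCheckNone as stand-ins:

    #include <string>

    // Stand-ins for the ToString coercion and the core concatenation.
    std::string ToString(int v) { return std::to_string(v); }

    std::string AddCheckNone(const std::string& l, const std::string& r) {
      return l + r;  // both sides already strings
    }

    // ConvertLeft/ConvertRight coerce one operand, then delegate to the
    // single core path (a tail call in the builtin).
    std::string AddConvertLeft(int l, const std::string& r) {
      return AddCheckNone(ToString(l), r);
    }
    std::string AddConvertRight(const std::string& l, int r) {
      return AddCheckNone(l, ToString(r));
    }
]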
@@ -354,12 +318,10 @@ TF_BUILTIN(SubString, StringBuiltinsAssembler) {
Return(SubString(string, SmiUntag(from), SmiUntag(to)));
}
-void StringBuiltinsAssembler::GenerateStringAt(char const* method_name,
- TNode<Context> context,
- Node* receiver,
- TNode<Object> maybe_position,
- TNode<Object> default_return,
- StringAtAccessor accessor) {
+void StringBuiltinsAssembler::GenerateStringAt(
+ char const* method_name, TNode<Context> context, Node* receiver,
+ TNode<Object> maybe_position, TNode<Object> default_return,
+ const StringAtAccessor& accessor) {
// Check that {receiver} is coercible to Object and convert it to a String.
TNode<String> string = ToThisString(context, receiver, method_name);
@@ -587,8 +549,9 @@ TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
}
TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* position = Parameter(Descriptor::kPosition);
+ TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<IntPtrT> position =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kPosition));
// Load the character code at the {position} from the {receiver}.
TNode<Int32T> code = StringCharCodeAt(receiver, position);
@@ -639,7 +602,6 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
- TNode<Smi> smi_argc = SmiTag(arguments.GetLength(INTPTR_PARAMETERS));
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
Label if_oneargument(this), if_notoneargument(this);
@@ -664,7 +626,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
{
Label two_byte(this);
// Assume that the resulting string contains only one-byte characters.
- Node* one_byte_result = AllocateSeqOneByteString(context, smi_argc);
+ Node* one_byte_result = AllocateSeqOneByteString(context, Unsigned(argc));
TVARIABLE(IntPtrT, var_max_index);
var_max_index = IntPtrConstant(0);
@@ -698,7 +660,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// At least one of the characters in the string requires a 16-bit
// representation. Allocate a SeqTwoByteString to hold the resulting
// string.
- Node* two_byte_result = AllocateSeqTwoByteString(context, smi_argc);
+ Node* two_byte_result = AllocateSeqTwoByteString(context, Unsigned(argc));
// Copy the characters that have already been put in the 8-bit string into
// their corresponding positions in the new 16-bit string.
@@ -817,7 +779,7 @@ TF_BUILTIN(StringPrototypeConcat, CodeStubAssembler) {
void StringBuiltinsAssembler::StringIndexOf(
Node* const subject_string, Node* const search_string, Node* const position,
- std::function<void(Node*)> f_return) {
+ const std::function<void(Node*)>& f_return) {
CSA_ASSERT(this, IsString(subject_string));
CSA_ASSERT(this, IsString(search_string));
CSA_ASSERT(this, TaggedIsSmi(position));
@@ -1229,8 +1191,6 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
TNode<Object> count = CAST(Parameter(Descriptor::kCount));
Node* const string =
ToThisString(context, receiver, "String.prototype.repeat");
- Node* const is_stringempty =
- SmiEqual(LoadStringLengthAsSmi(string), SmiConstant(0));
VARIABLE(
var_count, MachineRepresentation::kTagged,
@@ -1248,7 +1208,8 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
TNode<Smi> smi_count = CAST(var_count.value());
GotoIf(SmiLessThan(smi_count, SmiConstant(0)), &invalid_count);
GotoIf(SmiEqual(smi_count, SmiConstant(0)), &return_emptystring);
- GotoIf(is_stringempty, &return_emptystring);
+ GotoIf(Word32Equal(LoadStringLengthAsWord32(string), Int32Constant(0)),
+ &return_emptystring);
GotoIf(SmiGreaterThan(smi_count, SmiConstant(String::kMaxLength)),
&invalid_string_length);
Return(CallBuiltin(Builtins::kStringRepeat, context, string, smi_count));
@@ -1266,7 +1227,8 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
&invalid_count);
GotoIf(Float64LessThan(number_value, Float64Constant(0.0)),
&invalid_count);
- Branch(is_stringempty, &return_emptystring, &invalid_string_length);
+ Branch(Word32Equal(LoadStringLengthAsWord32(string), Int32Constant(0)),
+ &return_emptystring, &invalid_string_length);
}
}
@@ -1311,9 +1273,6 @@ TF_BUILTIN(StringRepeat, StringBuiltinsAssembler) {
VARIABLE(var_temp, MachineRepresentation::kTagged, string);
TVARIABLE(Smi, var_count, count);
- Callable stringadd_callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
-
Label loop(this, {&var_count, &var_result, &var_temp}), return_result(this);
Goto(&loop);
BIND(&loop);
@@ -1321,16 +1280,16 @@ TF_BUILTIN(StringRepeat, StringBuiltinsAssembler) {
{
Label next(this);
GotoIfNot(SmiToInt32(SmiAnd(var_count.value(), SmiConstant(1))), &next);
- var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
- var_temp.value()));
+ var_result.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ var_result.value(), var_temp.value()));
Goto(&next);
BIND(&next);
}
var_count = SmiShr(var_count.value(), 1);
GotoIf(SmiEqual(var_count.value(), SmiConstant(0)), &return_result);
- var_temp.Bind(CallStub(stringadd_callable, context, var_temp.value(),
- var_temp.value()));
+ var_temp.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ var_temp.value(), var_temp.value()));
Goto(&loop);
}
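[Note: the StringRepeat loop above is square-and-multiply applied to strings: inspect the low bit of the count, append the current power-of-two chunk when set, halve the count, double the chunk. A direct C++ transcription of that loop:

    #include <string>

    // Repeat-by-doubling, as in the builtin's loop over var_count:
    // O(log n) concatenations instead of n.
    std::string Repeat(std::string s, unsigned n) {
      std::string result;
      while (n != 0) {
        if (n & 1) result += s;  // low bit set: take the current chunk
        n >>= 1;
        if (n == 0) break;       // skip the final, unused doubling
        s += s;                  // double the chunk
      }
      return result;
    }

For example, Repeat("ab", 3) appends the 1x chunk, doubles once, appends the 2x chunk, and returns "ababab" after two concatenations plus one doubling.]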
@@ -1369,16 +1328,16 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
TNode<String> const subject_string = ToString_Inline(context, receiver);
TNode<String> const search_string = ToString_Inline(context, search);
- TNode<Smi> const subject_length = LoadStringLengthAsSmi(subject_string);
- TNode<Smi> const search_length = LoadStringLengthAsSmi(search_string);
+ TNode<IntPtrT> const subject_length = LoadStringLengthAsWord(subject_string);
+ TNode<IntPtrT> const search_length = LoadStringLengthAsWord(search_string);
// Fast-path single-char {search}, long cons {receiver}, and simple string
// {replace}.
{
Label next(this);
- GotoIfNot(SmiEqual(search_length, SmiConstant(1)), &next);
- GotoIfNot(SmiGreaterThan(subject_length, SmiConstant(0xFF)), &next);
+ GotoIfNot(WordEqual(search_length, IntPtrConstant(1)), &next);
+ GotoIfNot(IntPtrGreaterThan(subject_length, IntPtrConstant(0xFF)), &next);
GotoIf(TaggedIsSmi(replace), &next);
GotoIfNot(IsString(replace), &next);
@@ -1430,10 +1389,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
BIND(&next);
}
- TNode<Smi> const match_end_index = SmiAdd(match_start_index, search_length);
-
- Callable stringadd_callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ TNode<Smi> const match_end_index =
+ SmiAdd(match_start_index, SmiFromIntPtr(search_length));
VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
@@ -1465,8 +1422,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
CallJS(call_callable, context, replace, UndefinedConstant(),
search_string, match_start_index, subject_string);
Node* const replacement_string = ToString_Inline(context, replacement);
- var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
- replacement_string));
+ var_result.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ var_result.value(), replacement_string));
Goto(&out);
}
@@ -1476,8 +1433,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Node* const replacement =
GetSubstitution(context, subject_string, match_start_index,
match_end_index, replace_string);
- var_result.Bind(
- CallStub(stringadd_callable, context, var_result.value(), replacement));
+ var_result.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ var_result.value(), replacement));
Goto(&out);
}
@@ -1485,9 +1442,9 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
{
Node* const suffix =
CallBuiltin(Builtins::kStringSubstring, context, subject_string,
- SmiUntag(match_end_index), SmiUntag(subject_length));
- Node* const result =
- CallStub(stringadd_callable, context, var_result.value(), suffix);
+ SmiUntag(match_end_index), subject_length);
+ Node* const result = CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ var_result.value(), suffix);
Return(result);
}
}
@@ -1679,8 +1636,6 @@ class StringPadAssembler : public StringBuiltinsAssembler {
SmiLessThanOrEqual(smi_max_length, SmiConstant(String::kMaxLength)),
&invalid_string_length);
- Callable stringadd_callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
CSA_ASSERT(this, SmiGreaterThan(smi_max_length, string_length));
TNode<Smi> const pad_length = SmiSub(smi_max_length, string_length);
@@ -1717,19 +1672,20 @@ class StringPadAssembler : public StringBuiltinsAssembler {
Node* const remainder_string = CallBuiltin(
Builtins::kStringSubstring, context, var_fill_string.value(),
IntPtrConstant(0), ChangeInt32ToIntPtr(remaining_word32));
- var_pad.Bind(CallStub(stringadd_callable, context, var_pad.value(),
- remainder_string));
+ var_pad.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ var_pad.value(), remainder_string));
Goto(&return_result);
}
}
BIND(&return_result);
CSA_ASSERT(this,
SmiEqual(pad_length, LoadStringLengthAsSmi(var_pad.value())));
- arguments.PopAndReturn(variant == kStart
- ? CallStub(stringadd_callable, context,
- var_pad.value(), receiver_string)
- : CallStub(stringadd_callable, context,
- receiver_string, var_pad.value()));
+ arguments.PopAndReturn(
+ variant == kStart
+ ? CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ var_pad.value(), receiver_string)
+ : CallBuiltin(Builtins::kStringAdd_CheckNone, context,
+ receiver_string, var_pad.value()));
}
BIND(&dont_pad);
arguments.PopAndReturn(receiver_string);
@@ -1844,7 +1800,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
TNode<RawPtrT> string_data = UncheckedCast<RawPtrT>(
to_direct.PointerToData(&fill_thehole_and_call_runtime));
TNode<IntPtrT> string_data_offset = to_direct.offset();
- TNode<Object> cache = LoadRoot(Heap::kSingleCharacterStringCacheRootIndex);
+ TNode<Object> cache = LoadRoot(RootIndex::kSingleCharacterStringCache);
BuildFastLoop(
IntPtrConstant(0), length,
@@ -1876,7 +1832,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
BIND(&fill_thehole_and_call_runtime);
{
FillFixedArrayWithValue(PACKED_ELEMENTS, elements, IntPtrConstant(0),
- length, Heap::kTheHoleValueRootIndex);
+ length, RootIndex::kTheHoleValue);
Goto(&call_runtime);
}
}
@@ -2303,10 +2259,10 @@ void StringTrimAssembler::ScanForNonWhiteSpaceOrLineTerminator(
BIND(&out);
}
-void StringTrimAssembler::BuildLoop(Variable* const var_index, Node* const end,
- int increment, Label* const if_none_found,
- Label* const out,
- std::function<Node*(Node*)> get_character) {
+void StringTrimAssembler::BuildLoop(
+ Variable* const var_index, Node* const end, int increment,
+ Label* const if_none_found, Label* const out,
+ const std::function<Node*(Node*)>& get_character) {
Label loop(this, var_index);
Goto(&loop);
BIND(&loop);
@@ -2403,14 +2359,14 @@ TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
ToThisString(context, receiver, "String.prototype[Symbol.iterator]");
Node* native_context = LoadNativeContext(context);
- Node* map =
- LoadContextElement(native_context, Context::STRING_ITERATOR_MAP_INDEX);
+ Node* map = LoadContextElement(native_context,
+ Context::INITIAL_STRING_ITERATOR_MAP_INDEX);
Node* iterator = Allocate(JSStringIterator::kSize);
StoreMapNoWriteBarrier(iterator, map);
StoreObjectFieldRoot(iterator, JSValue::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kStringOffset,
string);
Node* index = SmiConstant(0);
@@ -2537,6 +2493,85 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
}
}
+TNode<BoolT> StringBuiltinsAssembler::IsStringPrimitiveWithNoCustomIteration(
+ TNode<Object> object, TNode<Context> context) {
+ Label if_false(this, Label::kDeferred), exit(this);
+ TVARIABLE(BoolT, var_result);
+
+ GotoIf(TaggedIsSmi(object), &if_false);
+ GotoIfNot(IsString(CAST(object)), &if_false);
+
+ // Check that the String iterator hasn't been modified in a way that would
+ // affect iteration.
+ Node* protector_cell = LoadRoot(RootIndex::kStringIteratorProtector);
+ DCHECK(isolate()->heap()->string_iterator_protector()->IsPropertyCell());
+ var_result =
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid));
+ Goto(&exit);
+
+ BIND(&if_false);
+ {
+ var_result = Int32FalseConstant();
+ Goto(&exit);
+ }
+
+ BIND(&exit);
+ return var_result.value();
+}
+
+TNode<JSArray> StringBuiltinsAssembler::StringToList(TNode<Context> context,
+ TNode<String> string) {
+ CSA_ASSERT(this, IsStringPrimitiveWithNoCustomIteration(string, context));
+ const ElementsKind kind = PACKED_ELEMENTS;
+ const TNode<IntPtrT> length = LoadStringLengthAsWord(string);
+
+ Node* const array_map =
+ LoadJSArrayElementsMap(kind, LoadNativeContext(context));
+ Node* const array = AllocateJSArray(kind, array_map, length, SmiTag(length));
+ Node* const elements = LoadElements(array);
+
+ const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> first_to_element_offset =
+ ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0);
+ VARIABLE(
+ var_offset, MachineType::PointerRepresentation(),
+ IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset)));
+ TVARIABLE(IntPtrT, var_position, IntPtrConstant(0));
+ Label done(this), next_codepoint(this, {&var_position, &var_offset});
+
+ Goto(&next_codepoint);
+
+ BIND(&next_codepoint);
+ {
+ // Loop condition.
+ GotoIfNot(IntPtrLessThan(var_position.value(), length), &done);
+ const UnicodeEncoding encoding = UnicodeEncoding::UTF16;
+ TNode<Int32T> ch =
+ LoadSurrogatePairAt(string, length, var_position.value(), encoding);
+ TNode<String> value = StringFromSingleCodePoint(ch, encoding);
+
+ Store(elements, var_offset.value(), value);
+
+ // Increment the position.
+ TNode<IntPtrT> ch_length = LoadStringLengthAsWord(value);
+ var_position = IntPtrAdd(var_position.value(), ch_length);
+ // Increment the array offset and continue the loop.
+ var_offset.Bind(
+ IntPtrAdd(var_offset.value(), IntPtrConstant(kPointerSize)));
+ Goto(&next_codepoint);
+ }
+
+ BIND(&done);
+ return UncheckedCast<JSArray>(array);
+}
+
+TF_BUILTIN(StringToList, StringBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<String> string = CAST(Parameter(Descriptor::kSource));
+ Return(StringToList(context, string));
+}
+
// -----------------------------------------------------------------------------
// ES6 section B.2.3 Additional Properties of the String.prototype object
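[Note: StringToList above walks the string by code point: LoadSurrogatePairAt reads one or two UTF-16 units, and the position advances by the length of the extracted element. A plain C++ walk over a UTF-16 buffer showing the same pairing rule; no V8 types involved:

    #include <cstdint>
    #include <vector>

    // Advance through UTF-16 one code point at a time, pairing a lead
    // surrogate (0xD800-0xDBFF) with a following trail (0xDC00-0xDFFF).
    std::vector<uint32_t> ToCodePoints(const std::vector<uint16_t>& units) {
      std::vector<uint32_t> out;
      for (size_t i = 0; i < units.size();) {
        uint16_t lead = units[i];
        if (lead >= 0xD800 && lead <= 0xDBFF && i + 1 < units.size() &&
            units[i + 1] >= 0xDC00 && units[i + 1] <= 0xDFFF) {
          out.push_back(0x10000 + ((lead - 0xD800) << 10) +
                        (units[i + 1] - 0xDC00));
          i += 2;  // consumed a surrogate pair
        } else {
          out.push_back(lead);  // BMP character or unpaired surrogate
          i += 1;
        }
      }
      return out;
    }
]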
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 06ac127f13..2420ad3014 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -23,8 +23,12 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Node* rhs, Node* rhs_instance_type,
TNode<IntPtrT> length, Label* if_equal,
Label* if_not_equal, Label* if_indirect);
+ TNode<BoolT> IsStringPrimitiveWithNoCustomIteration(TNode<Object> object,
+ TNode<Context> context);
protected:
+ TNode<JSArray> StringToList(TNode<Context> context, TNode<String> string);
+
void StringEqual_Loop(Node* lhs, Node* lhs_instance_type,
MachineType lhs_type, Node* rhs,
Node* rhs_instance_type, MachineType rhs_type,
@@ -64,7 +68,7 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
void GenerateStringAt(const char* method_name, TNode<Context> context,
Node* receiver, TNode<Object> maybe_position,
TNode<Object> default_return,
- StringAtAccessor accessor);
+ const StringAtAccessor& accessor);
TNode<Int32T> LoadSurrogatePairAt(SloppyTNode<String> string,
SloppyTNode<IntPtrT> length,
@@ -72,7 +76,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
UnicodeEncoding encoding);
void StringIndexOf(Node* const subject_string, Node* const search_string,
- Node* const position, std::function<void(Node*)> f_return);
+ Node* const position,
+ const std::function<void(Node*)>& f_return);
TNode<Smi> IndexOfDollarChar(Node* const context, Node* const string);
@@ -107,9 +112,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Handle<Symbol> symbol,
const NodeFunction0& regexp_call,
const NodeFunction1& generic_call);
-
- void Generate_StringAdd(StringAddFlags flags, PretenureFlag pretenure_flag,
- Node* context, Node* left, Node* right);
};
class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler {
@@ -145,7 +147,7 @@ class StringTrimAssembler : public StringBuiltinsAssembler {
void BuildLoop(Variable* const var_index, Node* const end, int increment,
Label* const if_none_found, Label* const out,
- std::function<Node*(Node*)> get_character);
+ const std::function<Node*(Node*)>& get_character);
};
} // namespace internal
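[Note: the header changes above switch several callback parameters from std::function taken by value to const reference, which avoids copying the callable and its captured state on every call. A minimal illustration of the difference:

    #include <functional>

    // By value: the std::function (and any captures) is copied at each
    // call site. By const reference: the caller's object is bound directly.
    void ForEachByValue(std::function<void(int)> f) { f(1); }
    void ForEachByRef(const std::function<void(int)>& f) { f(1); }

Since these assembler callbacks are invoked during code generation only, the reference form is purely a cheap-parameter-passing cleanup with no behavior change.]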
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 0dafa230b5..7aba998aa4 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -195,6 +195,9 @@ BUILTIN(StringPrototypeLastIndexOf) {
// do anything locale specific.
BUILTIN(StringPrototypeLocaleCompare) {
HandleScope handle_scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringLocaleCompare);
+
#ifdef V8_INTL_SUPPORT
TO_THIS_STRING(str1, "String.prototype.localeCompare");
Handle<String> str2;
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 55c0307484..97e0def67c 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -39,7 +39,7 @@ BUILTIN(SymbolFor) {
Handle<String> key;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
Object::ToString(isolate, key_obj));
- return *isolate->SymbolFor(Heap::kPublicSymbolTableRootIndex, key, false);
+ return *isolate->SymbolFor(RootIndex::kPublicSymbolTable, key, false);
}
// ES6 section 19.4.2.5 Symbol.keyFor.
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index c7c416d924..99979b0283 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -5,7 +5,6 @@
#include "src/builtins/builtins-typed-array-gen.h"
#include "src/builtins/builtins-constructor-gen.h"
-#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/builtins/growable-fixed-array-gen.h"
@@ -33,12 +32,12 @@ TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
TVARIABLE(Map, var_typed_map);
TNode<Map> array_map = LoadMap(array);
TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
+ ReadOnlyRoots roots(isolate());
DispatchTypedArrayByElementsKind(
elements_kind,
[&](ElementsKind kind, int size, int typed_array_fun_index) {
- Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(kind),
- isolate());
+ Handle<Map> map(roots.MapForFixedTypedArray(kind), isolate());
var_typed_map = HeapConstant(map);
});
@@ -65,11 +64,15 @@ TNode<UintPtrT> TypedArrayBuiltinsAssembler::CalculateExternalPointer(
// - Set EmbedderFields to 0.
void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
TNode<Smi> length,
- TNode<Number> byte_offset,
- TNode<Number> byte_length) {
+ TNode<UintPtrT> byte_offset,
+ TNode<UintPtrT> byte_length) {
StoreObjectField(holder, JSTypedArray::kLengthOffset, length);
- StoreObjectField(holder, JSArrayBufferView::kByteOffsetOffset, byte_offset);
- StoreObjectField(holder, JSArrayBufferView::kByteLengthOffset, byte_length);
+ StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteOffsetOffset,
+ byte_offset,
+ MachineType::PointerRepresentation());
+ StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteLengthOffset,
+ byte_length,
+ MachineType::PointerRepresentation());
for (int offset = JSTypedArray::kSize;
offset < JSTypedArray::kSizeWithEmbedderFields; offset += kPointerSize) {
StoreObjectField(holder, offset, SmiConstant(0));
@@ -114,7 +117,8 @@ TF_BUILTIN(TypedArrayInitializeWithBuffer, TypedArrayBuiltinsAssembler) {
// SmiMul returns a heap number in case of Smi overflow.
TNode<Number> byte_length = SmiMul(length, element_size);
- SetupTypedArray(holder, length, byte_offset, byte_length);
+ SetupTypedArray(holder, length, ChangeNonnegativeNumberToUintPtr(byte_offset),
+ ChangeNonnegativeNumberToUintPtr(byte_length));
AttachBuffer(holder, buffer, fixed_typed_map, length, byte_offset);
Return(UndefinedConstant());
}
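
The new SetupTypedArray stores byte_offset and byte_length as untagged, pointer-sized words via StoreObjectFieldNoWriteBarrier: a raw machine word can never be a heap reference, so the GC write barrier that a plain StoreObjectField would emit is unnecessary. A minimal standalone sketch of the distinction (all names below are illustrative, not the V8 API):

#include <cstdint>
#include <iostream>

struct HeapObject;  // stand-in for a GC-managed object

// A write barrier is only needed when the stored value may be a heap
// pointer that the GC has to track.
void StoreTaggedField(HeapObject** slot, HeapObject* value) {
  *slot = value;
  // RecordWrite(slot);  // barrier: value may point into the heap
}

// An untagged word such as a byte length is plain data; the GC never
// scans it, so a direct store suffices.
void StoreUntaggedField(uintptr_t* slot, uintptr_t value) {
  *slot = value;  // no barrier
}

int main() {
  uintptr_t byte_length = 0;
  StoreUntaggedField(&byte_length, 1024);
  std::cout << byte_length << "\n";  // 1024
}
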
@@ -146,8 +150,6 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
// SmiMul returns a heap number in case of Smi overflow.
TNode<Number> byte_length = SmiMul(length, element_size);
- SetupTypedArray(holder, length, byte_offset, byte_length);
-
TNode<Map> fixed_typed_map = LoadMapForType(holder);
// If target and new_target for the buffer differ, allocate off-heap.
@@ -173,7 +175,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
Node* native_context = LoadNativeContext(context);
Node* map =
LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX);
- Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+ Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray);
Node* buffer = Allocate(JSArrayBuffer::kSizeWithEmbedderFields);
StoreMapNoWriteBarrier(buffer, map);
@@ -189,14 +191,15 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
// - Set all embedder fields to Smi(0).
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldSlot,
SmiConstant(0));
- int32_t bitfield_value = (1 << JSArrayBuffer::IsExternal::kShift) |
- (1 << JSArrayBuffer::IsNeuterable::kShift);
+ int32_t bitfield_value = (1 << JSArrayBuffer::IsExternalBit::kShift) |
+ (1 << JSArrayBuffer::IsNeuterableBit::kShift);
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldOffset,
Int32Constant(bitfield_value),
MachineRepresentation::kWord32);
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
- byte_length);
+ SmiToIntPtr(CAST(byte_length)),
+ MachineType::PointerRepresentation());
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
SmiConstant(0));
for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
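
The renamed IsExternalBit/IsNeuterableBit flags keep the same one-bit-per-flag encoding: each flag contributes 1 << kShift to the 32-bit field. A tiny self-contained check, with assumed shift positions (the real ones come from V8's BitField definitions):

#include <cstdint>
#include <iostream>

constexpr int kIsExternalShift = 0;    // assumed position
constexpr int kIsNeuterableShift = 1;  // assumed position

int main() {
  int32_t bitfield = (1 << kIsExternalShift) | (1 << kIsNeuterableShift);
  std::cout << bitfield << "\n";                                // 3
  std::cout << ((bitfield >> kIsNeuterableShift) & 1) << "\n";  // 1: flag set
}
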
@@ -305,6 +308,8 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
}
BIND(&done);
+ SetupTypedArray(holder, length, ChangeNonnegativeNumberToUintPtr(byte_offset),
+ ChangeNonnegativeNumberToUintPtr(byte_length));
Return(UndefinedConstant());
}
@@ -399,8 +404,8 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
BIND(&length_undefined);
{
ThrowIfArrayBufferIsDetached(context, buffer, "Construct");
- Node* buffer_byte_length =
- LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset);
+ TNode<Number> buffer_byte_length = ChangeUintPtrToTagged(
+ LoadObjectField<UintPtrT>(buffer, JSArrayBuffer::kByteLengthOffset));
Node* remainder = CallBuiltin(Builtins::kModulus, context,
buffer_byte_length, element_size);
@@ -424,8 +429,8 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
new_byte_length.Bind(SmiMul(new_length, element_size));
// Reading the byte length must come after the ToIndex operation, which
// could cause the buffer to become detached.
- Node* buffer_byte_length =
- LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset);
+ TNode<Number> buffer_byte_length = ChangeUintPtrToTagged(
+ LoadObjectField<UintPtrT>(buffer, JSArrayBuffer::kByteLengthOffset));
Node* end = CallBuiltin(Builtins::kAdd, context, offset.value(),
new_byte_length.value());
@@ -502,7 +507,7 @@ void TypedArrayBuiltinsAssembler::ConstructByTypedArray(
Goto(&check_for_sab);
BIND(&if_notdetached);
- source_length = LoadTypedArrayLength(typed_array);
+ source_length = LoadJSTypedArrayLength(typed_array);
Goto(&check_for_sab);
// The spec requires that constructing a typed array using a SAB-backed typed
@@ -511,7 +516,7 @@ void TypedArrayBuiltinsAssembler::ConstructByTypedArray(
BIND(&check_for_sab);
TNode<Uint32T> bitfield =
LoadObjectField<Uint32T>(source_buffer, JSArrayBuffer::kBitFieldOffset);
- Branch(IsSetWord32<JSArrayBuffer::IsShared>(bitfield), &construct,
+ Branch(IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield), &construct,
&if_buffernotshared);
BIND(&if_buffernotshared);
@@ -640,8 +645,9 @@ void TypedArrayBuiltinsAssembler::ConstructByIterable(
Label fast_path(this), slow_path(this), done(this);
CSA_ASSERT(this, IsCallable(iterator_fn));
- TNode<JSArray> array_like = CAST(
- CallBuiltin(Builtins::kIterableToList, context, iterable, iterator_fn));
+ TNode<JSArray> array_like =
+ CAST(CallBuiltin(Builtins::kIterableToListMayPreserveHoles, context,
+ iterable, iterator_fn));
TNode<Object> initial_length = LoadJSArrayLength(array_like);
TNode<JSFunction> default_constructor = CAST(LoadContextElement(
@@ -674,6 +680,17 @@ TF_BUILTIN(CreateTypedArray, TypedArrayBuiltinsAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(this->state());
TNode<JSTypedArray> result = CAST(
constructor_assembler.EmitFastNewObject(context, target, new_target));
+ // We need to set the byte_offset / byte_length fields to sane values
+ // to keep the heap verifier happy.
+ // TODO(bmeurer): Fix this initialization to not use EmitFastNewObject,
+ // which causes the problem, since it puts Undefined into all slots of
+ // the object even though that doesn't make any sense for these fields.
+ StoreObjectFieldNoWriteBarrier(result, JSTypedArray::kByteOffsetOffset,
+ UintPtrConstant(0),
+ MachineType::PointerRepresentation());
+ StoreObjectFieldNoWriteBarrier(result, JSTypedArray::kByteLengthOffset,
+ UintPtrConstant(0),
+ MachineType::PointerRepresentation());
TNode<Smi> element_size =
SmiTag(GetTypedArrayElementSize(LoadElementsKind(result)));
@@ -775,50 +792,58 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
}
}
-void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
- Node* context, Node* receiver, const char* method_name, int object_offset) {
- // Check if the {receiver} is actually a JSTypedArray.
- ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, method_name);
-
- // Check if the {receiver}'s JSArrayBuffer was neutered.
- Node* receiver_buffer =
- LoadObjectField(receiver, JSTypedArray::kBufferOffset);
- Label if_receiverisneutered(this, Label::kDeferred);
- GotoIf(IsDetachedBuffer(receiver_buffer), &if_receiverisneutered);
- Return(LoadObjectField(receiver, object_offset));
-
- BIND(&if_receiverisneutered);
- {
- // The {receiver}s buffer was neutered, default to zero.
- Return(SmiConstant(0));
- }
-}
-
// ES6 #sec-get-%typedarray%.prototype.bytelength
TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
+ const char* const kMethodName = "get TypedArray.prototype.byteLength";
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
- GenerateTypedArrayPrototypeGetter(context, receiver,
- "get TypedArray.prototype.byteLength",
- JSTypedArray::kByteLengthOffset);
+
+ // Check if the {receiver} is actually a JSTypedArray.
+ ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
+
+ // Default to zero if the {receiver}'s buffer was neutered.
+ TNode<JSArrayBuffer> receiver_buffer =
+ LoadJSArrayBufferViewBuffer(CAST(receiver));
+ TNode<UintPtrT> byte_length = Select<UintPtrT>(
+ IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
+ [=] { return LoadJSArrayBufferViewByteLength(CAST(receiver)); });
+ Return(ChangeUintPtrToTagged(byte_length));
}
// ES6 #sec-get-%typedarray%.prototype.byteoffset
TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
+ const char* const kMethodName = "get TypedArray.prototype.byteOffset";
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
- GenerateTypedArrayPrototypeGetter(context, receiver,
- "get TypedArray.prototype.byteOffset",
- JSTypedArray::kByteOffsetOffset);
+
+ // Check if the {receiver} is actually a JSTypedArray.
+ ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
+
+ // Default to zero if the {receiver}'s buffer was neutered.
+ TNode<JSArrayBuffer> receiver_buffer =
+ LoadJSArrayBufferViewBuffer(CAST(receiver));
+ TNode<UintPtrT> byte_offset = Select<UintPtrT>(
+ IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
+ [=] { return LoadJSArrayBufferViewByteOffset(CAST(receiver)); });
+ Return(ChangeUintPtrToTagged(byte_offset));
}
// ES6 #sec-get-%typedarray%.prototype.length
TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
+ const char* const kMethodName = "get TypedArray.prototype.length";
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
- GenerateTypedArrayPrototypeGetter(context, receiver,
- "get TypedArray.prototype.length",
- JSTypedArray::kLengthOffset);
+
+ // Check if the {receiver} is actually a JSTypedArray.
+ ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
+
+ // Default to zero if the {receiver}'s buffer was neutered.
+ TNode<JSArrayBuffer> receiver_buffer =
+ LoadJSArrayBufferViewBuffer(CAST(receiver));
+ TNode<Smi> length = Select<Smi>(
+ IsDetachedBuffer(receiver_buffer), [=] { return SmiConstant(0); },
+ [=] { return LoadJSTypedArrayLength(CAST(receiver)); });
+ Return(length);
}
TNode<Word32T> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
@@ -923,7 +948,7 @@ TNode<JSTypedArray> TypedArrayBuiltinsAssembler::CreateByLength(
// If newTypedArray.[[ArrayLength]] < argumentList[0], throw a TypeError
// exception.
Label if_length_is_not_short(this);
- TNode<Smi> new_length = LoadTypedArrayLength(new_typed_array);
+ TNode<Smi> new_length = LoadJSTypedArrayLength(new_typed_array);
GotoIfNot(SmiLessThan(new_length, len), &if_length_is_not_short);
ThrowTypeError(context, MessageTemplate::kTypedArrayTooShort);
@@ -979,8 +1004,8 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
// Check for possible range errors.
- TNode<IntPtrT> source_length = SmiUntag(LoadTypedArrayLength(source));
- TNode<IntPtrT> target_length = SmiUntag(LoadTypedArrayLength(target));
+ TNode<IntPtrT> source_length = SmiUntag(LoadJSTypedArrayLength(source));
+ TNode<IntPtrT> target_length = SmiUntag(LoadJSTypedArrayLength(target));
TNode<IntPtrT> required_target_length = IntPtrAdd(source_length, offset);
GotoIf(IntPtrGreaterThan(required_target_length, target_length),
@@ -1030,7 +1055,7 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
IsBigInt64ElementsKind(target_el_kind)),
&exception);
- TNode<IntPtrT> source_length = SmiUntag(LoadTypedArrayLength(source));
+ TNode<IntPtrT> source_length = SmiUntag(LoadJSTypedArrayLength(source));
CallCCopyTypedArrayElementsToTypedArray(source, target, source_length,
offset);
Goto(&out);
@@ -1051,7 +1076,7 @@ void TypedArrayBuiltinsAssembler::SetJSArraySource(
IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue)));
TNode<IntPtrT> source_length = SmiUntag(LoadFastJSArrayLength(source));
- TNode<IntPtrT> target_length = SmiUntag(LoadTypedArrayLength(target));
+ TNode<IntPtrT> target_length = SmiUntag(LoadJSTypedArrayLength(target));
// Maybe out of bounds?
GotoIf(IntPtrGreaterThan(IntPtrAdd(source_length, offset), target_length),
@@ -1266,7 +1291,7 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
TNode<JSTypedArray> source =
ValidateTypedArray(context, receiver, method_name);
- TNode<Smi> source_length = LoadTypedArrayLength(source);
+ TNode<Smi> source_length = LoadJSTypedArrayLength(source);
// Convert start offset argument to integer, and calculate relative offset.
TNode<Object> start = args.GetOptionalArgumentValue(0, SmiConstant(0));
@@ -1299,7 +1324,7 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(LoadObjectField(
result_array, JSTypedArray::kBufferOffset))));
TNode<JSArrayBuffer> receiver_buffer =
- LoadArrayBufferViewBuffer(CAST(receiver));
+ LoadJSArrayBufferViewBuffer(CAST(receiver));
ThrowIfArrayBufferIsDetached(context, receiver_buffer, method_name);
// result_array could be a different type from source or share the same
@@ -1332,24 +1357,16 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
TNode<IntPtrT> count_bytes = IntPtrMul(SmiToIntPtr(count), source_el_size);
#ifdef DEBUG
- Label done(this), to_intptr_failed(this, Label::kDeferred);
- TNode<IntPtrT> target_byte_length = TryToIntptr(
- LoadObjectField<Number>(result_array, JSTypedArray::kByteLengthOffset),
- &to_intptr_failed);
- CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, target_byte_length));
-
- TNode<IntPtrT> source_byte_length = TryToIntptr(
- LoadObjectField<Number>(source, JSTypedArray::kByteLengthOffset),
- &to_intptr_failed);
- TNode<IntPtrT> source_size_in_bytes =
- IntPtrSub(source_byte_length, source_start_bytes);
- CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, source_size_in_bytes));
- Goto(&done);
-
- BIND(&to_intptr_failed);
- Unreachable();
-
- BIND(&done);
+ TNode<UintPtrT> target_byte_length =
+ LoadJSArrayBufferViewByteLength(result_array);
+ CSA_ASSERT(this, UintPtrLessThanOrEqual(Unsigned(count_bytes),
+ target_byte_length));
+ TNode<UintPtrT> source_byte_length =
+ LoadJSArrayBufferViewByteLength(source);
+ TNode<UintPtrT> source_size_in_bytes =
+ UintPtrSub(source_byte_length, Unsigned(source_start_bytes));
+ CSA_ASSERT(this, UintPtrLessThanOrEqual(Unsigned(count_bytes),
+ source_size_in_bytes));
#endif // DEBUG
CallCMemmove(target_data_ptr, source_start, count_bytes);
@@ -1395,7 +1412,7 @@ TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) {
// 5. Let buffer be O.[[ViewedArrayBuffer]].
TNode<JSArrayBuffer> buffer = GetBuffer(context, source);
// 6. Let srcLength be O.[[ArrayLength]].
- TNode<Smi> source_length = LoadTypedArrayLength(source);
+ TNode<Smi> source_length = LoadJSTypedArrayLength(source);
// 7. Let relativeBegin be ? ToInteger(begin).
// 8. If relativeBegin < 0, let beginIndex be max((srcLength + relativeBegin),
@@ -1430,7 +1447,7 @@ TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) {
// 14. Let srcByteOffset be O.[[ByteOffset]].
TNode<Number> source_byte_offset =
- LoadObjectField<Number>(source, JSTypedArray::kByteOffsetOffset);
+ ChangeUintPtrToTagged(LoadJSArrayBufferViewByteOffset(source));
// 15. Let beginByteOffset be srcByteOffset + beginIndex × elementSize.
TNode<Number> offset = SmiMul(var_begin.value(), SmiFromIntPtr(element_size));
@@ -1606,17 +1623,6 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
"%TypedArray%.of");
}
-// This builtin always returns a new JSArray and is thus safe to use even in the
-// presence of code that may call back into user-JS.
-TF_BUILTIN(IterableToList, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
- TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
-
- IteratorBuiltinsAssembler iterator_assembler(state());
- Return(iterator_assembler.IterableToList(context, iterable, iterator_fn));
-}
-
// ES6 #sec-%typedarray%.from
TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -1818,7 +1824,7 @@ TF_BUILTIN(TypedArrayPrototypeFilter, TypedArrayBuiltinsAssembler) {
ValidateTypedArray(context, receiver, method_name);
// 3. Let len be O.[[ArrayLength]].
- TNode<Smi> length = LoadTypedArrayLength(source);
+ TNode<Smi> length = LoadJSTypedArrayLength(source);
// 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
TNode<Object> callbackfn = args.GetOptionalArgumentValue(0);
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 11768d660a..1e35ae69a9 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -21,9 +21,6 @@ class TypedArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
const char* method_name);
protected:
- void GenerateTypedArrayPrototypeGetter(Node* context, Node* receiver,
- const char* method_name,
- int object_offset);
void GenerateTypedArrayPrototypeIterationMethod(TNode<Context> context,
TNode<Object> receiver,
const char* method_name,
@@ -50,7 +47,8 @@ class TypedArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
TNode<Smi> element_size);
void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
- TNode<Number> byte_offset, TNode<Number> byte_length);
+ TNode<UintPtrT> byte_offset,
+ TNode<UintPtrT> byte_length);
void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
TNode<Map> map, TNode<Smi> length,
TNode<Number> byte_offset);
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index facfaf93f8..60be33de20 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -38,16 +38,19 @@ class WasmBuiltinsAssembler : public CodeStubAssembler {
LoadFromParentFrame(WasmCompiledFrameConstants::kWasmInstanceOffset));
}
+ TNode<Object> LoadContextFromInstance(TNode<Object> instance) {
+ return UncheckedCast<Object>(
+ Load(MachineType::AnyTagged(), instance,
+ IntPtrConstant(WasmInstanceObject::kNativeContextOffset -
+ kHeapObjectTag)));
+ }
+
TNode<Code> LoadCEntryFromInstance(TNode<Object> instance) {
return UncheckedCast<Code>(
Load(MachineType::AnyTagged(), instance,
IntPtrConstant(WasmInstanceObject::kCEntryStubOffset -
kHeapObjectTag)));
}
-
- TNode<Code> LoadCEntryFromFrame() {
- return LoadCEntryFromInstance(LoadInstanceFromFrame());
- }
};
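
LoadContextFromInstance reads the field with a raw Load at kNativeContextOffset - kHeapObjectTag: V8 heap pointers are tagged (kHeapObjectTag == 1 distinguishes a HeapObject from a Smi), so the machine address of a field is the tagged pointer plus the field offset minus the tag. A toy model of that arithmetic:

#include <cstdint>
#include <iostream>

constexpr uintptr_t kHeapObjectTag = 1;  // matches V8's tagging scheme

int main() {
  alignas(8) uint64_t object[2] = {0xAAAA, 0xBBBB};  // two word-sized fields
  // A tagged pointer is the object's address with the tag bit set.
  uintptr_t tagged = reinterpret_cast<uintptr_t>(object) + kHeapObjectTag;
  constexpr uintptr_t kFieldOffset = 8;  // offset of the second field
  auto* field =
      reinterpret_cast<uint64_t*>(tagged + kFieldOffset - kHeapObjectTag);
  std::cout << std::hex << *field << "\n";  // bbbb
}
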
TF_BUILTIN(WasmAllocateHeapNumber, WasmBuiltinsAssembler) {
@@ -55,18 +58,6 @@ TF_BUILTIN(WasmAllocateHeapNumber, WasmBuiltinsAssembler) {
TailCallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant());
}
-TF_BUILTIN(WasmArgumentsAdaptor, WasmBuiltinsAssembler) {
- TNode<Object> context = UncheckedParameter(Descriptor::kContext);
- TNode<Object> function = UncheckedParameter(Descriptor::kTarget);
- TNode<Object> new_target = UncheckedParameter(Descriptor::kNewTarget);
- TNode<Object> argc1 = UncheckedParameter(Descriptor::kActualArgumentsCount);
- TNode<Object> argc2 = UncheckedParameter(Descriptor::kExpectedArgumentsCount);
- TNode<Code> target =
- LoadBuiltinFromFrame(Builtins::kArgumentsAdaptorTrampoline);
- TailCallStub(ArgumentAdaptorDescriptor{}, target, context, function,
- new_target, argc1, argc2);
-}
-
TF_BUILTIN(WasmCallJavaScript, WasmBuiltinsAssembler) {
TNode<Object> context = UncheckedParameter(Descriptor::kContext);
TNode<Object> function = UncheckedParameter(Descriptor::kFunction);
@@ -83,9 +74,18 @@ TF_BUILTIN(WasmToNumber, WasmBuiltinsAssembler) {
}
TF_BUILTIN(WasmStackGuard, WasmBuiltinsAssembler) {
- TNode<Code> centry = LoadCEntryFromFrame();
- TailCallRuntimeWithCEntry(Runtime::kWasmStackGuard, centry,
- NoContextConstant());
+ TNode<Object> instance = LoadInstanceFromFrame();
+ TNode<Code> centry = LoadCEntryFromInstance(instance);
+ TNode<Object> context = LoadContextFromInstance(instance);
+ TailCallRuntimeWithCEntry(Runtime::kWasmStackGuard, centry, context);
+}
+
+TF_BUILTIN(WasmThrow, WasmBuiltinsAssembler) {
+ TNode<Object> exception = UncheckedParameter(Descriptor::kException);
+ TNode<Object> instance = LoadInstanceFromFrame();
+ TNode<Code> centry = LoadCEntryFromInstance(instance);
+ TNode<Object> context = LoadContextFromInstance(instance);
+ TailCallRuntimeWithCEntry(Runtime::kThrow, centry, context, exception);
}
TF_BUILTIN(WasmGrowMemory, WasmBuiltinsAssembler) {
@@ -100,9 +100,9 @@ TF_BUILTIN(WasmGrowMemory, WasmBuiltinsAssembler) {
TNode<Smi> num_pages_smi = SmiFromInt32(num_pages);
TNode<Object> instance = LoadInstanceFromFrame();
TNode<Code> centry = LoadCEntryFromInstance(instance);
- TNode<Smi> ret_smi = UncheckedCast<Smi>(
- CallRuntimeWithCEntry(Runtime::kWasmGrowMemory, centry,
- NoContextConstant(), instance, num_pages_smi));
+ TNode<Object> context = LoadContextFromInstance(instance);
+ TNode<Smi> ret_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
+ Runtime::kWasmGrowMemory, centry, context, instance, num_pages_smi));
TNode<Int32T> ret = SmiToInt32(ret_smi);
ReturnRaw(ret);
@@ -112,10 +112,12 @@ TF_BUILTIN(WasmGrowMemory, WasmBuiltinsAssembler) {
#define DECLARE_ENUM(name) \
TF_BUILTIN(ThrowWasm##name, WasmBuiltinsAssembler) { \
- TNode<Code> centry = LoadCEntryFromFrame(); \
+ TNode<Object> instance = LoadInstanceFromFrame(); \
+ TNode<Code> centry = LoadCEntryFromInstance(instance); \
+ TNode<Object> context = LoadContextFromInstance(instance); \
int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
- TailCallRuntimeWithCEntry(Runtime::kThrowWasmError, centry, \
- NoContextConstant(), SmiConstant(message_id)); \
+ TailCallRuntimeWithCEntry(Runtime::kThrowWasmError, centry, context, \
+ SmiConstant(message_id)); \
}
FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
#undef DECLARE_ENUM
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index b2c7433e8d..103f00c56e 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -51,13 +51,12 @@ struct BuiltinMetadata {
#define DECL_TFC(Name, ...) { #Name, Builtins::TFC, {} },
#define DECL_TFS(Name, ...) { #Name, Builtins::TFS, {} },
#define DECL_TFH(Name, ...) { #Name, Builtins::TFH, {} },
-#define DECL_BCH(Name, ...) { #Name "Handler", Builtins::BCH, {} }, \
- { #Name "WideHandler", Builtins::BCH, {} }, \
- { #Name "ExtraWideHandler", Builtins::BCH, {} },
+#define DECL_BCH(Name, ...) { #Name, Builtins::BCH, {} },
+#define DECL_DLH(Name, ...) { #Name, Builtins::DLH, {} },
#define DECL_ASM(Name, ...) { #Name, Builtins::ASM, {} },
const BuiltinMetadata builtin_metadata[] = {
BUILTIN_LIST(DECL_CPP, DECL_API, DECL_TFJ, DECL_TFC, DECL_TFS, DECL_TFH,
- DECL_BCH, DECL_ASM)
+ DECL_BCH, DECL_DLH, DECL_ASM)
};
#undef DECL_CPP
#undef DECL_API
@@ -66,6 +65,7 @@ const BuiltinMetadata builtin_metadata[] = {
#undef DECL_TFS
#undef DECL_TFH
#undef DECL_BCH
+#undef DECL_DLH
#undef DECL_ASM
// clang-format on
@@ -166,11 +166,12 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
break; \
}
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
- CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, IGNORE_BUILTIN,
+ IGNORE_BUILTIN)
#undef CASE_OTHER
default:
Builtins::Kind kind = Builtins::KindOf(name);
- DCHECK_NE(kind, BCH);
+ DCHECK(kind != BCH && kind != DLH);
if (kind == TFJ || kind == CPP) {
return Callable(code, JSTrampolineDescriptor{});
}
@@ -264,8 +265,11 @@ bool Builtins::IsLazy(int index) {
case kArrayReduceRightPreLoopEagerDeoptContinuation:
case kArraySomeLoopEagerDeoptContinuation:
case kArraySomeLoopLazyDeoptContinuation:
- case kAsyncGeneratorAwaitCaught: // https://crbug.com/v8/6786.
- case kAsyncGeneratorAwaitUncaught: // https://crbug.com/v8/6786.
+ case kAsyncFunctionAwaitResolveClosure: // https://crbug.com/v8/7522
+ case kAsyncGeneratorAwaitResolveClosure: // https://crbug.com/v8/7522
+ case kAsyncGeneratorYieldResolveClosure: // https://crbug.com/v8/7522
+ case kAsyncGeneratorAwaitCaught: // https://crbug.com/v8/6786.
+ case kAsyncGeneratorAwaitUncaught: // https://crbug.com/v8/6786.
// CEntry variants must be immovable, whereas lazy deserialization allocates
// movable code.
case kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
@@ -281,9 +285,13 @@ bool Builtins::IsLazy(int index) {
case kCompileLazy:
case kDebugBreakTrampoline:
case kDeserializeLazy:
+ case kDeserializeLazyHandler:
+ case kDeserializeLazyWideHandler:
+ case kDeserializeLazyExtraWideHandler:
case kFunctionPrototypeHasInstance: // https://crbug.com/v8/6786.
case kHandleApiCall:
case kIllegal:
+ case kIllegalHandler:
case kInstantiateAsmJs:
case kInterpreterEnterBytecodeAdvance:
case kInterpreterEnterBytecodeDispatch:
@@ -306,27 +314,21 @@ bool Builtins::IsLazy(int index) {
return false;
default:
// TODO(6624): Extend to other kinds.
- return KindOf(index) == TFJ;
+ return KindOf(index) == TFJ || KindOf(index) == BCH;
}
UNREACHABLE();
}
// static
+bool Builtins::IsLazyDeserializer(Code* code) {
+ return IsLazyDeserializer(code->builtin_index());
+}
+
+// static
bool Builtins::IsIsolateIndependent(int index) {
DCHECK(IsBuiltinId(index));
#ifndef V8_TARGET_ARCH_IA32
switch (index) {
-// Bytecode handlers do not yet support being embedded.
-#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
-#define BYTECODE_BUILTIN(Name, ...) \
- case k##Name##Handler: \
- case k##Name##WideHandler: \
- case k##Name##ExtraWideHandler: \
- return false;
- BUILTIN_LIST_BYTECODE_HANDLERS(BYTECODE_BUILTIN)
-#undef BYTECODE_BUILTIN
-#endif // V8_EMBEDDED_BYTECODE_HANDLERS
-
// TODO(jgruber): There are currently two blockers for moving
// InterpreterEntryTrampoline into the binary:
// 1. InterpreterEnterBytecode calculates a pointer into the middle of
@@ -423,6 +425,7 @@ const char* Builtins::KindNameOf(int index) {
case TFS: return "TFS";
case TFH: return "TFH";
case BCH: return "BCH";
+ case DLH: return "DLH";
case ASM: return "ASM";
}
// clang-format on
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index 0bd3c317bf..9f404a0ac0 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -25,6 +25,11 @@ namespace compiler {
class CodeAssemblerState;
}
+template <typename T>
+static constexpr T FirstFromVarArgs(T x, ...) noexcept {
+ return x;
+}
+
// Convenience macro to avoid generating named accessors for all builtins.
#define BUILTIN_CODE(isolate, name) \
(isolate)->builtins()->builtin_handle(Builtins::k##name)
@@ -40,13 +45,16 @@ class Builtins {
enum Name : int32_t {
#define DEF_ENUM(Name, ...) k##Name,
-#define DEF_ENUM_BYTECODE_HANDLER(Name, ...) \
- k##Name##Handler, k##Name##WideHandler, k##Name##ExtraWideHandler,
BUILTIN_LIST(DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM,
- DEF_ENUM_BYTECODE_HANDLER, DEF_ENUM)
+ DEF_ENUM, DEF_ENUM, DEF_ENUM)
#undef DEF_ENUM
-#undef DEF_ENUM_BYTECODE_HANDLER
- builtin_count
+ builtin_count,
+
+#define EXTRACT_NAME(Name, ...) k##Name,
+ // Define kFirstBytecodeHandler.
+ kFirstBytecodeHandler =
+ FirstFromVarArgs(BUILTIN_LIST_BYTECODE_HANDLERS(EXTRACT_NAME) 0)
+#undef EXTRACT_NAME
};
static const int32_t kNoBuiltinId = -1;
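
FirstFromVarArgs picks out the first builtin generated by BUILTIN_LIST_BYTECODE_HANDLERS: the macro expands to a comma-terminated list `kFoo, kBar, ...,`, the trailing `0` completes the argument list, and everything after the first argument is swallowed by the ellipsis. A reduced version of the trick with a toy bytecode list (names and values are made up):

#include <iostream>

template <typename T>
static constexpr T FirstFromVarArgs(T x, ...) noexcept { return x; }

#define TOY_BYTECODE_LIST(V) V(Ldar) V(Star) V(Add)
#define EXTRACT_NAME(Name, ...) k##Name,

enum Name : int {
  kLdar = 10,
  kStar,
  kAdd,
  // Expands to FirstFromVarArgs(kLdar, kStar, kAdd, 0) == kLdar.
  kFirstHandler = FirstFromVarArgs(TOY_BYTECODE_LIST(EXTRACT_NAME) 0)
};
#undef EXTRACT_NAME

int main() { std::cout << kFirstHandler << "\n"; }  // 10
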
@@ -56,7 +64,7 @@ class Builtins {
}
// The different builtin kinds are documented in builtins-definitions.h.
- enum Kind { CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM };
+ enum Kind { CPP, API, TFJ, TFC, TFS, TFH, BCH, DLH, ASM };
static BailoutId GetContinuationBailoutId(Name name);
static Name GetBuiltinFromBailoutId(BailoutId);
@@ -111,6 +119,35 @@ class Builtins {
// special cases such as CompileLazy and DeserializeLazy.
static bool IsLazy(int index);
+ static constexpr int kFirstWideBytecodeHandler =
+ kFirstBytecodeHandler + kNumberOfBytecodeHandlers;
+ static constexpr int kFirstExtraWideBytecodeHandler =
+ kFirstWideBytecodeHandler + kNumberOfWideBytecodeHandlers;
+ STATIC_ASSERT(kFirstExtraWideBytecodeHandler +
+ kNumberOfWideBytecodeHandlers ==
+ builtin_count);
+
+ // Returns the index of the appropriate lazy deserializer in the builtins
+ // table.
+ static constexpr int LazyDeserializerForBuiltin(const int index) {
+ return index < kFirstWideBytecodeHandler
+ ? (index < kFirstBytecodeHandler
+ ? Builtins::kDeserializeLazy
+ : Builtins::kDeserializeLazyHandler)
+ : (index < kFirstExtraWideBytecodeHandler
+ ? Builtins::kDeserializeLazyWideHandler
+ : Builtins::kDeserializeLazyExtraWideHandler);
+ }
+
+ static constexpr bool IsLazyDeserializer(int builtin_index) {
+ return builtin_index == kDeserializeLazy ||
+ builtin_index == kDeserializeLazyHandler ||
+ builtin_index == kDeserializeLazyWideHandler ||
+ builtin_index == kDeserializeLazyExtraWideHandler;
+ }
+
+ static bool IsLazyDeserializer(Code* code);
+
// Helper methods used for testing isolate-independent builtins.
// TODO(jgruber,v8:6666): Remove once all builtins have been migrated.
static bool IsIsolateIndependent(int index);
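
LazyDeserializerForBuiltin relies on the table layout established above: all plain bytecode handlers form one contiguous index range, followed by the wide and then the extra-wide handlers, so two comparisons select among the four lazy deserializers. A toy reproduction of the range dispatch (boundary indices are made up; in V8 they derive from the handler counts):

#include <iostream>

constexpr int kDeserializeLazy = 0;
constexpr int kDeserializeLazyHandler = 1;
constexpr int kDeserializeLazyWideHandler = 2;
constexpr int kDeserializeLazyExtraWideHandler = 3;

constexpr int kFirstBytecodeHandler = 100;           // assumed layout
constexpr int kFirstWideBytecodeHandler = 200;       // assumed layout
constexpr int kFirstExtraWideBytecodeHandler = 300;  // assumed layout

constexpr int LazyDeserializerForBuiltin(int index) {
  return index < kFirstWideBytecodeHandler
             ? (index < kFirstBytecodeHandler ? kDeserializeLazy
                                              : kDeserializeLazyHandler)
             : (index < kFirstExtraWideBytecodeHandler
                    ? kDeserializeLazyWideHandler
                    : kDeserializeLazyExtraWideHandler);
}

int main() {
  std::cout << LazyDeserializerForBuiltin(42) << " "     // 0: ordinary builtin
            << LazyDeserializerForBuiltin(150) << " "    // 1: bytecode handler
            << LazyDeserializerForBuiltin(250) << " "    // 2: wide handler
            << LazyDeserializerForBuiltin(350) << "\n";  // 3: extra-wide
}
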
@@ -179,7 +216,8 @@ class Builtins {
static void Generate_##Name(compiler::CodeAssemblerState* state);
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
- DECLARE_TF, DECLARE_TF, IGNORE_BUILTIN, DECLARE_ASM)
+ DECLARE_TF, DECLARE_TF, IGNORE_BUILTIN, IGNORE_BUILTIN,
+ DECLARE_ASM)
#undef DECLARE_ASM
#undef DECLARE_TF
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index 04c0655bf7..26995453dd 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -19,14 +19,14 @@ BuiltinsConstantsTableBuilder::BuiltinsConstantsTableBuilder(Isolate* isolate)
// as a constant, which means that codegen will load it using the root
// register.
DCHECK(isolate_->heap()->RootCanBeTreatedAsConstant(
- Heap::kEmptyFixedArrayRootIndex));
+ RootIndex::kEmptyFixedArray));
}
uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
#ifdef DEBUG
// Roots must not be inserted into the constants table as they are already
// accessible from the root list.
- Heap::RootListIndex root_list_index;
+ RootIndex root_list_index;
DCHECK(!isolate_->heap()->IsRootHandle(object, &root_list_index));
// Not yet finalized.
@@ -56,7 +56,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
#ifdef DEBUG
// Roots must not be inserted into the constants table as they are already
// accessible from the root list.
- Heap::RootListIndex root_list_index;
+ RootIndex root_list_index;
DCHECK(!isolate_->heap()->IsRootHandle(code_object, &root_list_index));
// Not yet finalized.
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 874c122995..1e86f88d83 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -3,15 +3,14 @@
// found in the LICENSE file.
module data_view {
-
extern operator '.buffer'
- macro LoadArrayBufferViewBuffer(JSArrayBufferView): JSArrayBuffer;
+ macro LoadJSArrayBufferViewBuffer(JSArrayBufferView): JSArrayBuffer;
extern operator '.byte_length'
- macro LoadDataViewByteLength(JSDataView): Number;
+ macro LoadJSArrayBufferViewByteLength(JSArrayBufferView): uintptr;
extern operator '.byte_offset'
- macro LoadDataViewByteOffset(JSDataView): Number;
+ macro LoadJSArrayBufferViewByteOffset(JSArrayBufferView): uintptr;
extern operator '.backing_store'
- macro LoadArrayBufferBackingStore(JSArrayBuffer): RawPtr;
+ macro LoadJSArrayBufferBackingStore(JSArrayBuffer): RawPtr;
macro MakeDataViewGetterNameString(kind: constexpr ElementsKind): String {
if constexpr (kind == UINT8_ELEMENTS) {
@@ -69,10 +68,10 @@ module data_view {
return IsDetachedBuffer(view.buffer);
}
- macro ValidateDataView(context: Context,
- o: Object, method: String): JSDataView {
+ macro ValidateDataView(
+ context: Context, o: Object, method: String): JSDataView {
try {
- return cast<JSDataView>(o) otherwise CastError;
+ return Cast<JSDataView>(o) otherwise CastError;
}
label CastError {
ThrowTypeError(context, kIncompatibleMethodReceiver, method);
@@ -82,35 +81,35 @@ module data_view {
// ES6 section 24.2.4.1 get DataView.prototype.buffer
javascript builtin DataViewPrototypeGetBuffer(
context: Context, receiver: Object, ...arguments): JSArrayBuffer {
- let data_view: JSDataView = ValidateDataView(
- context, receiver, 'get DataView.prototype.buffer');
- return data_view.buffer;
+ let dataView: JSDataView =
+ ValidateDataView(context, receiver, 'get DataView.prototype.buffer');
+ return dataView.buffer;
}
// ES6 section 24.2.4.2 get DataView.prototype.byteLength
javascript builtin DataViewPrototypeGetByteLength(
context: Context, receiver: Object, ...arguments): Number {
- let data_view: JSDataView = ValidateDataView(
+ let dataView: JSDataView = ValidateDataView(
context, receiver, 'get DataView.prototype.byte_length');
- if (WasNeutered(data_view)) {
+ if (WasNeutered(dataView)) {
// TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {data_view} was neutered.
+ // here if the JSArrayBuffer of the {dataView} was neutered.
return 0;
}
- return data_view.byte_length;
+ return Convert<Number>(dataView.byte_length);
}
// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
javascript builtin DataViewPrototypeGetByteOffset(
context: Context, receiver: Object, ...arguments): Number {
- let data_view: JSDataView = ValidateDataView(
+ let dataView: JSDataView = ValidateDataView(
context, receiver, 'get DataView.prototype.byte_offset');
- if (WasNeutered(data_view)) {
+ if (WasNeutered(dataView)) {
// TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {data_view} was neutered.
+ // here if the JSArrayBuffer of the {dataView} was neutered.
return 0;
}
- return data_view.byte_offset;
+ return Convert<Number>(dataView.byte_offset);
}
extern macro BitcastInt32ToFloat32(uint32): float32;
@@ -120,109 +119,110 @@ module data_view {
extern macro Float64InsertLowWord32(float64, uint32): float64;
extern macro Float64InsertHighWord32(float64, uint32): float64;
- extern macro LoadUint8(RawPtr, intptr): uint32;
- extern macro LoadInt8(RawPtr, intptr): int32;
+ extern macro LoadUint8(RawPtr, uintptr): uint32;
+ extern macro LoadInt8(RawPtr, uintptr): int32;
- macro LoadDataView8(buffer: JSArrayBuffer, offset: intptr,
- signed: constexpr bool): Smi {
+ macro LoadDataView8(
+ buffer: JSArrayBuffer, offset: uintptr, signed: constexpr bool): Smi {
if constexpr (signed) {
- return convert<Smi>(LoadInt8(buffer.backing_store, offset));
+ return Convert<Smi>(LoadInt8(buffer.backing_store, offset));
} else {
- return convert<Smi>(LoadUint8(buffer.backing_store, offset));
+ return Convert<Smi>(LoadUint8(buffer.backing_store, offset));
}
}
- macro LoadDataView16(buffer: JSArrayBuffer, offset: intptr,
- requested_little_endian: bool,
- signed: constexpr bool): Number {
- let data_pointer: RawPtr = buffer.backing_store;
+ macro LoadDataView16(
+ buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
+ signed: constexpr bool): Number {
+ let dataPointer: RawPtr = buffer.backing_store;
let b0: int32;
let b1: int32;
let result: int32;
// Sign-extend the most significant byte by loading it as an Int8.
- if (requested_little_endian) {
- b0 = Signed(LoadUint8(data_pointer, offset));
- b1 = LoadInt8(data_pointer, offset + 1);
+ if (requestedLittleEndian) {
+ b0 = Signed(LoadUint8(dataPointer, offset));
+ b1 = LoadInt8(dataPointer, offset + 1);
result = (b1 << 8) + b0;
} else {
- b0 = LoadInt8(data_pointer, offset);
- b1 = Signed(LoadUint8(data_pointer, offset + 1));
+ b0 = LoadInt8(dataPointer, offset);
+ b1 = Signed(LoadUint8(dataPointer, offset + 1));
result = (b0 << 8) + b1;
}
if constexpr (signed) {
- return convert<Smi>(result);
+ return Convert<Smi>(result);
} else {
// Bit-mask the higher bits to prevent sign extension if we're unsigned.
- return convert<Smi>(result & 0xFFFF);
+ return Convert<Smi>(result & 0xFFFF);
}
}
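
LoadDataView16 avoids an explicit sign-extension step by loading the most significant byte as a signed Int8 before shifting it into place. A standalone C++ check of the scheme for both byte orders (using * 256 in place of << 8, the same operation but well-defined for negative values in older C++ standards):

#include <cstdint>
#include <iostream>

// The most significant byte is read as signed int8, so msb * 256 + lsb
// is already sign-extended.
int32_t LoadInt16(const uint8_t* p, bool little_endian) {
  if (little_endian) {
    uint32_t b0 = p[0];                      // unsigned low byte
    int32_t b1 = static_cast<int8_t>(p[1]);  // signed high byte
    return b1 * 256 + static_cast<int32_t>(b0);
  }
  int32_t b0 = static_cast<int8_t>(p[0]);    // signed high byte
  uint32_t b1 = p[1];                        // unsigned low byte
  return b0 * 256 + static_cast<int32_t>(b1);
}

int main() {
  const uint8_t bytes[2] = {0x34, 0xFF};
  std::cout << LoadInt16(bytes, true) << "\n";   // 0xFF34 as int16 == -204
  std::cout << LoadInt16(bytes, false) << "\n";  // 0x34FF as int16 == 13567
}
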
- macro LoadDataView32(buffer: JSArrayBuffer, offset: intptr,
- requested_little_endian: bool,
- kind: constexpr ElementsKind): Number {
- let data_pointer: RawPtr = buffer.backing_store;
+ macro LoadDataView32(
+ buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
+ kind: constexpr ElementsKind): Number {
+ let dataPointer: RawPtr = buffer.backing_store;
- let b0: uint32 = LoadUint8(data_pointer, offset);
- let b1: uint32 = LoadUint8(data_pointer, offset + 1);
- let b2: uint32 = LoadUint8(data_pointer, offset + 2);
- let b3: uint32 = LoadUint8(data_pointer, offset + 3);
+ let b0: uint32 = LoadUint8(dataPointer, offset);
+ let b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ let b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ let b3: uint32 = LoadUint8(dataPointer, offset + 3);
let result: uint32;
- if (requested_little_endian) {
+ if (requestedLittleEndian) {
result = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
} else {
result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
}
if constexpr (kind == INT32_ELEMENTS) {
- return convert<Number>(Signed(result));
+ return Convert<Number>(Signed(result));
} else if constexpr (kind == UINT32_ELEMENTS) {
- return convert<Number>(result);
+ return Convert<Number>(result);
} else if constexpr (kind == FLOAT32_ELEMENTS) {
- let float_res: float64 = convert<float64>(BitcastInt32ToFloat32(result));
- return convert<Number>(float_res);
+ let floatRes: float64 = Convert<float64>(BitcastInt32ToFloat32(result));
+ return Convert<Number>(floatRes);
} else {
unreachable;
}
}
- macro LoadDataViewFloat64(buffer: JSArrayBuffer, offset: intptr,
- requested_little_endian: bool): Number {
- let data_pointer: RawPtr = buffer.backing_store;
-
- let b0: uint32 = LoadUint8(data_pointer, offset);
- let b1: uint32 = LoadUint8(data_pointer, offset + 1);
- let b2: uint32 = LoadUint8(data_pointer, offset + 2);
- let b3: uint32 = LoadUint8(data_pointer, offset + 3);
- let b4: uint32 = LoadUint8(data_pointer, offset + 4);
- let b5: uint32 = LoadUint8(data_pointer, offset + 5);
- let b6: uint32 = LoadUint8(data_pointer, offset + 6);
- let b7: uint32 = LoadUint8(data_pointer, offset + 7);
- let low_word: uint32;
- let high_word: uint32;
-
- if (requested_little_endian) {
- low_word = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- high_word = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
+ macro LoadDataViewFloat64(
+ buffer: JSArrayBuffer, offset: uintptr,
+ requestedLittleEndian: bool): Number {
+ let dataPointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = LoadUint8(dataPointer, offset);
+ let b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ let b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ let b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ let b4: uint32 = LoadUint8(dataPointer, offset + 4);
+ let b5: uint32 = LoadUint8(dataPointer, offset + 5);
+ let b6: uint32 = LoadUint8(dataPointer, offset + 6);
+ let b7: uint32 = LoadUint8(dataPointer, offset + 7);
+ let lowWord: uint32;
+ let highWord: uint32;
+
+ if (requestedLittleEndian) {
+ lowWord = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ highWord = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
} else {
- high_word = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
- low_word = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
+ highWord = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ lowWord = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
}
let result: float64 = 0;
- result = Float64InsertLowWord32(result, low_word);
- result = Float64InsertHighWord32(result, high_word);
+ result = Float64InsertLowWord32(result, lowWord);
+ result = Float64InsertHighWord32(result, highWord);
- return convert<Number>(result);
+ return Convert<Number>(result);
}
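
LoadDataViewFloat64 assembles its result by inserting the two 32-bit words as the low and high halves of a float64 bit pattern. A portable C++ equivalent of that final step:

#include <cstdint>
#include <cstring>
#include <iostream>

// Equivalent of Float64InsertLowWord32/Float64InsertHighWord32: build the
// 64-bit pattern, then bit-cast it to a double via memcpy (no aliasing UB).
double MakeFloat64(uint32_t low_word, uint32_t high_word) {
  uint64_t bits = (static_cast<uint64_t>(high_word) << 32) | low_word;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  // The IEEE-754 bit pattern of 1.0 is 0x3FF0000000000000.
  std::cout << MakeFloat64(0x00000000, 0x3FF00000) << "\n";  // 1
}
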
extern macro AllocateBigInt(intptr): BigInt;
extern macro StoreBigIntBitfield(BigInt, intptr): void;
extern macro StoreBigIntDigit(BigInt, constexpr int31, uintptr): void;
- extern macro DataViewEncodeBigIntBits(constexpr bool,
- constexpr int31): intptr;
+ extern macro DataViewEncodeBigIntBits(
+ constexpr bool, constexpr int31): intptr;
const kPositiveBigInt: constexpr bool = false;
const kNegativeBigInt: constexpr bool = true;
@@ -230,173 +230,168 @@ module data_view {
const kOneDigitBigInt: constexpr int31 = 1;
const kTwoDigitBigInt: constexpr int31 = 2;
- macro CreateEmptyBigInt(is_positive: bool, length: constexpr int31): BigInt {
+ macro CreateEmptyBigInt(isPositive: bool, length: constexpr int31): BigInt {
// Allocate a BigInt with the desired length (number of digits).
let result: BigInt = AllocateBigInt(length);
// Write the desired sign and length to the BigInt bitfield.
- if (is_positive) {
- StoreBigIntBitfield(result,
- DataViewEncodeBigIntBits(kPositiveBigInt, length));
+ if (isPositive) {
+ StoreBigIntBitfield(
+ result, DataViewEncodeBigIntBits(kPositiveBigInt, length));
} else {
- StoreBigIntBitfield(result,
- DataViewEncodeBigIntBits(kNegativeBigInt, length));
+ StoreBigIntBitfield(
+ result, DataViewEncodeBigIntBits(kNegativeBigInt, length));
}
return result;
}
// Create a BigInt on a 64-bit architecture from two 32-bit values.
- macro MakeBigIntOn64Bit(low_word: uint32, high_word: uint32,
- signed: constexpr bool): BigInt {
-
+ macro MakeBigIntOn64Bit(
+ lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
// 0n is represented by a zero-length BigInt.
- if (low_word == 0 && high_word == 0) {
+ if (lowWord == 0 && highWord == 0) {
return AllocateBigInt(kZeroDigitBigInt);
}
- let is_positive: bool = true;
- let high_part: intptr = Signed(convert<uintptr>(high_word));
- let low_part: intptr = Signed(convert<uintptr>(low_word));
- let raw_value: intptr = (high_part << 32) + low_part;
+ let isPositive: bool = true;
+ let highPart: intptr = Signed(Convert<uintptr>(highWord));
+ let lowPart: intptr = Signed(Convert<uintptr>(lowWord));
+ let rawValue: intptr = (highPart << 32) + lowPart;
if constexpr (signed) {
- if (raw_value < 0) {
- is_positive = false;
- // We have to store the absolute value of raw_value in the digit.
- raw_value = 0 - raw_value;
+ if (rawValue < 0) {
+ isPositive = false;
+ // We have to store the absolute value of rawValue in the digit.
+ rawValue = 0 - rawValue;
}
}
// Allocate the BigInt and store the absolute value.
- let result: BigInt = CreateEmptyBigInt(is_positive, kOneDigitBigInt);
+ let result: BigInt = CreateEmptyBigInt(isPositive, kOneDigitBigInt);
- StoreBigIntDigit(result, 0, Unsigned(raw_value));
+ StoreBigIntDigit(result, 0, Unsigned(rawValue));
return result;
}
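
On 64-bit targets MakeBigIntOn64Bit concatenates the two words into one machine word; for signed reads, a negative value flips the BigInt sign and the digit stores the absolute value. A plain-integer restatement of the signed case (using unsigned wrap-around arithmetic, which keeps every step well-defined):

#include <cstdint>
#include <iostream>

void SplitSignAndDigit(uint32_t low_word, uint32_t high_word) {
  uint64_t raw = (static_cast<uint64_t>(high_word) << 32) | low_word;
  bool is_negative = (raw >> 63) != 0;           // sign bit of the signed view
  uint64_t digit = is_negative ? 0 - raw : raw;  // absolute value, mod 2^64
  std::cout << (is_negative ? "-" : "+") << digit << "\n";
}

int main() {
  SplitSignAndDigit(0xFFFFFFFFu, 0xFFFFFFFFu);  // raw == -1: prints -1
  SplitSignAndDigit(42, 0);                     // prints +42
}
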
// Create a BigInt on a 32-bit architecture from two 32-bit values.
- macro MakeBigIntOn32Bit(low_word: uint32, high_word: uint32,
- signed: constexpr bool): BigInt {
-
+ macro MakeBigIntOn32Bit(
+ lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
// 0n is represented by a zero-length BigInt.
- if (low_word == 0 && high_word == 0) {
+ if (lowWord == 0 && highWord == 0) {
return AllocateBigInt(kZeroDigitBigInt);
}
// On a 32-bit platform, we might need 1 or 2 digits to store the number.
- let need_two_digits: bool = false;
- let is_positive: bool = true;
+ let needTwoDigits: bool = false;
+ let isPositive: bool = true;
- // We need to do some math on low_word and high_word,
- // so convert them to int32.
- let low_part: int32 = Signed(low_word);
- let high_part: int32 = Signed(high_word);
+ // We need to do some math on lowWord and highWord,
+ // so convert them to int32.
+ let lowPart: int32 = Signed(lowWord);
+ let highPart: int32 = Signed(highWord);
- // If high_word == 0, the number is positive, and we only need 1 digit,
+ // If highWord == 0, the number is positive, and we only need 1 digit,
// so we don't have anything to do.
// Otherwise, all cases are possible.
- if (high_word != 0) {
+ if (highWord != 0) {
if constexpr (signed) {
-
- // If high_part < 0, the number is always negative.
- if (high_part < 0) {
- is_positive = false;
+ // If highPart < 0, the number is always negative.
+ if (highPart < 0) {
+ isPositive = false;
// We have to compute the absolute value by hand.
// There will be a negative carry from the low word
// to the high word iff low != 0.
- high_part = 0 - high_part;
- if (low_part != 0) {
- high_part = high_part - 1;
+ highPart = 0 - highPart;
+ if (lowPart != 0) {
+ highPart = highPart - 1;
}
- low_part = 0 - low_part;
+ lowPart = 0 - lowPart;
- // Here, high_part could be 0 again so we might have 1 or 2 digits.
- if (high_part != 0) {
- need_two_digits = true;
+ // Here, highPart could be 0 again so we might have 1 or 2 digits.
+ if (highPart != 0) {
+ needTwoDigits = true;
}
} else {
// In this case, the number is positive, and we need 2 digits.
- need_two_digits = true;
+ needTwoDigits = true;
}
} else {
// In this case, the number is positive (unsigned),
// and we need 2 digits.
- need_two_digits = true;
+ needTwoDigits = true;
}
}
// Allocate the BigInt with the right sign and length.
let result: BigInt;
- if (need_two_digits) {
- result = CreateEmptyBigInt(is_positive, kTwoDigitBigInt);
+ if (needTwoDigits) {
+ result = CreateEmptyBigInt(isPositive, kTwoDigitBigInt);
} else {
- result = CreateEmptyBigInt(is_positive, kOneDigitBigInt);
+ result = CreateEmptyBigInt(isPositive, kOneDigitBigInt);
}
// Finally, write the digit(s) to the BigInt.
- StoreBigIntDigit(result, 0, Unsigned(convert<intptr>(low_part)));
+ StoreBigIntDigit(result, 0, Unsigned(Convert<intptr>(lowPart)));
- if (need_two_digits) {
- StoreBigIntDigit(result, 1, Unsigned(convert<intptr>(high_part)));
+ if (needTwoDigits) {
+ StoreBigIntDigit(result, 1, Unsigned(Convert<intptr>(highPart)));
}
return result;
}
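
The negate-with-borrow step above implements two's-complement negation across a 32-bit split: negate the low half, negate the high half, and subtract a borrow from the high half exactly when the low half is nonzero. A quick check of the identity:

#include <cstdint>
#include <iostream>

int main() {
  uint64_t value = 0x100000000ull;  // high = 1, low = 0
  uint32_t low = static_cast<uint32_t>(value);
  uint32_t high = static_cast<uint32_t>(value >> 32);

  // -(high:low): low' = -low, high' = -high - (low != 0 ? 1 : 0),
  // all in 32-bit wrap-around arithmetic.
  uint32_t neg_high = 0 - high;
  if (low != 0) neg_high -= 1;  // borrow from the low half
  uint32_t neg_low = 0 - low;

  uint64_t reassembled = (static_cast<uint64_t>(neg_high) << 32) | neg_low;
  std::cout << (reassembled == 0 - value) << "\n";  // 1: identity holds
}
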
- macro MakeBigInt(low_word: uint32, high_word: uint32,
- signed: constexpr bool): BigInt {
+ macro MakeBigInt(
+ lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
// A BigInt digit has the platform word size, so we only need one digit
// on 64-bit platforms but may need two on 32-bit.
if constexpr (Is64()) {
- return MakeBigIntOn64Bit(low_word, high_word, signed);
+ return MakeBigIntOn64Bit(lowWord, highWord, signed);
} else {
- return MakeBigIntOn32Bit(low_word, high_word, signed);
+ return MakeBigIntOn32Bit(lowWord, highWord, signed);
}
}
- macro LoadDataViewBigInt(buffer: JSArrayBuffer, offset: intptr,
- requested_little_endian: bool,
- signed: constexpr bool): BigInt {
- let data_pointer: RawPtr = buffer.backing_store;
-
- let b0: uint32 = LoadUint8(data_pointer, offset);
- let b1: uint32 = LoadUint8(data_pointer, offset + 1);
- let b2: uint32 = LoadUint8(data_pointer, offset + 2);
- let b3: uint32 = LoadUint8(data_pointer, offset + 3);
- let b4: uint32 = LoadUint8(data_pointer, offset + 4);
- let b5: uint32 = LoadUint8(data_pointer, offset + 5);
- let b6: uint32 = LoadUint8(data_pointer, offset + 6);
- let b7: uint32 = LoadUint8(data_pointer, offset + 7);
- let low_word: uint32;
- let high_word: uint32;
-
- if (requested_little_endian) {
- low_word = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- high_word = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
+ macro LoadDataViewBigInt(
+ buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
+ signed: constexpr bool): BigInt {
+ let dataPointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = LoadUint8(dataPointer, offset);
+ let b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ let b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ let b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ let b4: uint32 = LoadUint8(dataPointer, offset + 4);
+ let b5: uint32 = LoadUint8(dataPointer, offset + 5);
+ let b6: uint32 = LoadUint8(dataPointer, offset + 6);
+ let b7: uint32 = LoadUint8(dataPointer, offset + 7);
+ let lowWord: uint32;
+ let highWord: uint32;
+
+ if (requestedLittleEndian) {
+ lowWord = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ highWord = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
} else {
- high_word = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
- low_word = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
+ highWord = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ lowWord = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
}
- return MakeBigInt(low_word, high_word, signed);
+ return MakeBigInt(lowWord, highWord, signed);
}
- extern macro ToSmiIndex(Object, Context): Smi labels RangeError;
+ extern macro ToSmiIndex(Object, Context): Smi
+ labels RangeError;
extern macro DataViewElementSize(constexpr ElementsKind): constexpr int31;
- macro DataViewGet(context: Context,
- receiver: Object,
- offset: Object,
- requested_little_endian: Object,
- kind: constexpr ElementsKind): Numeric {
-
- let data_view: JSDataView = ValidateDataView(
- context, receiver, MakeDataViewGetterNameString(kind));
+ macro DataViewGet(
+ context: Context, receiver: Object, offset: Object,
+ requestedLittleEndian: Object, kind: constexpr ElementsKind): Numeric {
+ let dataView: JSDataView =
+ ValidateDataView(context, receiver, MakeDataViewGetterNameString(kind));
let getIndex: Number;
try {
@@ -406,28 +401,26 @@ module data_view {
ThrowRangeError(context, kInvalidDataViewAccessorOffset);
}
- let littleEndian: bool = ToBoolean(requested_little_endian);
- let buffer: JSArrayBuffer = data_view.buffer;
+ let littleEndian: bool = ToBoolean(requestedLittleEndian);
+ let buffer: JSArrayBuffer = dataView.buffer;
if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(context, kDetachedOperation,
- MakeDataViewGetterNameString(kind));
+ ThrowTypeError(
+ context, kDetachedOperation, MakeDataViewGetterNameString(kind));
}
- let viewOffset: Number = data_view.byte_offset;
- let viewSize: Number = data_view.byte_length;
- let elementSize: Number = DataViewElementSize(kind);
+ let getIndexFloat: float64 = Convert<float64>(getIndex);
+ let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
- if (getIndex + elementSize > viewSize) {
+ let viewOffsetWord: uintptr = dataView.byte_offset;
+ let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
+ let elementSizeFloat: float64 = Convert<float64>(DataViewElementSize(kind));
+
+ if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
ThrowRangeError(context, kInvalidDataViewAccessorOffset);
}
- let getIndexFloat: float64 = convert<float64>(getIndex);
- let getIndexIntptr: intptr = Signed(convert<uintptr>(getIndexFloat));
- let viewOffsetFloat: float64 = convert<float64>(viewOffset);
- let viewOffsetIntptr: intptr = Signed(convert<uintptr>(viewOffsetFloat));
-
- let bufferIndex: intptr = getIndexIntptr + viewOffsetIntptr;
+ let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
if constexpr (kind == UINT8_ELEMENTS) {
return LoadDataView8(buffer, bufferIndex, false);
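
The bounds check above runs in float64 on purpose: getIndex is an arbitrary JS Number, and truncating it to a machine word before the comparison could wrap a huge index back into range. Every valid offset is exactly representable as a double, so comparing in float64 first is safe, and the uintptr conversion only matters once the check has passed. A toy demonstration of the wrap-around hazard (sizes are made up):

#include <cstdint>
#include <iostream>

int main() {
  double get_index = 4294967296.0;  // 2^32: far outside any real view
  double view_size = 16.0;
  double element_size = 4.0;

  // Correct order: compare as doubles first.
  std::cout << (get_index + element_size > view_size) << "\n";  // 1: rejected

  // Hazard: truncating to a 32-bit word first wraps 2^32 to 0, and the
  // wrapped index would pass the check.
  uint32_t wrapped = static_cast<uint32_t>(static_cast<uint64_t>(get_index));
  std::cout << (wrapped + element_size > view_size) << "\n";    // 0: accepted
}
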
@@ -455,201 +448,173 @@ module data_view {
}
javascript builtin DataViewPrototypeGetUint8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- return DataViewGet(context, receiver, offset, Undefined, UINT8_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ return DataViewGet(context, receiver, offset, Undefined, UINT8_ELEMENTS);
+ }
javascript builtin DataViewPrototypeGetInt8(
context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
return DataViewGet(context, receiver, offset, Undefined, INT8_ELEMENTS);
}
javascript builtin DataViewPrototypeGetUint16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let is_little_endian : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- return DataViewGet(context, receiver, offset, is_little_endian,
- UINT16_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 1 ? arguments[1] : Undefined;
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, UINT16_ELEMENTS);
+ }
javascript builtin DataViewPrototypeGetInt16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let is_little_endian : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- return DataViewGet(context, receiver, offset, is_little_endian,
- INT16_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 1 ? arguments[1] : Undefined;
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, INT16_ELEMENTS);
+ }
javascript builtin DataViewPrototypeGetUint32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let is_little_endian : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- return DataViewGet(context, receiver, offset, is_little_endian,
- UINT32_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 1 ? arguments[1] : Undefined;
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, UINT32_ELEMENTS);
+ }
javascript builtin DataViewPrototypeGetInt32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let is_little_endian : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- return DataViewGet(context, receiver, offset, is_little_endian,
- INT32_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 1 ? arguments[1] : Undefined;
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, INT32_ELEMENTS);
+ }
javascript builtin DataViewPrototypeGetFloat32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let is_little_endian : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- return DataViewGet(context, receiver, offset, is_little_endian,
- FLOAT32_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 1 ? arguments[1] : Undefined;
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, FLOAT32_ELEMENTS);
+ }
javascript builtin DataViewPrototypeGetFloat64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let is_little_endian : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- return DataViewGet(context, receiver, offset, is_little_endian,
- FLOAT64_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 1 ? arguments[1] : Undefined;
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, FLOAT64_ELEMENTS);
+ }
javascript builtin DataViewPrototypeGetBigUint64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let is_little_endian : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- return DataViewGet(context, receiver, offset, is_little_endian,
- BIGUINT64_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 1 ? arguments[1] : Undefined;
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, BIGUINT64_ELEMENTS);
+ }
javascript builtin DataViewPrototypeGetBigInt64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let is_little_endian : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- return DataViewGet(context, receiver, offset, is_little_endian,
- BIGINT64_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 1 ? arguments[1] : Undefined;
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, BIGINT64_ELEMENTS);
+ }
extern macro ToNumber(Context, Object): Number;
extern macro ToBigInt(Context, Object): BigInt;
extern macro TruncateFloat64ToFloat32(float64): float32;
extern macro TruncateFloat64ToWord32(float64): uint32;
- extern macro StoreWord8(RawPtr, intptr, uint32): void;
+ extern macro StoreWord8(RawPtr, uintptr, uint32): void;
- macro StoreDataView8(buffer: JSArrayBuffer, offset: intptr,
- value: uint32) {
+ macro StoreDataView8(buffer: JSArrayBuffer, offset: uintptr, value: uint32) {
StoreWord8(buffer.backing_store, offset, value & 0xFF);
}
- macro StoreDataView16(buffer: JSArrayBuffer, offset: intptr, value: uint32,
- requested_little_endian: bool) {
- let data_pointer: RawPtr = buffer.backing_store;
+ macro StoreDataView16(
+ buffer: JSArrayBuffer, offset: uintptr, value: uint32,
+ requestedLittleEndian: bool) {
+ let dataPointer: RawPtr = buffer.backing_store;
let b0: uint32 = value & 0xFF;
let b1: uint32 = (value >>> 8) & 0xFF;
- if (requested_little_endian) {
- StoreWord8(data_pointer, offset, b0);
- StoreWord8(data_pointer, offset + 1, b1);
+ if (requestedLittleEndian) {
+ StoreWord8(dataPointer, offset, b0);
+ StoreWord8(dataPointer, offset + 1, b1);
} else {
- StoreWord8(data_pointer, offset, b1);
- StoreWord8(data_pointer, offset + 1, b0);
+ StoreWord8(dataPointer, offset, b1);
+ StoreWord8(dataPointer, offset + 1, b0);
}
}
- macro StoreDataView32(buffer: JSArrayBuffer, offset: intptr, value: uint32,
- requested_little_endian: bool) {
- let data_pointer: RawPtr = buffer.backing_store;
+ macro StoreDataView32(
+ buffer: JSArrayBuffer, offset: uintptr, value: uint32,
+ requestedLittleEndian: bool) {
+ let dataPointer: RawPtr = buffer.backing_store;
let b0: uint32 = value & 0xFF;
let b1: uint32 = (value >>> 8) & 0xFF;
let b2: uint32 = (value >>> 16) & 0xFF;
- let b3: uint32 = value >>> 24; // We don't need to mask here.
+ let b3: uint32 = value >>> 24; // We don't need to mask here.
- if (requested_little_endian) {
- StoreWord8(data_pointer, offset, b0);
- StoreWord8(data_pointer, offset + 1, b1);
- StoreWord8(data_pointer, offset + 2, b2);
- StoreWord8(data_pointer, offset + 3, b3);
+ if (requestedLittleEndian) {
+ StoreWord8(dataPointer, offset, b0);
+ StoreWord8(dataPointer, offset + 1, b1);
+ StoreWord8(dataPointer, offset + 2, b2);
+ StoreWord8(dataPointer, offset + 3, b3);
} else {
- StoreWord8(data_pointer, offset, b3);
- StoreWord8(data_pointer, offset + 1, b2);
- StoreWord8(data_pointer, offset + 2, b1);
- StoreWord8(data_pointer, offset + 3, b0);
- }
- }
-
- macro StoreDataView64(buffer: JSArrayBuffer, offset: intptr,
- low_word: uint32, high_word: uint32,
- requested_little_endian: bool) {
- let data_pointer: RawPtr = buffer.backing_store;
-
- let b0: uint32 = low_word & 0xFF;
- let b1: uint32 = (low_word >>> 8) & 0xFF;
- let b2: uint32 = (low_word >>> 16) & 0xFF;
- let b3: uint32 = low_word >>> 24;
-
- let b4: uint32 = high_word & 0xFF;
- let b5: uint32 = (high_word >>> 8) & 0xFF;
- let b6: uint32 = (high_word >>> 16) & 0xFF;
- let b7: uint32 = high_word >>> 24;
-
-
- if (requested_little_endian) {
- StoreWord8(data_pointer, offset, b0);
- StoreWord8(data_pointer, offset + 1, b1);
- StoreWord8(data_pointer, offset + 2, b2);
- StoreWord8(data_pointer, offset + 3, b3);
- StoreWord8(data_pointer, offset + 4, b4);
- StoreWord8(data_pointer, offset + 5, b5);
- StoreWord8(data_pointer, offset + 6, b6);
- StoreWord8(data_pointer, offset + 7, b7);
+ StoreWord8(dataPointer, offset, b3);
+ StoreWord8(dataPointer, offset + 1, b2);
+ StoreWord8(dataPointer, offset + 2, b1);
+ StoreWord8(dataPointer, offset + 3, b0);
+ }
+ }
+
+ macro StoreDataView64(
+ buffer: JSArrayBuffer, offset: uintptr, lowWord: uint32, highWord: uint32,
+ requestedLittleEndian: bool) {
+ let dataPointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = lowWord & 0xFF;
+ let b1: uint32 = (lowWord >>> 8) & 0xFF;
+ let b2: uint32 = (lowWord >>> 16) & 0xFF;
+ let b3: uint32 = lowWord >>> 24;
+
+ let b4: uint32 = highWord & 0xFF;
+ let b5: uint32 = (highWord >>> 8) & 0xFF;
+ let b6: uint32 = (highWord >>> 16) & 0xFF;
+ let b7: uint32 = highWord >>> 24;
+
+ if (requestedLittleEndian) {
+ StoreWord8(dataPointer, offset, b0);
+ StoreWord8(dataPointer, offset + 1, b1);
+ StoreWord8(dataPointer, offset + 2, b2);
+ StoreWord8(dataPointer, offset + 3, b3);
+ StoreWord8(dataPointer, offset + 4, b4);
+ StoreWord8(dataPointer, offset + 5, b5);
+ StoreWord8(dataPointer, offset + 6, b6);
+ StoreWord8(dataPointer, offset + 7, b7);
} else {
- StoreWord8(data_pointer, offset, b7);
- StoreWord8(data_pointer, offset + 1, b6);
- StoreWord8(data_pointer, offset + 2, b5);
- StoreWord8(data_pointer, offset + 3, b4);
- StoreWord8(data_pointer, offset + 4, b3);
- StoreWord8(data_pointer, offset + 5, b2);
- StoreWord8(data_pointer, offset + 6, b1);
- StoreWord8(data_pointer, offset + 7, b0);
+ StoreWord8(dataPointer, offset, b7);
+ StoreWord8(dataPointer, offset + 1, b6);
+ StoreWord8(dataPointer, offset + 2, b5);
+ StoreWord8(dataPointer, offset + 3, b4);
+ StoreWord8(dataPointer, offset + 4, b3);
+ StoreWord8(dataPointer, offset + 5, b2);
+ StoreWord8(dataPointer, offset + 6, b1);
+ StoreWord8(dataPointer, offset + 7, b0);
}
}
@@ -660,55 +625,48 @@ module data_view {
// We might get here a BigInt that is bigger than 64 bits, but we're only
// interested in the 64 lowest ones. This means the lowest BigInt digit
// on 64-bit platforms, and the 2 lowest BigInt digits on 32-bit ones.
- macro StoreDataViewBigInt(buffer: JSArrayBuffer, offset: intptr,
- bigint_value: BigInt,
- requested_little_endian: bool) {
-
- let length: uintptr = DataViewDecodeBigIntLength(bigint_value);
- let sign: uintptr = DataViewDecodeBigIntSign(bigint_value);
+ macro StoreDataViewBigInt(
+ buffer: JSArrayBuffer, offset: uintptr, bigIntValue: BigInt,
+ requestedLittleEndian: bool) {
+ let length: uintptr = DataViewDecodeBigIntLength(bigIntValue);
+ let sign: uintptr = DataViewDecodeBigIntSign(bigIntValue);
// The 32-bit words that will hold the BigInt's value in
// two's complement representation.
- let low_word: uint32 = 0;
- let high_word: uint32 = 0;
+ let lowWord: uint32 = 0;
+ let highWord: uint32 = 0;
// The length is nonzero if and only if the BigInt's value is nonzero.
if (length != 0) {
if constexpr (Is64()) {
// There is always exactly 1 BigInt digit to load in this case.
- let value: uintptr = LoadBigIntDigit(bigint_value, 0);
- low_word = convert<uint32>(value); // Truncates value to 32 bits.
- high_word = convert<uint32>(value >>> 32);
- }
- else { // There might be either 1 or 2 BigInt digits we need to load.
- low_word = convert<uint32>(LoadBigIntDigit(bigint_value, 0));
- if (length >= 2) { // Only load the second digit if there is one.
- high_word = convert<uint32>(LoadBigIntDigit(bigint_value, 1));
+ let value: uintptr = LoadBigIntDigit(bigIntValue, 0);
+ lowWord = Convert<uint32>(value); // Truncates value to 32 bits.
+ highWord = Convert<uint32>(value >>> 32);
+ } else { // There might be either 1 or 2 BigInt digits we need to load.
+ lowWord = Convert<uint32>(LoadBigIntDigit(bigIntValue, 0));
+ if (length >= 2) { // Only load the second digit if there is one.
+ highWord = Convert<uint32>(LoadBigIntDigit(bigIntValue, 1));
}
}
}
- if (sign != 0) { // The number is negative, convert it.
- high_word = Unsigned(0 - Signed(high_word));
- if (low_word != 0) {
- high_word = Unsigned(Signed(high_word) - 1);
+ if (sign != 0) { // The number is negative, convert it.
+ highWord = Unsigned(0 - Signed(highWord));
+ if (lowWord != 0) {
+ highWord = Unsigned(Signed(highWord) - 1);
}
- low_word = Unsigned(0 - Signed(low_word));
+ lowWord = Unsigned(0 - Signed(lowWord));
}
- StoreDataView64(buffer, offset, low_word, high_word,
- requested_little_endian);
+ StoreDataView64(buffer, offset, lowWord, highWord, requestedLittleEndian);
}
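
The sign handling above folds a 64-bit negation into two 32-bit word operations: negate the high word, borrow one if the low word is nonzero, then negate the low word. A minimal C++ sketch of the same borrow logic (illustrative only, not V8 code):

#include <cstdint>
#include <cstdio>

// Negate a 64-bit magnitude held as two 32-bit words, mirroring the
// Unsigned(0 - Signed(...)) sequence in StoreDataViewBigInt above.
void NegateWords(uint32_t* low, uint32_t* high) {
  *high = 0u - *high;
  if (*low != 0) *high -= 1;  // borrow from the high word
  *low = 0u - *low;           // two's complement of the low word
}

int main() {
  uint32_t low = 1, high = 0;  // magnitude 1
  NegateWords(&low, &high);    // expect ffffffff ffffffff, i.e. -1
  std::printf("%08x %08x\n", high, low);
  return 0;
}
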
- macro DataViewSet(context: Context,
- receiver: Object,
- offset: Object,
- value: Object,
- requested_little_endian: Object,
- kind: constexpr ElementsKind): Object {
-
- let data_view: JSDataView = ValidateDataView(
- context, receiver, MakeDataViewSetterNameString(kind));
+ macro DataViewSet(
+ context: Context, receiver: Object, offset: Object, value: Object,
+ requestedLittleEndian: Object, kind: constexpr ElementsKind): Object {
+ let dataView: JSDataView =
+ ValidateDataView(context, receiver, MakeDataViewSetterNameString(kind));
let getIndex: Number;
try {
@@ -718,215 +676,160 @@ module data_view {
ThrowRangeError(context, kInvalidDataViewAccessorOffset);
}
- let littleEndian: bool = ToBoolean(requested_little_endian);
- let buffer: JSArrayBuffer = data_view.buffer;
+ let littleEndian: bool = ToBoolean(requestedLittleEndian);
+ let buffer: JSArrayBuffer = dataView.buffer;
- let bigint_value: BigInt;
- let num_value: Number;
+ let bigIntValue: BigInt;
+ let numValue: Number;
// According to ES6 section 24.2.1.2 SetViewValue, we must perform
// the conversion before doing the bounds check.
if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) {
- bigint_value = ToBigInt(context, value);
+ bigIntValue = ToBigInt(context, value);
} else {
- num_value = ToNumber(context, value);
+ numValue = ToNumber(context, value);
}
if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(context, kDetachedOperation,
- MakeDataViewSetterNameString(kind));
+ ThrowTypeError(
+ context, kDetachedOperation, MakeDataViewSetterNameString(kind));
}
- let viewOffset: Number = data_view.byte_offset;
- let viewSize: Number = data_view.byte_length;
- let elementSize: Number = DataViewElementSize(kind);
+ let getIndexFloat: float64 = Convert<float64>(getIndex);
+ let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
+
+ let viewOffsetWord: uintptr = dataView.byte_offset;
+ let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
+ let elementSizeFloat: float64 = Convert<float64>(DataViewElementSize(kind));
- if (getIndex + elementSize > viewSize) {
+ if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
ThrowRangeError(context, kInvalidDataViewAccessorOffset);
}
- let getIndexFloat: float64 = convert<float64>(getIndex);
- let getIndexIntptr: intptr = Signed(convert<uintptr>(getIndexFloat));
- let viewOffsetFloat: float64 = convert<float64>(viewOffset);
- let viewOffsetIntptr: intptr = Signed(convert<uintptr>(viewOffsetFloat));
-
- let bufferIndex: intptr = getIndexIntptr + viewOffsetIntptr;
+ let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) {
- StoreDataViewBigInt(buffer, bufferIndex, bigint_value,
- littleEndian);
- }
- else {
- let double_value: float64 = ChangeNumberToFloat64(num_value);
+ StoreDataViewBigInt(buffer, bufferIndex, bigIntValue, littleEndian);
+ } else {
+ let doubleValue: float64 = ChangeNumberToFloat64(numValue);
if constexpr (kind == UINT8_ELEMENTS || kind == INT8_ELEMENTS) {
- StoreDataView8(buffer, bufferIndex,
- TruncateFloat64ToWord32(double_value));
- }
- else if constexpr (kind == UINT16_ELEMENTS || kind == INT16_ELEMENTS) {
- StoreDataView16(buffer, bufferIndex,
- TruncateFloat64ToWord32(double_value), littleEndian);
- }
- else if constexpr (kind == UINT32_ELEMENTS || kind == INT32_ELEMENTS) {
- StoreDataView32(buffer, bufferIndex,
- TruncateFloat64ToWord32(double_value), littleEndian);
- }
- else if constexpr (kind == FLOAT32_ELEMENTS) {
- let float_value: float32 = TruncateFloat64ToFloat32(double_value);
- StoreDataView32(buffer, bufferIndex,
- BitcastFloat32ToInt32(float_value), littleEndian);
- }
- else if constexpr (kind == FLOAT64_ELEMENTS) {
- let low_word: uint32 = Float64ExtractLowWord32(double_value);
- let high_word: uint32 = Float64ExtractHighWord32(double_value);
- StoreDataView64(buffer, bufferIndex, low_word, high_word,
- littleEndian);
+ StoreDataView8(
+ buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue));
+ } else if constexpr (kind == UINT16_ELEMENTS || kind == INT16_ELEMENTS) {
+ StoreDataView16(
+ buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue),
+ littleEndian);
+ } else if constexpr (kind == UINT32_ELEMENTS || kind == INT32_ELEMENTS) {
+ StoreDataView32(
+ buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue),
+ littleEndian);
+ } else if constexpr (kind == FLOAT32_ELEMENTS) {
+ let floatValue: float32 = TruncateFloat64ToFloat32(doubleValue);
+ StoreDataView32(
+ buffer, bufferIndex, BitcastFloat32ToInt32(floatValue),
+ littleEndian);
+ } else if constexpr (kind == FLOAT64_ELEMENTS) {
+ let lowWord: uint32 = Float64ExtractLowWord32(doubleValue);
+ let highWord: uint32 = Float64ExtractHighWord32(doubleValue);
+ StoreDataView64(buffer, bufferIndex, lowWord, highWord, littleEndian);
}
}
return Undefined;
}
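
As the comment above notes, ES6 section 24.2.1.2 requires the ToNumber/ToBigInt conversion to run before the bounds check, so a throwing conversion is observed even when the offset is also out of range. A rough C++ model of that ordering (hypothetical, not V8 code):

#include <stdexcept>

// Observable ordering in DataViewSet: convert first, bounds-check second.
// If the conversion throws, the RangeError path is never reached.
void SetViewValue(double index, double view_size, double element_size,
                  bool conversion_throws) {
  if (conversion_throws) throw std::runtime_error("ToNumber/ToBigInt threw");
  if (index + element_size > view_size)
    throw std::out_of_range("invalid DataView accessor offset");
  // ... perform the byte-wise store ...
}
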
javascript builtin DataViewPrototypeSetUint8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let value : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- return DataViewSet(context, receiver, offset, value, Undefined,
- UINT8_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ return DataViewSet(
+ context, receiver, offset, value, Undefined, UINT8_ELEMENTS);
+ }
javascript builtin DataViewPrototypeSetInt8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let value : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- return DataViewSet(context, receiver, offset, value, Undefined,
- INT8_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ return DataViewSet(
+ context, receiver, offset, value, Undefined, INT8_ELEMENTS);
+ }
javascript builtin DataViewPrototypeSetUint16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let value : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- let is_little_endian : Object = arguments.length > 2 ?
- arguments[2] :
- Undefined;
- return DataViewSet(context, receiver, offset, value,
- is_little_endian, UINT16_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 2 ? arguments[2] : Undefined;
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian, UINT16_ELEMENTS);
+ }
javascript builtin DataViewPrototypeSetInt16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let value : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- let is_little_endian : Object = arguments.length > 2 ?
- arguments[2] :
- Undefined;
- return DataViewSet(context, receiver, offset, value,
- is_little_endian, INT16_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 2 ? arguments[2] : Undefined;
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian, INT16_ELEMENTS);
+ }
javascript builtin DataViewPrototypeSetUint32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let value : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- let is_little_endian : Object = arguments.length > 2 ?
- arguments[2] :
- Undefined;
- return DataViewSet(context, receiver, offset, value,
- is_little_endian, UINT32_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 2 ? arguments[2] : Undefined;
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian, UINT32_ELEMENTS);
+ }
javascript builtin DataViewPrototypeSetInt32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let value : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- let is_little_endian : Object = arguments.length > 2 ?
- arguments[2] :
- Undefined;
- return DataViewSet(context, receiver, offset, value,
- is_little_endian, INT32_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 2 ? arguments[2] : Undefined;
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian, INT32_ELEMENTS);
+ }
javascript builtin DataViewPrototypeSetFloat32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let value : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- let is_little_endian : Object = arguments.length > 2 ?
- arguments[2] :
- Undefined;
- return DataViewSet(context, receiver, offset, value,
- is_little_endian, FLOAT32_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 2 ? arguments[2] : Undefined;
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian, FLOAT32_ELEMENTS);
+ }
javascript builtin DataViewPrototypeSetFloat64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let value : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- let is_little_endian : Object = arguments.length > 2 ?
- arguments[2] :
- Undefined;
- return DataViewSet(context, receiver, offset, value,
- is_little_endian, FLOAT64_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 2 ? arguments[2] : Undefined;
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian, FLOAT64_ELEMENTS);
+ }
javascript builtin DataViewPrototypeSetBigUint64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let value : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- let is_little_endian : Object = arguments.length > 2 ?
- arguments[2] :
- Undefined;
- return DataViewSet(context, receiver, offset, value,
- is_little_endian, BIGUINT64_ELEMENTS);
- }
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 2 ? arguments[2] : Undefined;
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian, BIGUINT64_ELEMENTS);
+ }
javascript builtin DataViewPrototypeSetBigInt64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- let value : Object = arguments.length > 1 ?
- arguments[1] :
- Undefined;
- let is_little_endian : Object = arguments.length > 2 ?
- arguments[2] :
- Undefined;
- return DataViewSet(context, receiver, offset, value,
- is_little_endian, BIGINT64_ELEMENTS);
- }
-
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ let isLittleEndian: Object =
+ arguments.length > 2 ? arguments[2] : Undefined;
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian, BIGINT64_ELEMENTS);
+ }
}
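
For reference, the byte-order decomposition that the StoreDataView* macros perform with StoreWord8 can be sketched in a few lines of C++ (hypothetical helper, not part of this patch):

#include <cstdint>

// Store a 32-bit value one byte at a time, honoring the requested order,
// just as StoreDataView32 splits the value into b0..b3.
void StoreUint32(uint8_t* p, uint32_t value, bool little_endian) {
  const uint8_t b[4] = {
      static_cast<uint8_t>(value),          // bits 0..7
      static_cast<uint8_t>(value >> 8),     // bits 8..15
      static_cast<uint8_t>(value >> 16),    // bits 16..23
      static_cast<uint8_t>(value >> 24)};   // bits 24..31
  for (int i = 0; i < 4; ++i) p[i] = little_endian ? b[i] : b[3 - i];
}
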
diff --git a/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
new file mode 100644
index 0000000000..8266807b43
--- /dev/null
+++ b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
@@ -0,0 +1,97 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fstream>
+#include <iostream>
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+void WriteBytecode(std::ofstream& out, Bytecode bytecode,
+ OperandScale operand_scale, int* count, int offset_table[],
+ int table_index) {
+ DCHECK_NOT_NULL(count);
+ if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
+ out << " \\\n V(" << Bytecodes::ToString(bytecode, operand_scale, "")
+ << "Handler, interpreter::OperandScale::k" << operand_scale
+ << ", interpreter::Bytecode::k" << Bytecodes::ToString(bytecode) << ")";
+ offset_table[table_index] = *count;
+ (*count)++;
+ } else {
+ offset_table[table_index] = -1;
+ }
+}
+
+void WriteHeader(const char* header_filename) {
+ std::ofstream out(header_filename);
+
+ out << "// Automatically generated from interpreter/bytecodes.h\n"
+ << "// The following list macro is used to populate the builtins list\n"
+ << "// with the bytecode handlers\n\n"
+ << "#ifndef V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n"
+ << "#define V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n\n"
+ << "namespace v8 {\n"
+ << "namespace internal {\n\n"
+ << "#define BUILTIN_LIST_BYTECODE_HANDLERS(V)";
+
+ constexpr int kTableSize =
+ BytecodeOperands::kOperandScaleCount * Bytecodes::kBytecodeCount;
+ int offset_table[kTableSize];
+ int count = 0;
+ int index = 0;
+
+#define ADD_BYTECODES(Name, ...) \
+ WriteBytecode(out, Bytecode::k##Name, operand_scale, &count, offset_table, \
+ index++);
+ OperandScale operand_scale = OperandScale::kSingle;
+ BYTECODE_LIST(ADD_BYTECODES)
+ int single_count = count;
+ operand_scale = OperandScale::kDouble;
+ BYTECODE_LIST(ADD_BYTECODES)
+ int wide_count = count - single_count;
+ operand_scale = OperandScale::kQuadruple;
+ BYTECODE_LIST(ADD_BYTECODES)
+#undef ADD_BYTECODES
+ int extra_wide_count = count - wide_count - single_count;
+ CHECK_GT(single_count, wide_count);
+ CHECK_EQ(single_count, Bytecodes::kBytecodeCount);
+ CHECK_EQ(wide_count, extra_wide_count);
+ out << "\n\nconst int kNumberOfBytecodeHandlers = " << single_count << ";\n"
+ << "const int kNumberOfWideBytecodeHandlers = " << wide_count << ";\n\n"
+ << "// Mapping from (Bytecode + OperandScaleAsIndex * |Bytecodes|) to\n"
+ << "// a dense form with all the illegal Bytecode/OperandScale\n"
+ << "// combinations removed. Used to index into the builtins table.\n"
+ << "constexpr int kBytecodeToBuiltinsMapping[" << kTableSize << "] = {\n"
+ << " ";
+
+ for (int i = 0; i < kTableSize; ++i) {
+ if (i == single_count || i == 2 * single_count) {
+ out << "\n ";
+ }
+ out << offset_table[i] << ", ";
+ }
+
+ out << "};\n\n"
+ << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "#endif // V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n";
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+int main(int argc, const char* argv[]) {
+ if (argc != 2) {
+ std::cerr << "Usage: " << argv[0] << " <output filename>\n";
+ std::exit(1);
+ }
+
+ v8::internal::interpreter::WriteHeader(argv[1]);
+
+ return 0;
+}
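
The offset_table written by this generator is a dense index: entry (bytecode + scale_index * kBytecodeCount) holds either the next consecutive builtin slot or -1 when that Bytecode/OperandScale pair has no handler. The construction, sketched in plain C++ (illustrative, not the generator itself):

#include <cstddef>
#include <vector>

// Every combination that has a handler gets the next builtin index, in
// emission order; illegal combinations map to -1, as in WriteBytecode.
std::vector<int> BuildMapping(const std::vector<bool>& has_handler) {
  std::vector<int> mapping(has_handler.size());
  int next_builtin = 0;
  for (std::size_t i = 0; i < has_handler.size(); ++i) {
    mapping[i] = has_handler[i] ? next_builtin++ : -1;
  }
  return mapping;
}
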
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 4707667bbf..063daed5e3 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -22,6 +22,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
__ mov(kJavaScriptCallExtraArg1Register,
Immediate(ExternalReference::Create(address)));
if (exit_frame_type == BUILTIN_EXIT) {
@@ -70,6 +71,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
@@ -88,10 +90,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiUntag(eax);
// The receiver for the builtin/api call.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+ // Set up pointer to last argument. We are using esi as a scratch register.
+ __ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
@@ -100,7 +102,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- eax: number of arguments (untagged)
// -- edi: constructor function
// -- edx: new target
- // -- ebx: pointer to last argument
+ // -- esi: pointer to last argument
// -- ecx: counter
// -- sp[0*kPointerSize]: the hole (receiver)
// -- sp[1*kPointerSize]: number of arguments (tagged)
@@ -108,7 +110,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
__ jmp(&entry);
__ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
+ __ push(Operand(esi, ecx, times_4, 0));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
@@ -118,27 +120,54 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// edi: constructor function
// edx: new target
ParameterCount actual(eax);
+ // Reload context from the frame.
+ __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
__ InvokeFunction(edi, edx, actual, CALL_FUNCTION);
// Restore context from the frame.
__ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ mov(ebx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
+ __ mov(edx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, edx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ PushReturnAddressFrom(ecx);
__ ret(0);
}
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch, Label* stack_overflow,
+ bool include_receiver = false) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ // Compute the space that is left as a negative number in scratch. If
+ // we already overflowed, this will be a positive number.
+ __ mov(scratch, __ StaticVariable(real_stack_limit));
+ __ sub(scratch, esp);
+ // Add the size of the arguments.
+ static_assert(kPointerSize == 4,
+ "The next instruction assumes kPointerSize == 4");
+ __ lea(scratch, Operand(scratch, num_args, times_4, 0));
+ if (include_receiver) {
+ __ add(scratch, Immediate(kPointerSize));
+ }
+ // See if we overflowed, i.e. scratch is positive.
+ __ cmp(scratch, Immediate(0));
+ __ j(greater, stack_overflow); // Signed comparison.
+}
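
The rewritten check computes the remaining stack space as a negative number so that a single lea can add the argument size; the sign of the result then decides overflow. The same arithmetic as a C++ model (assuming kPointerSize == 4 on ia32, per the static_assert above):

#include <cstdint>

// limit - esp is <= 0 while there is room; adding the argument bytes makes
// it positive exactly when pushing the arguments would cross the limit.
bool WouldOverflow(uintptr_t esp, uintptr_t real_stack_limit, int num_args,
                   bool include_receiver) {
  intptr_t space = static_cast<intptr_t>(real_stack_limit - esp);
  space += static_cast<intptr_t>(num_args) * 4;  // kPointerSize == 4
  if (include_receiver) space += 4;
  return space > 0;  // signed comparison, as in the emitted j(greater)
}
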
+
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax: number of arguments (untagged)
// -- edi: constructor function
@@ -158,7 +187,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(esi);
__ Push(ecx);
__ Push(edi);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ Push(edx);
// ----------- S t a t e -------------
@@ -169,8 +198,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kPointerSize]: context
// -----------------------------------
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(ebx, SharedFunctionInfo::kFlagsOffset),
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test(FieldOperand(eax, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver);
@@ -182,7 +211,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Else: use TheHoleValue as receiver for constructor call
__ bind(&not_create_implicit_receiver);
- __ LoadRoot(eax, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(eax, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
// -- eax: implicit receiver
@@ -216,13 +245,27 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[5*kPointerSize]: context
// -----------------------------------
- // Restore constructor function and argument count.
- __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
+ // Restore argument count.
__ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(eax);
// Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+ __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+ // Check if we have enough stack space to push all arguments.
+ // Argument count in eax. Clobbers ecx.
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, eax, ecx, &stack_overflow);
+ __ jmp(&enough_stack_space);
+
+ __ bind(&stack_overflow);
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
+
+ __ bind(&enough_stack_space);
// Copy arguments and receiver to the expression stack.
Label loop, entry;
@@ -230,23 +273,24 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: number of arguments (untagged)
// -- edx: new target
- // -- ebx: pointer to last argument
+ // -- edi: pointer to last argument
// -- ecx: counter (tagged)
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
// -- sp[2*kPointerSize]: padding
- // -- edi and sp[3*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: constructor function
// -- sp[4*kPointerSize]: number of arguments (tagged)
// -- sp[5*kPointerSize]: context
// -----------------------------------
__ jmp(&entry, Label::kNear);
__ bind(&loop);
- __ Push(Operand(ebx, ecx, times_pointer_size, 0));
+ __ Push(Operand(edi, ecx, times_pointer_size, 0));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
- // Call the function.
+ // Restore and call the constructor function.
+ __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
ParameterCount actual(eax);
__ InvokeFunction(edi, edx, actual, CALL_FUNCTION);
@@ -272,8 +316,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label use_receiver, do_throw, leave_frame;
// If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &use_receiver,
- Label::kNear);
+ __ JumpIfRoot(eax, RootIndex::kUndefinedValue, &use_receiver, Label::kNear);
// Otherwise we do a smi check and fall through to check if the return value
// is a valid receiver.
@@ -295,57 +338,33 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// on-stack receiver as the result.
__ bind(&use_receiver);
__ mov(eax, Operand(esp, 0 * kPointerSize));
- __ JumpIfRoot(eax, Heap::kTheHoleValueRootIndex, &do_throw);
+ __ JumpIfRoot(eax, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_frame);
// Restore smi-tagged arguments count from the frame.
- __ mov(ebx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
+ __ mov(edx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ lea(esp, Operand(esp, edx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
__ ret(0);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
Generate_JSBuiltinsConstructStubHelper(masm);
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(edi);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow,
- bool include_receiver = false) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(scratch1, __ StaticVariable(real_stack_limit));
- // Make scratch2 the space we have left. The stack might already be overflowed
- // here which will cause scratch2 to become negative.
- __ mov(scratch2, esp);
- __ sub(scratch2, scratch1);
- // Make scratch1 the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(scratch1, num_args);
- if (include_receiver) {
- __ add(scratch1, Immediate(1));
- }
- __ shl(scratch1, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch2, scratch1);
- __ j(less_equal, stack_overflow); // Signed comparison.
-}
-
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -353,26 +372,29 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ const Register scratch1 = edx;
+ const Register scratch2 = edi;
+
// Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
__ mov(esi, __ StaticVariable(context_address));
- // Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
+ // Load the previous frame pointer (edx) to access C arguments
+ __ mov(scratch1, Operand(ebp, 0));
// Push the function and the receiver onto the stack.
- __ push(Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
- __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+ __ push(Operand(scratch1, EntryFrameConstants::kFunctionArgOffset));
+ __ push(Operand(scratch1, EntryFrameConstants::kReceiverArgOffset));
// Load the number of arguments and setup pointer to the arguments.
- __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
- __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+ __ mov(eax, Operand(scratch1, EntryFrameConstants::kArgcOffset));
+ __ mov(scratch1, Operand(scratch1, EntryFrameConstants::kArgvOffset));
// Check if we have enough stack space to push all arguments.
- // Argument count in eax. Clobbers ecx and edx.
+ // Argument count in eax. Clobbers ecx.
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, eax, ecx, edx, &stack_overflow);
+ Generate_StackOverflowCheck(masm, eax, ecx, &stack_overflow);
__ jmp(&enough_stack_space);
__ bind(&stack_overflow);
@@ -387,19 +409,20 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Move(ecx, Immediate(0));
__ jmp(&entry, Label::kNear);
__ bind(&loop);
- __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
- __ push(Operand(edx, 0)); // dereference handle
+ // Push the parameter from argv.
+ __ mov(scratch2, Operand(scratch1, ecx, times_4, 0));
+ __ push(Operand(scratch2, 0)); // dereference handle
__ inc(ecx);
__ bind(&entry);
__ cmp(ecx, eax);
__ j(not_equal, &loop);
// Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
+ __ mov(scratch2, Operand(ebp, 0));
// Get the new.target and function from the frame.
- __ mov(edx, Operand(ebx, EntryFrameConstants::kNewTargetArgOffset));
- __ mov(edi, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+ __ mov(edx, Operand(scratch2, EntryFrameConstants::kNewTargetArgOffset));
+ __ mov(edi, Operand(scratch2, EntryFrameConstants::kFunctionArgOffset));
// Invoke the code.
Handle<Code> builtin = is_construct
@@ -415,10 +438,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
Generate_JSEntryTrampolineHelper(masm, false);
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
Generate_JSEntryTrampolineHelper(masm, true);
}
@@ -437,6 +462,8 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// ----------- S t a t e -------------
// -- eax : the value to pass to the generator
// -- edx : the JSGeneratorObject to resume
@@ -471,7 +498,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(esp, ecx, RootIndex::kRealStackLimit);
__ j(below, &stack_overflow);
// Pop return address.
@@ -488,26 +515,34 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- esp[0] : generator receiver
// -----------------------------------
- // Copy the function arguments from the generator object's register file.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ movzx_w(
- ecx, FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(ebx,
- FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
{
- Label done_loop, loop;
- __ Set(edi, 0);
+ Assembler::AllowExplicitEbxAccessScope root_is_spilled(masm);
+ __ movd(xmm0, ebx);
- __ bind(&loop);
- __ cmp(edi, ecx);
- __ j(greater_equal, &done_loop);
- __ Push(
- FieldOperand(ebx, edi, times_pointer_size, FixedArray::kHeaderSize));
- __ add(edi, Immediate(1));
- __ jmp(&loop);
+ // Copy the function arguments from the generator object's register file.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ movzx_w(ecx, FieldOperand(
+ ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(ebx,
+ FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
+ {
+ Label done_loop, loop;
+ __ Set(edi, 0);
- __ bind(&done_loop);
+ __ bind(&loop);
+ __ cmp(edi, ecx);
+ __ j(greater_equal, &done_loop);
+ __ Push(
+ FieldOperand(ebx, edi, times_pointer_size, FixedArray::kHeaderSize));
+ __ add(edi, Immediate(1));
+ __ jmp(&loop);
+
+ __ bind(&done_loop);
+ }
+
+ // Restore registers.
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
+ __ movd(ebx, xmm0);
}
// Underlying function needs to have bytecode available.
@@ -542,7 +577,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(edx);
__ Push(edi);
// Push hole as receiver since we do not use it for stepping.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(edx);
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
@@ -567,10 +602,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
}
}
-static void ReplaceClosureCodeWithOptimizedCode(
- MacroAssembler* masm, Register optimized_code, Register closure,
- Register scratch1, Register scratch2, Register scratch3) {
-
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure,
+ Register scratch1,
+ Register scratch2) {
// Store the optimized code in the closure.
__ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
@@ -611,21 +647,25 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
- Register feedback_vector,
Register scratch) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee if needed, and caller)
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
- // -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, eax, edx, edi, scratch));
+ DCHECK(!AreAliased(eax, edx, edi, scratch));
Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = edi;
- Register optimized_code_entry = scratch;
+ // Load the feedback vector from the closure.
+ Register feedback_vector = scratch;
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ // Load the optimized code from the feedback vector and re-use the register.
+ Register optimized_code_entry = scratch;
__ mov(optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
@@ -686,10 +726,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
- // The feedback vector is no longer used, so re-use it as a scratch
- // register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
- edx, eax, feedback_vector);
+ edx, eax);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ Move(ecx, optimized_code_entry);
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -716,15 +754,19 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
- Register bytecode, Register scratch1,
+ Register scratch1, Register scratch2,
Label* if_return) {
Register bytecode_size_table = scratch1;
+ Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
-
__ Move(bytecode_size_table,
Immediate(ExternalReference::bytecode_size_table_address()));
+ // Load the current bytecode.
+ __ movzx_b(bytecode, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
@@ -732,7 +774,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
STATIC_ASSERT(3 ==
static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
- __ cmpb(bytecode, Immediate(0x3));
+ __ cmp(bytecode, Immediate(0x3));
__ j(above, &process_bytecode, Label::kNear);
__ test(bytecode, Immediate(0x1));
__ j(not_equal, &extra_wide, Label::kNear);
@@ -754,9 +796,9 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&process_bytecode);
// Bailout to the return label if this is a return bytecode.
-#define JUMP_IF_EQUAL(NAME) \
- __ cmpb(bytecode, \
- Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmp(bytecode, \
+ Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
__ j(equal, if_return);
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
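
The prefix test above relies on Wide, ExtraWide, DebugBreakWide and DebugBreakExtraWide occupying bytecode values 0 through 3 (see the STATIC_ASSERTs), with bit 0 separating the Wide variants from the ExtraWide ones. A hypothetical C++ helper expressing the same dispatch:

// Values above 3 are ordinary bytecodes; among the prefixes, odd values
// are ExtraWide variants (quadruple scale), even values Wide (double).
enum class OperandScale { kSingle = 1, kDouble = 2, kQuadruple = 4 };

OperandScale ScaleForPrefix(unsigned bytecode) {
  if (bytecode > 3) return OperandScale::kSingle;
  return (bytecode & 1) ? OperandScale::kQuadruple : OperandScale::kDouble;
}
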
@@ -780,18 +822,23 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ __ VerifyRootRegister();
+
Register closure = edi;
- Register feedback_vector = ebx;
- // Load the feedback vector from the closure.
+ // Read off the optimized code slot in the closure's feedback vector, and if
+ // there is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, ecx);
+
+ // Load the feedback vector and increment the invocation count.
+ Register feedback_vector = ecx;
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
- // Read off the optimized code slot in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
+ __ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set
@@ -811,8 +858,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, eax);
__ Pop(eax);
- __ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
-
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
@@ -836,16 +881,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
- __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kFrameSizeOffset));
+ Register frame_size = ecx;
+ __ mov(frame_size, FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
Label ok;
- __ mov(ecx, esp);
- __ sub(ecx, ebx);
+ __ mov(eax, esp);
+ __ sub(eax, frame_size);
ExternalReference stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ cmp(ecx, __ StaticVariable(stack_limit));
+ __ cmp(eax, __ StaticVariable(stack_limit));
__ j(above_equal, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
@@ -860,7 +906,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(eax);
// Continue loop if not done.
__ bind(&loop_check);
- __ sub(ebx, Immediate(kPointerSize));
+ __ sub(frame_size, Immediate(kPointerSize));
__ j(greater_equal, &loop_header);
}
@@ -876,7 +922,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator and bytecode offset into registers.
- __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -887,11 +933,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Immediate(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(ecx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(
kJavaScriptCallCodeStartRegister,
- Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
+ Operand(kInterpreterDispatchTableRegister, ecx, times_pointer_size, 0));
+ __ VerifyRootRegister();
__ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -907,16 +954,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
- __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, ebx, ecx,
- &do_return);
+ kInterpreterBytecodeOffsetRegister, ecx,
+ kInterpreterDispatchTableRegister, &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
// The return value is in eax.
- LeaveInterpreterFrame(masm, ebx, ecx);
+ LeaveInterpreterFrame(masm, edx, ecx);
+ __ VerifyRootRegister();
__ ret(0);
}
@@ -924,6 +970,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register array_limit,
Register start_address) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- start_address : Pointer to the last argument in the args array.
// -- array_limit : Pointer to one before the first argument in the
@@ -943,62 +990,62 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- ebx : the address of the first argument to be pushed. Subsequent
+ // -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -- edi : the target to call (can be any Object).
// -----------------------------------
+
+ const Register scratch = edx;
+ const Register argv = ecx;
+
Label stack_overflow;
- // Compute the expected number of arguments.
- __ mov(ecx, eax);
- __ add(ecx, Immediate(1)); // Add one for receiver.
+ // Add a stack check before pushing the arguments.
+ Generate_StackOverflowCheck(masm, eax, scratch, &stack_overflow, true);
+
+ __ movd(xmm0, eax); // Spill number of arguments.
- // Add a stack check before pushing the arguments. We need an extra register
- // to perform a stack check. So push it onto the stack temporarily. This
- // might cause stack overflow, but it will be detected by the check.
- __ Push(edi);
- Generate_StackOverflowCheck(masm, ecx, edx, edi, &stack_overflow);
- __ Pop(edi);
+ // Compute the expected number of arguments.
+ __ mov(scratch, eax);
+ __ add(scratch, Immediate(1)); // Add one for receiver.
// Pop return address to allow tail-call after pushing arguments.
- __ Pop(edx);
+ __ PopReturnAddressTo(eax);
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- __ sub(ecx, Immediate(1)); // Subtract one for receiver.
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ sub(scratch, Immediate(1)); // Subtract one for receiver.
}
// Find the address of the last argument.
- __ shl(ecx, kPointerSizeLog2);
- __ neg(ecx);
- __ add(ecx, ebx);
- Generate_InterpreterPushArgs(masm, ecx, ebx);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(ebx); // Pass the spread in a register
- __ sub(eax, Immediate(1)); // Subtract one for spread
- }
+ __ shl(scratch, kPointerSizeLog2);
+ __ neg(scratch);
+ __ add(scratch, argv);
+ Generate_InterpreterPushArgs(masm, scratch, argv);
// Call the target.
- __ Push(edx); // Re-push return address.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(ecx); // Pass the spread in a register
+ __ PushReturnAddressFrom(eax);
+ __ movd(eax, xmm0); // Restore number of arguments.
+ __ sub(eax, Immediate(1)); // Subtract one for spread
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
+ __ PushReturnAddressFrom(eax);
+ __ movd(eax, xmm0); // Restore number of arguments.
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
__ bind(&stack_overflow);
{
- // Pop the temporary registers, so that return address is on top of stack.
- __ Pop(edi);
-
__ TailCallRuntime(Runtime::kThrowStackOverflow);
// This should be unreachable.
@@ -1008,44 +1055,40 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
namespace {
-// This function modified start_addr, and only reads the contents of num_args
-// register. scratch1 and scratch2 are used as temporary registers. Their
-// original values are restored after the use.
+// This function modifies start_addr, and only reads the contents of num_args
+// register. scratch1 and scratch2 are used as temporary registers.
void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
MacroAssembler* masm, Register num_args, Register start_addr,
- Register scratch1, Register scratch2, int num_slots_above_ret_addr,
+ Register scratch1, Register scratch2, int num_slots_to_move,
Label* stack_overflow) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// We have to move return address and the temporary registers above it
// before we can copy arguments onto the stack. To achieve this:
// Step 1: Increment the stack pointer by num_args + 1 (for receiver).
- // Step 2: Move the return address and values above it to the top of stack.
+ // Step 2: Move the return address and values around it to the top of stack.
// Step 3: Copy the arguments into the correct locations.
// current stack =====> required stack layout
- // | | | scratch1 | (2) <-- esp(1)
- // | | | .... | (2)
- // | | | scratch-n | (2)
- // | | | return addr | (2)
+ // | | | return addr | (2) <-- esp (1)
+ // | | | addtl. slot |
// | | | arg N | (3)
- // | scratch1 | <-- esp | .... |
- // | .... | | arg 1 |
- // | scratch-n | | arg 0 |
- // | return addr | | receiver slot |
+ // | | | .... |
+ // | | | arg 1 |
+ // | return addr | <-- esp | arg 0 |
+ // | addtl. slot | | receiver slot |
// Check for stack overflow before we increment the stack pointer.
- Generate_StackOverflowCheck(masm, num_args, scratch1, scratch2,
- stack_overflow, true);
+ Generate_StackOverflowCheck(masm, num_args, scratch1, stack_overflow, true);
- // Step 1 - Update the stack pointer. scratch1 already contains the required
- // increment to the stack. i.e. num_args + 1 stack slots. This is computed in
- // Generate_StackOverflowCheck.
+ // Step 1 - Update the stack pointer.
+ __ lea(scratch1, Operand(num_args, times_4, kPointerSize));
__ AllocateStackFrame(scratch1);
- // Step 2 move return_address and slots above it to the correct locations.
+ // Step 2: Move return_address and slots around it to the correct locations.
// Move from top to bottom, otherwise we may overwrite when num_args = 0 or 1,
// basically when the source and destination overlap. We at least need one
// extra slot for the receiver, so no extra checks are needed to avoid an overlapping copy.
- for (int i = 0; i < num_slots_above_ret_addr + 1; i++) {
+ for (int i = 0; i < num_slots_to_move + 1; i++) {
__ mov(scratch1,
Operand(esp, num_args, times_pointer_size, (i + 1) * kPointerSize));
__ mov(Operand(esp, i * kPointerSize), scratch1);
@@ -1055,7 +1098,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// Slot meant for receiver contains return address. Reset it so that
// we will not incorrectly interpret return address as an object.
__ mov(Operand(esp, num_args, times_pointer_size,
- (num_slots_above_ret_addr + 1) * kPointerSize),
+ (num_slots_to_move + 1) * kPointerSize),
Immediate(0));
__ mov(scratch1, num_args);
@@ -1064,7 +1107,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ bind(&loop_header);
__ mov(scratch2, Operand(start_addr, 0));
__ mov(Operand(esp, scratch1, times_pointer_size,
- num_slots_above_ret_addr * kPointerSize),
+ num_slots_to_move * kPointerSize),
scratch2);
__ sub(start_addr, Immediate(kPointerSize));
__ sub(scratch1, Immediate(1));
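
Taken together, the three steps above amount to a fixed-offset memmove on the stack: grow, slide the return-address group to the new top, zero the receiver slot, then copy the arguments in from the back. A toy model in plain C++ (not V8 code; index 0 of the vector plays the role of esp):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> PushZeroAndArgs(std::vector<uint32_t> stack,
                                          const std::vector<uint32_t>& args,
                                          size_t num_slots_to_move) {
      const size_t num_args = args.size();
      // Step 1: grow the stack by num_args + 1 slots (the receiver slot).
      stack.insert(stack.begin(), num_args + 1, 0u);
      // Step 2: move the return address and the slots around it to the top.
      for (size_t i = 0; i < num_slots_to_move + 1; i++) {
        stack[i] = stack[i + num_args + 1];
      }
      // The receiver slot still holds a stale copy of the return address;
      // zero it so it can never be misread as an object pointer.
      stack[num_slots_to_move + 1 + num_args] = 0;
      // Step 3: copy the arguments into place, last argument nearest the top.
      for (size_t i = 0; i < num_args; i++) {
        stack[num_slots_to_move + 1 + i] = args[num_args - 1 - i];
      }
      return stack;
    }

Moving from top to bottom in step 2 is what makes the overlapping case (num_args of 0 or 1) safe, as the comment above notes.
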
@@ -1078,69 +1121,74 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target
- // -- edi : the constructor
- // -- ebx : allocation site feedback (if available or undefined)
- // -- ecx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
+ // -- eax : the number of arguments (not including the receiver)
+ // -- ecx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order
+ // as they are to be pushed onto the stack.
+ // -- esp[0] : return address
+ // -- esp[4] : allocation site feedback (if available or undefined)
+ // -- esp[8] : the new target
+ // -- esp[12] : the constructor
// -----------------------------------
+
Label stack_overflow;
- // We need two scratch registers. Push edi and edx onto stack.
- __ Push(edi);
- __ Push(edx);
- // Push arguments and move return address to the top of stack.
- // The eax register is readonly. The ecx register will be modified. The edx
- // and edi registers will be modified but restored to their original values.
- Generate_InterpreterPushZeroAndArgsAndReturnAddress(masm, eax, ecx, edx, edi,
- 2, &stack_overflow);
+ // Push arguments and move return address and stack spill slots to the top of
+ // stack. The eax register is readonly. The ecx register will be modified. edx
+ // and edi are used as scratch registers.
+ Generate_InterpreterPushZeroAndArgsAndReturnAddress(
+ masm, eax, ecx, edx, edi,
+ InterpreterPushArgsThenConstructDescriptor::kStackArgumentsCount,
+ &stack_overflow);
- // Restore edi and edx
- __ Pop(edx);
- __ Pop(edi);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ PopReturnAddressTo(ecx);
- __ Pop(ebx); // Pass the spread in a register
- __ PushReturnAddressFrom(ecx);
- __ sub(eax, Immediate(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(ebx);
- }
+ // Call the appropriate constructor. eax and ecx already contain intended
+ // values, remaining registers still need to be initialized from the stack.
if (mode == InterpreterPushArgsMode::kArrayFunction) {
- // Tail call to the array construct stub (still in the caller
- // context at this point).
- __ AssertFunction(edi);
- // TODO(v8:6666): When rewriting ia32 ASM builtins to not clobber the
- // kRootRegister ebx, this useless move can be removed.
- __ Move(kJavaScriptCallExtraArg1Register, ebx);
- Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
- __ Jump(code, RelocInfo::CODE_TARGET);
+ // Tail call to the array construct stub (still in the caller context at
+ // this point).
+
+ __ movd(xmm0, eax); // Spill number of arguments.
+ __ PopReturnAddressTo(eax);
+ __ Pop(kJavaScriptCallExtraArg1Register);
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ __ Pop(kJavaScriptCallTargetRegister);
+ __ PushReturnAddressFrom(eax);
+ __ movd(eax, xmm0); // Reload number of arguments.
+
+ __ AssertFunction(kJavaScriptCallTargetRegister);
+ __ AssertUndefinedOrAllocationSite(kJavaScriptCallExtraArg1Register);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- // Call the constructor with unmodified eax, edi, edx values.
+ __ movd(xmm0, eax); // Spill number of arguments.
+ __ PopReturnAddressTo(eax);
+ __ Drop(1); // The allocation site is unused.
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ __ Pop(kJavaScriptCallTargetRegister);
+ __ Pop(ecx); // Pop the spread (i.e. the first argument), overwriting ecx.
+ __ PushReturnAddressFrom(eax);
+ __ movd(eax, xmm0); // Reload number of arguments.
+ __ sub(eax, Immediate(1)); // The actual argc thus decrements by one.
+
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
RelocInfo::CODE_TARGET);
} else {
DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
- // Call the constructor with unmodified eax, edi, edx values.
+ __ PopReturnAddressTo(ecx);
+ __ Drop(1); // The allocation site is unused.
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ __ Pop(kJavaScriptCallTargetRegister);
+ __ PushReturnAddressFrom(ecx);
+
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
__ bind(&stack_overflow);
- {
- // Pop the temporary registers, so that return address is on top of stack.
- __ Pop(edx);
- __ Pop(edi);
-
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
-
- // This should be unreachable.
- __ int3();
- }
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ int3();
}
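
The esp[0]..esp[12] layout documented in the state comment above can be pictured as a small frame struct. This is purely illustrative — the builtin consumes these slots with Pop(), it never forms such a struct, and the type name is hypothetical:

    #include <cstdint>

    struct PushArgsThenConstructFrame {
      uint32_t return_address;   // esp[0]
      uint32_t allocation_site;  // esp[4]: feedback, or undefined
      uint32_t new_target;       // esp[8]
      uint32_t constructor;      // esp[12]
    };
    static_assert(sizeof(PushArgsThenConstructFrame) == 16,
                  "four 4-byte stack slots");
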
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
@@ -1151,26 +1199,30 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+ static constexpr Register scratch = ecx;
+
// If the SFI function_data is an InterpreterData, get the trampoline stored
// in it, otherwise get the trampoline from the builtins list.
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kFunctionOffset));
- __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+ __ mov(scratch, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+ __ mov(scratch, FieldOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(scratch,
+ FieldOperand(scratch, SharedFunctionInfo::kFunctionDataOffset));
__ Push(eax);
- __ CmpObjectType(ebx, INTERPRETER_DATA_TYPE, eax);
+ __ CmpObjectType(scratch, INTERPRETER_DATA_TYPE, eax);
__ j(not_equal, &builtin_trampoline, Label::kNear);
- __ mov(ebx, FieldOperand(ebx, InterpreterData::kInterpreterTrampolineOffset));
+ __ mov(scratch,
+ FieldOperand(scratch, InterpreterData::kInterpreterTrampolineOffset));
__ jmp(&trampoline_loaded, Label::kNear);
__ bind(&builtin_trampoline);
- __ Move(ebx, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ Move(scratch, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
__ bind(&trampoline_loaded);
__ Pop(eax);
- __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
- __ push(ebx);
+ __ add(scratch, Immediate(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+ __ push(scratch);
// Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister,
@@ -1185,7 +1237,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
- ebx);
+ scratch);
__ Assert(
equal,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
@@ -1197,15 +1249,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(
- kJavaScriptCallCodeStartRegister,
- Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
+ __ movzx_b(scratch, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ mov(kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, scratch, times_pointer_size,
+ 0));
__ jmp(kJavaScriptCallCodeStartRegister);
}
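
The closing movzx_b/mov/jmp sequence is a classic table dispatch: the byte at the current offset selects a handler entry point out of the dispatch table. A self-contained sketch under assumed, hypothetical names:

    #include <cstddef>
    #include <cstdint>

    using BytecodeHandler = void (*)();

    void Dispatch(const uint8_t* bytecode_array, size_t offset,
                  BytecodeHandler const* dispatch_table) {
      const uint8_t bytecode = bytecode_array[offset];  // movzx_b
      dispatch_table[bytecode]();                       // indexed load + jmp
    }
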
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// Get bytecode array and bytecode offset from the stack frame.
__ mov(kInterpreterBytecodeArrayRegister,
Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1213,20 +1267,16 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Load the current bytecode
- __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
-
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, ebx, ecx,
+ kInterpreterBytecodeOffsetRegister, ecx, esi,
&if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
- __ mov(ebx, kInterpreterBytecodeOffsetRegister);
- __ SmiTag(ebx);
- __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ebx);
+ __ mov(ecx, kInterpreterBytecodeOffsetRegister);
+ __ SmiTag(ecx);
+ __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ecx);
Generate_InterpreterEnterBytecode(masm);
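
On 32-bit V8 a Smi is the integer shifted left by one with a zero tag bit (see the STATIC_ASSERT on kSmiTagSize further down in this diff), so SmiTag/SmiUntag are single shifts. A sketch assuming that representation:

    #include <cstdint>

    constexpr int32_t SmiTag(int32_t value) { return value << 1; }
    constexpr int32_t SmiUntag(int32_t smi) { return smi >> 1; }
    static_assert(SmiUntag(SmiTag(42)) == 42, "tagging round-trips");
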
@@ -1236,10 +1286,13 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
// -- edx : new target (preserved for callee)
@@ -1272,7 +1325,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
ebp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
}
for (int i = 0; i < 3 - j; ++i) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
}
if (j < 3) {
__ jmp(&args_done, Label::kNear);
@@ -1291,10 +1344,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ SmiUntag(ecx);
scope.GenerateLeaveFrame();
- __ PopReturnAddressTo(ebx);
+ __ PopReturnAddressTo(edx);
__ inc(ecx);
__ lea(esp, Operand(esp, ecx, times_pointer_size, 0));
- __ PushReturnAddressFrom(ebx);
+ __ PushReturnAddressFrom(edx);
__ ret(0);
__ bind(&failed);
@@ -1316,7 +1369,13 @@ namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
bool with_result) {
+#ifdef V8_EMBEDDED_BUILTINS
+ // TODO(v8:6666): Fold into Default config once root is fully supported.
+ const RegisterConfiguration* config(
+ RegisterConfiguration::PreserveRootIA32());
+#else
const RegisterConfiguration* config(RegisterConfiguration::Default());
+#endif
int allocatable_register_count = config->num_allocatable_general_registers();
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1346,24 +1405,42 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
} // namespace
void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+#ifdef V8_EMBEDDED_BUILTINS
+ // TODO(v8:6666): Remove the ifdef once root is preserved by default.
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_ContinueToBuiltinHelper(masm, false, false);
}
void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
MacroAssembler* masm) {
+#ifdef V8_EMBEDDED_BUILTINS
+ // TODO(v8:6666): Remove the ifdef once root is preserved by default.
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_ContinueToBuiltinHelper(masm, false, true);
}
void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+#ifdef V8_EMBEDDED_BUILTINS
+ // TODO(v8:6666): Remove the ifdef once root is preserved by default.
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_ContinueToBuiltinHelper(masm, true, false);
}
void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
MacroAssembler* masm) {
+#ifdef V8_EMBEDDED_BUILTINS
+ // TODO(v8:6666): Remove the ifdef once root is preserved by default.
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
Generate_ContinueToBuiltinHelper(masm, true, true);
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kNotifyDeoptimized);
@@ -1377,6 +1454,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
@@ -1385,32 +1464,37 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- esp[12] : receiver
// -----------------------------------
- // 1. Load receiver into edi, argArray into ebx (if present), remove all
+ // 1. Load receiver into xmm0, argArray into edx (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
Label no_arg_array, no_this_arg;
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ mov(ebx, edx);
- __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+ // Spill receiver to allow the usage of edi as a scratch register.
+ __ movd(xmm0, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+ __ LoadRoot(edx, RootIndex::kUndefinedValue);
+ __ mov(edi, edx);
__ test(eax, eax);
__ j(zero, &no_this_arg, Label::kNear);
{
- __ mov(edx, Operand(esp, eax, times_pointer_size, 0));
+ __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
__ cmp(eax, Immediate(1));
__ j(equal, &no_arg_array, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize));
+ __ mov(edx, Operand(esp, eax, times_pointer_size, -kPointerSize));
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
__ PopReturnAddressTo(ecx);
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ Push(edx);
+ __ Push(edi);
__ PushReturnAddressFrom(ecx);
+
+ // Restore receiver to edi.
+ __ movd(edi, xmm0);
}
// ----------- S t a t e -------------
- // -- ebx : argArray
+ // -- edx : argArray
// -- edi : receiver
// -- esp[0] : return address
// -- esp[4] : thisArg
@@ -1422,9 +1506,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(ebx, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
- __ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex, &no_arguments,
- Label::kNear);
+ __ JumpIfRoot(edx, RootIndex::kNullValue, &no_arguments, Label::kNear);
+ __ JumpIfRoot(edx, RootIndex::kUndefinedValue, &no_arguments, Label::kNear);
// 4a. Apply the receiver to the given argArray.
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
@@ -1441,6 +1524,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// Stack Layout:
// esp[0] : Return address
// esp[8] : Argument n
@@ -1456,9 +1541,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Label done;
__ test(eax, eax);
__ j(not_zero, &done, Label::kNear);
- __ PopReturnAddressTo(ebx);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- __ PushReturnAddressFrom(ebx);
+ __ PopReturnAddressTo(edx);
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ PushReturnAddressFrom(edx);
__ inc(eax);
__ bind(&done);
}
@@ -1473,11 +1558,11 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Label loop;
__ mov(ecx, eax);
__ bind(&loop);
- __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
- __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), ebx);
+ __ mov(edx, Operand(esp, ecx, times_pointer_size, 0));
+ __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), edx);
__ dec(ecx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(ebx); // Discard copy of return address.
+ __ pop(edx); // Discard copy of return address.
__ dec(eax); // One fewer argument (first argument is new receiver).
}
@@ -1486,6 +1571,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
}
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
@@ -1495,31 +1582,38 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- esp[16] : receiver
// -----------------------------------
- // 1. Load target into edi (if present), argumentsList into ebx (if present),
+ // 1. Load target into edi (if present), argumentsList into edx (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
Label done;
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
- __ mov(ebx, edi);
+ __ mov(ecx, edi);
__ cmp(eax, Immediate(1));
__ j(below, &done, Label::kNear);
__ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
__ j(equal, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
__ cmp(eax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
__ bind(&done);
- __ PopReturnAddressTo(ecx);
+
+ // Spill argumentsList to use edx as a scratch register.
+ __ movd(xmm0, edx);
+
+ __ PopReturnAddressTo(edx);
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
+ __ Push(ecx);
+ __ PushReturnAddressFrom(edx);
+
+ // Restore argumentsList.
+ __ movd(edx, xmm0);
}
// ----------- S t a t e -------------
- // -- ebx : argumentsList
+ // -- edx : argumentsList
// -- edi : target
// -- esp[0] : return address
// -- esp[4] : thisArgument
@@ -1535,6 +1629,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
@@ -1544,33 +1640,40 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- esp[16] : receiver
// -----------------------------------
- // 1. Load target into edi (if present), argumentsList into ebx (if present),
+ // 1. Load target into edi (if present), argumentsList into ecx (if present),
// new.target into edx (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
Label done;
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
- __ mov(ebx, edi);
+ __ mov(ecx, edi);
__ cmp(eax, Immediate(1));
__ j(below, &done, Label::kNear);
__ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
__ mov(edx, edi);
__ j(equal, &done, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
__ cmp(eax, Immediate(3));
__ j(below, &done, Label::kNear);
__ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
__ bind(&done);
+
+ // Spill argumentsList to use ecx as a scratch register.
+ __ movd(xmm0, ecx);
+
__ PopReturnAddressTo(ecx);
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ PushReturnAddressFrom(ecx);
+
+ // Restore argumentsList.
+ __ movd(ecx, xmm0);
}
// ----------- S t a t e -------------
- // -- ebx : argumentsList
+ // -- ecx : argumentsList
// -- edx : new.target
// -- edi : target
// -- esp[0] : return address
@@ -1591,6 +1694,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
@@ -1600,19 +1705,18 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// The following test will detect both a nullptr and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero,
AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal,
AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
// function.
- __ mov(ebx, masm->isolate()->factory()->undefined_value());
__ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
RelocInfo::CODE_TARGET);
}
@@ -1639,45 +1743,57 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack.
- __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
// Leave the frame.
__ leave();
// Remove caller arguments from the stack.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, edi, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ PushReturnAddressFrom(ecx);
}
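
Note the scaling trick in the lea above: edi still holds the Smi-tagged argument count (count << 1, per the STATIC_ASSERT), so an index scale of 2 yields count * kPointerSize directly, and the extra kPointerSize skips the receiver slot. Modeled in plain C++ (illustrative only):

    #include <cstdint>

    constexpr uint32_t kPointerSize = 4;  // ia32

    uint32_t BytesToDrop(uint32_t smi_tagged_count) {
      // smi_tagged_count == count << 1, so scaling by 2 gives
      // count * kPointerSize; add one more slot for the receiver.
      return smi_tagged_count * 2 + kPointerSize;
    }
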
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- edi : target
+ // -- esi : context for the Call / Construct builtin
// -- eax : number of parameters on the stack (not including the receiver)
- // -- ebx : arguments list (a FixedArray)
// -- ecx : len (number of elements to push from args)
- // -- edx : new.target (checked to be constructor or undefined)
+ // -- edx : new.target (checked to be constructor or undefined)
+ // -- esp[4] : arguments list (a FixedArray)
// -- esp[0] : return address.
// -----------------------------------
- // We need to preserve eax, edi and ebx.
+ // We need to preserve eax, edx, edi and esi.
__ movd(xmm0, edx);
__ movd(xmm1, edi);
__ movd(xmm2, eax);
+ __ movd(xmm3, esi); // Spill the context.
+
+ // TODO(v8:6666): Remove this usage of ebx to enable kRootRegister support.
+ const Register kArgumentsList = esi;
+ const Register kArgumentsLength = ecx;
+
+ __ PopReturnAddressTo(edx);
+ __ pop(kArgumentsList);
+ __ PushReturnAddressFrom(edx);
if (masm->emit_debug_code()) {
- // Allow ebx to be a FixedArray, or a FixedDoubleArray if ecx == 0.
+ // Allow kArgumentsList to be a FixedArray, or a FixedDoubleArray if
+ // kArgumentsLength == 0.
Label ok, fail;
- __ AssertNotSmi(ebx);
- __ mov(edx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ AssertNotSmi(kArgumentsList);
+ __ mov(edx, FieldOperand(kArgumentsList, HeapObject::kMapOffset));
__ CmpInstanceType(edx, FIXED_ARRAY_TYPE);
__ j(equal, &ok);
__ CmpInstanceType(edx, FIXED_DOUBLE_ARRAY_TYPE);
__ j(not_equal, &fail);
- __ cmp(ecx, 0);
+ __ cmp(kArgumentsLength, 0);
__ j(equal, &ok);
// Fall through.
__ bind(&fail);
@@ -1686,25 +1802,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&ok);
}
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edx, __ StaticVariable(real_stack_limit));
- // Make edx the space we have left. The stack might already be overflowed
- // here which will cause edx to become negative.
- __ neg(edx);
- __ add(edx, esp);
- __ sar(edx, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(edx, ecx);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ Generate_StackOverflowCheck(masm, kArgumentsLength, edx, &stack_overflow);
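
For reference, the open-coded check being replaced here (and Generate_StackOverflowCheck itself) boils down to a signed headroom comparison against the real stack limit. A sketch of that logic in plain C++ — a model of the check, not the V8 helper:

    #include <cstdint>

    bool WouldOverflowStack(uintptr_t esp, uintptr_t real_stack_limit,
                            int32_t num_args) {
      // The stack grows down, so headroom is esp - limit. It may already be
      // negative if the stack has overflowed, hence the signed comparison.
      const int32_t headroom_slots =
          static_cast<int32_t>(esp - real_stack_limit) >> 2;  // kPointerSizeLog2
      return headroom_slots <= num_args;
    }
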
// Push additional arguments onto the stack.
{
@@ -1712,14 +1813,14 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Move(eax, Immediate(0));
Label done, push, loop;
__ bind(&loop);
- __ cmp(eax, ecx);
+ __ cmp(eax, kArgumentsLength);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
- __ mov(edi,
- FieldOperand(ebx, eax, times_pointer_size, FixedArray::kHeaderSize));
- __ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
+ __ mov(edi, FieldOperand(kArgumentsList, eax, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(edi, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(edi, RootIndex::kUndefinedValue);
__ bind(&push);
__ Push(edi);
__ inc(eax);
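
The loop above pushes the arguments list element by element, laundering holes into undefined on the way. A plain-C++ model of that transformation, with the root values passed in as stand-in parameters:

    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> PushArguments(const std::vector<uint32_t>& args,
                                        uint32_t the_hole,
                                        uint32_t undefined_value) {
      std::vector<uint32_t> pushed;
      pushed.reserve(args.size());
      for (uint32_t value : args) {
        // CompareRoot + conditional LoadRoot: holes become undefined.
        pushed.push_back(value == the_hole ? undefined_value : value);
      }
      return pushed;
    }
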
@@ -1729,34 +1830,45 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
// Restore esi, eax, edi and edx.
+ __ movd(esi, xmm3); // Restore the context.
__ movd(eax, xmm2);
__ movd(edi, xmm1);
__ movd(edx, xmm0);
// Compute the actual parameter count.
- __ add(eax, ecx);
+ __ add(eax, kArgumentsLength);
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ movd(esi, xmm3); // Restore the context.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
}
// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object)
+ // -- esi : context for the Call / Construct builtin
// -- edx : the new target (for [[Construct]] calls)
// -- ecx : start index (to support rest parameters)
// -----------------------------------
+ __ movd(xmm0, esi); // Spill the context.
+
+ Register scratch = esi;
+
// Check if new.target has a [[Construct]] internal method.
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
__ bind(&new_target_not_constructor);
@@ -1764,18 +1876,18 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
__ Push(edx);
+ __ movd(esi, xmm0); // Restore the context.
__ CallRuntime(Runtime::kThrowNotConstructor);
}
__ bind(&new_target_constructor);
}
- // Preserve new.target (in case of [[Construct]]).
- __ movd(xmm0, edx);
+ __ movd(xmm1, edx); // Preserve new.target (in case of [[Construct]]).
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
+ __ mov(scratch, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &arguments_adaptor, Label::kNear);
{
@@ -1783,39 +1895,23 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ mov(edx, FieldOperand(edx, JSFunction::kSharedFunctionInfoOffset));
__ movzx_w(edx, FieldOperand(
edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(ebx, ebp);
+ __ mov(scratch, ebp);
}
__ jmp(&arguments_done, Label::kNear);
__ bind(&arguments_adaptor);
{
// Just load the length from the ArgumentsAdaptorFrame.
- __ mov(edx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(edx,
+ Operand(scratch, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(edx);
}
__ bind(&arguments_done);
- Label stack_done;
+ Label stack_done, stack_overflow;
__ sub(edx, ecx);
__ j(less_equal, &stack_done);
{
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack
- // limit".
- Label done;
- __ LoadRoot(ecx, Heap::kRealStackLimitRootIndex);
- // Make ecx the space we have left. The stack might already be
- // overflowed here which will cause ecx to become negative.
- __ neg(ecx);
- __ add(ecx, esp);
- __ sar(ecx, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
+ Generate_StackOverflowCheck(masm, edx, ecx, &stack_overflow);
// Forward the arguments from the caller frame.
{
@@ -1824,7 +1920,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ PopReturnAddressTo(ecx);
__ bind(&loop);
{
- __ Push(Operand(ebx, edx, times_pointer_size, 1 * kPointerSize));
+ __ Push(Operand(scratch, edx, times_pointer_size, 1 * kPointerSize));
__ dec(edx);
__ j(not_zero, &loop);
}
@@ -1833,16 +1929,22 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
}
__ bind(&stack_done);
- // Restore new.target (in case of [[Construct]]).
- __ movd(edx, xmm0);
+ __ movd(edx, xmm1); // Restore new.target (in case of [[Construct]]).
+ __ movd(esi, xmm0); // Restore the context.
// Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ movd(esi, xmm0); // Restore the context.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
}
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
@@ -1883,13 +1985,15 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
__ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx); // Clobbers ecx.
__ j(above_equal, &done_convert);
+ // Reload the receiver (it was clobbered by CmpObjectType).
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
- __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex,
- &convert_global_proxy, Label::kNear);
- __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
+ __ JumpIfRoot(ecx, RootIndex::kUndefinedValue, &convert_global_proxy,
+ Label::kNear);
+ __ JumpIfNotRoot(ecx, RootIndex::kNullValue, &convert_to_object,
Label::kNear);
__ bind(&convert_global_proxy);
{
@@ -1932,9 +2036,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ movzx_w(
- ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ ecx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(eax);
- ParameterCount expected(ebx);
+ ParameterCount expected(ecx);
__ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -1948,40 +2052,43 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : new.target (only in case of [[Construct]])
// -- edi : target (checked to be a JSBoundFunction)
// -----------------------------------
- // Load [[BoundArguments]] into ecx and length of that into ebx.
+ __ movd(xmm0, edx); // Spill edx.
+
+ // Load [[BoundArguments]] into ecx and length of that into edx.
Label no_bound_arguments;
__ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
- __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ test(ebx, ebx);
+ __ mov(edx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(edx);
+ __ test(edx, edx);
__ j(zero, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : new.target (only in case of [[Construct]])
- // -- edi : target (checked to be a JSBoundFunction)
- // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
- // -- ebx : the number of [[BoundArguments]]
+ // -- eax : the number of arguments (not including the receiver)
+ // -- xmm0 : new.target (only in case of [[Construct]])
+ // -- edi : target (checked to be a JSBoundFunction)
+ // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
+ // -- edx : the number of [[BoundArguments]]
// -----------------------------------
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ lea(ecx, Operand(ebx, times_pointer_size, 0));
+ __ lea(ecx, Operand(edx, times_pointer_size, 0));
__ sub(esp, ecx);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
- __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ CompareRoot(esp, ecx, RootIndex::kRealStackLimit);
+ __ j(above_equal, &done, Label::kNear);
// Restore the stack pointer.
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 0));
+ __ lea(esp, Operand(esp, edx, times_pointer_size, 0));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -1997,10 +2104,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label loop;
__ Set(ecx, 0);
- __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0));
+ __ lea(edx, Operand(esp, edx, times_pointer_size, 0));
__ bind(&loop);
- __ movd(xmm0, Operand(ebx, ecx, times_pointer_size, 0));
- __ movd(Operand(esp, ecx, times_pointer_size, 0), xmm0);
+ __ movd(xmm1, Operand(edx, ecx, times_pointer_size, 0));
+ __ movd(Operand(esp, ecx, times_pointer_size, 0), xmm1);
__ inc(ecx);
__ cmp(ecx, eax);
__ j(less, &loop);
@@ -2010,13 +2117,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label loop;
__ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
- __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ SmiUntag(ebx);
+ __ mov(edx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(edx);
__ bind(&loop);
- __ dec(ebx);
- __ movd(xmm0, FieldOperand(ecx, ebx, times_pointer_size,
+ __ dec(edx);
+ __ movd(xmm1, FieldOperand(ecx, edx, times_pointer_size,
FixedArray::kHeaderSize));
- __ movd(Operand(esp, eax, times_pointer_size, 0), xmm0);
+ __ movd(Operand(esp, eax, times_pointer_size, 0), xmm1);
__ lea(eax, Operand(eax, 1));
__ j(greater, &loop);
}
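
The net effect of Generate_PushBoundArguments: the [[BoundArguments]] are spliced in ahead of the caller-supplied arguments, so the callee observes argc plus the bound count. Sketched in plain C++ (illustrative only):

    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> InsertBoundArguments(
        std::vector<uint32_t> args, const std::vector<uint32_t>& bound) {
      // Bound arguments precede the caller-supplied ones, matching
      // f.bind(receiver, a, b)(c) invoking f with (a, b, c).
      args.insert(args.begin(), bound.begin(), bound.end());
      return args;
    }
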
@@ -2026,13 +2133,16 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// [[BoundArguments]]), so we need to subtract one for the return address.
__ dec(eax);
}
+
__ bind(&no_bound_arguments);
+ __ movd(edx, xmm0); // Reload edx.
}
} // namespace
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSBoundFunction)
@@ -2040,8 +2150,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(edi);
// Patch the receiver to [[BoundThis]].
- __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
+ __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
@@ -2054,6 +2164,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
@@ -2101,6 +2212,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the new target (checked to be a constructor)
@@ -2109,10 +2221,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ AssertConstructor(edi);
__ AssertFunction(edi);
- // Calling convention for function specific ConstructStubs require
- // ebx to contain either an AllocationSite or undefined.
- __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
-
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
@@ -2121,16 +2229,23 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ j(zero, &call_generic_stub, Label::kNear);
+ // Calling convention for function specific ConstructStubs require
+ // ecx to contain either an AllocationSite or undefined.
+ __ LoadRoot(ecx, RootIndex::kUndefinedValue);
__ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
RelocInfo::CODE_TARGET);
__ bind(&call_generic_stub);
+ // Calling convention for function specific ConstructStubs require
+ // ecx to contain either an AllocationSite or undefined.
+ __ LoadRoot(ecx, RootIndex::kUndefinedValue);
__ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the new target (checked to be a constructor)
@@ -2213,18 +2328,23 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
- // -- ebx : expected number of arguments
+ // -- ecx : expected number of arguments
// -- edx : new target (passed through to callee)
// -- edi : function (passed through to callee)
// -----------------------------------
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
+ const Register kExpectedNumberOfArgumentsRegister = ecx;
+
Label invoke, dont_adapt_arguments, stack_overflow;
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
Label enough, too_few;
- __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ cmp(kExpectedNumberOfArgumentsRegister,
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ j(equal, &dont_adapt_arguments);
- __ cmp(eax, ebx);
+ __ cmp(eax, kExpectedNumberOfArgumentsRegister);
__ j(less, &too_few);
{ // Enough parameters: Actual >= expected.
@@ -2232,7 +2352,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
// edi is used as a scratch register. It should be restored from the frame
// when needed.
- Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
+ Generate_StackOverflowCheck(masm, kExpectedNumberOfArgumentsRegister, edi,
+ &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2244,7 +2365,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ inc(eax);
__ push(Operand(edi, 0));
__ sub(edi, Immediate(kPointerSize));
- __ cmp(eax, ebx);
+ __ cmp(eax, kExpectedNumberOfArgumentsRegister);
__ j(less, &copy);
// eax now contains the expected number of arguments.
__ jmp(&invoke);
@@ -2255,16 +2376,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
// edi is used as a scratch register. It should be restored from the frame
// when needed.
- Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
+ Generate_StackOverflowCheck(masm, kExpectedNumberOfArgumentsRegister, edi,
+ &stack_overflow);
- // Remember expected arguments in ecx.
- __ mov(ecx, ebx);
+ // Remember expected arguments in xmm0.
+ __ movd(xmm0, kExpectedNumberOfArgumentsRegister);
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_4, offset));
- // ebx = expected - actual.
- __ sub(ebx, eax);
+ // ecx = expected - actual.
+ __ sub(kExpectedNumberOfArgumentsRegister, eax);
// eax = -actual - 1
__ neg(eax);
__ sub(eax, Immediate(1));
@@ -2282,11 +2404,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&fill);
__ inc(eax);
__ push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(eax, ebx);
+ __ cmp(eax, kExpectedNumberOfArgumentsRegister);
__ j(less, &fill);
// Restore expected arguments.
- __ mov(eax, ecx);
+ __ movd(eax, xmm0);
}
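
The too-few path above is, in effect, copy-then-pad: the actual arguments are copied first, then the remaining expected slots are filled with undefined. A model of just that path (it does not cover the enough-arguments case):

    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> AdaptArguments(const std::vector<uint32_t>& actual,
                                         size_t expected,
                                         uint32_t undefined_value) {
      // Precondition of this path: actual.size() < expected.
      std::vector<uint32_t> adapted = actual;
      adapted.resize(expected, undefined_value);  // pad missing arguments
      return adapted;
    }
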
// Call the entry point.
@@ -2325,15 +2447,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
}
-static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
- bool has_handler_frame) {
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// Lookup the function in the JavaScript frame.
- if (has_handler_frame) {
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2350,23 +2469,21 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ bind(&skip);
- // Drop any potential handler frame that is be sitting on top of the actual
+ // Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
- if (has_handler_frame) {
- __ leave();
- }
+ __ leave();
// Load deoptimization data from the code object.
- __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
- __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
+ __ mov(ecx, Operand(ecx, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex) -
kHeapObjectTag));
- __ SmiUntag(ebx);
+ __ SmiUntag(ecx);
// Compute the target address = code_obj + header_size + osr_offset
- __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag));
+ __ lea(eax, Operand(eax, ecx, times_1, Code::kHeaderSize - kHeapObjectTag));
// Overwrite the return address on the stack.
__ mov(Operand(esp, 0), eax);
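
The entry address computed above is the code object's untagged payload start plus the OSR offset taken from the deoptimization data. A sketch, assuming V8's usual kHeapObjectTag of 1:

    #include <cstdint>

    constexpr uintptr_t kHeapObjectTag = 1;  // heap pointers carry tag bit 1

    uintptr_t OsrEntryAddress(uintptr_t tagged_code_obj, uintptr_t header_size,
                              uintptr_t osr_pc_offset) {
      // untagged code start + header = first instruction; add the OSR offset.
      return tagged_code_obj - kHeapObjectTag + header_size + osr_pc_offset;
    }
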
@@ -2375,15 +2492,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ ret(0);
}
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, false);
-}
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, true);
-}
-
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// The function index was put in edi by the jump table trampoline.
// Convert to Smi for the runtime call.
__ SmiTag(edi);
@@ -2394,6 +2505,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
+ Assembler::AllowExplicitEbxAccessScope root_is_spilled(masm);
static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs ==
arraysize(wasm::kGpParamRegisters),
"frame size mismatch");
@@ -2420,7 +2532,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(kContextRegister, Smi::kZero);
- __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, ecx);
+ {
+ // At this point, ebx has been spilled to the stack but is not yet
+ // overwritten with another value. We can still use it as kRootRegister.
+ Assembler::SupportsRootRegisterScope root_is_unclobbered(masm);
+ __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, ecx);
+ }
// The entrypoint address is the return value.
__ mov(edi, kReturnRegister0);
@@ -2452,6 +2569,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If argv_mode == kArgvInRegister:
// ecx: pointer to the first argument
+#ifdef V8_EMBEDDED_BUILTINS
+ // TODO(v8:6666): Remove the ifdef once branch load poisoning is removed.
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+#endif
+
STATIC_ASSERT(eax == kRuntimeCallArgCountRegister);
STATIC_ASSERT(ecx == kRuntimeCallArgvRegister);
STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
@@ -2571,11 +2693,16 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
__ bind(&skip);
+#ifdef V8_EMBEDDED_BUILTINS
+ STATIC_ASSERT(kRootRegister == kSpeculationPoisonRegister);
+ CHECK(!FLAG_untrusted_code_mitigations);
+#else
// Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
- // both configurations. It is safe to always do this, because the underlying
- // register is caller-saved and can be arbitrarily clobbered.
+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+ // with both configurations. It is safe to always do this, because the
+ // underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
+#endif
// Compute the handler entry address and jump to it.
__ mov(edi, __ StaticVariable(pending_handler_entrypoint_address));
@@ -2583,6 +2710,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
}
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
Label check_negative, process_64_bits, done;
// Account for return address and saved regs.
@@ -2596,6 +2725,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
MemOperand return_operand = mantissa_operand;
Register scratch1 = ebx;
+ Assembler::AllowExplicitEbxAccessScope root_is_spilled(masm);
// Since we must use ecx for shifts below, use some other register (eax)
// to calculate the result.
@@ -2676,6 +2806,8 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
const Register exponent = eax;
const Register scratch = ecx;
const XMMRegister double_result = xmm3;
@@ -2843,9 +2975,10 @@ void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
RelocInfo::CODE_TARGET);
__ bind(&not_one_case);
- // TODO(v8:6666): When rewriting ia32 ASM builtins to not clobber the
- // kRootRegister ebx, this useless move can be removed.
- __ Move(kJavaScriptCallExtraArg1Register, ebx);
+ // Load undefined into the allocation site parameter as required by
+ // ArrayNArgumentsConstructor.
+ __ mov(kJavaScriptCallExtraArg1Register,
+ masm->isolate()->factory()->undefined_value());
Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -2853,6 +2986,8 @@ void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
} // namespace
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// ----------- S t a t e -------------
// -- eax : argc
// -- edi : constructor
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 0c892c960f..a2a335c70f 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -56,7 +56,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
RelocInfo::CODE_TARGET);
}
@@ -109,7 +108,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiUntag(a0);
// The receiver for the builtin/api call.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
// Set up pointer to last argument.
__ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -176,7 +175,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Preserve the incoming parameters on the stack.
__ SmiTag(a0);
__ Push(cp, a0, a1);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ Push(a3);
// ----------- S t a t e -------------
@@ -201,7 +200,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Else: use TheHoleValue as receiver for constructor call
__ bind(&not_create_implicit_receiver);
- __ LoadRoot(v0, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(v0, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
// -- v0: receiver
@@ -291,7 +290,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label use_receiver, do_throw, leave_frame;
// If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(v0, Heap::kUndefinedValueRootIndex, &use_receiver);
+ __ JumpIfRoot(v0, RootIndex::kUndefinedValue, &use_receiver);
// Otherwise we do a smi check and fall through to check if the return value
// is a valid receiver.
@@ -313,7 +312,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// on-stack receiver as the result.
__ bind(&use_receiver);
__ lw(v0, MemOperand(sp, 0 * kPointerSize));
- __ JumpIfRoot(v0, Heap::kTheHoleValueRootIndex, &do_throw);
+ __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_frame);
// Restore smi-tagged arguments count from the frame.
@@ -342,7 +341,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(a2, RootIndex::kRealStackLimit);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause a2 to become negative.
__ Subu(a2, sp, a2);
@@ -410,7 +409,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(t0, RootIndex::kUndefinedValue);
__ mov(s1, t0);
__ mov(s2, t0);
__ mov(s3, t0);
@@ -491,7 +490,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
// Push receiver.
@@ -558,7 +557,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, t0);
// Push hole as receiver since we do not use it for stepping.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(a1);
}
@@ -854,7 +853,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ Subu(t1, sp, Operand(t0));
- __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(a2, RootIndex::kRealStackLimit);
__ Branch(&ok, hs, t1, Operand(a2));
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
@@ -862,7 +861,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
Label loop_check;
- __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(t1, RootIndex::kUndefinedValue);
__ Branch(&loop_check);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
@@ -886,7 +885,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
@@ -934,7 +933,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ subu(scratch1, sp, scratch1);
@@ -983,7 +982,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ mov(t0, a0); // No receiver.
}
@@ -1191,7 +1190,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ push(t4);
}
for (int i = 0; i < 3 - j; ++i) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
}
if (j < 3) {
__ jmp(&args_done);
@@ -1290,15 +1289,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove accumulator.
}
-static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
- bool has_handler_frame) {
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Look up the function in the JavaScript frame.
- if (has_handler_frame) {
- __ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
+ __ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1310,11 +1304,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// If the code object is null, just return to the caller.
__ Ret(eq, v0, Operand(Smi::kZero));
- // Drop any potential handler frame that is be sitting on top of the actual
+ // Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
- if (has_handler_frame) {
- __ LeaveFrame(StackFrame::STUB);
- }
+ __ LeaveFrame(StackFrame::STUB);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
@@ -1336,14 +1328,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ Ret();
}
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, false);
-}
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, true);
-}
-
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1359,7 +1343,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
{
Label no_arg;
Register scratch = t0;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a2, RootIndex::kUndefinedValue);
__ mov(a3, a2);
// Lsa() cannot be used here, as the scratch value is used later.
__ sll(scratch, a0, kPointerSizeLog2);
@@ -1389,8 +1373,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(a2, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(a2, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(a2, RootIndex::kNullValue, &no_arguments);
+ __ JumpIfRoot(a2, RootIndex::kUndefinedValue, &no_arguments);
// 4a. Apply the receiver to the given argArray.
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
@@ -1412,7 +1396,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label done;
__ Branch(&done, ne, a0, Operand(zero_reg));
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ Addu(a0, a0, Operand(1));
__ bind(&done);
}
@@ -1462,7 +1446,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
{
Label no_arg;
Register scratch = t0;
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a1, RootIndex::kUndefinedValue);
__ mov(a2, a1);
__ mov(a3, a1);
__ sll(scratch, a0, kPointerSizeLog2);
@@ -1514,7 +1498,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
{
Label no_arg;
Register scratch = t0;
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a1, RootIndex::kUndefinedValue);
__ mov(a2, a1);
// Lsa() cannot be used here, as the scratch value is used later.
__ sll(scratch, a0, kPointerSizeLog2);
@@ -1603,32 +1587,20 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
// Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(t1, Heap::kRealStackLimitRootIndex);
- // Make ip the space we have left. The stack might already be overflowed
- // here which will cause ip to become negative.
- __ Subu(t1, sp, t1);
- // Check if the arguments will overflow the stack.
- __ sll(kScratchReg, t0, kPointerSizeLog2);
- __ Branch(&done, gt, t1, Operand(kScratchReg)); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
+ Label stack_overflow;
+ Generate_StackOverflowCheck(masm, t0, kScratchReg, t1, &stack_overflow);
// Push arguments onto the stack (thisArgument is already on the stack).
{
__ mov(t2, zero_reg);
Label done, push, loop;
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(t1, RootIndex::kTheHoleValue);
__ bind(&loop);
__ Branch(&done, eq, t2, Operand(t0));
__ Lsa(kScratchReg, a2, t2, kPointerSizeLog2);
__ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
__ Branch(&push, ne, t1, Operand(kScratchReg));
- __ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kScratchReg, RootIndex::kUndefinedValue);
__ bind(&push);
__ Push(kScratchReg);
__ Addu(t2, t2, Operand(1));
@@ -1639,6 +1611,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
}
// static
@@ -1772,9 +1747,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE));
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
- __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex,
- &convert_global_proxy);
- __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
+ __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
__ bind(&convert_global_proxy);
{
// Patch receiver to global proxy.
@@ -1863,8 +1837,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Subu(sp, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
- __ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison.
+ __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
+ __ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Addu(sp, sp, Operand(t1));
{
@@ -1973,7 +1947,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// The calling convention for function-specific ConstructStubs requires
// a2 to contain either an AllocationSite or undefined.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a2, RootIndex::kUndefinedValue);
Label call_generic_stub;
@@ -2021,8 +1995,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Subu(sp, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
- __ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison.
+ __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
+ __ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Addu(sp, sp, Operand(t1));
{
@@ -2218,7 +2192,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a1: function
// a2: expected number of arguments
// a3: new target (passed through to callee)
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(t0, RootIndex::kUndefinedValue);
__ sll(t2, a2, kPointerSizeLog2);
__ Subu(t1, fp, Operand(t2));
// Adjust for frame.
@@ -2391,7 +2365,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Check result for exception sentinel.
Label exception_returned;
- __ LoadRoot(t0, Heap::kExceptionRootIndex);
+ __ LoadRoot(t0, RootIndex::kException);
__ Branch(&exception_returned, eq, t0, Operand(v0));
// Check that there is no pending exception, otherwise we
@@ -2402,7 +2376,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
__ li(a2, pending_exception_address);
__ lw(a2, MemOperand(a2));
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(t0, RootIndex::kTheHoleValue);
// Cannot use Check() here, as it attempts to generate a call into the runtime.
__ Branch(&okay, eq, t0, Operand(a2));
__ stop("Unexpected pending exception");
@@ -2462,9 +2436,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ bind(&zero);
// Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
- // both configurations. It is safe to always do this, because the underlying
- // register is caller-saved and can be arbitrarily clobbered.
+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+ // with both configurations. It is safe to always do this, because the
+ // underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
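
The comment rewrite above, together with the matching ppc and s390 hunks later
in this diff that drop the old FLAG_branch_load_poisoning guard, keeps the
poison-register reset unconditional so that a single snapshot serves builds
with and without the untrusted-code mitigations. A sketch of what the reset
amounts to, assuming the usual convention that an all-ones mask means "not
poisoned"; the exact instruction differs per architecture:

void TurboAssembler::ResetSpeculationPoisonRegister() {
  // With an all-ones mask, AND-ing loaded values against it is a no-op; the
  // register is caller-saved, so clobbering it here is always safe.
  li(kSpeculationPoisonRegister, -1);
}
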
@@ -2705,6 +2679,10 @@ namespace {
void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
ElementsKind kind) {
+ // Load undefined into the allocation site parameter as required by
+ // ArrayNArgumentsConstructor.
+ __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
+
__ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
.code(),
RelocInfo::CODE_TARGET, lo, a0, Operand(1));
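
The hunks above all perform one mechanical substitution: the untyped
Heap::k...RootIndex constants become values of a scoped RootIndex enum, which
makes LoadRoot/PushRoot/JumpIfRoot/CompareRoot type-checked. A minimal sketch
of the shape of that API, assuming the enum lives in src/roots.h and that
roots are addressed off the dedicated root register; RootRegisterOffset is an
illustrative helper name, not necessarily the exact V8 identifier:

enum class RootIndex : uint16_t {
  kNullValue,
  kUndefinedValue,
  kTheHoleValue,
  kException,
  kRealStackLimit,
  // ... one entry per root in the isolate's root list.
};

void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
  // A typed index reduces to an offset into the isolate's root table, so the
  // old Heap::kFooRootIndex integer constants are no longer needed.
  lw(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
}
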
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index d59f7c0ce5..4f1ba93a99 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -56,7 +56,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
RelocInfo::CODE_TARGET);
}
@@ -108,7 +107,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiUntag(a0);
// The receiver for the builtin/api call.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
// Set up pointer to last argument.
__ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -176,7 +175,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Preserve the incoming parameters on the stack.
__ SmiTag(a0);
__ Push(cp, a0, a1);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ Push(a3);
// ----------- S t a t e -------------
@@ -201,7 +200,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Else: use TheHoleValue as receiver for constructor call
__ bind(&not_create_implicit_receiver);
- __ LoadRoot(v0, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(v0, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
// -- v0: receiver
@@ -291,7 +290,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label use_receiver, do_throw, leave_frame;
// If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(v0, Heap::kUndefinedValueRootIndex, &use_receiver);
+ __ JumpIfRoot(v0, RootIndex::kUndefinedValue, &use_receiver);
// Otherwise we do a smi check and fall through to check if the return value
// is a valid receiver.
@@ -313,7 +312,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// on-stack receiver as the result.
__ bind(&use_receiver);
__ Ld(v0, MemOperand(sp, 0 * kPointerSize));
- __ JumpIfRoot(v0, Heap::kTheHoleValueRootIndex, &do_throw);
+ __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_frame);
// Restore smi-tagged arguments count from the frame.
@@ -382,7 +381,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
// Push receiver.
@@ -451,7 +450,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a4);
// Push hole as receiver since we do not use it for stepping.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(a1);
}
@@ -488,7 +487,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(a2, RootIndex::kRealStackLimit);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause a2 to become negative.
__ dsubu(a2, sp, a2);
@@ -555,7 +554,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
- __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a4, RootIndex::kUndefinedValue);
__ mov(s1, a4);
__ mov(s2, a4);
__ mov(s3, a4);
@@ -853,7 +852,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ Dsubu(a5, sp, Operand(a4));
- __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(a2, RootIndex::kRealStackLimit);
__ Branch(&ok, hs, a5, Operand(a2));
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
@@ -861,7 +860,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
Label loop_check;
- __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a5, RootIndex::kUndefinedValue);
__ Branch(&loop_check);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
@@ -885,7 +884,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
@@ -933,7 +932,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ dsubu(scratch1, sp, scratch1);
@@ -980,7 +979,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ Dsubu(a3, a3, Operand(1)); // Subtract one for receiver.
}
@@ -1188,7 +1187,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ push(t2);
}
for (int i = 0; i < 3 - j; ++i) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
}
if (j < 3) {
__ jmp(&args_done);
@@ -1287,15 +1286,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
}
-static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
- bool has_handler_frame) {
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Look up the function in the JavaScript frame.
- if (has_handler_frame) {
- __ Ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
+ __ Ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1307,11 +1301,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// If the code object is null, just return to the caller.
__ Ret(eq, v0, Operand(Smi::kZero));
- // Drop any potential handler frame that is be sitting on top of the actual
+ // Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
- if (has_handler_frame) {
- __ LeaveFrame(StackFrame::STUB);
- }
+ __ LeaveFrame(StackFrame::STUB);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
@@ -1332,14 +1324,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ Ret();
}
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, false);
-}
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, true);
-}
-
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1356,7 +1340,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register undefined_value = a3;
Register scratch = a4;
- __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
// 1. Load receiver into a1, argArray into a2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
@@ -1390,7 +1374,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(arg_array, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
__ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
// 4a. Apply the receiver to the given argArray.
@@ -1414,7 +1398,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label done;
__ Branch(&done, ne, a0, Operand(zero_reg));
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ Daddu(a0, a0, Operand(1));
__ bind(&done);
}
@@ -1465,7 +1449,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register undefined_value = a3;
Register scratch = a4;
- __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
// 1. Load target into a1 (if present), argumentsList into a2 (if present),
// remove all arguments from the stack (including the receiver), and push
@@ -1521,7 +1505,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register undefined_value = a4;
Register scratch = a5;
- __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
// 1. Load target into a1 (if present), argumentsList into a2 (if present),
// new.target into a3 (if present, otherwise use target), remove all
@@ -1620,20 +1604,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Register len = a4;
// Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(a5, Heap::kRealStackLimitRootIndex);
- // Make ip the space we have left. The stack might already be overflowed
- // here which will cause ip to become negative.
- __ Dsubu(a5, sp, a5);
- // Check if the arguments will overflow the stack.
- __ dsll(kScratchReg, len, kPointerSizeLog2);
- __ Branch(&done, gt, a5, Operand(kScratchReg)); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
+ Label stack_overflow;
+ Generate_StackOverflowCheck(masm, len, kScratchReg, a5, &stack_overflow);
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -1646,11 +1618,11 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Daddu(a0, a0, len); // The 'len' argument for Call() or Construct().
__ dsll(scratch, len, kPointerSizeLog2);
__ Dsubu(scratch, sp, Operand(scratch));
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(t1, RootIndex::kTheHoleValue);
__ bind(&loop);
__ Ld(a5, MemOperand(src));
__ Branch(&push, ne, a5, Operand(t1));
- __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a5, RootIndex::kUndefinedValue);
__ bind(&push);
__ daddiu(src, src, kPointerSize);
__ Push(a5);
@@ -1660,6 +1632,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
}
// static
@@ -1793,9 +1768,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
- __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex,
- &convert_global_proxy);
- __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
+ __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
__ bind(&convert_global_proxy);
{
// Patch receiver to global proxy.
@@ -1883,8 +1857,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Dsubu(sp, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
- __ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison.
+ __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
+ __ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Daddu(sp, sp, Operand(a5));
{
@@ -1990,7 +1964,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// The calling convention for function-specific ConstructStubs requires
// a2 to contain either an AllocationSite or undefined.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a2, RootIndex::kUndefinedValue);
Label call_generic_stub;
@@ -2037,8 +2011,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Dsubu(sp, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
- __ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison.
+ __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
+ __ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Daddu(sp, sp, Operand(a5));
{
@@ -2235,7 +2209,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a1: function
// a2: expected number of arguments
// a3: new target (passed through to callee)
- __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a5, RootIndex::kUndefinedValue);
__ dsll(a6, a2, kPointerSizeLog2);
__ Dsubu(a4, fp, Operand(a6));
// Adjust for frame.
@@ -2409,7 +2383,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Check result for exception sentinel.
Label exception_returned;
- __ LoadRoot(a4, Heap::kExceptionRootIndex);
+ __ LoadRoot(a4, RootIndex::kException);
__ Branch(&exception_returned, eq, a4, Operand(v0));
// Check that there is no pending exception, otherwise we
@@ -2420,7 +2394,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
__ li(a2, pending_exception_address);
__ Ld(a2, MemOperand(a2));
- __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
// Cannot use Check() here, as it attempts to generate a call into the runtime.
__ Branch(&okay, eq, a4, Operand(a2));
__ stop("Unexpected pending exception");
@@ -2480,9 +2454,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ bind(&zero);
// Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
- // both configurations. It is safe to always do this, because the underlying
- // register is caller-saved and can be arbitrarily clobbered.
+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+ // with both configurations. It is safe to always do this, because the
+ // underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
@@ -2724,6 +2698,10 @@ namespace {
void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
ElementsKind kind) {
+ // Load undefined into the allocation site parameter as required by
+ // ArrayNArgumentsConstructor.
+ __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
+
__ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
.code(),
RelocInfo::CODE_TARGET, lo, a0, Operand(1));
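
Both MIPS ports above replace the open-coded stack check in
Generate_CallOrConstructVarargs with a call to the file-local
Generate_StackOverflowCheck plus a late-bound stack_overflow label. A sketch
of that helper, reconstructed from the inline sequence the hunks delete; the
branch condition and the use of a second scratch register are inferred, and
the ppc/s390 variants take only one scratch:

#define __ ACCESS_MASM(masm)

static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                        Register scratch1, Register scratch2,
                                        Label* stack_overflow) {
  // Compare against the "real" limit, which leaves no slack for interrupts.
  __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
  // scratch1 = space left; already negative if the stack has overflowed.
  __ subu(scratch1, sp, scratch1);
  // scratch2 = bytes the pending arguments will occupy.
  __ sll(scratch2, num_args, kPointerSizeLog2);
  // Signed comparison, so a negative "space left" also takes the slow path.
  __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
}
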
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 01a0e4e371..4446f81e58 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -53,8 +53,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- // tail call a stub
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
RelocInfo::CODE_TARGET);
}
@@ -109,7 +107,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, r3);
__ SmiUntag(r3, SetRC);
// The receiver for the builtin/api call.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
// Set up pointer to last argument.
__ addi(r7, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -184,7 +182,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Preserve the incoming parameters on the stack.
__ SmiTag(r3);
__ Push(cp, r3, r4);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ Push(r6);
// ----------- S t a t e -------------
@@ -209,7 +207,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Else: use TheHoleValue as receiver for constructor call
__ bind(&not_create_implicit_receiver);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r3, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
// -- r3: receiver
@@ -303,7 +301,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label use_receiver, do_throw, leave_frame;
// If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &use_receiver);
+ __ JumpIfRoot(r3, RootIndex::kUndefinedValue, &use_receiver);
// Otherwise we do a smi check and fall through to check if the return value
// is a valid receiver.
@@ -325,7 +323,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// on-stack receiver as the result.
__ bind(&use_receiver);
__ LoadP(r3, MemOperand(sp));
- __ JumpIfRoot(r3, Heap::kTheHoleValueRootIndex, &do_throw);
+ __ JumpIfRoot(r3, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_frame);
// Restore smi-tagged arguments count from the frame.
@@ -402,7 +400,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(sp, RootIndex::kRealStackLimit);
__ blt(&stack_overflow);
// Push receiver.
@@ -468,7 +466,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4, r7);
// Push hole as receiver since we do not use it for stepping.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r4);
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
@@ -505,7 +503,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(r5, RootIndex::kRealStackLimit);
// Make r5 the space we have left. The stack might already be overflowed
// here which will cause r5 to become negative.
__ sub(r5, sp, r5);
@@ -573,7 +571,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r7, RootIndex::kUndefinedValue);
__ mr(r14, r7);
__ mr(r15, r7);
__ mr(r16, r7);
@@ -887,7 +885,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ sub(r8, sp, r5);
- __ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(r0, RootIndex::kRealStackLimit);
__ cmpl(r8, r0);
__ bge(&ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -896,7 +894,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// If ok, push undefined as the initial value for all register file entries.
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r8, RootIndex::kUndefinedValue);
__ ShiftRightImm(r5, r5, Operand(kPointerSizeLog2), SetRC);
__ beq(&no_args, cr0);
__ mtctr(r5);
@@ -920,7 +918,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
@@ -968,7 +966,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(scratch, RootIndex::kRealStackLimit);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ sub(scratch, sp, scratch);
@@ -1014,7 +1012,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ mr(r6, r3); // Argument count is correct.
}
@@ -1227,7 +1225,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ push(r7);
}
for (int i = 0; i < 3 - j; ++i) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
}
if (j < 3) {
__ jmp(&args_done);
@@ -1327,15 +1325,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Ret();
}
-static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
- bool has_handler_frame) {
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Look up the function in the JavaScript frame.
- if (has_handler_frame) {
- __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r3, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
+ __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r3, JavaScriptFrameConstants::kFunctionOffset));
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -1352,11 +1345,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ bind(&skip);
- // Drop any potential handler frame that is be sitting on top of the actual
+ // Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
- if (has_handler_frame) {
- __ LeaveFrame(StackFrame::STUB);
- }
+ __ LeaveFrame(StackFrame::STUB);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
@@ -1386,14 +1377,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
}
}
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, false);
-}
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, true);
-}
-
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1413,7 +1396,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register scratch = r7;
__ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
__ add(new_sp, sp, arg_size);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ mr(r5, scratch);
__ LoadP(r4, MemOperand(new_sp, 0)); // receiver
__ cmpi(arg_size, Operand(kPointerSize));
@@ -1438,8 +1421,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(r5, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r5, RootIndex::kNullValue, &no_arguments);
+ __ JumpIfRoot(r5, RootIndex::kUndefinedValue, &no_arguments);
// 4a. Apply the receiver to the given argArray.
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
@@ -1462,7 +1445,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Label done;
__ cmpi(r3, Operand::Zero());
__ bne(&done);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ addi(r3, r3, Operand(1));
__ bind(&done);
}
@@ -1517,7 +1500,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register scratch = r7;
__ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
__ add(new_sp, sp, arg_size);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r4, RootIndex::kUndefinedValue);
__ mr(scratch, r4);
__ mr(r5, r4);
__ cmpi(arg_size, Operand(kPointerSize));
@@ -1567,7 +1550,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register new_sp = r7;
__ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
__ add(new_sp, sp, arg_size);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r4, RootIndex::kUndefinedValue);
__ mr(r5, r4);
__ mr(r6, r4);
__ StoreP(r4, MemOperand(new_sp, 0)); // receiver (undefined)
@@ -1666,21 +1649,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
// Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
- // Make ip the space we have left. The stack might already be overflowed
- // here which will cause ip to become negative.
- __ sub(ip, sp, ip);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftImm(r0, r7, Operand(kPointerSizeLog2));
- __ cmp(ip, r0); // Signed comparison.
- __ bgt(&done);
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
+ Label stack_overflow;
+ Generate_StackOverflowCheck(masm, r7, ip, &stack_overflow);
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -1692,9 +1662,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ mtctr(r7);
__ bind(&loop);
__ LoadPU(ip, MemOperand(r5, kPointerSize));
- __ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(ip, RootIndex::kTheHoleValue);
__ bne(&skip);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(ip, RootIndex::kUndefinedValue);
__ bind(&skip);
__ push(ip);
__ bdnz(&loop);
@@ -1704,6 +1674,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
}
// static
@@ -1840,9 +1813,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bge(&done_convert);
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
- __ JumpIfRoot(r6, Heap::kUndefinedValueRootIndex,
- &convert_global_proxy);
- __ JumpIfNotRoot(r6, Heap::kNullValueRootIndex, &convert_to_object);
+ __ JumpIfRoot(r6, RootIndex::kUndefinedValue, &convert_global_proxy);
+ __ JumpIfNotRoot(r6, RootIndex::kNullValue, &convert_to_object);
__ bind(&convert_global_proxy);
{
// Patch receiver to global proxy.
@@ -1930,7 +1902,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(sp, RootIndex::kRealStackLimit);
__ bgt(&done); // Signed comparison.
// Restore the stack pointer.
__ mr(sp, r9);
@@ -2062,7 +2034,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// The calling convention for function-specific ConstructStubs requires
// r5 to contain either an AllocationSite or undefined.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r5, RootIndex::kUndefinedValue);
Label call_generic_stub;
@@ -2246,7 +2218,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r0, RootIndex::kUndefinedValue);
__ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
__ sub(r7, fp, r7);
// Adjust for frame.
@@ -2435,7 +2407,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Check result for exception sentinel.
Label exception_returned;
- __ CompareRoot(r3, Heap::kExceptionRootIndex);
+ __ CompareRoot(r3, RootIndex::kException);
__ beq(&exception_returned);
// Check that there is no pending exception, otherwise we
@@ -2447,7 +2419,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Move(r6, pending_exception_address);
__ LoadP(r6, MemOperand(r6));
- __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r6, RootIndex::kTheHoleValue);
// Cannot use Check() here, as it attempts to generate a call into the runtime.
__ beq(&okay);
__ stop("Unexpected pending exception");
@@ -2512,10 +2484,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
- // Reset the masking register.
- if (FLAG_branch_load_poisoning) {
- __ ResetSpeculationPoisonRegister();
- }
+ // Reset the masking register. This is done independent of the underlying
+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+ // with both configurations. It is safe to always do this, because the
+ // underlying register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@@ -2743,6 +2716,10 @@ namespace {
void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
ElementsKind kind) {
+ // Load undefined into the allocation site parameter as required by
+ // ArrayNArgumentsConstructor.
+ __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
+
__ cmpli(r3, Operand(1));
__ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
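
The MIPS hunks earlier in this diff switch the bound-function stack checks
from a signed gt to an unsigned hs comparison, while the ppc/s390 checks above
keep the signed bgt. A worked example of why unsigned is the safer choice when
the operands are addresses; the values are hypothetical and the snippet is
standalone C++ rather than generated code:

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical: a stack pointer mapped above 2 GiB, limit just below it.
  uint32_t sp = 0x80001000u;
  uint32_t real_stack_limit = 0x7ffff000u;

  // Old check ("gt"): reinterprets the addresses as signed integers, so the
  // high mapping makes sp look negative and the check throws spuriously.
  bool signed_ok =
      static_cast<int32_t>(sp) > static_cast<int32_t>(real_stack_limit);

  // New check ("hs", unsigned higher-or-same): compares them as addresses.
  bool unsigned_ok = sp >= real_stack_limit;

  assert(!signed_ok && unsigned_ok);
  return 0;
}
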
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index b92011c38b..198ba0971d 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -53,8 +53,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- // tail call a stub
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
RelocInfo::CODE_TARGET);
}
@@ -108,7 +106,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, r2);
__ SmiUntag(r2);
// The receiver for the builtin/api call.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
// Set up pointer to last argument.
__ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
@@ -178,7 +176,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Preserve the incoming parameters on the stack.
__ SmiTag(r2);
__ Push(cp, r2, r3);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ Push(r5);
// ----------- S t a t e -------------
@@ -203,7 +201,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Else: use TheHoleValue as receiver for constructor call
__ bind(&not_create_implicit_receiver);
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r2, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
// -- r2: receiver
@@ -295,7 +293,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label use_receiver, do_throw, leave_frame;
// If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &use_receiver);
+ __ JumpIfRoot(r2, RootIndex::kUndefinedValue, &use_receiver);
// Otherwise we do a smi check and fall through to check if the return value
// is a valid receiver.
@@ -317,7 +315,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// on-stack receiver as the result.
__ bind(&use_receiver);
__ LoadP(r2, MemOperand(sp));
- __ JumpIfRoot(r2, Heap::kTheHoleValueRootIndex, &do_throw);
+ __ JumpIfRoot(r2, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_frame);
// Restore smi-tagged arguments count from the frame.
@@ -393,7 +391,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(sp, RootIndex::kRealStackLimit);
__ blt(&stack_overflow);
// Push receiver.
@@ -468,7 +466,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r3, r6);
// Push hole as receiver since we do not use it for stepping.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r3);
__ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
@@ -505,7 +503,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadRoot(r4, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(r4, RootIndex::kRealStackLimit);
// Make r4 the space we have left. The stack might already be overflowed
// here which will cause r4 to become negative.
__ SubP(r4, sp, r4);
@@ -581,7 +579,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r6, RootIndex::kUndefinedValue);
__ LoadRR(r7, r6);
__ LoadRR(r8, r6);
__ LoadRR(r9, r6);
@@ -890,7 +888,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ SubP(r8, sp, r4);
- __ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(r0, RootIndex::kRealStackLimit);
__ CmpLogicalP(r8, r0);
__ bge(&ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -899,7 +897,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// If ok, push undefined as the initial value for all register file entries.
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r8, RootIndex::kUndefinedValue);
__ ShiftRightP(r4, r4, Operand(kPointerSizeLog2));
__ LoadAndTestP(r4, r4);
__ beq(&no_args);
@@ -924,7 +922,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
@@ -973,7 +971,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(scratch, RootIndex::kRealStackLimit);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ SubP(scratch, sp, scratch);
@@ -1020,7 +1018,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ LoadRR(r5, r2); // Argument count is correct.
}
@@ -1230,7 +1228,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ push(r6);
}
for (int i = 0; i < 3 - j; ++i) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
}
if (j < 3) {
__ jmp(&args_done);
@@ -1329,15 +1327,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Ret();
}
-static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
- bool has_handler_frame) {
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Look up the function in the JavaScript frame.
- if (has_handler_frame) {
- __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r2, MemOperand(r2, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
+ __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r2, MemOperand(r2, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1354,11 +1347,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ bind(&skip);
- // Drop any potential handler frame that is be sitting on top of the actual
+ // Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
- if (has_handler_frame) {
- __ LeaveFrame(StackFrame::STUB);
- }
+ __ LeaveFrame(StackFrame::STUB);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
@@ -1380,14 +1371,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ Ret();
}
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, false);
-}
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, true);
-}
-
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1407,7 +1390,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ LoadRR(r4, scratch);
__ LoadP(r3, MemOperand(new_sp, 0)); // receiver
__ CmpP(arg_size, Operand(kPointerSize));
@@ -1432,8 +1415,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(r4, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(r4, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r4, RootIndex::kNullValue, &no_arguments);
+ __ JumpIfRoot(r4, RootIndex::kUndefinedValue, &no_arguments);
// 4a. Apply the receiver to the given argArray.
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
@@ -1456,7 +1439,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Label done;
__ CmpP(r2, Operand::Zero());
__ bne(&done, Label::kNear);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ AddP(r2, Operand(1));
__ bind(&done);
}
@@ -1511,7 +1494,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r3, RootIndex::kUndefinedValue);
__ LoadRR(scratch, r3);
__ LoadRR(r4, r3);
__ CmpP(arg_size, Operand(kPointerSize));
@@ -1561,7 +1544,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register new_sp = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r3, RootIndex::kUndefinedValue);
__ LoadRR(r4, r3);
__ LoadRR(r5, r3);
__ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined)
@@ -1670,21 +1653,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
// Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
- // Make ip the space we have left. The stack might already be overflowed
- // here which will cause ip to become negative.
- __ SubP(ip, sp, ip);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, r6, Operand(kPointerSizeLog2));
- __ CmpP(ip, r0); // Signed comparison.
- __ bgt(&done);
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
+ Label stack_overflow;
+ Generate_StackOverflowCheck(masm, r6, ip, &stack_overflow);
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -1697,9 +1667,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&loop);
__ LoadP(ip, MemOperand(r4, kPointerSize));
__ la(r4, MemOperand(r4, kPointerSize));
- __ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(ip, RootIndex::kTheHoleValue);
__ bne(&skip, Label::kNear);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(ip, RootIndex::kUndefinedValue);
__ bind(&skip);
__ push(ip);
__ BranchOnCount(r1, &loop);
@@ -1709,6 +1679,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
}
// static
@@ -1845,9 +1818,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bge(&done_convert);
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
- __ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex,
- &convert_global_proxy);
- __ JumpIfNotRoot(r5, Heap::kNullValueRootIndex, &convert_to_object);
+ __ JumpIfRoot(r5, RootIndex::kUndefinedValue, &convert_global_proxy);
+ __ JumpIfNotRoot(r5, RootIndex::kNullValue, &convert_to_object);
__ bind(&convert_global_proxy);
{
// Patch receiver to global proxy.
@@ -1936,7 +1908,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(sp, RootIndex::kRealStackLimit);
__ bgt(&done); // Signed comparison.
// Restore the stack pointer.
__ LoadRR(sp, r8);
@@ -2069,7 +2041,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// The calling convention for function-specific ConstructStubs requires
// r4 to contain either an AllocationSite or undefined.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r4, RootIndex::kUndefinedValue);
Label call_generic_stub;
@@ -2251,7 +2223,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// r3: function
// r4: expected number of arguments
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r0, RootIndex::kUndefinedValue);
__ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
__ SubP(r6, fp, r6);
// Adjust for frame.
@@ -2408,6 +2380,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ LoadRR(r3, r2);
__ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
isolate_reg = r5;
+ // Clang does not preserve r2 (the result buffer) across the call, so
+ // save it in r8 (a preserved register) before the call.
+ __ LoadRR(r8, r2);
}
// Call C built-in.
__ Move(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
@@ -2433,13 +2408,14 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
+ __ LoadRR(r2, r8);
__ LoadP(r3, MemOperand(r2, kPointerSize));
__ LoadP(r2, MemOperand(r2));
}
// Check result for exception sentinel.
Label exception_returned;
- __ CompareRoot(r2, Heap::kExceptionRootIndex);
+ __ CompareRoot(r2, RootIndex::kException);
__ beq(&exception_returned, Label::kNear);
// Check that there is no pending exception, otherwise we
@@ -2450,7 +2426,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
__ Move(r1, pending_exception_address);
__ LoadP(r1, MemOperand(r1));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r1, RootIndex::kTheHoleValue);
// Cannot use check here, as it attempts to generate a call into the runtime.
__ beq(&okay, Label::kNear);
__ stop("Unexpected pending exception");
@@ -2511,10 +2487,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
- // Reset the masking register.
- if (FLAG_branch_load_poisoning) {
- __ ResetSpeculationPoisonRegister();
- }
+ // Reset the masking register. This is done independently of the underlying
+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+ // with both configurations. It is safe to always do this, because the
+ // underlying register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
__ Move(r3, pending_handler_entrypoint_address);
@@ -2729,6 +2706,10 @@ namespace {
void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
ElementsKind kind) {
+ // Load undefined into the allocation site parameter as required by
+ // ArrayNArgumentsConstructor.
+ __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
+
__ CmpLogicalP(r2, Operand(1));
__ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 93a2b8b5f3..630473f407 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -12,6 +12,7 @@
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-generator.h"
+#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/shared-function-info.h"
@@ -26,6 +27,7 @@ BUILTIN_LIST_C(FORWARD_DECLARE)
#undef FORWARD_DECLARE
namespace {
+
void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
const char* name) {
PROFILE(isolate, CodeCreateEvent(CodeEventListener::BUILTIN_TAG,
@@ -48,10 +50,11 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
return options;
}
- CodeRange* code_range = isolate->heap()->memory_allocator()->code_range();
+ const base::AddressRegion& code_range =
+ isolate->heap()->memory_allocator()->code_range();
bool pc_relative_calls_fit_in_code_range =
- code_range->valid() &&
- code_range->size() <= kMaxPCRelativeCodeRangeInMB * MB;
+ !code_range.is_empty() &&
+ code_range.size() <= kMaxPCRelativeCodeRangeInMB * MB;
options.isolate_independent_code = true;
options.use_pc_relative_calls_and_jumps = pc_relative_calls_fit_in_code_range;
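
The predicate itself is unchanged by the CodeRange-to-AddressRegion migration; restated as standalone C++ (constant names assumed from the surrounding code):

#include <cstdint>
// Pc-relative calls are only usable when all generated code sits in one
// region small enough for the ISA's pc-relative branch displacement.
constexpr uint64_t kMB = 1024 * 1024;
bool PcRelativeCallsFit(uint64_t region_size, uint64_t max_range_mb) {
  return region_size != 0 &&           // !code_range.is_empty()
         region_size <= max_range_mb * kMB;
}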
@@ -180,6 +183,7 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
+
} // anonymous namespace
// static
@@ -246,26 +250,36 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
}
}
-#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
namespace {
+
Code* GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
- const char* name, interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale) {
- if (!interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
- // TODO(v8:8068): Consider returning something else to avoid placeholders
- // being serialized with the snapshot.
- return nullptr;
- }
+ const char* name,
+ interpreter::OperandScale operand_scale,
+ interpreter::Bytecode bytecode) {
+ DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
Handle<Code> code = interpreter::GenerateBytecodeHandler(
- isolate, bytecode, operand_scale, builtin_index);
+ isolate, bytecode, operand_scale, builtin_index,
+ BuiltinAssemblerOptions(isolate, builtin_index));
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
+
+Code* GenerateLazyBytecodeHandler(Isolate* isolate, int builtin_index,
+ const char* name,
+ interpreter::OperandScale operand_scale) {
+ Handle<Code> code = interpreter::GenerateDeserializeLazyHandler(
+ isolate, operand_scale, builtin_index,
+ BuiltinAssemblerOptions(isolate, builtin_index));
+
+ PostBuildProfileAndTracing(isolate, *code, name);
+
+ return *code;
+}
+
} // namespace
-#endif
// static
void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
@@ -309,19 +323,15 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
CallDescriptors::InterfaceDescriptor, #Name, 1); \
AddBuiltin(builtins, index++, code);
-#define BUILD_BCH_WITH_SCALE(Code, Scale) \
+#define BUILD_BCH(Name, OperandScale, Bytecode) \
code = GenerateBytecodeHandler(isolate, index, Builtins::name(index), \
- interpreter::Bytecode::k##Code, \
- interpreter::OperandScale::k##Scale); \
- if (code) { \
- AddBuiltin(builtins, index, code); \
- } \
- ++index;
-
-#define BUILD_BCH(Code, ...) \
- BUILD_BCH_WITH_SCALE(Code, Single) \
- BUILD_BCH_WITH_SCALE(Code, Double) \
- BUILD_BCH_WITH_SCALE(Code, Quadruple)
+ OperandScale, Bytecode); \
+ AddBuiltin(builtins, index++, code);
+
+#define BUILD_DLH(Name, OperandScale) \
+ code = GenerateLazyBytecodeHandler(isolate, index, Builtins::name(index), \
+ OperandScale); \
+ AddBuiltin(builtins, index++, code);
#define BUILD_ASM(Name) \
code = BuildWithMacroAssembler(isolate, index, Builtins::Generate_##Name, \
@@ -329,7 +339,7 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
AddBuiltin(builtins, index++, code);
BUILTIN_LIST(BUILD_CPP, BUILD_API, BUILD_TFJ, BUILD_TFC, BUILD_TFS, BUILD_TFH,
- BUILD_BCH, BUILD_ASM);
+ BUILD_BCH, BUILD_DLH, BUILD_ASM);
#undef BUILD_CPP
#undef BUILD_API
@@ -338,7 +348,7 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
#undef BUILD_TFS
#undef BUILD_TFH
#undef BUILD_BCH
-#undef BUILD_BCH_WITH_SCALE
+#undef BUILD_DLH
#undef BUILD_ASM
CHECK_EQ(Builtins::builtin_count, index);
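
To make the new macro shape concrete, this is roughly what a single BUILD_BCH invocation expands to after this change (the bytecode and scale shown are illustrative; the real arguments are supplied by BUILTIN_LIST):

// Illustrative expansion of BUILD_BCH(LdaZero, OperandScale::kSingle,
// Bytecode::kLdaZero); not literal preprocessor output.
code = GenerateBytecodeHandler(isolate, index, Builtins::name(index),
                               interpreter::OperandScale::kSingle,
                               interpreter::Bytecode::kLdaZero);
AddBuiltin(builtins, index++, code);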
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 7552b094e7..b3ff7dbca1 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -16,7 +16,7 @@ module typed_array {
type LoadFn = builtin(Context, JSTypedArray, Smi) => Object;
type StoreFn = builtin(Context, JSTypedArray, Smi, Object) => Object;
- macro KindForArrayType<T : type>(): constexpr ElementsKind;
+ macro KindForArrayType<T: type>(): constexpr ElementsKind;
KindForArrayType<FixedUint8Array>(): constexpr ElementsKind {
return UINT8_ELEMENTS;
}
@@ -51,17 +51,17 @@ module typed_array {
return BIGINT64_ELEMENTS;
}
- builtin LoadFixedElement<T : type>(
+ builtin LoadFixedElement<T: type>(
context: Context, array: JSTypedArray, index: Smi): Object {
return LoadFixedTypedArrayElementAsTagged(
array.data_ptr, index, KindForArrayType<T>(), SMI_PARAMETERS);
}
- builtin StoreFixedElement<T : type>(
+ builtin StoreFixedElement<T: type>(
context: Context, array: JSTypedArray, index: Smi,
value: Object): Object {
const elements: FixedTypedArrayBase =
- unsafe_cast<FixedTypedArrayBase>(array.elements);
+ UnsafeCast<FixedTypedArrayBase>(array.elements);
StoreFixedTypedArrayElementFromTagged(
context, elements, index, value, KindForArrayType<T>(), SMI_PARAMETERS);
return Undefined;
@@ -69,7 +69,8 @@ module typed_array {
macro CallCompareWithDetachedCheck(
context: Context, array: JSTypedArray, comparefn: Callable, a: Object,
- b: Object): Number labels Detached {
+ b: Object): Number
+ labels Detached {
// a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
const v: Number =
ToNumber_Inline(context, Call(context, comparefn, Undefined, a, b));
@@ -86,37 +87,37 @@ module typed_array {
// InsertionSort is used for smaller arrays.
macro TypedArrayInsertionSort(
- context: Context, array: JSTypedArray, from_arg: Smi, to_arg: Smi,
- comparefn: Callable, Load: LoadFn, Store: StoreFn)
+ context: Context, array: JSTypedArray, fromArg: Smi, toArg: Smi,
+ comparefn: Callable, load: LoadFn, store: StoreFn)
labels Detached {
- let from: Smi = from_arg;
- let to: Smi = to_arg;
+ let from: Smi = fromArg;
+ let to: Smi = toArg;
if (IsDetachedBuffer(array.buffer)) goto Detached;
for (let i: Smi = from + 1; i < to; ++i) {
- const element: Object = Load(context, array, i);
+ const element: Object = load(context, array, i);
let j: Smi = i - 1;
for (; j >= from; --j) {
- const tmp: Object = Load(context, array, j);
+ const tmp: Object = load(context, array, j);
const order: Number = CallCompareWithDetachedCheck(
context, array, comparefn, tmp, element) otherwise Detached;
if (order > 0) {
- Store(context, array, j + 1, tmp);
+ store(context, array, j + 1, tmp);
} else {
break;
}
}
- Store(context, array, j + 1, element);
+ store(context, array, j + 1, element);
}
}
macro TypedArrayQuickSortImpl(
- context: Context, array: JSTypedArray, from_arg: Smi, to_arg: Smi,
- comparefn: Callable, Load: LoadFn, Store: StoreFn)
+ context: Context, array: JSTypedArray, fromArg: Smi, toArg: Smi,
+ comparefn: Callable, load: LoadFn, store: StoreFn)
labels Detached {
- let from: Smi = from_arg;
- let to: Smi = to_arg;
+ let from: Smi = fromArg;
+ let to: Smi = toArg;
while (to - from > 1) {
if (to - from <= 10) {
@@ -124,21 +125,21 @@ module typed_array {
// Currently it does not make any difference when the
// benchmarks are run locally.
TypedArrayInsertionSort(
- context, array, from, to, comparefn, Load, Store)
- otherwise Detached;
+ context, array, from, to, comparefn, load, store)
+ otherwise Detached;
break;
}
- // TODO(szuend): Check if a more involved third_index calculation is
+ // TODO(szuend): Check if a more involved thirdIndex calculation is
// worth it for very large arrays.
- const third_index: Smi = from + ((to - from) >>> 1);
+ const thirdIndex: Smi = from + ((to - from) >>> 1);
if (IsDetachedBuffer(array.buffer)) goto Detached;
// Find a pivot as the median of first, last and middle element.
- let v0: Object = Load(context, array, from);
- let v1: Object = Load(context, array, to - 1);
- let v2: Object = Load(context, array, third_index);
+ let v0: Object = load(context, array, from);
+ let v1: Object = load(context, array, to - 1);
+ let v2: Object = load(context, array, thirdIndex);
const c01: Number = CallCompareWithDetachedCheck(
context, array, comparefn, v0, v1) otherwise Detached;
@@ -170,81 +171,82 @@ module typed_array {
}
// v0 <= v1 <= v2.
- Store(context, array, from, v0);
- Store(context, array, to - 1, v2);
+ store(context, array, from, v0);
+ store(context, array, to - 1, v2);
const pivot: Object = v1;
- let low_end: Smi = from + 1; // Upper bound of elems lower than pivot.
- let high_start: Smi = to - 1; // Lower bound of elems greater than pivot.
+ let lowEnd: Smi = from + 1; // Upper bound of elems lower than pivot.
+ let highStart: Smi = to - 1; // Lower bound of elems greater than pivot.
- let low_end_value: Object = Load(context, array, low_end);
- Store(context, array, third_index, low_end_value);
- Store(context, array, low_end, pivot);
+ let lowEndValue: Object = load(context, array, lowEnd);
+ store(context, array, thirdIndex, lowEndValue);
+ store(context, array, lowEnd, pivot);
- // From low_end to idx are elements equal to pivot.
- // From idx to high_start are elements that haven"t been compared yet.
- for (let idx: Smi = low_end + 1; idx < high_start; idx++) {
- let element: Object = Load(context, array, idx);
+ // From lowEnd to idx are elements equal to pivot.
+ // From idx to highStart are elements that haven't been compared yet.
+ for (let idx: Smi = lowEnd + 1; idx < highStart; idx++) {
+ let element: Object = load(context, array, idx);
let order: Number = CallCompareWithDetachedCheck(
context, array, comparefn, element, pivot) otherwise Detached;
if (order < 0) {
- low_end_value = Load(context, array, low_end);
- Store(context, array, idx, low_end_value);
- Store(context, array, low_end, element);
- low_end++;
+ lowEndValue = load(context, array, lowEnd);
+ store(context, array, idx, lowEndValue);
+ store(context, array, lowEnd, element);
+ lowEnd++;
} else if (order > 0) {
- let break_for: bool = false;
+ let breakFor: bool = false;
while (order > 0) {
- high_start--;
- if (high_start == idx) {
- break_for = true;
+ highStart--;
+ if (highStart == idx) {
+ breakFor = true;
break;
}
- const top_elem: Object = Load(context, array, high_start);
+ const topElement: Object = load(context, array, highStart);
order = CallCompareWithDetachedCheck(
- context, array, comparefn, top_elem, pivot) otherwise Detached;
+ context, array, comparefn, topElement, pivot)
+ otherwise Detached;
}
- if (break_for) {
+ if (breakFor) {
break;
}
- const high_start_value: Object = Load(context, array, high_start);
- Store(context, array, idx, high_start_value);
- Store(context, array, high_start, element);
+ const highStartValue: Object = load(context, array, highStart);
+ store(context, array, idx, highStartValue);
+ store(context, array, highStart, element);
if (order < 0) {
- element = Load(context, array, idx);
+ element = load(context, array, idx);
- low_end_value = Load(context, array, low_end);
- Store(context, array, idx, low_end_value);
- Store(context, array, low_end, element);
- low_end++;
+ lowEndValue = load(context, array, lowEnd);
+ store(context, array, idx, lowEndValue);
+ store(context, array, lowEnd, element);
+ lowEnd++;
}
}
}
- if ((to - high_start) < (low_end - from)) {
+ if ((to - highStart) < (lowEnd - from)) {
TypedArrayQuickSort(
- context, array, high_start, to, comparefn, Load, Store);
- to = low_end;
+ context, array, highStart, to, comparefn, load, store);
+ to = lowEnd;
} else {
TypedArrayQuickSort(
- context, array, from, low_end, comparefn, Load, Store);
- from = high_start;
+ context, array, from, lowEnd, comparefn, load, store);
+ from = highStart;
}
}
}
builtin TypedArrayQuickSort(
context: Context, array: JSTypedArray, from: Smi, to: Smi,
- comparefn: Callable, Load: LoadFn, Store: StoreFn): JSTypedArray {
+ comparefn: Callable, load: LoadFn, store: StoreFn): JSTypedArray {
try {
- TypedArrayQuickSortImpl(context, array, from, to, comparefn, Load, Store)
- otherwise Detached;
+ TypedArrayQuickSortImpl(context, array, from, to, comparefn, load, store)
+ otherwise Detached;
}
label Detached {
ThrowTypeError(
@@ -258,10 +260,10 @@ module typed_array {
context: Context, receiver: Object, ...arguments): JSTypedArray {
// 1. If comparefn is not undefined and IsCallable(comparefn) is false,
// throw a TypeError exception.
- const comparefn_obj: Object =
+ const comparefnObj: Object =
arguments.length > 0 ? arguments[0] : Undefined;
- if (comparefn_obj != Undefined && !TaggedIsCallable(comparefn_obj)) {
- ThrowTypeError(context, kBadSortComparisonFunction, comparefn_obj);
+ if (comparefnObj != Undefined && !TaggedIsCallable(comparefnObj)) {
+ ThrowTypeError(context, kBadSortComparisonFunction, comparefnObj);
}
// 2. Let obj be the this value.
@@ -273,7 +275,7 @@ module typed_array {
ValidateTypedArray(context, obj, '%TypedArray%.prototype.sort');
// Default sorting is done in C++ using std::sort
- if (comparefn_obj == Undefined) {
+ if (comparefnObj == Undefined) {
return TypedArraySortFast(context, obj);
}
@@ -282,48 +284,48 @@ module typed_array {
try {
const comparefn: Callable =
- cast<Callable>(comparefn_obj) otherwise CastError;
+ Cast<Callable>(comparefnObj) otherwise CastError;
let loadfn: LoadFn;
let storefn: StoreFn;
- let elements_kind: ElementsKind = array.elements_kind;
+ let elementsKind: ElementsKind = array.elements_kind;
- if (IsElementsKindGreaterThan(elements_kind, UINT32_ELEMENTS)) {
- if (elements_kind == INT32_ELEMENTS) {
+ if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) {
+ if (elementsKind == INT32_ELEMENTS) {
loadfn = LoadFixedElement<FixedInt32Array>;
storefn = StoreFixedElement<FixedInt32Array>;
- } else if (elements_kind == FLOAT32_ELEMENTS) {
+ } else if (elementsKind == FLOAT32_ELEMENTS) {
loadfn = LoadFixedElement<FixedFloat32Array>;
storefn = StoreFixedElement<FixedFloat32Array>;
- } else if (elements_kind == FLOAT64_ELEMENTS) {
+ } else if (elementsKind == FLOAT64_ELEMENTS) {
loadfn = LoadFixedElement<FixedFloat64Array>;
storefn = StoreFixedElement<FixedFloat64Array>;
- } else if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
+ } else if (elementsKind == UINT8_CLAMPED_ELEMENTS) {
loadfn = LoadFixedElement<FixedUint8ClampedArray>;
storefn = StoreFixedElement<FixedUint8ClampedArray>;
- } else if (elements_kind == BIGUINT64_ELEMENTS) {
+ } else if (elementsKind == BIGUINT64_ELEMENTS) {
loadfn = LoadFixedElement<FixedBigUint64Array>;
storefn = StoreFixedElement<FixedBigUint64Array>;
- } else if (elements_kind == BIGINT64_ELEMENTS) {
+ } else if (elementsKind == BIGINT64_ELEMENTS) {
loadfn = LoadFixedElement<FixedBigInt64Array>;
storefn = StoreFixedElement<FixedBigInt64Array>;
} else {
unreachable;
}
} else {
- if (elements_kind == UINT8_ELEMENTS) {
+ if (elementsKind == UINT8_ELEMENTS) {
loadfn = LoadFixedElement<FixedUint8Array>;
storefn = StoreFixedElement<FixedUint8Array>;
- } else if (elements_kind == INT8_ELEMENTS) {
+ } else if (elementsKind == INT8_ELEMENTS) {
loadfn = LoadFixedElement<FixedInt8Array>;
storefn = StoreFixedElement<FixedInt8Array>;
- } else if (elements_kind == UINT16_ELEMENTS) {
+ } else if (elementsKind == UINT16_ELEMENTS) {
loadfn = LoadFixedElement<FixedUint16Array>;
storefn = StoreFixedElement<FixedUint16Array>;
- } else if (elements_kind == INT16_ELEMENTS) {
+ } else if (elementsKind == INT16_ELEMENTS) {
loadfn = LoadFixedElement<FixedInt16Array>;
storefn = StoreFixedElement<FixedInt16Array>;
- } else if (elements_kind == UINT32_ELEMENTS) {
+ } else if (elementsKind == UINT32_ELEMENTS) {
loadfn = LoadFixedElement<FixedUint32Array>;
storefn = StoreFixedElement<FixedUint32Array>;
} else {
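
The control structure of TypedArrayQuickSortImpl above recurses into the smaller partition and iterates on the larger one, keeping the call depth at O(log n). A standalone C++ sketch of that strategy (std::partition stands in for the median-of-three pivot logic; this is not the V8 implementation):

#include <algorithm>
// Sorts a[from, to). Equal-to-pivot elements are excluded from both
// sub-ranges, which guarantees progress on every iteration.
void QuickSort(int* a, int from, int to) {
  while (to - from > 1) {
    int pivot = a[from + (to - from) / 2];
    int* first = std::partition(a + from, a + to,
                                [pivot](int x) { return x < pivot; });
    int* second = std::partition(first, a + to,
                                 [pivot](int x) { return x == pivot; });
    int low_end = static_cast<int>(first - a);      // end of "< pivot"
    int high_start = static_cast<int>(second - a);  // start of "> pivot"
    if (to - high_start < low_end - from) {
      QuickSort(a, high_start, to);  // smaller half: recurse
      to = low_end;                  // larger half: iterate
    } else {
      QuickSort(a, from, low_end);
      from = high_start;
    }
  }
}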
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 5c2094105c..2bc7768417 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -87,7 +87,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(rcx);
// The receiver for the builtin/api call.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
// Set up pointer to last argument.
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
@@ -135,6 +135,26 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ ret(0);
}
+
+void Generate_StackOverflowCheck(
+ MacroAssembler* masm, Register num_args, Register scratch,
+ Label* stack_overflow,
+ Label::Distance stack_overflow_distance = Label::kFar) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(kScratchRegister, RootIndex::kRealStackLimit);
+ __ movp(scratch, rsp);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here, which will cause scratch to become negative.
+ __ subp(scratch, kScratchRegister);
+ __ sarp(scratch, Immediate(kPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ __ cmpp(scratch, num_args);
+ // Signed comparison.
+ __ j(less_equal, stack_overflow, stack_overflow_distance);
+}
+
} // namespace
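
In plain C++, the arithmetic this helper emits is the following (kPointerSizeLog2 is 3 on x64; a sketch, not the V8 API):

#include <cstdint>
// The subtraction is done in signed arithmetic on purpose: if the stack has
// already overflowed, words_left is negative and the check still fails.
bool WouldOverflowStack(uintptr_t sp, uintptr_t real_limit, int64_t num_args) {
  int64_t words_left = static_cast<int64_t>(sp - real_limit) >> 3;  // sarp
  return words_left <= num_args;  // j(less_equal, stack_overflow) taken
}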
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -157,7 +177,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(rsi);
__ Push(rcx);
__ Push(rdi);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ Push(rdx);
// ----------- S t a t e -------------
@@ -181,7 +201,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Else: use TheHoleValue as receiver for constructor call
__ bind(&not_create_implicit_receiver);
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(rax, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
// -- rax implicit receiver
@@ -221,6 +241,21 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Set up pointer to last argument.
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+ // Check if we have enough stack space to push all arguments.
+ // Argument count in rax. Clobbers rcx.
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
+ __ jmp(&enough_stack_space, Label::kNear);
+
+ __ bind(&stack_overflow);
+ // Restore context from the frame.
+ __ movp(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
+
+ __ bind(&enough_stack_space);
+
// Copy arguments and receiver to the expression stack.
Label loop, entry;
__ movp(rcx, rax);
@@ -269,8 +304,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label use_receiver, do_throw, leave_frame;
// If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(rax, Heap::kUndefinedValueRootIndex, &use_receiver,
- Label::kNear);
+ __ JumpIfRoot(rax, RootIndex::kUndefinedValue, &use_receiver, Label::kNear);
// Otherwise we do a smi check and fall through to check if the return value
// is a valid receiver.
@@ -292,7 +326,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// on-stack receiver as the result.
__ bind(&use_receiver);
__ movp(rax, Operand(rsp, 0 * kPointerSize));
- __ JumpIfRoot(rax, Heap::kTheHoleValueRootIndex, &do_throw, Label::kNear);
+ __ JumpIfRoot(rax, RootIndex::kTheHoleValue, &do_throw, Label::kNear);
__ bind(&leave_frame);
// Restore the arguments count.
@@ -317,25 +351,6 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-static void Generate_StackOverflowCheck(
- MacroAssembler* masm, Register num_args, Register scratch,
- Label* stack_overflow,
- Label::Distance stack_overflow_distance = Label::kFar) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movp(scratch, rsp);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ subp(scratch, kScratchRegister);
- __ sarp(scratch, Immediate(kPointerSizeLog2));
- // Check if the arguments will overflow the stack.
- __ cmpp(scratch, num_args);
- // Signed comparison.
- __ j(less_equal, stack_overflow, stack_overflow_distance);
-}
-
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -533,7 +548,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(rsp, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(rsp, RootIndex::kRealStackLimit);
__ j(below, &stack_overflow);
// Pop return address.
@@ -602,7 +617,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(rdx);
__ Push(rdi);
// Push hole as receiver since we do not use it for stepping.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(rdx);
__ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
@@ -905,7 +920,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label ok;
__ movp(rax, rsp);
__ subp(rax, rcx);
- __ CompareRoot(rax, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(rax, RootIndex::kRealStackLimit);
__ j(above_equal, &ok, Label::kNear);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
@@ -913,7 +928,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
Label loop_check;
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(rax, RootIndex::kUndefinedValue);
__ j(always, &loop_check, Label::kNear);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
@@ -937,7 +952,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
@@ -1026,7 +1041,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ decl(rcx); // Subtract one for receiver.
}
@@ -1251,7 +1266,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
rbp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
}
for (int i = 0; i < 3 - j; ++i) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
}
if (j < 3) {
__ jmp(&args_done, Label::kNear);
@@ -1370,7 +1385,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
{
Label no_arg_array, no_this_arg;
StackArgumentsAccessor args(rsp, rax);
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(rdx, RootIndex::kUndefinedValue);
__ movp(rbx, rdx);
__ movp(rdi, args.GetReceiverOperand());
__ testp(rax, rax);
@@ -1402,9 +1417,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(rbx, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
- __ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &no_arguments,
- Label::kNear);
+ __ JumpIfRoot(rbx, RootIndex::kNullValue, &no_arguments, Label::kNear);
+ __ JumpIfRoot(rbx, RootIndex::kUndefinedValue, &no_arguments, Label::kNear);
// 4a. Apply the receiver to the given argArray.
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
@@ -1438,7 +1452,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ testp(rax, rax);
__ j(not_zero, &done, Label::kNear);
__ PopReturnAddressTo(rbx);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ PushReturnAddressFrom(rbx);
__ incp(rax);
__ bind(&done);
@@ -1488,7 +1502,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
{
Label done;
StackArgumentsAccessor args(rsp, rax);
- __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movp(rdx, rdi);
__ movp(rbx, rdi);
__ cmpp(rax, Immediate(1));
@@ -1539,7 +1553,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
{
Label done;
StackArgumentsAccessor args(rsp, rax);
- __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movp(rdx, rdi);
__ movp(rbx, rdi);
__ cmpp(rax, Immediate(1));
@@ -1554,7 +1568,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&done);
__ PopReturnAddressTo(rcx);
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ PushReturnAddressFrom(rcx);
}
@@ -1601,7 +1615,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
__ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
RelocInfo::CODE_TARGET);
}
@@ -1701,7 +1714,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill remaining expected arguments with undefined values.
Label fill;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
__ bind(&fill);
__ incp(r8);
__ Push(kScratchRegister);
@@ -1777,23 +1790,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&ok);
}
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movp(r8, rsp);
- // Make r8 the space we have left. The stack might already be overflowed
- // here which will cause r8 to become negative.
- __ subp(r8, kScratchRegister);
- __ sarp(r8, Immediate(kPointerSizeLog2));
- // Check if the arguments will overflow the stack.
- __ cmpp(r8, rcx);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
+ Label stack_overflow;
+ Generate_StackOverflowCheck(masm, rcx, r8, &stack_overflow, Label::kNear);
// Push additional arguments onto the stack.
{
@@ -1806,9 +1804,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Turn the hole into undefined as we go.
__ movp(r11,
FieldOperand(rbx, r9, times_pointer_size, FixedArray::kHeaderSize));
- __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r11, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
- __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r11, RootIndex::kUndefinedValue);
__ bind(&push);
__ Push(r11);
__ incl(r9);
@@ -1820,6 +1818,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
}
// static
@@ -1957,9 +1958,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ j(above_equal, &done_convert);
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
- __ JumpIfRoot(rcx, Heap::kUndefinedValueRootIndex,
- &convert_global_proxy, Label::kNear);
- __ JumpIfNotRoot(rcx, Heap::kNullValueRootIndex, &convert_to_object,
+ __ JumpIfRoot(rcx, RootIndex::kUndefinedValue, &convert_global_proxy,
+ Label::kNear);
+ __ JumpIfNotRoot(rcx, RootIndex::kNullValue, &convert_to_object,
Label::kNear);
__ bind(&convert_global_proxy);
{
@@ -2049,8 +2050,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRoot(rsp, Heap::kRealStackLimitRootIndex);
- __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ CompareRoot(rsp, RootIndex::kRealStackLimit);
+ __ j(above_equal, &done, Label::kNear);
// Restore the stack pointer.
__ leap(rsp, Operand(rsp, rbx, times_pointer_size, 0));
{
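
The switch from j(greater) to j(above_equal) above also moves from a signed to an unsigned comparison, which is the correct ordering for addresses. A hedged C++ equivalent of the fixed check:

#include <cstdint>
// Addresses compare as unsigned values; a signed compare would misorder
// pointers whose top bit differs.
bool HasStackSpace(uintptr_t rsp, uintptr_t real_stack_limit) {
  return rsp >= real_stack_limit;  // j(above_equal, &done)
}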
@@ -2183,7 +2184,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// The calling convention for function-specific ConstructStubs requires
// rbx to contain either an AllocationSite or undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(rbx, RootIndex::kUndefinedValue);
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -2277,15 +2278,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
- bool has_handler_frame) {
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
- if (has_handler_frame) {
- __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
+ __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2302,11 +2298,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ bind(&skip);
- // Drop any potential handler frame that is be sitting on top of the actual
+ // Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
- if (has_handler_frame) {
- __ leave();
- }
+ __ leave();
// Load deoptimization data from the code object.
__ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -2326,14 +2320,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ ret(0);
}
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, false);
-}
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, true);
-}
-
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was pushed to the stack by the caller as int32.
__ Pop(r11);
@@ -2486,14 +2472,14 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Check result for exception sentinel.
Label exception_returned;
- __ CompareRoot(rax, Heap::kExceptionRootIndex);
+ __ CompareRoot(rax, RootIndex::kException);
__ j(equal, &exception_returned);
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
Label okay;
- __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r14, RootIndex::kTheHoleValue);
ExternalReference pending_exception_address = ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
@@ -2547,9 +2533,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ bind(&skip);
// Reset the masking register. This is done independently of the underlying
- // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
- // both configurations. It is safe to always do this, because the underlying
- // register is caller-saved and can be arbitrarily clobbered.
+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+ // with both configurations. It is safe to always do this, because the
+ // underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
@@ -2789,6 +2775,9 @@ void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
RelocInfo::CODE_TARGET);
__ bind(&not_one_case);
+ // Load undefined into the allocation site parameter as required by
+ // ArrayNArgumentsConstructor.
+ __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
__ Jump(code, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/callable.h b/deps/v8/src/callable.h
index 3d9eb274b0..c24c9ae554 100644
--- a/deps/v8/src/callable.h
+++ b/deps/v8/src/callable.h
@@ -14,7 +14,7 @@ namespace internal {
class Code;
// Associates a body of code with an interface descriptor.
-class Callable final BASE_EMBEDDED {
+class Callable final {
public:
Callable(Handle<Code> code, CallInterfaceDescriptor descriptor)
: code_(code), descriptor_(descriptor) {}
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
index 64ca681416..0ef3ca5a15 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/cancelable-task.h
@@ -134,7 +134,6 @@ class V8_EXPORT_PRIVATE Cancelable {
DISALLOW_COPY_AND_ASSIGN(Cancelable);
};
-
// Multiple inheritance can be used because Task is a pure interface.
class V8_EXPORT_PRIVATE CancelableTask : public Cancelable,
NON_EXPORTED_BASE(public Task) {
@@ -155,6 +154,32 @@ class V8_EXPORT_PRIVATE CancelableTask : public Cancelable,
DISALLOW_COPY_AND_ASSIGN(CancelableTask);
};
+// TODO(clemensh): Use std::function and move implementation to cc file.
+template <typename Func>
+class CancelableLambdaTask final : public CancelableTask {
+ public:
+ CancelableLambdaTask(Isolate* isolate, Func func)
+ : CancelableTask(isolate), func_(std::move(func)) {}
+ CancelableLambdaTask(CancelableTaskManager* manager, Func func)
+ : CancelableTask(manager), func_(std::move(func)) {}
+ void RunInternal() final { func_(); }
+
+ private:
+ Func func_;
+};
+
+template <typename Func>
+std::unique_ptr<CancelableTask> MakeCancelableLambdaTask(Isolate* isolate,
+ Func func) {
+ return std::unique_ptr<CancelableTask>(
+ new CancelableLambdaTask<Func>(isolate, std::move(func)));
+}
+template <typename Func>
+std::unique_ptr<CancelableTask> MakeCancelableLambdaTask(
+ CancelableTaskManager* manager, Func func) {
+ return std::unique_ptr<CancelableTask>(
+ new CancelableLambdaTask<Func>(manager, std::move(func)));
+}
// Multiple inheritance can be used because IdleTask is a pure interface.
class CancelableIdleTask : public Cancelable, public IdleTask {
@@ -175,6 +200,33 @@ class CancelableIdleTask : public Cancelable, public IdleTask {
DISALLOW_COPY_AND_ASSIGN(CancelableIdleTask);
};
+template <typename Func>
+class CancelableIdleLambdaTask final : public CancelableIdleTask {
+ public:
+ CancelableIdleLambdaTask(Isolate* isolate, Func func)
+ : CancelableIdleTask(isolate), func_(std::move(func)) {}
+ CancelableIdleLambdaTask(CancelableTaskManager* manager, Func func)
+ : CancelableIdleTask(manager), func_(std::move(func)) {}
+ void RunInternal(double deadline_in_seconds) final {
+ func_(deadline_in_seconds);
+ }
+
+ private:
+ Func func_;
+};
+
+template <typename Func>
+std::unique_ptr<CancelableIdleTask> MakeCancelableIdleLambdaTask(
+ Isolate* isolate, Func func) {
+ return std::unique_ptr<CancelableIdleTask>(
+ new CancelableIdleLambdaTask<Func>(isolate, std::move(func)));
+}
+template <typename Func>
+std::unique_ptr<CancelableIdleTask> MakeCancelableIdleLambdaTask(
+ CancelableTaskManager* manager, Func func) {
+ return std::unique_ptr<CancelableIdleTask>(
+ new CancelableIdleLambdaTask<Func>(manager, std::move(func)));
+}
} // namespace internal
} // namespace v8
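
A hedged usage sketch for the new lambda-task helpers; the platform call shown is an assumption about embedder wiring, not part of this header:

// Wrap a lambda in a cancelable task and hand it to a worker thread.
auto task = MakeCancelableLambdaTask(isolate, [] {
  // ... background work, cancelable through the isolate's task manager ...
});
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));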
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h
index 7e198d5808..3662514bca 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/char-predicates-inl.h
@@ -18,23 +18,14 @@ inline int AsciiAlphaToLower(uc32 c) {
return c | 0x20;
}
-
inline bool IsCarriageReturn(uc32 c) {
return c == 0x000D;
}
-
inline bool IsLineFeed(uc32 c) {
return c == 0x000A;
}
-
-inline bool IsInRange(int value, int lower_limit, int higher_limit) {
- DCHECK(lower_limit <= higher_limit);
- return static_cast<unsigned int>(value - lower_limit) <=
- static_cast<unsigned int>(higher_limit - lower_limit);
-}
-
inline bool IsAsciiIdentifier(uc32 c) {
return IsAlphaNumeric(c) || c == '$' || c == '_';
}
@@ -58,6 +49,8 @@ inline bool IsOctalDigit(uc32 c) {
return IsInRange(c, '0', '7');
}
+inline bool IsNonOctalDecimalDigit(uc32 c) { return IsInRange(c, '8', '9'); }
+
inline bool IsBinaryDigit(uc32 c) {
// ECMA-262, 6th, 7.8.3
return c == '0' || c == '1';
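
For reference, the single-comparison range test removed above (and relied on by IsOctalDigit and the new IsNonOctalDecimalDigit) folds both bound checks into one unsigned compare; a standalone C++ form:

// Values below 'lo' wrap around to large unsigned numbers after the
// subtraction, so one unsigned <= covers both bounds.
inline bool IsInRange(int value, int lo, int hi) {
  return static_cast<unsigned>(value - lo) <= static_cast<unsigned>(hi - lo);
}
// e.g. IsInRange('8', '8', '9') is true, matching IsNonOctalDecimalDigit.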
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index 8eb10f43dd..4a0afa4f57 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -5,7 +5,7 @@
#ifndef V8_CHECKS_H_
#define V8_CHECKS_H_
-#include "include/v8.h"
+#include "include/v8-internal.h"
#include "src/base/logging.h"
#include "src/globals.h"
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
index ec07a2e107..07a883be0d 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/code-events.h
@@ -66,7 +66,7 @@ class CodeEventListener {
};
#undef DECLARE_ENUM
- virtual ~CodeEventListener() {}
+ virtual ~CodeEventListener() = default;
virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
const char* comment) = 0;
@@ -98,7 +98,7 @@ class CodeEventDispatcher {
public:
using LogEventsAndTags = CodeEventListener::LogEventsAndTags;
- CodeEventDispatcher() {}
+ CodeEventDispatcher() = default;
bool AddListener(CodeEventListener* listener) {
base::LockGuard<base::Mutex> guard(&mutex_);
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index b6eb03f81b..cffb16b7d4 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -166,27 +166,15 @@ Callable CodeFactory::OrdinaryToPrimitive(Isolate* isolate,
}
// static
-Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
- PretenureFlag pretenure_flag) {
- if (pretenure_flag == NOT_TENURED) {
- switch (flags) {
- case STRING_ADD_CHECK_NONE:
- return Builtins::CallableFor(isolate,
- Builtins::kStringAdd_CheckNone_NotTenured);
- case STRING_ADD_CONVERT_LEFT:
- return Builtins::CallableFor(
- isolate, Builtins::kStringAdd_ConvertLeft_NotTenured);
- case STRING_ADD_CONVERT_RIGHT:
- return Builtins::CallableFor(
- isolate, Builtins::kStringAdd_ConvertRight_NotTenured);
- }
- } else {
- CHECK_EQ(TENURED, pretenure_flag);
- CHECK_EQ(STRING_ADD_CHECK_NONE, flags);
- return Builtins::CallableFor(isolate,
- Builtins::kStringAdd_CheckNone_Tenured);
+Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags) {
+ switch (flags) {
+ case STRING_ADD_CHECK_NONE:
+ return Builtins::CallableFor(isolate, Builtins::kStringAdd_CheckNone);
+ case STRING_ADD_CONVERT_LEFT:
+ return Builtins::CallableFor(isolate, Builtins::kStringAdd_ConvertLeft);
+ case STRING_ADD_CONVERT_RIGHT:
+ return Builtins::CallableFor(isolate, Builtins::kStringAdd_ConvertRight);
}
-
UNREACHABLE();
}
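
Call sites correspondingly lose the pretenure argument; a hedged sketch of the new shape:

// The tenured variants are gone; callers select only the conversion flavour.
Callable add_checked = CodeFactory::StringAdd(isolate);  // STRING_ADD_CHECK_NONE
Callable add_convert = CodeFactory::StringAdd(isolate, STRING_ADD_CONVERT_LEFT);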
@@ -218,7 +206,7 @@ Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
// static
Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, ArgumentsAdaptorTrampoline),
- ArgumentAdaptorDescriptor{});
+ ArgumentsAdaptorDescriptor{});
}
// static
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index cba6136a38..3e8bc3790c 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -59,8 +59,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
OrdinaryToPrimitiveHint hint);
static Callable StringAdd(Isolate* isolate,
- StringAddFlags flags = STRING_ADD_CHECK_NONE,
- PretenureFlag pretenure_flag = NOT_TENURED);
+ StringAddFlags flags = STRING_ADD_CHECK_NONE);
static Callable FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type);
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 5bd780b520..e307ca5cc3 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -225,7 +225,7 @@ TNode<Object> CodeStubAssembler::NoContextConstant() {
CodeStubAssembler::name##Constant() { \
return UncheckedCast<std::remove_reference<decltype( \
*std::declval<Heap>().rootAccessorName())>::type>( \
- LoadRoot(Heap::k##rootIndexName##RootIndex)); \
+ LoadRoot(RootIndex::k##rootIndexName)); \
}
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR
@@ -236,7 +236,7 @@ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
CodeStubAssembler::name##Constant() { \
return UncheckedCast<std::remove_reference<decltype( \
*std::declval<ReadOnlyRoots>().rootAccessorName())>::type>( \
- LoadRoot(Heap::k##rootIndexName##RootIndex)); \
+ LoadRoot(RootIndex::k##rootIndexName)); \
}
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR
@@ -906,6 +906,17 @@ TNode<Smi> CodeStubAssembler::TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor,
return SmiFromInt32(untagged_result);
}
+TNode<Smi> CodeStubAssembler::SmiLexicographicCompare(TNode<Smi> x,
+ TNode<Smi> y) {
+ TNode<ExternalReference> smi_lexicographic_compare =
+ ExternalConstant(ExternalReference::smi_lexicographic_compare_function());
+ TNode<ExternalReference> isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ return CAST(CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ smi_lexicographic_compare, isolate_ptr, x, y));
+}
+
TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
SloppyTNode<IntPtrT> value) {
if (Is64()) {
@@ -962,9 +973,9 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
CSA_SLOW_ASSERT(this, IsMap(receiver_map));
VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map);
Label loop_body(this, &var_map);
- Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+ Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray);
Node* empty_slow_element_dictionary =
- LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex);
+ LoadRoot(RootIndex::kEmptySlowElementDictionary);
Goto(&loop_body);
BIND(&loop_body);
@@ -1039,7 +1050,7 @@ TNode<BoolT> CodeStubAssembler::IsFastJSArrayWithNoCustomIteration(
{
// Check that the Array.prototype hasn't been modified in a way that would
// affect iteration.
- Node* protector_cell = LoadRoot(Heap::kArrayIteratorProtectorRootIndex);
+ Node* protector_cell = LoadRoot(RootIndex::kArrayIteratorProtector);
DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell());
var_result =
WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
@@ -1098,6 +1109,19 @@ void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
#endif
}
+void CodeStubAssembler::GotoIfDebugExecutionModeChecksSideEffects(
+ Label* if_true) {
+ STATIC_ASSERT(sizeof(DebugInfo::ExecutionMode) >= sizeof(int32_t));
+
+ TNode<ExternalReference> execution_mode_address = ExternalConstant(
+ ExternalReference::debug_execution_mode_address(isolate()));
+ TNode<Int32T> execution_mode =
+ UncheckedCast<Int32T>(Load(MachineType::Int32(), execution_mode_address));
+
+ GotoIf(Word32Equal(execution_mode, Int32Constant(DebugInfo::kSideEffects)),
+ if_true);
+}
+
Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
Node* top_address, Node* limit_address) {
// TODO(jgruber, chromium:848672): TNodeify AllocateRaw.
@@ -1192,7 +1216,7 @@ Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
BIND(&needs_filler);
// Store a filler and increase the address by kPointerSize.
StoreNoWriteBarrier(MachineRepresentation::kTagged, top,
- LoadRoot(Heap::kOnePointerFillerMapRootIndex));
+ LoadRoot(RootIndex::kOnePointerFillerMap));
address.Bind(IntPtrAdd(no_runtime_result, IntPtrConstant(4)));
Goto(&done_filling);
@@ -1370,14 +1394,14 @@ Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
int offset, MachineType rep) {
- CSA_ASSERT(this, IsStrongHeapObject(object));
+ CSA_ASSERT(this, IsStrong(object));
return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
SloppyTNode<IntPtrT> offset,
MachineType rep) {
- CSA_ASSERT(this, IsStrongHeapObject(object));
+ CSA_ASSERT(this, IsStrong(object));
return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
}
@@ -1423,19 +1447,19 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
}
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32Root(
- Heap::RootListIndex root_index) {
+ RootIndex root_index) {
Node* roots_array_start =
ExternalConstant(ExternalReference::roots_array_start(isolate()));
- int index = root_index * kPointerSize;
+ int offset = static_cast<int>(root_index) * kPointerSize;
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
- index += kPointerSize / 2;
+ offset += kPointerSize / 2;
#endif
return UncheckedCast<Int32T>(
- Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index)));
+ Load(MachineType::Int32(), roots_array_start, IntPtrConstant(offset)));
} else {
return SmiToInt32(Load(MachineType::AnyTagged(), roots_array_start,
- IntPtrConstant(index)));
+ IntPtrConstant(offset)));
}
}
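
The kPointerSize/2 adjustment reflects the 32-bit-Smi encoding on 64-bit targets: a tagged Smi is the value shifted into the upper half of the word, so a 4-byte load at the right offset yields the untagged value with no shift. A standalone demonstration under that assumed encoding:

#include <cstdint>
#include <cstring>
// Assumed encoding: tagged Smi == (int64_t{value} << 32), payload in the
// high 4 bytes of the slot.
int32_t UntagSmiViaHalfWordLoad(const uint64_t* slot) {
  int32_t payload;
  const char* p = reinterpret_cast<const char*>(slot);
#if defined(V8_TARGET_LITTLE_ENDIAN)
  std::memcpy(&payload, p + 4, sizeof(payload));  // high half at offset +4
#else
  std::memcpy(&payload, p, sizeof(payload));      // high half at offset 0
#endif
  return payload;  // equals static_cast<int32_t>(*slot >> 32)
}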
@@ -1521,6 +1545,11 @@ TNode<Number> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) {
return CAST(LoadObjectField(array, JSArray::kLengthOffset));
}
+TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectWithLength(
+ SloppyTNode<JSArgumentsObjectWithLength> array) {
+ return LoadObjectField(array, JSArgumentsObjectWithLength::kLengthOffset);
+}
+
TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
SloppyTNode<JSArray> array) {
TNode<Object> length = LoadJSArrayLength(array);
@@ -1557,11 +1586,6 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagWeakFixedArrayLength(
return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset);
}
-TNode<Smi> CodeStubAssembler::LoadTypedArrayLength(
- TNode<JSTypedArray> typed_array) {
- return CAST(LoadObjectField(typed_array, JSTypedArray::kLengthOffset));
-}
-
TNode<Int32T> CodeStubAssembler::LoadMapBitField(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return UncheckedCast<Int32T>(
@@ -1620,7 +1644,7 @@ TNode<PrototypeInfo> CodeStubAssembler::LoadMapPrototypeInfo(
BIND(&if_strong_heap_object);
GotoIfNot(WordEqual(LoadMap(CAST(prototype_info.value())),
- LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
+ LoadRoot(RootIndex::kPrototypeInfoMap)),
if_no_proto_info);
return CAST(prototype_info.value());
}
@@ -1686,6 +1710,19 @@ TNode<Object> CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
[=] { return UndefinedConstant(); });
}
+TNode<Uint32T> CodeStubAssembler::EnsureOnlyHasSimpleProperties(
+ TNode<Map> map, TNode<Int32T> instance_type, Label* bailout) {
+ // This check can have false positives, since it applies to any JSValueType.
+ GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout);
+
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+ GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask |
+ Map::HasHiddenPrototypeBit::kMask),
+ bailout);
+
+ return bit_field3;
+}
+
TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
SloppyTNode<Object> receiver, Label* if_no_hash) {
TVARIABLE(IntPtrT, var_hash);
@@ -1758,16 +1795,20 @@ TNode<Uint32T> CodeStubAssembler::LoadNameHash(SloppyTNode<Name> name,
return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift)));
}
+TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi(
+ SloppyTNode<String> string) {
+ return SmiFromIntPtr(LoadStringLengthAsWord(string));
+}
+
TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord(
- SloppyTNode<String> object) {
- return SmiUntag(LoadStringLengthAsSmi(object));
+ SloppyTNode<String> string) {
+ return Signed(ChangeUint32ToWord(LoadStringLengthAsWord32(string)));
}
-TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi(
- SloppyTNode<String> object) {
- CSA_ASSERT(this, IsString(object));
- return CAST(LoadObjectField(object, String::kLengthOffset,
- MachineType::TaggedPointer()));
+TNode<Uint32T> CodeStubAssembler::LoadStringLengthAsWord32(
+ SloppyTNode<String> string) {
+ CSA_ASSERT(this, IsString(string));
+ return LoadObjectField<Uint32T>(string, String::kLengthOffset);
}
Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) {
@@ -1817,49 +1858,46 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
Goto(if_strong);
}
-TNode<BoolT> CodeStubAssembler::IsStrongHeapObject(TNode<MaybeObject> value) {
+TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) {
return WordEqual(WordAnd(BitcastMaybeObjectToWord(value),
IntPtrConstant(kHeapObjectTagMask)),
IntPtrConstant(kHeapObjectTag));
}
-TNode<HeapObject> CodeStubAssembler::ToStrongHeapObject(
+TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong(
TNode<MaybeObject> value, Label* if_not_strong) {
- GotoIfNot(IsStrongHeapObject(value), if_not_strong);
+ GotoIfNot(IsStrong(value), if_not_strong);
return CAST(value);
}
-TNode<BoolT> CodeStubAssembler::IsWeakOrClearedHeapObject(
- TNode<MaybeObject> value) {
+TNode<BoolT> CodeStubAssembler::IsWeakOrCleared(TNode<MaybeObject> value) {
return WordEqual(WordAnd(BitcastMaybeObjectToWord(value),
IntPtrConstant(kHeapObjectTagMask)),
IntPtrConstant(kWeakHeapObjectTag));
}
-TNode<BoolT> CodeStubAssembler::IsClearedWeakHeapObject(
- TNode<MaybeObject> value) {
+TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) {
return WordEqual(BitcastMaybeObjectToWord(value),
IntPtrConstant(kClearedWeakHeapObject));
}
-TNode<BoolT> CodeStubAssembler::IsNotClearedWeakHeapObject(
- TNode<MaybeObject> value) {
+TNode<BoolT> CodeStubAssembler::IsNotCleared(TNode<MaybeObject> value) {
return WordNotEqual(BitcastMaybeObjectToWord(value),
IntPtrConstant(kClearedWeakHeapObject));
}
-TNode<HeapObject> CodeStubAssembler::ToWeakHeapObject(
+TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
TNode<MaybeObject> value) {
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(value));
- CSA_ASSERT(this, IsNotClearedWeakHeapObject(value));
+ CSA_ASSERT(this, IsWeakOrCleared(value));
+ CSA_ASSERT(this, IsNotCleared(value));
return UncheckedCast<HeapObject>(BitcastWordToTagged(WordAnd(
BitcastMaybeObjectToWord(value), IntPtrConstant(~kWeakHeapObjectMask))));
}
-TNode<HeapObject> CodeStubAssembler::ToWeakHeapObject(TNode<MaybeObject> value,
- Label* if_cleared) {
- GotoIf(IsClearedWeakHeapObject(value), if_cleared);
- return ToWeakHeapObject(value);
+TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
+ TNode<MaybeObject> value, Label* if_cleared) {
+ GotoIf(IsCleared(value), if_cleared);
+ return GetHeapObjectAssumeWeak(value);
}
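
The renamed predicates are plain tag tests on the low pointer bits. A hedged standalone restatement, with the tag constants assumed from V8's scheme (strong references end in 0b01, weak references in 0b11):

#include <cstdint>
constexpr uintptr_t kTagMask = 3;    // kHeapObjectTagMask
constexpr uintptr_t kStrongTag = 1;  // kHeapObjectTag
constexpr uintptr_t kWeakTag = 3;    // kWeakHeapObjectTag
constexpr uintptr_t kWeakBit = 2;    // kWeakHeapObjectMask
bool IsStrong(uintptr_t p) { return (p & kTagMask) == kStrongTag; }
bool IsWeakOrCleared(uintptr_t p) { return (p & kTagMask) == kWeakTag; }
// Clearing the weak bit turns a weak reference back into a tagged pointer.
uintptr_t GetHeapObjectAssumeWeak(uintptr_t p) { return p & ~kWeakBit; }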
TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(TNode<MaybeObject> object,
@@ -1999,108 +2037,88 @@ TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayBackingStore(
Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
Node* data_pointer, Node* offset) {
- TVARIABLE(BigInt, var_result);
- Label done(this), if_zero(this);
if (Is64()) {
TNode<IntPtrT> value = UncheckedCast<IntPtrT>(
Load(MachineType::IntPtr(), data_pointer, offset));
- Label if_positive(this), if_negative(this);
- GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero);
- var_result = AllocateRawBigInt(IntPtrConstant(1));
- Branch(IntPtrGreaterThan(value, IntPtrConstant(0)), &if_positive,
- &if_negative);
-
- BIND(&if_positive);
- {
- StoreBigIntBitfield(var_result.value(),
- IntPtrConstant(BigInt::SignBits::encode(false) |
- BigInt::LengthBits::encode(1)));
- StoreBigIntDigit(var_result.value(), 0, Unsigned(value));
- Goto(&done);
- }
-
- BIND(&if_negative);
- {
- StoreBigIntBitfield(var_result.value(),
- IntPtrConstant(BigInt::SignBits::encode(true) |
- BigInt::LengthBits::encode(1)));
- StoreBigIntDigit(var_result.value(), 0,
- Unsigned(IntPtrSub(IntPtrConstant(0), value)));
- Goto(&done);
- }
+ return BigIntFromInt64(value);
} else {
DCHECK(!Is64());
- TVARIABLE(WordT, var_sign, IntPtrConstant(BigInt::SignBits::encode(false)));
- TVARIABLE(IntPtrT, var_low);
- TVARIABLE(IntPtrT, var_high);
#if defined(V8_TARGET_BIG_ENDIAN)
- var_high = UncheckedCast<IntPtrT>(
+ TNode<IntPtrT> high = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
- var_low = UncheckedCast<IntPtrT>(
+ TNode<IntPtrT> low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
Int32Add(offset, Int32Constant(kPointerSize))));
#else
- var_low = UncheckedCast<IntPtrT>(
+ TNode<IntPtrT> low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
- var_high = UncheckedCast<IntPtrT>(
+ TNode<IntPtrT> high = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
Int32Add(offset, Int32Constant(kPointerSize))));
#endif
+ return BigIntFromInt32Pair(low, high);
+ }
+}
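
The two preprocessor branches above differ only in which half of the 64-bit element is read first. A small sketch of the assumed word order, with endianness passed as a runtime flag purely for illustration (the real code decides at compile time via V8_TARGET_BIG_ENDIAN):

#include <cstdint>
#include <cstring>

// Split a 64-bit typed-array element into 32-bit halves. On a
// big-endian target the high half sits at the lower address.
void SplitInt64(const unsigned char* element, bool big_endian,
                uint32_t* low, uint32_t* high) {
  uint32_t first, second;
  std::memcpy(&first, element, sizeof(first));
  std::memcpy(&second, element + sizeof(first), sizeof(second));
  *high = big_endian ? first : second;
  *low = big_endian ? second : first;
}
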
- Label high_zero(this), negative(this), allocate_one_digit(this),
- allocate_two_digits(this);
-
- GotoIf(WordEqual(var_high.value(), IntPtrConstant(0)), &high_zero);
- Branch(IntPtrLessThan(var_high.value(), IntPtrConstant(0)), &negative,
+TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low,
+ TNode<IntPtrT> high) {
+ DCHECK(!Is64());
+ TVARIABLE(BigInt, var_result);
+ TVARIABLE(WordT, var_sign, IntPtrConstant(BigInt::SignBits::encode(false)));
+ TVARIABLE(IntPtrT, var_high, high);
+ TVARIABLE(IntPtrT, var_low, low);
+ Label high_zero(this), negative(this), allocate_one_digit(this),
+ allocate_two_digits(this), if_zero(this), done(this);
+
+ GotoIf(WordEqual(var_high.value(), IntPtrConstant(0)), &high_zero);
+ Branch(IntPtrLessThan(var_high.value(), IntPtrConstant(0)), &negative,
+ &allocate_two_digits);
+
+ BIND(&high_zero);
+ Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &if_zero,
+ &allocate_one_digit);
+
+ BIND(&negative);
+ {
+ var_sign = IntPtrConstant(BigInt::SignBits::encode(true));
+ // We must negate the value by computing "0 - (high|low)", performing
+ // both parts of the subtraction separately and manually taking care
+ // of the carry bit (which is 1 iff low != 0).
+ var_high = IntPtrSub(IntPtrConstant(0), var_high.value());
+ Label carry(this), no_carry(this);
+ Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &no_carry, &carry);
+ BIND(&carry);
+ var_high = IntPtrSub(var_high.value(), IntPtrConstant(1));
+ Goto(&no_carry);
+ BIND(&no_carry);
+ var_low = IntPtrSub(IntPtrConstant(0), var_low.value());
+ // var_high was non-zero going into this block, but subtracting the
+ // carry bit from it could bring us back onto the "one digit" path.
+ Branch(WordEqual(var_high.value(), IntPtrConstant(0)), &allocate_one_digit,
&allocate_two_digits);
+ }
- BIND(&high_zero);
- Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &if_zero,
- &allocate_one_digit);
-
- BIND(&negative);
- {
- var_sign = IntPtrConstant(BigInt::SignBits::encode(true));
- // We must negate the value by computing "0 - (high|low)", performing
- // both parts of the subtraction separately and manually taking care
- // of the carry bit (which is 1 iff low != 0).
- var_high = IntPtrSub(IntPtrConstant(0), var_high.value());
- Label carry(this), no_carry(this);
- Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &no_carry, &carry);
- BIND(&carry);
- var_high = IntPtrSub(var_high.value(), IntPtrConstant(1));
- Goto(&no_carry);
- BIND(&no_carry);
- var_low = IntPtrSub(IntPtrConstant(0), var_low.value());
- // var_high was non-zero going into this block, but subtracting the
- // carry bit from it could bring us back onto the "one digit" path.
- Branch(WordEqual(var_high.value(), IntPtrConstant(0)),
- &allocate_one_digit, &allocate_two_digits);
- }
-
- BIND(&allocate_one_digit);
- {
- var_result = AllocateRawBigInt(IntPtrConstant(1));
- StoreBigIntBitfield(
- var_result.value(),
- WordOr(var_sign.value(),
- IntPtrConstant(BigInt::LengthBits::encode(1))));
- StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
- Goto(&done);
- }
+ BIND(&allocate_one_digit);
+ {
+ var_result = AllocateRawBigInt(IntPtrConstant(1));
+ StoreBigIntBitfield(var_result.value(),
+ WordOr(var_sign.value(),
+ IntPtrConstant(BigInt::LengthBits::encode(1))));
+ StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
+ Goto(&done);
+ }
- BIND(&allocate_two_digits);
- {
- var_result = AllocateRawBigInt(IntPtrConstant(2));
- StoreBigIntBitfield(
- var_result.value(),
- WordOr(var_sign.value(),
- IntPtrConstant(BigInt::LengthBits::encode(2))));
- StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
- StoreBigIntDigit(var_result.value(), 1, Unsigned(var_high.value()));
- Goto(&done);
- }
+ BIND(&allocate_two_digits);
+ {
+ var_result = AllocateRawBigInt(IntPtrConstant(2));
+ StoreBigIntBitfield(var_result.value(),
+ WordOr(var_sign.value(),
+ IntPtrConstant(BigInt::LengthBits::encode(2))));
+ StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
+ StoreBigIntDigit(var_result.value(), 1, Unsigned(var_high.value()));
+ Goto(&done);
}
+
BIND(&if_zero);
var_result = AllocateBigInt(IntPtrConstant(0));
Goto(&done);
@@ -2109,21 +2127,53 @@ Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
return var_result.value();
}
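
The carry handling in the negative path above is easy to check in plain C++. A hedged sketch of the same computation, negating a 64-bit magnitude held as two 32-bit halves: the borrow out of the low word is 1 exactly when the low half is non-zero, and when that borrow zeroes the high word (for example when negating -5) the result fits in one digit again, which is why the block re-branches on var_high afterwards.

#include <cassert>
#include <cstdint>

// Compute 0 - (high|low) half by half, with a manual borrow.
void NegateInt32Pair(uint32_t* low, uint32_t* high) {
  *high = 0u - *high;
  if (*low != 0) *high -= 1;  // the carry/borrow bit from the low half
  *low = 0u - *low;
}

int main() {
  uint64_t value = 0x0000000180000001ull;
  uint32_t low = static_cast<uint32_t>(value);
  uint32_t high = static_cast<uint32_t>(value >> 32);
  NegateInt32Pair(&low, &high);
  uint64_t negated = (static_cast<uint64_t>(high) << 32) | low;
  assert(negated == uint64_t{0} - value);  // two's-complement negation
  return 0;
}
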
+TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) {
+ DCHECK(Is64());
+ TVARIABLE(BigInt, var_result);
+ Label done(this), if_positive(this), if_negative(this), if_zero(this);
+ GotoIf(WordEqual(value, IntPtrConstant(0)), &if_zero);
+ var_result = AllocateRawBigInt(IntPtrConstant(1));
+ Branch(IntPtrGreaterThan(value, IntPtrConstant(0)), &if_positive,
+ &if_negative);
+
+ BIND(&if_positive);
+ {
+ StoreBigIntBitfield(var_result.value(),
+ IntPtrConstant(BigInt::SignBits::encode(false) |
+ BigInt::LengthBits::encode(1)));
+ StoreBigIntDigit(var_result.value(), 0, Unsigned(value));
+ Goto(&done);
+ }
+
+ BIND(&if_negative);
+ {
+ StoreBigIntBitfield(var_result.value(),
+ IntPtrConstant(BigInt::SignBits::encode(true) |
+ BigInt::LengthBits::encode(1)));
+ StoreBigIntDigit(var_result.value(), 0,
+ Unsigned(IntPtrSub(IntPtrConstant(0), value)));
+ Goto(&done);
+ }
+
+ BIND(&if_zero);
+ {
+ var_result = AllocateBigInt(IntPtrConstant(0));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return var_result.value();
+}
+
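
BigIntFromInt64 above encodes sign and magnitude separately in its one-digit case. A minimal scalar sketch of that split, under the assumption that a one-digit BigInt stores a sign flag plus an unsigned magnitude (zero takes the dedicated canonical zero-length path):

#include <cstdint>

struct OneDigitBigInt {
  bool sign;       // true iff the value is negative
  uint64_t digit;  // magnitude of the value
};

// Assumes value != 0; zero is handled by the separate if_zero path.
OneDigitBigInt EncodeInt64(int64_t value) {
  OneDigitBigInt result;
  result.sign = value < 0;
  // Unsigned negation also handles INT64_MIN (magnitude 2^63) correctly.
  result.digit = result.sign ? uint64_t{0} - static_cast<uint64_t>(value)
                             : static_cast<uint64_t>(value);
  return result;
}
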
Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
Node* data_pointer, Node* offset) {
- TVARIABLE(BigInt, var_result);
Label if_zero(this), done(this);
if (Is64()) {
TNode<UintPtrT> value = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
- GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero);
- var_result = AllocateBigInt(IntPtrConstant(1));
- StoreBigIntDigit(var_result.value(), 0, value);
- Goto(&done);
+ return BigIntFromUint64(value);
} else {
DCHECK(!Is64());
- Label high_zero(this);
-
#if defined(V8_TARGET_BIG_ENDIAN)
TNode<UintPtrT> high = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
@@ -2137,19 +2187,28 @@ Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
Load(MachineType::UintPtr(), data_pointer,
Int32Add(offset, Int32Constant(kPointerSize))));
#endif
+ return BigIntFromUint32Pair(low, high);
+ }
+}
- GotoIf(WordEqual(high, IntPtrConstant(0)), &high_zero);
- var_result = AllocateBigInt(IntPtrConstant(2));
- StoreBigIntDigit(var_result.value(), 0, low);
- StoreBigIntDigit(var_result.value(), 1, high);
- Goto(&done);
+TNode<BigInt> CodeStubAssembler::BigIntFromUint32Pair(TNode<UintPtrT> low,
+ TNode<UintPtrT> high) {
+ DCHECK(!Is64());
+ TVARIABLE(BigInt, var_result);
+ Label high_zero(this), if_zero(this), done(this);
+
+ GotoIf(WordEqual(high, IntPtrConstant(0)), &high_zero);
+ var_result = AllocateBigInt(IntPtrConstant(2));
+ StoreBigIntDigit(var_result.value(), 0, low);
+ StoreBigIntDigit(var_result.value(), 1, high);
+ Goto(&done);
+
+ BIND(&high_zero);
+ GotoIf(WordEqual(low, IntPtrConstant(0)), &if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(1));
+ StoreBigIntDigit(var_result.value(), 0, low);
+ Goto(&done);
- BIND(&high_zero);
- GotoIf(WordEqual(low, IntPtrConstant(0)), &if_zero);
- var_result = AllocateBigInt(IntPtrConstant(1));
- StoreBigIntDigit(var_result.value(), 0, low);
- Goto(&done);
- }
BIND(&if_zero);
var_result = AllocateBigInt(IntPtrConstant(0));
Goto(&done);
@@ -2158,6 +2217,22 @@ Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
return var_result.value();
}
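
The branch structure of BigIntFromUint32Pair reduces to a digit-count choice. A one-screen restatement, as an assumption-level sketch rather than the engine's API:

#include <cstdint>

// Two digits if the high word is used, one if only the low word is,
// zero for the canonical zero BigInt.
int DigitCountForUint32Pair(uint32_t low, uint32_t high) {
  if (high != 0) return 2;
  if (low != 0) return 1;
  return 0;
}
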
+TNode<BigInt> CodeStubAssembler::BigIntFromUint64(TNode<UintPtrT> value) {
+ DCHECK(Is64());
+ TVARIABLE(BigInt, var_result);
+ Label done(this), if_zero(this);
+ GotoIf(WordEqual(value, IntPtrConstant(0)), &if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(1));
+ StoreBigIntDigit(var_result.value(), 0, value);
+ Goto(&done);
+
+ BIND(&if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(0));
+ Goto(&done);
+ BIND(&done);
+ return var_result.value();
+}
+
Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode) {
@@ -2452,8 +2527,16 @@ TNode<Object> CodeStubAssembler::LoadContextElement(
TNode<Object> CodeStubAssembler::LoadContextElement(
SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) {
Node* offset =
- IntPtrAdd(TimesPointerSize(slot_index),
- IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
+ ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, INTPTR_PARAMETERS,
+ Context::kHeaderSize - kHeapObjectTag);
+ return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
+}
+
+TNode<Object> CodeStubAssembler::LoadContextElement(TNode<Context> context,
+ TNode<Smi> slot_index) {
+ Node* offset =
+ ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, SMI_PARAMETERS,
+ Context::kHeaderSize - kHeapObjectTag);
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
}
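
Both LoadContextElement overloads above compute the same byte offset, differing only in whether the slot index arrives as a word or a Smi. The arithmetic in scalar form, with illustrative constants (the header size here is a placeholder, not Context's real value):

#include <cstdint>

constexpr intptr_t kPointerSize = 8;         // 64-bit build assumed
constexpr intptr_t kContextHeaderSize = 16;  // hypothetical header size
constexpr intptr_t kHeapObjectTag = 1;

// Tag-adjusted field offset of a context slot.
intptr_t ContextSlotOffset(intptr_t slot_index) {
  return slot_index * kPointerSize + kContextHeaderSize - kHeapObjectTag;
}
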
@@ -2488,7 +2571,7 @@ TNode<Context> CodeStubAssembler::LoadNativeContext(
TNode<Context> CodeStubAssembler::LoadModuleContext(
SloppyTNode<Context> context) {
- Node* module_map = LoadRoot(Heap::kModuleContextMapRootIndex);
+ Node* module_map = LoadRoot(RootIndex::kModuleContextMap);
Variable cur_context(this, MachineRepresentation::kTaggedPointer);
cur_context.Bind(context);
@@ -2671,8 +2754,8 @@ Node* CodeStubAssembler::StoreMap(Node* object, Node* map) {
object, IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
}
-Node* CodeStubAssembler::StoreMapNoWriteBarrier(
- Node* object, Heap::RootListIndex map_root_index) {
+Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object,
+ RootIndex map_root_index) {
return StoreMapNoWriteBarrier(object, LoadRoot(map_root_index));
}
@@ -2684,7 +2767,7 @@ Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
}
Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
- Heap::RootListIndex root_index) {
+ RootIndex root_index) {
if (Heap::RootIsImmortalImmovable(root_index)) {
return StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index));
} else {
@@ -2800,7 +2883,7 @@ void CodeStubAssembler::EnsureArrayLengthWritable(TNode<Map> map,
TNode<Name> maybe_length = CAST(LoadWeakFixedArrayElement(
descriptors, DescriptorArray::ToKeyIndex(length_index)));
CSA_ASSERT(this,
- WordEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex)));
+ WordEqual(maybe_length, LoadRoot(RootIndex::klength_string)));
#endif
TNode<Uint32T> details = LoadDetailsByKeyIndex(
@@ -2941,7 +3024,7 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
Node* CodeStubAssembler::AllocateCellWithValue(Node* value,
WriteBarrierMode mode) {
Node* result = Allocate(Cell::kSize, kNone);
- StoreMapNoWriteBarrier(result, Heap::kCellMapRootIndex);
+ StoreMapNoWriteBarrier(result, RootIndex::kCellMap);
StoreCellValue(result, value, mode);
return result;
}
@@ -2965,7 +3048,7 @@ Node* CodeStubAssembler::StoreCellValue(Node* cell, Node* value,
TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber() {
Node* result = Allocate(HeapNumber::kSize, kNone);
- Heap::RootListIndex heap_map_index = Heap::kHeapNumberMapRootIndex;
+ RootIndex heap_map_index = RootIndex::kHeapNumberMap;
StoreMapNoWriteBarrier(result, heap_map_index);
return UncheckedCast<HeapNumber>(result);
}
@@ -2979,7 +3062,7 @@ TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumberWithValue(
TNode<MutableHeapNumber> CodeStubAssembler::AllocateMutableHeapNumber() {
Node* result = Allocate(MutableHeapNumber::kSize, kNone);
- Heap::RootListIndex heap_map_index = Heap::kMutableHeapNumberMapRootIndex;
+ RootIndex heap_map_index = RootIndex::kMutableHeapNumberMap;
StoreMapNoWriteBarrier(result, heap_map_index);
return UncheckedCast<MutableHeapNumber>(result);
}
@@ -3005,7 +3088,7 @@ TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
TNode<IntPtrT> size = IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
Signed(WordShl(length, kPointerSizeLog2)));
Node* raw_result = Allocate(size, kNone);
- StoreMapNoWriteBarrier(raw_result, Heap::kBigIntMapRootIndex);
+ StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap);
return UncheckedCast<BigInt>(raw_result);
}
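
The allocation size above is header plus one word per digit. In scalar form, with stand-in constants for the 64-bit case rather than the engine's definitions:

#include <cstdint>

constexpr intptr_t kBigIntHeaderSize = 16;  // hypothetical, for illustration
constexpr intptr_t kPointerSizeLog2 = 3;    // 8-byte digits

intptr_t BigIntSizeFor(intptr_t length) {
  return kBigIntHeaderSize + (length << kPointerSizeLog2);
}
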
@@ -3035,20 +3118,20 @@ TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
}
TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
- int length, AllocationFlags flags) {
+ uint32_t length, AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
if (length == 0) {
- return CAST(LoadRoot(Heap::kempty_stringRootIndex));
+ return CAST(LoadRoot(RootIndex::kempty_string));
}
Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
- StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
+ DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kOneByteStringMap));
+ StoreMapNoWriteBarrier(result, RootIndex::kOneByteStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
- SmiConstant(length),
- MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
- IntPtrConstant(String::kEmptyHashField),
- MachineType::PointerRepresentation());
+ Uint32Constant(length),
+ MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
+ Int32Constant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
return CAST(result);
}
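
The rewritten stores above imply a header-layout change for sequential strings: length and hash are now raw 32-bit words rather than a tagged Smi and a pointer-width slot. A sketch of the implied layout, with illustrative field order rather than V8's exact offsets:

#include <cstdint>

struct SeqOneByteStringHeader {
  void* map;            // written by StoreMapNoWriteBarrier
  uint32_t length;      // Uint32Constant(length), kWord32 store
  uint32_t hash_field;  // String::kEmptyHashField, kWord32 store
  // ...one-byte character payload follows
};
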
@@ -3059,7 +3142,7 @@ TNode<BoolT> CodeStubAssembler::IsZeroOrContext(SloppyTNode<Object> object) {
}
TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
- Node* context, TNode<Smi> length, AllocationFlags flags) {
+ Node* context, TNode<Uint32T> length, AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
CSA_SLOW_ASSERT(this, IsZeroOrContext(context));
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -3067,10 +3150,10 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
// Compute the SeqOneByteString size and check if it fits into new space.
Label if_lengthiszero(this), if_sizeissmall(this),
if_notsizeissmall(this, Label::kDeferred), if_join(this);
- GotoIf(SmiEqual(length, SmiConstant(0)), &if_lengthiszero);
+ GotoIf(Word32Equal(length, Uint32Constant(0)), &if_lengthiszero);
Node* raw_size = GetArrayAllocationSize(
- SmiUntag(length), UINT8_ELEMENTS, INTPTR_PARAMETERS,
+ Signed(ChangeUint32ToWord(length)), UINT8_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
@@ -3080,13 +3163,13 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
{
// Just allocate the SeqOneByteString in new space.
Node* result = AllocateInNewSpace(size, flags);
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
- StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
+ DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kOneByteStringMap));
+ StoreMapNoWriteBarrier(result, RootIndex::kOneByteStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
- length, MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
- IntPtrConstant(String::kEmptyHashField),
- MachineType::PointerRepresentation());
+ length, MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
+ Int32Constant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
var_result.Bind(result);
Goto(&if_join);
}
@@ -3094,15 +3177,15 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
BIND(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
- Node* result =
- CallRuntime(Runtime::kAllocateSeqOneByteString, context, length);
+ Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
+ ChangeUint32ToTagged(length));
var_result.Bind(result);
Goto(&if_join);
}
BIND(&if_lengthiszero);
{
- var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex));
+ var_result.Bind(LoadRoot(RootIndex::kempty_string));
Goto(&if_join);
}
@@ -3111,25 +3194,25 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
}
TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
- int length, AllocationFlags flags) {
+ uint32_t length, AllocationFlags flags) {
Comment("AllocateSeqTwoByteString");
if (length == 0) {
- return CAST(LoadRoot(Heap::kempty_stringRootIndex));
+ return CAST(LoadRoot(RootIndex::kempty_string));
}
Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
- StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
+ DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kStringMap));
+ StoreMapNoWriteBarrier(result, RootIndex::kStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
- SmiConstant(Smi::FromInt(length)),
- MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
- IntPtrConstant(String::kEmptyHashField),
- MachineType::PointerRepresentation());
+ Uint32Constant(length),
+ MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
+ Int32Constant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
return CAST(result);
}
TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
- Node* context, TNode<Smi> length, AllocationFlags flags) {
+ Node* context, TNode<Uint32T> length, AllocationFlags flags) {
CSA_SLOW_ASSERT(this, IsZeroOrContext(context));
Comment("AllocateSeqTwoByteString");
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -3137,10 +3220,10 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
// Compute the SeqTwoByteString size and check if it fits into new space.
Label if_lengthiszero(this), if_sizeissmall(this),
if_notsizeissmall(this, Label::kDeferred), if_join(this);
- GotoIf(SmiEqual(length, SmiConstant(0)), &if_lengthiszero);
+ GotoIf(Word32Equal(length, Uint32Constant(0)), &if_lengthiszero);
Node* raw_size = GetArrayAllocationSize(
- SmiUntag(length), UINT16_ELEMENTS, INTPTR_PARAMETERS,
+ Signed(ChangeUint32ToWord(length)), UINT16_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
@@ -3150,13 +3233,13 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
{
// Just allocate the SeqTwoByteString in new space.
Node* result = AllocateInNewSpace(size, flags);
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
- StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
+ DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kStringMap));
+ StoreMapNoWriteBarrier(result, RootIndex::kStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
- length, MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
- IntPtrConstant(String::kEmptyHashField),
- MachineType::PointerRepresentation());
+ length, MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
+ Int32Constant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
var_result.Bind(result);
Goto(&if_join);
}
@@ -3164,15 +3247,15 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
BIND(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
- Node* result =
- CallRuntime(Runtime::kAllocateSeqTwoByteString, context, length);
+ Node* result = CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
+ ChangeUint32ToTagged(length));
var_result.Bind(result);
Goto(&if_join);
}
BIND(&if_lengthiszero);
{
- var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex));
+ var_result.Bind(LoadRoot(RootIndex::kempty_string));
Goto(&if_join);
}
@@ -3180,19 +3263,20 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
return CAST(var_result.value());
}
-TNode<String> CodeStubAssembler::AllocateSlicedString(
- Heap::RootListIndex map_root_index, TNode<Smi> length, TNode<String> parent,
- TNode<Smi> offset) {
- DCHECK(map_root_index == Heap::kSlicedOneByteStringMapRootIndex ||
- map_root_index == Heap::kSlicedStringMapRootIndex);
+TNode<String> CodeStubAssembler::AllocateSlicedString(RootIndex map_root_index,
+ TNode<Uint32T> length,
+ TNode<String> parent,
+ TNode<Smi> offset) {
+ DCHECK(map_root_index == RootIndex::kSlicedOneByteStringMap ||
+ map_root_index == RootIndex::kSlicedStringMap);
Node* result = Allocate(SlicedString::kSize);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
+ StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
+ Int32Constant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
- MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldSlot,
- IntPtrConstant(String::kEmptyHashField),
- MachineType::PointerRepresentation());
+ MachineRepresentation::kWord32);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
@@ -3201,30 +3285,32 @@ TNode<String> CodeStubAssembler::AllocateSlicedString(
}
TNode<String> CodeStubAssembler::AllocateSlicedOneByteString(
- TNode<Smi> length, TNode<String> parent, TNode<Smi> offset) {
- return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length,
+ TNode<Uint32T> length, TNode<String> parent, TNode<Smi> offset) {
+ return AllocateSlicedString(RootIndex::kSlicedOneByteStringMap, length,
parent, offset);
}
TNode<String> CodeStubAssembler::AllocateSlicedTwoByteString(
- TNode<Smi> length, TNode<String> parent, TNode<Smi> offset) {
- return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent,
+ TNode<Uint32T> length, TNode<String> parent, TNode<Smi> offset) {
+ return AllocateSlicedString(RootIndex::kSlicedStringMap, length, parent,
offset);
}
-TNode<String> CodeStubAssembler::AllocateConsString(
- Heap::RootListIndex map_root_index, TNode<Smi> length, TNode<String> first,
- TNode<String> second, AllocationFlags flags) {
- DCHECK(map_root_index == Heap::kConsOneByteStringMapRootIndex ||
- map_root_index == Heap::kConsStringMapRootIndex);
+TNode<String> CodeStubAssembler::AllocateConsString(RootIndex map_root_index,
+ TNode<Uint32T> length,
+ TNode<String> first,
+ TNode<String> second,
+ AllocationFlags flags) {
+ DCHECK(map_root_index == RootIndex::kConsOneByteStringMap ||
+ map_root_index == RootIndex::kConsStringMap);
Node* result = Allocate(ConsString::kSize, flags);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
- MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldSlot,
- IntPtrConstant(String::kEmptyHashField),
- MachineType::PointerRepresentation());
+ MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
+ Int32Constant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
bool const new_space = !(flags & kPretenured);
if (new_space) {
StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first,
@@ -3239,24 +3325,23 @@ TNode<String> CodeStubAssembler::AllocateConsString(
}
TNode<String> CodeStubAssembler::AllocateOneByteConsString(
- TNode<Smi> length, TNode<String> first, TNode<String> second,
+ TNode<Uint32T> length, TNode<String> first, TNode<String> second,
AllocationFlags flags) {
- return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first,
+ return AllocateConsString(RootIndex::kConsOneByteStringMap, length, first,
second, flags);
}
TNode<String> CodeStubAssembler::AllocateTwoByteConsString(
- TNode<Smi> length, TNode<String> first, TNode<String> second,
+ TNode<Uint32T> length, TNode<String> first, TNode<String> second,
AllocationFlags flags) {
- return AllocateConsString(Heap::kConsStringMapRootIndex, length, first,
- second, flags);
+ return AllocateConsString(RootIndex::kConsStringMap, length, first, second,
+ flags);
}
-TNode<String> CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
+TNode<String> CodeStubAssembler::NewConsString(TNode<Uint32T> length,
TNode<String> left,
TNode<String> right,
AllocationFlags flags) {
- CSA_ASSERT(this, IsContext(context));
// Added string can be a cons string.
Comment("Allocating ConsString");
Node* left_instance_type = LoadInstanceType(left);
@@ -3333,8 +3418,8 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
UncheckedCast<NameDictionary>(AllocateInNewSpace(store_size));
Comment("Initialize NameDictionary");
// Initialize FixedArray fields.
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kNameDictionaryMapRootIndex));
- StoreMapNoWriteBarrier(result, Heap::kNameDictionaryMapRootIndex);
+ DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kNameDictionaryMap));
+ StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap);
StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
SmiFromIntPtr(length));
// Initialize HashTable fields.
@@ -3398,8 +3483,8 @@ Node* CodeStubAssembler::AllocateOrderedHashTable() {
// Allocate the table and add the proper map.
const ElementsKind elements_kind = HOLEY_ELEMENTS;
TNode<IntPtrT> length_intptr = IntPtrConstant(kFixedArrayLength);
- TNode<Map> fixed_array_map = CAST(LoadRoot(
- static_cast<Heap::RootListIndex>(CollectionType::GetMapRootIndex())));
+ TNode<Map> fixed_array_map =
+ CAST(LoadRoot(CollectionType::GetMapRootIndex()));
TNode<FixedArray> table =
CAST(AllocateFixedArray(elements_kind, length_intptr,
kAllowLargeObjectAllocation, fixed_array_map));
@@ -3472,8 +3557,8 @@ TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
UncheckedCast<IntPtrT>(TimesPointerSize(total_size_word_aligned));
// Allocate the table and add the proper map.
- TNode<Map> small_ordered_hash_map = CAST(LoadRoot(
- static_cast<Heap::RootListIndex>(CollectionType::GetMapRootIndex())));
+ TNode<Map> small_ordered_hash_map =
+ CAST(LoadRoot(CollectionType::GetMapRootIndex()));
TNode<Object> table_obj = CAST(AllocateInNewSpace(total_size_word_aligned));
StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map);
TNode<CollectionType> table = UncheckedCast<CollectionType>(table_obj);
@@ -3521,7 +3606,7 @@ CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
template <typename CollectionType>
void CodeStubAssembler::FindOrderedHashTableEntry(
Node* table, Node* hash,
- std::function<void(Node*, Label*, Label*)> key_compare,
+ const std::function<void(Node*, Label*, Label*)>& key_compare,
Variable* entry_start_position, Label* entry_found, Label* not_found) {
// Get the index of the bucket.
Node* const number_of_buckets = SmiUntag(CAST(LoadFixedArrayElement(
@@ -3588,11 +3673,11 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashMap>(
Node* table, Node* hash,
- std::function<void(Node*, Label*, Label*)> key_compare,
+ const std::function<void(Node*, Label*, Label*)>& key_compare,
Variable* entry_start_position, Label* entry_found, Label* not_found);
template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashSet>(
Node* table, Node* hash,
- std::function<void(Node*, Label*, Label*)> key_compare,
+ const std::function<void(Node*, Label*, Label*)>& key_compare,
Variable* entry_start_position, Label* entry_found, Label* not_found);
Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) {
@@ -3643,7 +3728,7 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
if (properties == nullptr) {
CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap((map))));
StoreObjectFieldRoot(object, JSObject::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
} else {
CSA_ASSERT(this, Word32Or(Word32Or(IsPropertyArray(properties),
IsNameDictionary(properties)),
@@ -3653,7 +3738,7 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
}
if (elements == nullptr) {
StoreObjectFieldRoot(object, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
} else {
CSA_ASSERT(this, IsFixedArray(elements));
StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset, elements);
@@ -3672,7 +3757,7 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
CSA_ASSERT(
this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), instance_size,
- Heap::kUndefinedValueRootIndex);
+ RootIndex::kUndefinedValue);
}
void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
@@ -3711,11 +3796,11 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
Comment("iInitialize filler fields");
InitializeFieldsWithRoot(object, used_size, instance_size,
- Heap::kOnePointerFillerMapRootIndex);
+ RootIndex::kOnePointerFillerMap);
Comment("Initialize undefined fields");
InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), used_size,
- Heap::kUndefinedValueRootIndex);
+ RootIndex::kUndefinedValue);
STATIC_ASSERT(Map::kNoSlackTracking == 0);
GotoIf(IsClearWord32<Map::ConstructionCounterBits>(new_bit_field3),
@@ -3792,9 +3877,9 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset, elements);
// Setup elements object.
STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
- Heap::RootListIndex elements_map_index =
- IsDoubleElementsKind(kind) ? Heap::kFixedDoubleArrayMapRootIndex
- : Heap::kFixedArrayMapRootIndex;
+ RootIndex elements_map_index = IsDoubleElementsKind(kind)
+ ? RootIndex::kFixedDoubleArrayMap
+ : RootIndex::kFixedArrayMap;
DCHECK(Heap::RootIsImmortalImmovable(elements_map_index));
StoreMapNoWriteBarrier(elements, elements_map_index);
TNode<Smi> capacity_smi = ParameterToTagged(capacity, capacity_mode);
@@ -3820,7 +3905,7 @@ Node* CodeStubAssembler::AllocateUninitializedJSArray(Node* array_map,
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
if (allocation_site != nullptr) {
InitializeAllocationMemento(array, IntPtrConstant(JSArray::kSize),
@@ -3845,7 +3930,7 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
array = AllocateUninitializedJSArrayWithoutElements(array_map, length,
allocation_site);
StoreObjectFieldRoot(array, JSArray::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
} else if (TryGetIntPtrOrSmiConstantValue(capacity, &capacity_as_constant,
capacity_mode) &&
capacity_as_constant > 0) {
@@ -3855,7 +3940,7 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
// Fill in the elements with holes.
FillFixedArrayWithValue(kind, elements,
IntPtrOrSmiConstant(0, capacity_mode), capacity,
- Heap::kTheHoleValueRootIndex, capacity_mode);
+ RootIndex::kTheHoleValue, capacity_mode);
} else {
Label out(this), empty(this), nonempty(this);
VARIABLE(var_array, MachineRepresentation::kTagged);
@@ -3870,7 +3955,7 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
var_array.Bind(AllocateUninitializedJSArrayWithoutElements(
array_map, length, allocation_site));
StoreObjectFieldRoot(var_array.value(), JSArray::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
Goto(&out);
}
@@ -3884,7 +3969,7 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
// Fill in the elements with holes.
FillFixedArrayWithValue(kind, elements,
IntPtrOrSmiConstant(0, capacity_mode), capacity,
- Heap::kTheHoleValueRootIndex, capacity_mode);
+ RootIndex::kTheHoleValue, capacity_mode);
Goto(&out);
}
@@ -3918,23 +4003,66 @@ Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array,
Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
ParameterMode mode,
- Node* allocation_site) {
- Node* original_array_map = LoadMap(array);
- Node* elements_kind = LoadMapElementsKind(original_array_map);
+ Node* allocation_site,
+ HoleConversionMode convert_holes) {
+ // TODO(dhai): we should be able to assert IsFastJSArray(array) here, but this
+ // function is also used to copy boilerplates even when the no-elements
+ // protector is invalid. This function should be renamed to reflect its uses.
+ CSA_ASSERT(this, IsJSArray(array));
Node* length = LoadJSArrayLength(array);
- Node* new_elements = ExtractFixedArray(
+ Node* new_elements = nullptr;
+ VARIABLE(var_new_elements, MachineRepresentation::kTagged);
+ TVARIABLE(Int32T, var_elements_kind, LoadMapElementsKind(LoadMap(array)));
+
+ Label allocate_jsarray(this), holey_extract(this);
+
+ bool need_conversion =
+ convert_holes == HoleConversionMode::kConvertToUndefined;
+ if (need_conversion) {
+ // We need to take care of holes, if the array is of holey elements kind.
+ GotoIf(IsHoleyFastElementsKind(var_elements_kind.value()), &holey_extract);
+ }
+
+ // Simple extraction that preserves holes.
+ new_elements = ExtractFixedArray(
LoadElements(array), IntPtrOrSmiConstant(0, mode),
TaggedToParameter(length, mode), nullptr,
ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode);
-
- // Use the cannonical map for the Array's ElementsKind
+ var_new_elements.Bind(new_elements);
+ Goto(&allocate_jsarray);
+
+ if (need_conversion) {
+ BIND(&holey_extract);
+ // Convert holes to undefined.
+ TVARIABLE(BoolT, var_holes_converted, Int32FalseConstant());
+ // Copy |array|'s elements store. The copy will be compatible with the
+ // original elements kind unless there are holes in the source. Any holes
+ // get converted to undefined, hence in that case the copy is compatible
+ // only with PACKED_ELEMENTS and HOLEY_ELEMENTS, and we will choose
+ // PACKED_ELEMENTS. Also, if we want to replace holes, we must not use
+ // ExtractFixedArrayFlag::kDontCopyCOW.
+ new_elements = ExtractFixedArray(
+ LoadElements(array), IntPtrOrSmiConstant(0, mode),
+ TaggedToParameter(length, mode), nullptr,
+ ExtractFixedArrayFlag::kAllFixedArrays, mode, &var_holes_converted);
+ var_new_elements.Bind(new_elements);
+ // If the array type didn't change, use the original elements kind.
+ GotoIfNot(var_holes_converted.value(), &allocate_jsarray);
+ // Otherwise use PACKED_ELEMENTS for the target's elements kind.
+ var_elements_kind = Int32Constant(PACKED_ELEMENTS);
+ Goto(&allocate_jsarray);
+ }
+
+ BIND(&allocate_jsarray);
+ // Use the canonical map for the chosen elements kind.
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
+ Node* array_map =
+ LoadJSArrayElementsMap(var_elements_kind.value(), native_context);
Node* result = AllocateUninitializedJSArrayWithoutElements(array_map, length,
allocation_site);
- StoreObjectField(result, JSObject::kElementsOffset, new_elements);
+ StoreObjectField(result, JSObject::kElementsOffset, var_new_elements.value());
return result;
}
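
The elements-kind decision CloneFastJSArray makes above boils down to: keep the source kind unless holes were actually converted, in which case fall back to PACKED_ELEMENTS. A compact restatement under that reading, with a deliberately truncated kind enum:

enum ElementsKind { PACKED_ELEMENTS, HOLEY_ELEMENTS /* ... */ };

ElementsKind TargetElementsKind(ElementsKind source_kind, bool convert_holes,
                                bool holes_converted) {
  if (convert_holes && holes_converted) return PACKED_ELEMENTS;
  return source_kind;  // the copy stayed compatible with the source kind
}
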
@@ -3963,9 +4091,9 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
StoreMap(array, fixed_array_map);
}
} else {
- Heap::RootListIndex map_index = IsDoubleElementsKind(kind)
- ? Heap::kFixedDoubleArrayMapRootIndex
- : Heap::kFixedArrayMapRootIndex;
+ RootIndex map_index = IsDoubleElementsKind(kind)
+ ? RootIndex::kFixedDoubleArrayMap
+ : RootIndex::kFixedArrayMap;
DCHECK(Heap::RootIsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(array, map_index);
}
@@ -3974,62 +4102,59 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
return UncheckedCast<FixedArray>(array);
}
-TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
- Node* fixed_array, Node* first, Node* count, Node* capacity,
- ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode) {
+TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
+ Node* source, Node* first, Node* count, Node* capacity, Node* source_map,
+ ElementsKind from_kind, AllocationFlags allocation_flags,
+ ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode,
+ HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted) {
+ DCHECK_NE(first, nullptr);
+ DCHECK_NE(count, nullptr);
+ DCHECK_NE(capacity, nullptr);
+ DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays);
+ CSA_ASSERT(this,
+ WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity));
+ CSA_ASSERT(this, WordEqual(source_map, LoadMap(source)));
+
VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_fixed_array_map, MachineRepresentation::kTagged);
- const AllocationFlags flags =
- (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly)
- ? CodeStubAssembler::kNone
- : CodeStubAssembler::kAllowLargeObjectAllocation;
- if (first == nullptr) {
- first = IntPtrOrSmiConstant(0, parameter_mode);
- }
- if (count == nullptr) {
- count =
- IntPtrOrSmiSub(TaggedToParameter(LoadFixedArrayBaseLength(fixed_array),
- parameter_mode),
- first, parameter_mode);
+ VARIABLE(var_target_map, MachineRepresentation::kTagged, source_map);
- CSA_ASSERT(
- this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant(0, parameter_mode),
- count, parameter_mode));
- }
- if (capacity == nullptr) {
- capacity = count;
- } else {
- CSA_ASSERT(this, Word32BinaryNot(IntPtrOrSmiGreaterThan(
- IntPtrOrSmiAdd(first, count, parameter_mode), capacity,
- parameter_mode)));
- }
+ Label done(this, {&var_result}), is_cow(this),
+ new_space_check(this, {&var_target_map});
- Label if_fixed_double_array(this), empty(this), cow(this),
- done(this, {&var_result, &var_fixed_array_map});
- var_fixed_array_map.Bind(LoadMap(fixed_array));
- GotoIf(WordEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity), &empty);
+ // If source_map is FixedDoubleArrayMap, or FixedCOWArrayMap when we cannot
+ // simply reuse the COW array, use FixedArrayMap as the target map.
+ // Otherwise, use source_map as the target map.
+ if (IsDoubleElementsKind(from_kind)) {
+ CSA_ASSERT(this, IsFixedDoubleArrayMap(source_map));
+ var_target_map.Bind(LoadRoot(RootIndex::kFixedArrayMap));
+ Goto(&new_space_check);
+ } else {
+ CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArrayMap(source_map)));
+ Branch(WordEqual(var_target_map.value(),
+ LoadRoot(RootIndex::kFixedCOWArrayMap)),
+ &is_cow, &new_space_check);
- if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) {
- if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
- GotoIf(IsFixedDoubleArrayMap(var_fixed_array_map.value()),
- &if_fixed_double_array);
- } else {
- CSA_ASSERT(this, IsFixedDoubleArrayMap(var_fixed_array_map.value()));
+ BIND(&is_cow);
+ {
+ // |source| is a COW array, so we don't actually need to allocate a new
+ // array unless:
+ // 1) |extract_flags| forces us to, or
+ // 2) we're asked to extract only part of the |source| (|first| != 0).
+ if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) {
+ Branch(WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), first),
+ &new_space_check, [&] {
+ var_result.Bind(source);
+ Goto(&done);
+ });
+ } else {
+ var_target_map.Bind(LoadRoot(RootIndex::kFixedArrayMap));
+ Goto(&new_space_check);
+ }
}
- } else {
- DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays);
- CSA_ASSERT(this, Word32BinaryNot(
- IsFixedDoubleArrayMap(var_fixed_array_map.value())));
}
- if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
- Label new_space_check(this, {&var_fixed_array_map});
- Branch(WordEqual(var_fixed_array_map.value(),
- LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
- &cow, &new_space_check);
-
- BIND(&new_space_check);
-
+ BIND(&new_space_check);
+ {
bool handle_old_space = true;
if (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) {
handle_old_space = false;
@@ -4050,60 +4175,208 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
capacity, &old_space, FixedArray::kHeaderSize, parameter_mode);
}
- Comment("Copy PACKED_ELEMENTS new space");
-
- ElementsKind kind = PACKED_ELEMENTS;
+ Comment("Copy FixedArray new space");
+ // We use PACKED_ELEMENTS to tell AllocateFixedArray and
+ // CopyFixedArrayElements that we want a FixedArray.
+ ElementsKind to_kind = PACKED_ELEMENTS;
Node* to_elements =
- AllocateFixedArray(kind, capacity, parameter_mode,
- AllocationFlag::kNone, var_fixed_array_map.value());
+ AllocateFixedArray(to_kind, capacity, parameter_mode,
+ AllocationFlag::kNone, var_target_map.value());
var_result.Bind(to_elements);
- CopyFixedArrayElements(kind, fixed_array, kind, to_elements, first, count,
- capacity, SKIP_WRITE_BARRIER, parameter_mode);
+ CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
+ count, capacity, SKIP_WRITE_BARRIER, parameter_mode,
+ convert_holes, var_holes_converted);
Goto(&done);
if (handle_old_space) {
BIND(&old_space);
{
- Comment("Copy PACKED_ELEMENTS old space");
+ Comment("Copy FixedArray old space");
- to_elements = AllocateFixedArray(kind, capacity, parameter_mode, flags,
- var_fixed_array_map.value());
+ to_elements =
+ AllocateFixedArray(to_kind, capacity, parameter_mode,
+ allocation_flags, var_target_map.value());
var_result.Bind(to_elements);
- CopyFixedArrayElements(kind, fixed_array, kind, to_elements, first,
+ CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
count, capacity, UPDATE_WRITE_BARRIER,
- parameter_mode);
+ parameter_mode, convert_holes,
+ var_holes_converted);
Goto(&done);
}
}
+ }
- BIND(&cow);
- {
- if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) {
- Branch(WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), first),
- &new_space_check, [&] {
- var_result.Bind(fixed_array);
- Goto(&done);
- });
- } else {
- var_fixed_array_map.Bind(LoadRoot(Heap::kFixedArrayMapRootIndex));
- Goto(&new_space_check);
- }
- }
+ BIND(&done);
+ return UncheckedCast<FixedArray>(var_result.value());
+}
+
+TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
+ Node* from_array, Node* first, Node* count, Node* capacity,
+ Node* fixed_array_map, TVariable<BoolT>* var_holes_converted,
+ AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags,
+ ParameterMode mode) {
+ DCHECK_NE(first, nullptr);
+ DCHECK_NE(count, nullptr);
+ DCHECK_NE(capacity, nullptr);
+ DCHECK_NE(var_holes_converted, nullptr);
+ CSA_ASSERT(this, IsFixedDoubleArrayMap(fixed_array_map));
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
+ Node* to_elements = AllocateFixedArray(kind, capacity, mode, allocation_flags,
+ fixed_array_map);
+ var_result.Bind(to_elements);
+ // We first try to copy the FixedDoubleArray to a new FixedDoubleArray.
+ // |var_holes_converted| is set to False preliminarily.
+ *var_holes_converted = Int32FalseConstant();
+
+ // The construction of the loop and the offsets for double elements is
+ // extracted from CopyFixedArrayElements.
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(count, mode));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
+ CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, kind));
+ STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+
+ Comment("[ ExtractFixedDoubleArrayFillingHoles");
+
+ // This copy can trigger GC, so we pre-initialize the array with holes.
+ FillFixedArrayWithValue(kind, to_elements, IntPtrOrSmiConstant(0, mode),
+ capacity, RootIndex::kTheHoleValue, mode);
+
+ const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+ Node* first_from_element_offset =
+ ElementOffsetFromIndex(first, kind, mode, 0);
+ Node* limit_offset = IntPtrAdd(first_from_element_offset,
+ IntPtrConstant(first_element_offset));
+ VARIABLE(var_from_offset, MachineType::PointerRepresentation(),
+ ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count, mode), kind,
+ mode, first_element_offset));
+
+ Label decrement(this, {&var_from_offset}), done(this);
+ Node* to_array_adjusted =
+ IntPtrSub(BitcastTaggedToWord(to_elements), first_from_element_offset);
+
+ Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement);
+
+ BIND(&decrement);
+ {
+ Node* from_offset =
+ IntPtrSub(var_from_offset.value(), IntPtrConstant(kDoubleSize));
+ var_from_offset.Bind(from_offset);
+
+ Node* to_offset = from_offset;
+
+ Label if_hole(this);
+
+ Node* value = LoadElementAndPrepareForStore(
+ from_array, var_from_offset.value(), kind, kind, &if_hole);
+
+ StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array_adjusted,
+ to_offset, value);
+
+ Node* compare = WordNotEqual(from_offset, limit_offset);
+ Branch(compare, &decrement, &done);
+
+ BIND(&if_hole);
+ // We are unlucky: there are holes! We need to restart the copy, this time
+ // we will copy the FixedDoubleArray to a new FixedArray with undefined
+ // replacing holes. We signal this to the caller through
+ // |var_holes_converted|.
+ *var_holes_converted = Int32TrueConstant();
+ to_elements =
+ ExtractToFixedArray(from_array, first, count, capacity, fixed_array_map,
+ kind, allocation_flags, extract_flags, mode,
+ HoleConversionMode::kConvertToUndefined);
+ var_result.Bind(to_elements);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ Comment("] ExtractFixedDoubleArrayFillingHoles");
+ return UncheckedCast<FixedArrayBase>(var_result.value());
+}
+
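
The copy loop above detects holes by their reserved NaN bit pattern (the if_hole exit from LoadElementAndPrepareForStore). A sketch of that test in scalar C++; the sentinel value is an assumed stand-in, not the engine's definition of kHoleNanInt64:

#include <cstdint>
#include <cstring>

constexpr uint64_t kHoleNanInt64 = 0xFFF7FFFFFFF7FFFFull;  // hypothetical

bool IsHole(double element) {
  uint64_t bits;
  std::memcpy(&bits, &element, sizeof(bits));  // type-pun safely
  return bits == kHoleNanInt64;
}
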
+TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
+ Node* source, Node* first, Node* count, Node* capacity,
+ ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode,
+ TVariable<BoolT>* var_holes_converted) {
+ DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays ||
+ extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays);
+ // If we want to replace holes, ExtractFixedArrayFlag::kDontCopyCOW should not
+ // be used, because that disables the iteration which detects holes.
+ DCHECK_IMPLIES(var_holes_converted != nullptr,
+ !(extract_flags & ExtractFixedArrayFlag::kDontCopyCOW));
+ HoleConversionMode convert_holes =
+ var_holes_converted != nullptr ? HoleConversionMode::kConvertToUndefined
+ : HoleConversionMode::kDontConvert;
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ const AllocationFlags allocation_flags =
+ (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly)
+ ? CodeStubAssembler::kNone
+ : CodeStubAssembler::kAllowLargeObjectAllocation;
+ if (first == nullptr) {
+ first = IntPtrOrSmiConstant(0, parameter_mode);
+ }
+ if (count == nullptr) {
+ count = IntPtrOrSmiSub(
+ TaggedToParameter(LoadFixedArrayBaseLength(source), parameter_mode),
+ first, parameter_mode);
+
+ CSA_ASSERT(
+ this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant(0, parameter_mode),
+ count, parameter_mode));
+ }
+ if (capacity == nullptr) {
+ capacity = count;
} else {
- Goto(&if_fixed_double_array);
+ CSA_ASSERT(this, Word32BinaryNot(IntPtrOrSmiGreaterThan(
+ IntPtrOrSmiAdd(first, count, parameter_mode), capacity,
+ parameter_mode)));
}
- if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) {
- BIND(&if_fixed_double_array);
+ Label if_fixed_double_array(this), empty(this), done(this, {&var_result});
+ Node* source_map = LoadMap(source);
+ GotoIf(WordEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity), &empty);
- Comment("Copy PACKED_DOUBLE_ELEMENTS");
+ if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) {
+ if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
+ GotoIf(IsFixedDoubleArrayMap(source_map), &if_fixed_double_array);
+ } else {
+ CSA_ASSERT(this, IsFixedDoubleArrayMap(source_map));
+ }
+ }
- ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
- Node* to_elements = AllocateFixedArray(kind, capacity, parameter_mode,
- flags, var_fixed_array_map.value());
+ if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
+ // Here we can only get |source| as FixedArray, never FixedDoubleArray.
+ // PACKED_ELEMENTS is used to signify that the source is a FixedArray.
+ Node* to_elements =
+ ExtractToFixedArray(source, first, count, capacity, source_map,
+ PACKED_ELEMENTS, allocation_flags, extract_flags,
+ parameter_mode, convert_holes, var_holes_converted);
var_result.Bind(to_elements);
- CopyFixedArrayElements(kind, fixed_array, kind, to_elements, first, count,
- capacity, SKIP_WRITE_BARRIER, parameter_mode);
+ Goto(&done);
+ }
+
+ if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) {
+ BIND(&if_fixed_double_array);
+ Comment("Copy FixedDoubleArray");
+
+ if (convert_holes == HoleConversionMode::kConvertToUndefined) {
+ Node* to_elements = ExtractFixedDoubleArrayFillingHoles(
+ source, first, count, capacity, source_map, var_holes_converted,
+ allocation_flags, extract_flags, parameter_mode);
+ var_result.Bind(to_elements);
+ } else {
+ // We use PACKED_DOUBLE_ELEMENTS to signify that both the source and
+ // the target are FixedDoubleArray. Whether it is PACKED or HOLEY does not
+ // matter.
+ ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
+ Node* to_elements = AllocateFixedArray(kind, capacity, parameter_mode,
+ allocation_flags, source_map);
+ var_result.Bind(to_elements);
+ CopyFixedArrayElements(kind, source, kind, to_elements, first, count,
+ capacity, SKIP_WRITE_BARRIER, parameter_mode);
+ }
Goto(&done);
}
@@ -4145,7 +4418,7 @@ Node* CodeStubAssembler::AllocatePropertyArray(Node* capacity_node,
Node* total_size = GetPropertyArrayAllocationSize(capacity_node, mode);
Node* array = Allocate(total_size, flags);
- Heap::RootListIndex map_index = Heap::kPropertyArrayMapRootIndex;
+ RootIndex map_index = RootIndex::kPropertyArrayMap;
DCHECK(Heap::RootIsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(array, map_index);
InitializePropertyArrayLength(array, capacity_node, mode);
@@ -4170,14 +4443,15 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(Node* array,
mode);
}
-void CodeStubAssembler::FillFixedArrayWithValue(
- ElementsKind kind, Node* array, Node* from_node, Node* to_node,
- Heap::RootListIndex value_root_index, ParameterMode mode) {
+void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind, Node* array,
+ Node* from_node, Node* to_node,
+ RootIndex value_root_index,
+ ParameterMode mode) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(from_node, mode));
CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKind(array, kind));
- DCHECK(value_root_index == Heap::kTheHoleValueRootIndex ||
- value_root_index == Heap::kUndefinedValueRootIndex);
+ DCHECK(value_root_index == RootIndex::kTheHoleValue ||
+ value_root_index == RootIndex::kUndefinedValue);
// Determine the value to initialize the {array} based
// on the {value_root_index} and the elements {kind}.
@@ -4200,6 +4474,33 @@ void CodeStubAssembler::FillFixedArrayWithValue(
mode);
}
+void CodeStubAssembler::StoreFixedDoubleArrayHole(
+ TNode<FixedDoubleArray> array, Node* index, ParameterMode parameter_mode) {
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(index, parameter_mode));
+ Node* offset =
+ ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, parameter_mode,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ CSA_ASSERT(this, IsOffsetInBounds(
+ offset, LoadAndUntagFixedArrayBaseLength(array),
+ FixedDoubleArray::kHeaderSize, PACKED_DOUBLE_ELEMENTS));
+ Node* double_hole =
+ Is64() ? ReinterpretCast<UintPtrT>(Int64Constant(kHoleNanInt64))
+ : ReinterpretCast<UintPtrT>(Int32Constant(kHoleNanLower32));
+ // TODO(danno): When we have a Float32/Float64 wrapper class that
+ // preserves double bits during manipulation, remove this code/change
+ // this to an indexed Float64 store.
+ if (Is64()) {
+ StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
+ double_hole);
+ } else {
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
+ double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, array,
+ IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ double_hole);
+ }
+}
+
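
StoreFixedDoubleArrayHole above writes the hole NaN either as one 64-bit word or, on 32-bit targets, as two 32-bit words stored back to back. The same two paths in plain C++, with assumed stand-ins for the sentinel constants:

#include <cstdint>
#include <cstring>

constexpr uint64_t kHoleNanInt64 = 0xFFF7FFFFFFF7FFFFull;  // hypothetical
constexpr uint32_t kHoleNanLower32 = 0xFFF7FFFFu;          // hypothetical

void StoreHole64(void* slot) {
  std::memcpy(slot, &kHoleNanInt64, sizeof(kHoleNanInt64));
}

void StoreHole32(void* slot) {
  // Both 32-bit halves of the hole pattern, one word apart.
  std::memcpy(slot, &kHoleNanLower32, sizeof(kHoleNanLower32));
  std::memcpy(static_cast<char*>(slot) + sizeof(uint32_t), &kHoleNanLower32,
              sizeof(kHoleNanLower32));
}
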
void CodeStubAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array,
TNode<IntPtrT> length) {
CSA_ASSERT(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
@@ -4245,7 +4546,10 @@ void CodeStubAssembler::FillFixedDoubleArrayWithZero(
void CodeStubAssembler::CopyFixedArrayElements(
ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
Node* to_array, Node* first_element, Node* element_count, Node* capacity,
- WriteBarrierMode barrier_mode, ParameterMode mode) {
+ WriteBarrierMode barrier_mode, ParameterMode mode,
+ HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted) {
+ DCHECK_IMPLIES(var_holes_converted != nullptr,
+ convert_holes == HoleConversionMode::kConvertToUndefined);
CSA_SLOW_ASSERT(this, MatchesParameterMode(element_count, mode));
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, from_kind));
@@ -4273,15 +4577,25 @@ void CodeStubAssembler::CopyFixedArrayElements(
Is64() ? ReinterpretCast<UintPtrT>(Int64Constant(kHoleNanInt64))
: ReinterpretCast<UintPtrT>(Int32Constant(kHoleNanLower32));
- if (doubles_to_objects_conversion) {
- // If the copy might trigger a GC, make sure that the FixedArray is
- // pre-initialized with holes to make sure that it's always in a
- // consistent state.
+ // If copying might trigger a GC, we pre-initialize the FixedArray such that
+ // it's always in a consistent state.
+ if (convert_holes == HoleConversionMode::kConvertToUndefined) {
+ DCHECK(IsObjectElementsKind(to_kind));
+ // Use undefined for the part that we copy and holes for the rest.
+ // If we later run into a hole in the source, we can simply skip writing
+ // to the target and are still guaranteed to end up with undefined there.
FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode),
- capacity, Heap::kTheHoleValueRootIndex, mode);
+ element_count, RootIndex::kUndefinedValue, mode);
+ FillFixedArrayWithValue(to_kind, to_array, element_count, capacity,
+ RootIndex::kTheHoleValue, mode);
+ } else if (doubles_to_objects_conversion) {
+ // Pre-initialize the target with holes, so that if we later run into a
+ // hole in the source we can just skip writing to the target.
+ FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode),
+ capacity, RootIndex::kTheHoleValue, mode);
} else if (element_count != capacity) {
FillFixedArrayWithValue(to_kind, to_array, element_count, capacity,
- Heap::kTheHoleValueRootIndex, mode);
+ RootIndex::kTheHoleValue, mode);
}
Node* first_from_element_offset =
@@ -4302,8 +4616,10 @@ void CodeStubAssembler::CopyFixedArrayElements(
first_element_offset));
}
- Variable* vars[] = {&var_from_offset, &var_to_offset};
- Label decrement(this, 2, vars);
+ Variable* vars[] = {&var_from_offset, &var_to_offset, var_holes_converted};
+ int num_vars =
+ var_holes_converted != nullptr ? arraysize(vars) : arraysize(vars) - 1;
+ Label decrement(this, num_vars, vars);
Node* to_array_adjusted =
element_offset_matches
@@ -4329,9 +4645,13 @@ void CodeStubAssembler::CopyFixedArrayElements(
var_to_offset.Bind(to_offset);
}
- Label next_iter(this), store_double_hole(this);
+ Label next_iter(this), store_double_hole(this), signal_hole(this);
Label* if_hole;
- if (doubles_to_objects_conversion) {
+ if (convert_holes == HoleConversionMode::kConvertToUndefined) {
+ // The target elements array is already preinitialized with undefined
+ // so we only need to signal that a hole was found and continue the loop.
+ if_hole = &signal_hole;
+ } else if (doubles_to_objects_conversion) {
// The target elements array is already preinitialized with holes, so we
// can just proceed with the next iteration.
if_hole = &next_iter;
@@ -4378,6 +4698,13 @@ void CodeStubAssembler::CopyFixedArrayElements(
double_hole);
}
Goto(&next_iter);
+ } else if (if_hole == &signal_hole) {
+ // This case happens only when IsObjectElementsKind(to_kind).
+ BIND(&signal_hole);
+ if (var_holes_converted != nullptr) {
+ *var_holes_converted = Int32TrueConstant();
+ }
+ Goto(&next_iter);
}
BIND(&next_iter);
@@ -4393,9 +4720,8 @@ TNode<FixedArray> CodeStubAssembler::HeapObjectToFixedArray(
TNode<HeapObject> base, Label* cast_fail) {
Label fixed_array(this);
TNode<Map> map = LoadMap(base);
- GotoIf(WordEqual(map, LoadRoot(Heap::kFixedArrayMapRootIndex)), &fixed_array);
- GotoIf(WordNotEqual(map, LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
- cast_fail);
+ GotoIf(WordEqual(map, LoadRoot(RootIndex::kFixedArrayMap)), &fixed_array);
+ GotoIf(WordNotEqual(map, LoadRoot(RootIndex::kFixedCOWArrayMap)), cast_fail);
Goto(&fixed_array);
BIND(&fixed_array);
return UncheckedCast<FixedArray>(base);
@@ -4603,7 +4929,7 @@ void CodeStubAssembler::InitializeAllocationMemento(Node* base,
Node* allocation_site) {
Comment("[Initialize AllocationMemento");
Node* memento = InnerAllocate(base, base_allocation_size);
- StoreMapNoWriteBarrier(memento, Heap::kAllocationMementoMapRootIndex);
+ StoreMapNoWriteBarrier(memento, RootIndex::kAllocationMementoMap);
StoreObjectFieldNoWriteBarrier(
memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
if (FLAG_allocation_site_pretenuring) {
@@ -4891,28 +5217,13 @@ TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
if_join(this);
TVARIABLE(Number, var_result);
// If {value} > 2^31 - 1, we need to store it in a HeapNumber.
- Branch(Uint32LessThan(Int32Constant(Smi::kMaxValue), value), &if_overflow,
+ Branch(Uint32LessThan(Uint32Constant(Smi::kMaxValue), value), &if_overflow,
&if_not_overflow);
BIND(&if_not_overflow);
{
- if (SmiValuesAre32Bits()) {
- var_result =
- SmiTag(ReinterpretCast<IntPtrT>(ChangeUint32ToUint64(value)));
- } else {
- DCHECK(SmiValuesAre31Bits());
- // If tagging {value} results in an overflow, we need to use a HeapNumber
- // to represent it.
- // TODO(tebbi): This overflow can never happen.
- TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(
- UncheckedCast<Int32T>(value), UncheckedCast<Int32T>(value));
- TNode<BoolT> overflow = Projection<1>(pair);
- GotoIf(overflow, &if_overflow);
-
- TNode<IntPtrT> almost_tagged_value =
- ChangeInt32ToIntPtr(Projection<0>(pair));
- var_result = BitcastWordToTaggedSigned(almost_tagged_value);
- }
+ // The {value} is definitely in valid Smi range.
+ var_result = SmiTag(Signed(ChangeUint32ToWord(value)));
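+ // (With 31-bit Smis, Smi::kMaxValue is 2^30 - 1, so the tag shift cannot
+ // overflow here either; the overflow check removed above was dead code.)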
}
Goto(&if_join);
@@ -4927,6 +5238,32 @@ TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
return var_result.value();
}
+TNode<Number> CodeStubAssembler::ChangeUintPtrToTagged(TNode<UintPtrT> value) {
+ Label if_overflow(this, Label::kDeferred), if_not_overflow(this),
+ if_join(this);
+ TVARIABLE(Number, var_result);
+ // If {value} > 2^31 - 1, we need to store it in a HeapNumber.
+ Branch(UintPtrLessThan(UintPtrConstant(Smi::kMaxValue), value), &if_overflow,
+ &if_not_overflow);
+
+ BIND(&if_not_overflow);
+ {
+ // The {value} is definitely in valid Smi range.
+ var_result = SmiTag(Signed(value));
+ }
+ Goto(&if_join);
+
+ BIND(&if_overflow);
+ {
+ TNode<Float64T> float64_value = ChangeUintPtrToFloat64(value);
+ var_result = AllocateHeapNumberWithValue(float64_value);
+ }
+ Goto(&if_join);
+
+ BIND(&if_join);
+ return var_result.value();
+}
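+ // Illustrative note (ours, not part of this change): the branch above
+ // reduces to the host-side decision
+ //   value <= Smi::kMaxValue ? SmiTag(value)
+ //                           : AllocateHeapNumberWithValue((double)value)
+ // e.g. on 64-bit targets 2^31 - 1 still tags as a Smi while 2^31 takes
+ // the HeapNumber path.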
+
TNode<String> CodeStubAssembler::ToThisString(Node* context, Node* value,
char const* method_name) {
VARIABLE(var_value, MachineRepresentation::kTagged, value);
@@ -5229,6 +5566,13 @@ TNode<BoolT> CodeStubAssembler::IsExtensibleMap(SloppyTNode<Map> map) {
return IsSetWord32<Map::IsExtensibleBit>(LoadMapBitField2(map));
}
+TNode<BoolT> CodeStubAssembler::IsExtensibleNonPrototypeMap(TNode<Map> map) {
+ int kMask = Map::IsExtensibleBit::kMask | Map::IsPrototypeMapBit::kMask;
+ int kExpected = Map::IsExtensibleBit::kMask;
+ return Word32Equal(Word32And(LoadMapBitField2(map), Int32Constant(kMask)),
+ Int32Constant(kExpected));
+}
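+ // Worked example (ours, with hypothetical mask values): if IsExtensibleBit
+ // were 0x1 and IsPrototypeMapBit 0x2, then bit_field2 == 0x1 passes
+ // (extensible, not a prototype map), while 0x3 (extensible prototype map)
+ // and 0x0 (non-extensible) both fail, since (bit_field2 & 0x3) must equal
+ // 0x1 exactly.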
+
TNode<BoolT> CodeStubAssembler::IsCallableMap(SloppyTNode<Map> map) {
CSA_ASSERT(this, IsMap(map));
return IsSetWord32<Map::IsCallableBit>(LoadMapBitField(map));
@@ -5246,42 +5590,42 @@ TNode<BoolT> CodeStubAssembler::IsUndetectableMap(SloppyTNode<Map> map) {
TNode<BoolT> CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(Heap::kNoElementsProtectorRootIndex);
+ Node* cell = LoadRoot(RootIndex::kNoElementsProtector);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return WordEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsPromiseResolveProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(Heap::kPromiseResolveProtectorRootIndex);
+ Node* cell = LoadRoot(RootIndex::kPromiseResolveProtector);
Node* cell_value = LoadObjectField(cell, Cell::kValueOffset);
return WordEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsPromiseThenProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(Heap::kPromiseThenProtectorRootIndex);
+ Node* cell = LoadRoot(RootIndex::kPromiseThenProtector);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return WordEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsArraySpeciesProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(Heap::kArraySpeciesProtectorRootIndex);
+ Node* cell = LoadRoot(RootIndex::kArraySpeciesProtector);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return WordEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(Heap::kTypedArraySpeciesProtectorRootIndex);
+ Node* cell = LoadRoot(RootIndex::kTypedArraySpeciesProtector);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return WordEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(Heap::kPromiseSpeciesProtectorRootIndex);
+ Node* cell = LoadRoot(RootIndex::kPromiseSpeciesProtector);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return WordEqual(cell_value, invalid);
}
@@ -5320,7 +5664,7 @@ TNode<BoolT> CodeStubAssembler::IsCallable(SloppyTNode<HeapObject> object) {
}
TNode<BoolT> CodeStubAssembler::IsCell(SloppyTNode<HeapObject> object) {
- return WordEqual(LoadMap(object), LoadRoot(Heap::kCellMapRootIndex));
+ return WordEqual(LoadMap(object), LoadRoot(RootIndex::kCellMap));
}
TNode<BoolT> CodeStubAssembler::IsCode(SloppyTNode<HeapObject> object) {
@@ -5402,11 +5746,11 @@ TNode<BoolT> CodeStubAssembler::IsExternalStringInstanceType(
Int32Constant(kExternalStringTag));
}
-TNode<BoolT> CodeStubAssembler::IsShortExternalStringInstanceType(
+TNode<BoolT> CodeStubAssembler::IsUncachedExternalStringInstanceType(
SloppyTNode<Int32T> instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
- STATIC_ASSERT(kShortExternalStringTag != 0);
- return IsSetWord32(instance_type, kShortExternalStringMask);
+ STATIC_ASSERT(kUncachedExternalStringTag != 0);
+ return IsSetWord32(instance_type, kUncachedExternalStringMask);
}
TNode<BoolT> CodeStubAssembler::IsJSReceiverInstanceType(
@@ -5619,6 +5963,16 @@ TNode<BoolT> CodeStubAssembler::IsHeapNumber(SloppyTNode<HeapObject> object) {
return IsHeapNumberMap(LoadMap(object));
}
+TNode<BoolT> CodeStubAssembler::IsHeapNumberInstanceType(
+ SloppyTNode<Int32T> instance_type) {
+ return InstanceTypeEqual(instance_type, HEAP_NUMBER_TYPE);
+}
+
+TNode<BoolT> CodeStubAssembler::IsOddballInstanceType(
+ SloppyTNode<Int32T> instance_type) {
+ return InstanceTypeEqual(instance_type, ODDBALL_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsMutableHeapNumber(
SloppyTNode<HeapObject> object) {
return IsMutableHeapNumberMap(LoadMap(object));
@@ -5634,8 +5988,12 @@ TNode<BoolT> CodeStubAssembler::IsFeedbackVector(
}
TNode<BoolT> CodeStubAssembler::IsName(SloppyTNode<HeapObject> object) {
- return Int32LessThanOrEqual(LoadInstanceType(object),
- Int32Constant(LAST_NAME_TYPE));
+ return IsNameInstanceType(LoadInstanceType(object));
+}
+
+TNode<BoolT> CodeStubAssembler::IsNameInstanceType(
+ SloppyTNode<Int32T> instance_type) {
+ return Int32LessThanOrEqual(instance_type, Int32Constant(LAST_NAME_TYPE));
}
TNode<BoolT> CodeStubAssembler::IsString(SloppyTNode<HeapObject> object) {
@@ -5668,20 +6026,19 @@ TNode<BoolT> CodeStubAssembler::IsPrimitiveInstanceType(
TNode<BoolT> CodeStubAssembler::IsPrivateSymbol(
SloppyTNode<HeapObject> object) {
- return Select<BoolT>(
- IsSymbol(object),
- [=] {
- TNode<Symbol> symbol = CAST(object);
- TNode<Int32T> flags =
- SmiToInt32(LoadObjectField<Smi>(symbol, Symbol::kFlagsOffset));
- return IsSetWord32(flags, 1 << Symbol::kPrivateBit);
- },
- [=] { return Int32FalseConstant(); });
+ return Select<BoolT>(IsSymbol(object),
+ [=] {
+ TNode<Symbol> symbol = CAST(object);
+ TNode<Uint32T> flags = LoadObjectField<Uint32T>(
+ symbol, Symbol::kFlagsOffset);
+ return IsSetWord32<Symbol::IsPrivateBit>(flags);
+ },
+ [=] { return Int32FalseConstant(); });
}
TNode<BoolT> CodeStubAssembler::IsNativeContext(
SloppyTNode<HeapObject> object) {
- return WordEqual(LoadMap(object), LoadRoot(Heap::kNativeContextMapRootIndex));
+ return WordEqual(LoadMap(object), LoadRoot(RootIndex::kNativeContextMap));
}
TNode<BoolT> CodeStubAssembler::IsFixedDoubleArray(
@@ -5968,7 +6325,7 @@ TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) {
{
// Load the isolate wide single character string cache.
TNode<FixedArray> cache =
- CAST(LoadRoot(Heap::kSingleCharacterStringCacheRootIndex));
+ CAST(LoadRoot(RootIndex::kSingleCharacterStringCache));
TNode<IntPtrT> code_index = Signed(ChangeUint32ToWord(code));
// Check if we have an entry for the {code} in the single character string
@@ -6021,7 +6378,7 @@ TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) {
// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
TNode<String> CodeStubAssembler::AllocAndCopyStringCharacters(
Node* from, Node* from_instance_type, TNode<IntPtrT> from_index,
- TNode<Smi> character_count) {
+ TNode<IntPtrT> character_count) {
Label end(this), one_byte_sequential(this), two_byte_sequential(this);
TVARIABLE(String, var_result);
@@ -6031,10 +6388,10 @@ TNode<String> CodeStubAssembler::AllocAndCopyStringCharacters(
// The subject string is a sequential one-byte string.
BIND(&one_byte_sequential);
{
- TNode<String> result =
- AllocateSeqOneByteString(NoContextConstant(), character_count);
+ TNode<String> result = AllocateSeqOneByteString(
+ NoContextConstant(), Unsigned(TruncateIntPtrToInt32(character_count)));
CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
- SmiUntag(character_count), String::ONE_BYTE_ENCODING,
+ character_count, String::ONE_BYTE_ENCODING,
String::ONE_BYTE_ENCODING);
var_result = result;
Goto(&end);
@@ -6043,10 +6400,10 @@ TNode<String> CodeStubAssembler::AllocAndCopyStringCharacters(
// The subject string is a sequential two-byte string.
BIND(&two_byte_sequential);
{
- TNode<String> result =
- AllocateSeqTwoByteString(NoContextConstant(), character_count);
+ TNode<String> result = AllocateSeqTwoByteString(
+ NoContextConstant(), Unsigned(TruncateIntPtrToInt32(character_count)));
CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
- SmiUntag(character_count), String::TWO_BYTE_ENCODING,
+ character_count, String::TWO_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
var_result = result;
Goto(&end);
@@ -6109,15 +6466,17 @@ TNode<String> CodeStubAssembler::SubString(TNode<String> string,
BIND(&one_byte_slice);
{
- var_result = AllocateSlicedOneByteString(SmiTag(substr_length),
- direct_string, SmiTag(offset));
+ var_result = AllocateSlicedOneByteString(
+ Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string,
+ SmiTag(offset));
Goto(&end);
}
BIND(&two_byte_slice);
{
- var_result = AllocateSlicedTwoByteString(SmiTag(substr_length),
- direct_string, SmiTag(offset));
+ var_result = AllocateSlicedTwoByteString(
+ Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string,
+ SmiTag(offset));
Goto(&end);
}
@@ -6129,7 +6488,7 @@ TNode<String> CodeStubAssembler::SubString(TNode<String> string,
GotoIf(to_direct.is_external(), &external_string);
var_result = AllocAndCopyStringCharacters(direct_string, instance_type,
- offset, SmiTag(substr_length));
+ offset, substr_length);
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
@@ -6143,7 +6502,7 @@ TNode<String> CodeStubAssembler::SubString(TNode<String> string,
Node* const fake_sequential_string = to_direct.PointerToString(&runtime);
var_result = AllocAndCopyStringCharacters(
- fake_sequential_string, instance_type, offset, SmiTag(substr_length));
+ fake_sequential_string, instance_type, offset, substr_length);
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
@@ -6314,7 +6673,7 @@ TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
BIND(&if_isexternal);
{
- GotoIf(IsShortExternalStringInstanceType(var_instance_type_.value()),
+ GotoIf(IsUncachedExternalStringInstanceType(var_instance_type_.value()),
if_bailout);
TNode<String> string = CAST(var_string_.value());
@@ -6424,36 +6783,37 @@ TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
done(this, &result), done_native(this, &result);
Counters* counters = isolate()->counters();
- TNode<Smi> left_length = LoadStringLengthAsSmi(left);
- GotoIf(SmiNotEqual(SmiConstant(0), left_length), &check_right);
+ TNode<Uint32T> left_length = LoadStringLengthAsWord32(left);
+ GotoIfNot(Word32Equal(left_length, Uint32Constant(0)), &check_right);
result = right;
Goto(&done_native);
BIND(&check_right);
- TNode<Smi> right_length = LoadStringLengthAsSmi(right);
- GotoIf(SmiNotEqual(SmiConstant(0), right_length), &cons);
+ TNode<Uint32T> right_length = LoadStringLengthAsWord32(right);
+ GotoIfNot(Word32Equal(right_length, Uint32Constant(0)), &cons);
result = left;
Goto(&done_native);
BIND(&cons);
{
- TNode<Smi> new_length = SmiAdd(left_length, right_length);
+ TNode<Uint32T> new_length = Uint32Add(left_length, right_length);
// If new length is greater than String::kMaxLength, goto runtime to
// throw. Note: we also need to invalidate the string length protector, so
// can't just throw here directly.
- GotoIf(SmiAbove(new_length, SmiConstant(String::kMaxLength)), &runtime);
+ GotoIf(Uint32GreaterThan(new_length, Uint32Constant(String::kMaxLength)),
+ &runtime);
TVARIABLE(String, var_left, left);
TVARIABLE(String, var_right, right);
Variable* input_vars[2] = {&var_left, &var_right};
Label non_cons(this, 2, input_vars);
Label slow(this, Label::kDeferred);
- GotoIf(SmiLessThan(new_length, SmiConstant(ConsString::kMinLength)),
+ GotoIf(Uint32LessThan(new_length, Uint32Constant(ConsString::kMinLength)),
&non_cons);
- result = NewConsString(context, new_length, var_left.value(),
- var_right.value(), flags);
+ result =
+ NewConsString(new_length, var_left.value(), var_right.value(), flags);
Goto(&done_native);
BIND(&non_cons);
@@ -6472,8 +6832,8 @@ TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime);
GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow);
- TNode<IntPtrT> word_left_length = SmiUntag(left_length);
- TNode<IntPtrT> word_right_length = SmiUntag(right_length);
+ TNode<IntPtrT> word_left_length = Signed(ChangeUint32ToWord(left_length));
+ TNode<IntPtrT> word_right_length = Signed(ChangeUint32ToWord(right_length));
Label two_byte(this);
GotoIf(Word32Equal(Word32And(ored_instance_types,
@@ -6613,7 +6973,7 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
done(this, &result);
// Load the number string cache.
- Node* number_string_cache = LoadRoot(Heap::kNumberStringCacheRootIndex);
+ Node* number_string_cache = LoadRoot(RootIndex::kNumberStringCache);
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
@@ -6688,52 +7048,6 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
return result.value();
}
-TNode<Name> CodeStubAssembler::ToName(SloppyTNode<Context> context,
- SloppyTNode<Object> value) {
- Label end(this);
- TVARIABLE(Name, var_result);
-
- Label is_number(this);
- GotoIf(TaggedIsSmi(value), &is_number);
-
- Label not_name(this);
- TNode<Int32T> value_instance_type = LoadInstanceType(CAST(value));
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- GotoIf(Int32GreaterThan(value_instance_type, Int32Constant(LAST_NAME_TYPE)),
- &not_name);
-
- var_result = CAST(value);
- Goto(&end);
-
- BIND(&is_number);
- {
- var_result = CAST(CallBuiltin(Builtins::kNumberToString, context, value));
- Goto(&end);
- }
-
- BIND(&not_name);
- {
- GotoIf(InstanceTypeEqual(value_instance_type, HEAP_NUMBER_TYPE),
- &is_number);
-
- Label not_oddball(this);
- GotoIfNot(InstanceTypeEqual(value_instance_type, ODDBALL_TYPE),
- &not_oddball);
-
- var_result = LoadObjectField<String>(CAST(value), Oddball::kToStringOffset);
- Goto(&end);
-
- BIND(&not_oddball);
- {
- var_result = CAST(CallRuntime(Runtime::kToName, context, value));
- Goto(&end);
- }
- }
-
- BIND(&end);
- return var_result.value();
-}
-
Node* CodeStubAssembler::NonNumberToNumberOrNumeric(
Node* context, Node* input, Object::Conversion mode,
BigIntHandling bigint_handling) {
@@ -8114,6 +8428,125 @@ void CodeStubAssembler::DescriptorArrayForEach(
IndexAdvanceMode::kPost);
}
+void CodeStubAssembler::ForEachEnumerableOwnProperty(
+ TNode<Context> context, TNode<Map> map, TNode<JSObject> object,
+ const ForEachKeyValueFunction& body, Label* bailout) {
+ TNode<Int32T> type = LoadMapInstanceType(map);
+ TNode<Uint32T> bit_field3 = EnsureOnlyHasSimpleProperties(map, type, bailout);
+
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ TNode<Uint32T> nof_descriptors =
+ DecodeWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+
+ TVARIABLE(BoolT, var_stable, Int32TrueConstant());
+ VariableList list({&var_stable}, zone());
+
+ DescriptorArrayForEach(
+ list, Unsigned(Int32Constant(0)), nof_descriptors,
+ [=, &var_stable](TNode<UintPtrT> descriptor_key_index) {
+ TNode<Name> next_key =
+ CAST(LoadWeakFixedArrayElement(descriptors, descriptor_key_index));
+
+ TVARIABLE(Object, var_value, SmiConstant(0));
+ Label callback(this), next_iteration(this);
+
+ {
+ TVARIABLE(Map, var_map);
+ TVARIABLE(HeapObject, var_meta_storage);
+ TVARIABLE(IntPtrT, var_entry);
+ TVARIABLE(Uint32T, var_details);
+ Label if_found(this);
+
+ Label if_found_fast(this), if_found_dict(this);
+
+ Label if_stable(this), if_not_stable(this);
+ Branch(var_stable.value(), &if_stable, &if_not_stable);
+ BIND(&if_stable);
+ {
+ // Directly decode from the descriptor array if |object| did not
+ // change shape.
+ var_map = map;
+ var_meta_storage = descriptors;
+ var_entry = Signed(descriptor_key_index);
+ Goto(&if_found_fast);
+ }
+ BIND(&if_not_stable);
+ {
+ // If the map did change, do a slower lookup. We are still
+ // guaranteed that the object has a simple shape, and that the key
+ // is a name.
+ var_map = LoadMap(object);
+ TryLookupPropertyInSimpleObject(
+ object, var_map.value(), next_key, &if_found_fast,
+ &if_found_dict, &var_meta_storage, &var_entry, &next_iteration);
+ }
+
+ BIND(&if_found_fast);
+ {
+ TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value());
+ TNode<IntPtrT> name_index = var_entry.value();
+
+ // Skip non-enumerable properties.
+ var_details = LoadDetailsByKeyIndex(descriptors, name_index);
+ GotoIf(IsSetWord32(var_details.value(),
+ PropertyDetails::kAttributesDontEnumMask),
+ &next_iteration);
+
+ LoadPropertyFromFastObject(object, var_map.value(), descriptors,
+ name_index, var_details.value(),
+ &var_value);
+ Goto(&if_found);
+ }
+ BIND(&if_found_dict);
+ {
+ TNode<NameDictionary> dictionary = CAST(var_meta_storage.value());
+ TNode<IntPtrT> entry = var_entry.value();
+
+ TNode<Uint32T> details =
+ LoadDetailsByKeyIndex<NameDictionary>(dictionary, entry);
+ // Skip non-enumerable properties.
+ GotoIf(
+ IsSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
+ &next_iteration);
+
+ var_details = details;
+ var_value = LoadValueByKeyIndex<NameDictionary>(dictionary, entry);
+ Goto(&if_found);
+ }
+
+ // Here we have the details and a value, which could be an accessor.
+ BIND(&if_found);
+ {
+ Label slow_load(this, Label::kDeferred);
+
+ var_value = CallGetterIfAccessor(var_value.value(),
+ var_details.value(), context,
+ object, &slow_load, kCallJSGetter);
+ Goto(&callback);
+
+ BIND(&slow_load);
+ var_value =
+ CallRuntime(Runtime::kGetProperty, context, object, next_key);
+ Goto(&callback);
+
+ BIND(&callback);
+ body(next_key, var_value.value());
+
+ // Check if |object| is still stable, i.e. we can proceed using
+ // property details from preloaded |descriptors|.
+ var_stable =
+ Select<BoolT>(var_stable.value(),
+ [=] { return WordEqual(LoadMap(object), map); },
+ [=] { return Int32FalseConstant(); });
+
+ Goto(&next_iteration);
+ }
+ }
+
+ BIND(&next_iteration);
+ });
+}
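+ // Hypothetical call-site sketch (ours; assumes |context|, |map|, |object|
+ // and a |bailout| label are in scope inside some CSA builtin):
+ //   ForEachEnumerableOwnProperty(
+ //       context, map, object,
+ //       [=](TNode<Name> key, TNode<Object> value) {
+ //         // e.g. copy the property into a target object
+ //       },
+ //       &bailout);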
+
void CodeStubAssembler::DescriptorLookup(
SloppyTNode<Name> unique_name, SloppyTNode<DescriptorArray> descriptors,
SloppyTNode<Uint32T> bitfield3, Label* if_found,
@@ -8708,7 +9141,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
GotoIf(IsDetachedBuffer(buffer), if_absent);
- Node* length = SmiUntag(LoadTypedArrayLength(CAST(object)));
+ Node* length = SmiUntag(LoadJSTypedArrayLength(CAST(object)));
Branch(UintPtrLessThan(intptr_index, length), if_found, if_absent);
}
BIND(&if_oob);
@@ -9111,25 +9544,22 @@ void CodeStubAssembler::CheckForAssociatedProtector(Node* name,
Label* if_protector) {
// This list must be kept in sync with LookupIterator::UpdateProtector!
// TODO(jkummerow): Would it be faster to have a bit in Symbol::flags()?
- GotoIf(WordEqual(name, LoadRoot(Heap::kconstructor_stringRootIndex)),
- if_protector);
- GotoIf(WordEqual(name, LoadRoot(Heap::kiterator_symbolRootIndex)),
- if_protector);
- GotoIf(WordEqual(name, LoadRoot(Heap::knext_stringRootIndex)), if_protector);
- GotoIf(WordEqual(name, LoadRoot(Heap::kspecies_symbolRootIndex)),
+ GotoIf(WordEqual(name, LoadRoot(RootIndex::kconstructor_string)),
if_protector);
- GotoIf(WordEqual(name, LoadRoot(Heap::kis_concat_spreadable_symbolRootIndex)),
+ GotoIf(WordEqual(name, LoadRoot(RootIndex::kiterator_symbol)), if_protector);
+ GotoIf(WordEqual(name, LoadRoot(RootIndex::knext_string)), if_protector);
+ GotoIf(WordEqual(name, LoadRoot(RootIndex::kspecies_symbol)), if_protector);
+ GotoIf(WordEqual(name, LoadRoot(RootIndex::kis_concat_spreadable_symbol)),
if_protector);
- GotoIf(WordEqual(name, LoadRoot(Heap::kresolve_stringRootIndex)),
- if_protector);
- GotoIf(WordEqual(name, LoadRoot(Heap::kthen_stringRootIndex)), if_protector);
+ GotoIf(WordEqual(name, LoadRoot(RootIndex::kresolve_string)), if_protector);
+ GotoIf(WordEqual(name, LoadRoot(RootIndex::kthen_string)), if_protector);
// Fall through if no case matched.
}
TNode<Map> CodeStubAssembler::LoadReceiverMap(SloppyTNode<Object> receiver) {
return Select<Map>(
TaggedIsSmi(receiver),
- [=] { return CAST(LoadRoot(Heap::kHeapNumberMapRootIndex)); },
+ [=] { return CAST(LoadRoot(RootIndex::kHeapNumberMap)); },
[=] { return LoadMap(UncheckedCast<HeapObject>(receiver)); });
}
@@ -9452,38 +9882,46 @@ void CodeStubAssembler::EmitBigTypedArrayElementStore(
EmitBigTypedArrayElementStore(elements, backing_store, offset, bigint_value);
}
-void CodeStubAssembler::EmitBigTypedArrayElementStore(
- TNode<FixedTypedArrayBase> elements, TNode<RawPtrT> backing_store,
- TNode<IntPtrT> offset, TNode<BigInt> bigint_value) {
- TNode<WordT> bitfield = LoadBigIntBitfield(bigint_value);
+void CodeStubAssembler::BigIntToRawBytes(TNode<BigInt> bigint,
+ TVariable<UintPtrT>* var_low,
+ TVariable<UintPtrT>* var_high) {
+ Label done(this);
+ *var_low = Unsigned(IntPtrConstant(0));
+ *var_high = Unsigned(IntPtrConstant(0));
+ TNode<WordT> bitfield = LoadBigIntBitfield(bigint);
TNode<UintPtrT> length = DecodeWord<BigIntBase::LengthBits>(bitfield);
TNode<UintPtrT> sign = DecodeWord<BigIntBase::SignBits>(bitfield);
- TVARIABLE(UintPtrT, var_low, Unsigned(IntPtrConstant(0)));
- // Only used on 32-bit platforms.
- TVARIABLE(UintPtrT, var_high, Unsigned(IntPtrConstant(0)));
- Label do_store(this);
- GotoIf(WordEqual(length, IntPtrConstant(0)), &do_store);
- var_low = LoadBigIntDigit(bigint_value, 0);
+ GotoIf(WordEqual(length, IntPtrConstant(0)), &done);
+ *var_low = LoadBigIntDigit(bigint, 0);
if (!Is64()) {
Label load_done(this);
GotoIf(WordEqual(length, IntPtrConstant(1)), &load_done);
- var_high = LoadBigIntDigit(bigint_value, 1);
+ *var_high = LoadBigIntDigit(bigint, 1);
Goto(&load_done);
BIND(&load_done);
}
- GotoIf(WordEqual(sign, IntPtrConstant(0)), &do_store);
+ GotoIf(WordEqual(sign, IntPtrConstant(0)), &done);
// Negative value. Simulate two's complement.
if (!Is64()) {
- var_high = Unsigned(IntPtrSub(IntPtrConstant(0), var_high.value()));
+ *var_high = Unsigned(IntPtrSub(IntPtrConstant(0), var_high->value()));
Label no_carry(this);
- GotoIf(WordEqual(var_low.value(), IntPtrConstant(0)), &no_carry);
- var_high = Unsigned(IntPtrSub(var_high.value(), IntPtrConstant(1)));
+ GotoIf(WordEqual(var_low->value(), IntPtrConstant(0)), &no_carry);
+ *var_high = Unsigned(IntPtrSub(var_high->value(), IntPtrConstant(1)));
Goto(&no_carry);
BIND(&no_carry);
}
- var_low = Unsigned(IntPtrSub(IntPtrConstant(0), var_low.value()));
- Goto(&do_store);
- BIND(&do_store);
+ *var_low = Unsigned(IntPtrSub(IntPtrConstant(0), var_low->value()));
+ Goto(&done);
+ BIND(&done);
+}
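+ // Worked example for the 32-bit path (ours): for BigInt(-1) the digits
+ // give low = 1, high = 0; negation sets high = 0 - 0 = 0, the borrow
+ // (low != 0) makes high = 0xFFFFFFFF, and low = 0 - 1 = 0xFFFFFFFF,
+ // yielding the two's-complement pattern 0xFFFFFFFF'FFFFFFFF for -1.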
+
+void CodeStubAssembler::EmitBigTypedArrayElementStore(
+ TNode<FixedTypedArrayBase> elements, TNode<RawPtrT> backing_store,
+ TNode<IntPtrT> offset, TNode<BigInt> bigint_value) {
+ TVARIABLE(UintPtrT, var_low);
+ // Only used on 32-bit platforms.
+ TVARIABLE(UintPtrT, var_high);
+ BigIntToRawBytes(bigint_value, &var_low, &var_high);
// Assert that offset < elements.length. Given that it's an offset for a raw
// pointer we correct it by the usual kHeapObjectTag offset.
@@ -9548,7 +9986,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// Bounds check.
Node* length =
- TaggedToParameter(LoadTypedArrayLength(CAST(object)), parameter_mode);
+ TaggedToParameter(LoadJSTypedArrayLength(CAST(object)), parameter_mode);
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
// Skip the store if we write beyond the length or
@@ -9793,9 +10231,8 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
BIND(&map_check);
{
TNode<Object> memento_map = LoadObjectField(object, kMementoMapOffset);
- Branch(
- WordEqual(memento_map, LoadRoot(Heap::kAllocationMementoMapRootIndex)),
- memento_found, &no_memento_found);
+ Branch(WordEqual(memento_map, LoadRoot(RootIndex::kAllocationMementoMap)),
+ memento_found, &no_memento_found);
}
BIND(&no_memento_found);
Comment("] TrapAllocationMemento");
@@ -9809,7 +10246,7 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot) {
TNode<IntPtrT> size = IntPtrConstant(AllocationSite::kSizeWithWeakNext);
Node* site = Allocate(size, CodeStubAssembler::kPretenured);
- StoreMapNoWriteBarrier(site, Heap::kAllocationSiteWithWeakNextMapRootIndex);
+ StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap);
// Should match AllocationSite::Initialize.
TNode<WordT> field = UpdateWord<AllocationSite::ElementsKindBits>(
IntPtrConstant(0), IntPtrConstant(GetInitialFastElementsKind()));
@@ -9833,7 +10270,7 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
// Store an empty fixed array for the code dependency.
StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
- Heap::kEmptyWeakFixedArrayRootIndex);
+ RootIndex::kEmptyWeakFixedArray);
// Link the object to the allocation site list
TNode<ExternalReference> site_list = ExternalConstant(
@@ -10001,9 +10438,10 @@ void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace(
doesnt_fit);
}
-void CodeStubAssembler::InitializeFieldsWithRoot(
- Node* object, Node* start_offset, Node* end_offset,
- Heap::RootListIndex root_index) {
+void CodeStubAssembler::InitializeFieldsWithRoot(Node* object,
+ Node* start_offset,
+ Node* end_offset,
+ RootIndex root_index) {
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
@@ -10037,6 +10475,10 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison(
// Both {left} and {right} are Smi, so just perform a fast
// Smi comparison.
switch (op) {
+ case Operation::kEqual:
+ BranchIfSmiEqual(smi_left, smi_right, if_true,
+ if_false);
+ break;
case Operation::kLessThan:
BranchIfSmiLessThan(smi_left, smi_right, if_true,
if_false);
@@ -10083,6 +10525,10 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison(
BIND(&do_float_comparison);
{
switch (op) {
+ case Operation::kEqual:
+ Branch(Float64Equal(var_left_float.value(), var_right_float.value()),
+ if_true, if_false);
+ break;
case Operation::kLessThan:
Branch(Float64LessThan(var_left_float.value(), var_right_float.value()),
if_true, if_false);
@@ -10493,15 +10939,16 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
}
// If {left} is a receiver, call ToPrimitive(left, hint Number).
- // Otherwise call ToNumeric(left) and then ToNumeric(right).
+ // Otherwise call ToNumeric(right) and then ToNumeric(left); the
+ // order here is important, as it is observable by user code.
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Label if_left_receiver(this, Label::kDeferred);
GotoIf(IsJSReceiverInstanceType(left_instance_type),
&if_left_receiver);
+ var_right.Bind(CallBuiltin(Builtins::kToNumeric, context, right));
var_left.Bind(
CallBuiltin(Builtins::kNonNumberToNumeric, context, left));
- var_right.Bind(CallBuiltin(Builtins::kToNumeric, context, right));
Goto(&loop);
BIND(&if_left_receiver);
@@ -11448,7 +11895,7 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context,
BIND(&if_proxy);
{
- TNode<Name> name = ToName(context, key);
+ TNode<Name> name = CAST(CallBuiltin(Builtins::kToName, context, key));
switch (mode) {
case kHasProperty:
GotoIf(IsPrivateSymbol(name), &return_false);
@@ -11914,9 +12361,9 @@ TNode<JSArrayIterator> CodeStubAssembler::CreateArrayIterator(
Node* iterator = Allocate(JSArrayIterator::kSize);
StoreMapNoWriteBarrier(iterator, iterator_map);
StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldNoWriteBarrier(
iterator, JSArrayIterator::kIteratedObjectOffset, object);
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
@@ -11936,9 +12383,9 @@ Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value,
Node* result = Allocate(JSIteratorResult::kSize);
StoreMapNoWriteBarrier(result, map);
StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value);
StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done);
return result;
@@ -11953,7 +12400,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
TNode<FixedArray> elements = UncheckedCast<FixedArray>(
Allocate(elements_size + JSArray::kSize + JSIteratorResult::kSize));
StoreObjectFieldRoot(elements, FixedArray::kMapOffset,
- Heap::kFixedArrayMapRootIndex);
+ RootIndex::kFixedArrayMap);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
StoreFixedArrayElement(elements, 0, key);
StoreFixedArrayElement(elements, 1, value);
@@ -11962,7 +12409,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
Node* array = InnerAllocate(elements, elements_size);
StoreMapNoWriteBarrier(array, array_map);
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements);
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
Node* iterator_map =
@@ -11970,12 +12417,12 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
Node* result = InnerAllocate(array, JSArray::kSize);
StoreMapNoWriteBarrier(result, iterator_map);
StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, array);
StoreObjectFieldRoot(result, JSIteratorResult::kDoneOffset,
- Heap::kFalseValueRootIndex);
+ RootIndex::kFalseValue);
return result;
}
@@ -11988,12 +12435,19 @@ Node* CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
len);
}
+Node* CodeStubAssembler::InternalArrayCreate(TNode<Context> context,
+ TNode<Number> len) {
+ Node* native_context = LoadNativeContext(context);
+ Node* const constructor = LoadContextElement(
+ native_context, Context::INTERNAL_ARRAY_FUNCTION_INDEX);
+ return ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
+ len);
+}
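+ // Sketch (ours): this is the CSA equivalent of evaluating
+ // `new InternalArray(len)` against the INTERNAL_ARRAY_FUNCTION stored on
+ // the current native context.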
+
Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
-
- Node* buffer_bit_field = LoadObjectField(
- buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
- return IsSetWord32<JSArrayBuffer::WasNeutered>(buffer_bit_field);
+ TNode<Uint32T> buffer_bit_field = LoadJSArrayBufferBitField(CAST(buffer));
+ return IsSetWord32<JSArrayBuffer::WasNeuteredBit>(buffer_bit_field);
}
void CodeStubAssembler::ThrowIfArrayBufferIsDetached(
@@ -12009,22 +12463,44 @@ void CodeStubAssembler::ThrowIfArrayBufferIsDetached(
void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
SloppyTNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
const char* method_name) {
- TNode<JSArrayBuffer> buffer = LoadArrayBufferViewBuffer(array_buffer_view);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array_buffer_view);
ThrowIfArrayBufferIsDetached(context, buffer, method_name);
}
-TNode<JSArrayBuffer> CodeStubAssembler::LoadArrayBufferViewBuffer(
- TNode<JSArrayBufferView> array_buffer_view) {
- return LoadObjectField<JSArrayBuffer>(array_buffer_view,
- JSArrayBufferView::kBufferOffset);
+TNode<Uint32T> CodeStubAssembler::LoadJSArrayBufferBitField(
+ TNode<JSArrayBuffer> array_buffer) {
+ return LoadObjectField<Uint32T>(array_buffer, JSArrayBuffer::kBitFieldOffset);
}
-TNode<RawPtrT> CodeStubAssembler::LoadArrayBufferBackingStore(
+TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStore(
TNode<JSArrayBuffer> array_buffer) {
return LoadObjectField<RawPtrT>(array_buffer,
JSArrayBuffer::kBackingStoreOffset);
}
+TNode<JSArrayBuffer> CodeStubAssembler::LoadJSArrayBufferViewBuffer(
+ TNode<JSArrayBufferView> array_buffer_view) {
+ return LoadObjectField<JSArrayBuffer>(array_buffer_view,
+ JSArrayBufferView::kBufferOffset);
+}
+
+TNode<UintPtrT> CodeStubAssembler::LoadJSArrayBufferViewByteLength(
+ TNode<JSArrayBufferView> array_buffer_view) {
+ return LoadObjectField<UintPtrT>(array_buffer_view,
+ JSArrayBufferView::kByteLengthOffset);
+}
+
+TNode<UintPtrT> CodeStubAssembler::LoadJSArrayBufferViewByteOffset(
+ TNode<JSArrayBufferView> array_buffer_view) {
+ return LoadObjectField<UintPtrT>(array_buffer_view,
+ JSArrayBufferView::kByteOffsetOffset);
+}
+
+TNode<Smi> CodeStubAssembler::LoadJSTypedArrayLength(
+ TNode<JSTypedArray> typed_array) {
+ return LoadObjectField<Smi>(typed_array, JSTypedArray::kLengthOffset);
+}
+
CodeStubArguments::CodeStubArguments(
CodeStubAssembler* assembler, Node* argc, Node* fp,
CodeStubAssembler::ParameterMode param_mode, ReceiverMode receiver_mode)
@@ -12367,11 +12843,11 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
StoreMapNoWriteBarrier(fun, map);
StoreObjectFieldRoot(fun, JSObject::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(fun, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ RootIndex::kEmptyFixedArray);
StoreObjectFieldRoot(fun, JSFunction::kFeedbackCellOffset,
- Heap::kManyClosuresCellRootIndex);
+ RootIndex::kManyClosuresCell);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
@@ -12522,7 +12998,7 @@ void CodeStubAssembler::PerformStackCheck(TNode<Context> context) {
void CodeStubAssembler::InitializeFunctionContext(Node* native_context,
Node* context, int slots) {
DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
- StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
+ StoreMapNoWriteBarrier(context, RootIndex::kFunctionContextMap);
StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
SmiConstant(slots));
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 38fc9717de..69ac5e27bb 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -11,6 +11,7 @@
#include "src/compiler/code-assembler.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
#include "src/roots.h"
@@ -32,8 +33,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(PromiseSpeciesProtector, promise_species_protector, \
PromiseSpeciesProtector) \
V(TypedArraySpeciesProtector, typed_array_species_protector, \
- TypedArraySpeciesProtector) \
- V(StoreHandler0Map, store_handler0_map, StoreHandler0Map)
+ TypedArraySpeciesProtector)
#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
@@ -70,6 +70,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(PreParsedScopeDataMap, pre_parsed_scope_data_map, PreParsedScopeDataMap) \
V(prototype_string, prototype_string, PrototypeString) \
V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
+ V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
V(SymbolMap, symbol_map, SymbolMap) \
V(TheHoleValue, the_hole_value, TheHole) \
V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
@@ -346,6 +347,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return CAST(p_o);
}
+ TNode<Context> UnsafeCastObjectToContext(TNode<Object> p_o) {
+ return CAST(p_o);
+ }
+
TNode<FixedDoubleArray> UnsafeCastObjectToFixedDoubleArray(
TNode<Object> p_o) {
return CAST(p_o);
@@ -403,6 +408,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Map> UnsafeCastObjectToMap(TNode<Object> p_o) { return CAST(p_o); }
+ TNode<JSArgumentsObjectWithLength> RawCastObjectToJSArgumentsObjectWithLength(
+ TNode<Object> p_o) {
+ return TNode<JSArgumentsObjectWithLength>::UncheckedCast(p_o);
+ }
+
Node* MatchesParameterMode(Node* value, ParameterMode mode);
#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
@@ -592,6 +602,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// if the division needs to be performed as a floating point operation.
TNode<Smi> TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor, Label* bailout);
+ // Compares two Smis x and y as if they were converted to strings and then
+ // compared lexicographically. Returns:
+ // -1 iff x < y.
+ // 0 iff x == y.
+ // 1 iff x > y.
+ TNode<Smi> SmiLexicographicCompare(TNode<Smi> x, TNode<Smi> y);
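+ // Worked example (ours): "10" sorts before "9" lexicographically, so
+ // SmiLexicographicCompare(10, 9) returns -1 even though 10 > 9
+ // numerically, and SmiLexicographicCompare(2, 10) returns 1.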
+
// Smi | HeapNumber operations.
TNode<Number> NumberInc(SloppyTNode<Number> value);
TNode<Number> NumberDec(SloppyTNode<Number> value);
@@ -765,6 +782,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
void GotoIfForceSlowPath(Label* if_true);
+ // Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect.
+ void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true);
+
// Load value from current frame by given offset in bytes.
Node* LoadFromFrame(int offset, MachineType rep = MachineType::AnyTagged());
// Load value from current parent frame by given offset in bytes.
@@ -817,7 +837,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load a SMI and untag it.
TNode<IntPtrT> LoadAndUntagSmi(Node* base, int index);
// Load a SMI root, untag it, and convert to Word32.
- TNode<Int32T> LoadAndUntagToWord32Root(Heap::RootListIndex root_index);
+ TNode<Int32T> LoadAndUntagToWord32Root(RootIndex root_index);
TNode<MaybeObject> LoadMaybeWeakObjectField(SloppyTNode<HeapObject> object,
int offset) {
@@ -847,6 +867,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load the elements backing store of a JSObject.
TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object);
// Load the length of a JSArray instance.
+ TNode<Object> LoadJSArgumentsObjectWithLength(
+ SloppyTNode<JSArgumentsObjectWithLength> array);
+ // Load the length of a JSArray instance.
TNode<Number> LoadJSArrayLength(SloppyTNode<JSArray> array);
// Load the length of a fast JSArray instance. Returns a positive Smi.
TNode<Smi> LoadFastJSArrayLength(SloppyTNode<JSArray> array);
@@ -859,8 +882,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Smi> LoadWeakFixedArrayLength(TNode<WeakFixedArray> array);
TNode<IntPtrT> LoadAndUntagWeakFixedArrayLength(
SloppyTNode<WeakFixedArray> array);
- // Load the length of a JSTypedArray instance.
- TNode<Smi> LoadTypedArrayLength(TNode<JSTypedArray> typed_array);
// Load the bit field of a Map.
TNode<Int32T> LoadMapBitField(SloppyTNode<Map> map);
// Load bit field 2 of a map.
@@ -892,6 +913,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadMapEnumLength(SloppyTNode<Map> map);
// Load the back-pointer of a Map.
TNode<Object> LoadMapBackPointer(SloppyTNode<Map> map);
+ // Checks that |map| has only simple properties and returns bitfield3.
+ TNode<Uint32T> EnsureOnlyHasSimpleProperties(TNode<Map> map,
+ TNode<Int32T> instance_type,
+ Label* bailout);
// Load the identity hash of a JSRececiver.
TNode<IntPtrT> LoadJSReceiverIdentityHash(SloppyTNode<Object> receiver,
Label* if_no_hash = nullptr);
@@ -912,10 +937,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Uint32T> LoadNameHash(SloppyTNode<Name> name,
Label* if_hash_not_computed = nullptr);
- // Load length field of a String object as intptr_t value.
- TNode<IntPtrT> LoadStringLengthAsWord(SloppyTNode<String> object);
// Load length field of a String object as Smi value.
- TNode<Smi> LoadStringLengthAsSmi(SloppyTNode<String> object);
+ TNode<Smi> LoadStringLengthAsSmi(SloppyTNode<String> string);
+ // Load length field of a String object as intptr_t value.
+ TNode<IntPtrT> LoadStringLengthAsWord(SloppyTNode<String> string);
+ // Load length field of a String object as uint32_t value.
+ TNode<Uint32T> LoadStringLengthAsWord32(SloppyTNode<String> string);
// Loads a pointer to the sequential String char array.
Node* PointerToSeqStringData(Node* seq_string);
// Load value field of a JSValue object.
@@ -933,23 +960,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_cleared, Label* if_weak, Label* if_strong,
TVariable<Object>* extracted);
// See MaybeObject for semantics of these functions.
- TNode<BoolT> IsStrongHeapObject(TNode<MaybeObject> value);
+ TNode<BoolT> IsStrong(TNode<MaybeObject> value);
// This variant is for overzealous checking.
- TNode<BoolT> IsStrongHeapObject(TNode<Object> value) {
- return IsStrongHeapObject(ReinterpretCast<MaybeObject>(value));
+ TNode<BoolT> IsStrong(TNode<Object> value) {
+ return IsStrong(ReinterpretCast<MaybeObject>(value));
}
- TNode<HeapObject> ToStrongHeapObject(TNode<MaybeObject> value,
- Label* if_not_strong);
+ TNode<HeapObject> GetHeapObjectIfStrong(TNode<MaybeObject> value,
+ Label* if_not_strong);
- TNode<BoolT> IsWeakOrClearedHeapObject(TNode<MaybeObject> value);
- TNode<BoolT> IsClearedWeakHeapObject(TNode<MaybeObject> value);
- TNode<BoolT> IsNotClearedWeakHeapObject(TNode<MaybeObject> value);
+ TNode<BoolT> IsWeakOrCleared(TNode<MaybeObject> value);
+ TNode<BoolT> IsCleared(TNode<MaybeObject> value);
+ TNode<BoolT> IsNotCleared(TNode<MaybeObject> value);
// Removes the weak bit + asserts it was set.
- TNode<HeapObject> ToWeakHeapObject(TNode<MaybeObject> value);
+ TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value);
- TNode<HeapObject> ToWeakHeapObject(TNode<MaybeObject> value,
- Label* if_cleared);
+ TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value,
+ Label* if_cleared);
TNode<BoolT> IsWeakReferenceTo(TNode<MaybeObject> object,
TNode<Object> value);
@@ -1089,6 +1116,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadFixedBigInt64ArrayElementAsTagged(Node* data_pointer, Node* offset);
Node* LoadFixedBigUint64ArrayElementAsTagged(Node* data_pointer,
Node* offset);
+ // 64-bit platforms only:
+ TNode<BigInt> BigIntFromInt64(TNode<IntPtrT> value);
+ TNode<BigInt> BigIntFromUint64(TNode<UintPtrT> value);
+ // 32-bit platforms only:
+ TNode<BigInt> BigIntFromInt32Pair(TNode<IntPtrT> low, TNode<IntPtrT> high);
+ TNode<BigInt> BigIntFromUint32Pair(TNode<UintPtrT> low, TNode<UintPtrT> high);
void StoreFixedTypedArrayElementFromTagged(
TNode<Context> context, TNode<FixedTypedArrayBase> elements,
@@ -1100,6 +1133,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
int slot_index);
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
SloppyTNode<IntPtrT> slot_index);
+ TNode<Object> LoadContextElement(TNode<Context> context,
+ TNode<Smi> slot_index);
void StoreContextElement(SloppyTNode<Context> context, int slot_index,
SloppyTNode<Object> value);
void StoreContextElement(SloppyTNode<Context> context,
@@ -1151,11 +1186,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
MachineRepresentation rep = MachineRepresentation::kTagged);
// Store the Map of an HeapObject.
Node* StoreMap(Node* object, Node* map);
- Node* StoreMapNoWriteBarrier(Node* object,
- Heap::RootListIndex map_root_index);
+ Node* StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index);
Node* StoreMapNoWriteBarrier(Node* object, Node* map);
- Node* StoreObjectFieldRoot(Node* object, int offset,
- Heap::RootListIndex root);
+ Node* StoreObjectFieldRoot(Node* object, int offset, RootIndex root);
// Store an array element to a FixedArray.
void StoreFixedArrayElement(
TNode<FixedArray> object, int index, SloppyTNode<Object> value,
@@ -1203,6 +1236,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<FixedDoubleArray> object, Node* index, TNode<Float64T> value,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ void StoreFixedDoubleArrayElementSmi(TNode<FixedDoubleArray> object,
+ TNode<Smi> index,
+ TNode<Float64T> value) {
+ StoreFixedDoubleArrayElement(object, index, value, SMI_PARAMETERS);
+ }
+
+ void StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array, Node* index,
+ ParameterMode mode = INTPTR_PARAMETERS);
+ void StoreFixedDoubleArrayHoleSmi(TNode<FixedDoubleArray> array,
+ TNode<Smi> index) {
+ StoreFixedDoubleArrayHole(array, index, SMI_PARAMETERS);
+ }
+
Node* StoreFeedbackVectorSlot(
Node* object, Node* index, Node* value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
@@ -1267,47 +1313,47 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
// Allocate a SeqOneByteString with the given length.
- TNode<String> AllocateSeqOneByteString(int length,
+ TNode<String> AllocateSeqOneByteString(uint32_t length,
AllocationFlags flags = kNone);
- TNode<String> AllocateSeqOneByteString(Node* context, TNode<Smi> length,
+ TNode<String> AllocateSeqOneByteString(Node* context, TNode<Uint32T> length,
AllocationFlags flags = kNone);
// Allocate a SeqTwoByteString with the given length.
- TNode<String> AllocateSeqTwoByteString(int length,
+ TNode<String> AllocateSeqTwoByteString(uint32_t length,
AllocationFlags flags = kNone);
- TNode<String> AllocateSeqTwoByteString(Node* context, TNode<Smi> length,
+ TNode<String> AllocateSeqTwoByteString(Node* context, TNode<Uint32T> length,
AllocationFlags flags = kNone);
// Allocate a SlicedOneByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
- TNode<String> AllocateSlicedOneByteString(TNode<Smi> length,
+ TNode<String> AllocateSlicedOneByteString(TNode<Uint32T> length,
TNode<String> parent,
TNode<Smi> offset);
// Allocate a SlicedTwoByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
- TNode<String> AllocateSlicedTwoByteString(TNode<Smi> length,
+ TNode<String> AllocateSlicedTwoByteString(TNode<Uint32T> length,
TNode<String> parent,
TNode<Smi> offset);
// Allocate a one-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be one-byte strings.
- TNode<String> AllocateOneByteConsString(TNode<Smi> length,
+ TNode<String> AllocateOneByteConsString(TNode<Uint32T> length,
TNode<String> first,
TNode<String> second,
AllocationFlags flags = kNone);
// Allocate a two-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be two-byte strings.
- TNode<String> AllocateTwoByteConsString(TNode<Smi> length,
+ TNode<String> AllocateTwoByteConsString(TNode<Uint32T> length,
TNode<String> first,
TNode<String> second,
AllocationFlags flags = kNone);
// Allocate an appropriate one- or two-byte ConsString with the first and
// second parts specified by |left| and |right|.
- TNode<String> NewConsString(Node* context, TNode<Smi> length,
- TNode<String> left, TNode<String> right,
+ TNode<String> NewConsString(TNode<Uint32T> length, TNode<String> left,
+ TNode<String> right,
AllocationFlags flags = kNone);
TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
@@ -1333,7 +1379,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
template <typename CollectionType>
void FindOrderedHashTableEntry(
Node* table, Node* hash,
- std::function<void(Node*, Label*, Label*)> key_compare,
+ const std::function<void(Node*, Label*, Label*)>& key_compare,
Variable* entry_start_position, Label* entry_found, Label* not_found);
template <typename CollectionType>
@@ -1386,9 +1432,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
INTPTR_PARAMETERS);
}
- Node* CloneFastJSArray(Node* context, Node* array,
- ParameterMode mode = INTPTR_PARAMETERS,
- Node* allocation_site = nullptr);
+ enum class HoleConversionMode { kDontConvert, kConvertToUndefined };
+ // Clone a fast JSArray |array| into a new fast JSArray.
+ // |convert_holes| tells the function whether to convert holes to undefined.
+ // If |convert_holes| is set to kConvertToUndefined but the function does
+ // not find any hole in |array|, the resulting array will have the same
+ // elements kind as |array|. If the function does find a hole, it converts
+ // the holes in |array| to undefined in the resulting array, which will then
+ // have PACKED_ELEMENTS kind.
+ // If |convert_holes| is set to kDontConvert, holes are copied to the
+ // resulting array as well, which will have the same elements kind as
+ // |array|. The function generates significantly less code in this case.
+ Node* CloneFastJSArray(
+ Node* context, Node* array, ParameterMode mode = INTPTR_PARAMETERS,
+ Node* allocation_site = nullptr,
+ HoleConversionMode convert_holes = HoleConversionMode::kDontConvert);
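+ // Worked example (ours): cloning the holey array [1, /* hole */, 3] with
+ // kConvertToUndefined yields [1, undefined, 3] with PACKED_ELEMENTS, while
+ // kDontConvert keeps the hole and the original HOLEY_SMI_ELEMENTS kind.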
Node* ExtractFastJSArray(Node* context, Node* array, Node* begin, Node* count,
ParameterMode mode = INTPTR_PARAMETERS,
@@ -1418,7 +1476,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<FixedDoubleArray> AllocateZeroedFixedDoubleArray(
TNode<IntPtrT> capacity) {
TNode<FixedDoubleArray> result = UncheckedCast<FixedDoubleArray>(
- AllocateFixedArray(FLOAT64_ELEMENTS, capacity,
+ AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity,
AllocationFlag::kAllowLargeObjectAllocation));
FillFixedDoubleArrayWithZero(result, capacity);
return result;
@@ -1438,10 +1496,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* ArraySpeciesCreate(TNode<Context> context, TNode<Object> originalArray,
TNode<Number> len);
+ Node* InternalArrayCreate(TNode<Context> context, TNode<Number> len);
void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index,
- Node* to_index,
- Heap::RootListIndex value_root_index,
+ Node* to_index, RootIndex value_root_index,
ParameterMode mode = INTPTR_PARAMETERS);
// Uses memset to effectively initialize the given FixedArray with zeroes.
@@ -1486,11 +1544,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Copies |element_count| elements from |from_array| starting from element
// |first_element| to |to_array| of |capacity| size respecting both array's
// elements kinds.
+ // |convert_holes| tells the function whether to convert holes to undefined.
+ // |var_holes_converted| can be used to signify that the conversion happened
+ // (i.e. that there were holes). If |convert_holes| is
+ // HoleConversionMode::kConvertToUndefined, then it must not be the case that
+ // IsDoubleElementsKind(to_kind).
void CopyFixedArrayElements(
ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
Node* to_array, Node* first_element, Node* element_count, Node* capacity,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- ParameterMode mode = INTPTR_PARAMETERS);
+ ParameterMode mode = INTPTR_PARAMETERS,
+ HoleConversionMode convert_holes = HoleConversionMode::kDontConvert,
+ TVariable<BoolT>* var_holes_converted = nullptr);
void CopyFixedArrayElements(
ElementsKind from_kind, TNode<FixedArrayBase> from_array,
@@ -1507,12 +1572,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<FixedDoubleArray> HeapObjectToFixedDoubleArray(TNode<HeapObject> base,
Label* cast_fail) {
- GotoIf(WordNotEqual(LoadMap(base),
- LoadRoot(Heap::kFixedDoubleArrayMapRootIndex)),
- cast_fail);
+ GotoIf(
+ WordNotEqual(LoadMap(base), LoadRoot(RootIndex::kFixedDoubleArrayMap)),
+ cast_fail);
return UncheckedCast<FixedDoubleArray>(base);
}
+ TNode<Int32T> ConvertElementsKindToInt(TNode<Int32T> elements_kind) {
+ return UncheckedCast<Int32T>(elements_kind);
+ }
+
enum class ExtractFixedArrayFlag {
kFixedArrays = 1,
kFixedDoubleArrays = 2,
@@ -1525,8 +1594,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
typedef base::Flags<ExtractFixedArrayFlag> ExtractFixedArrayFlags;
// Copy a portion of an existing FixedArray or FixedDoubleArray into a new
- // FixedArray, including special appropriate handling for empty arrays and COW
- // arrays.
+ // array, including appropriate special handling for empty arrays and COW
+ // arrays. The result array will be of the same type as the original array.
//
// * |source| is either a FixedArray or FixedDoubleArray from which to copy
// elements.
@@ -1546,12 +1615,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// passed as the |source| parameter.
// * |parameter_mode| determines the parameter mode of |first|, |count| and
// |capacity|.
+ // * If |var_holes_converted| is given, any holes will be converted to
+ // undefined and the variable will be set according to whether or not there
+ // were any holes.
TNode<FixedArrayBase> ExtractFixedArray(
Node* source, Node* first, Node* count = nullptr,
Node* capacity = nullptr,
ExtractFixedArrayFlags extract_flags =
ExtractFixedArrayFlag::kAllFixedArrays,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ ParameterMode parameter_mode = INTPTR_PARAMETERS,
+ TVariable<BoolT>* var_holes_converted = nullptr);
TNode<FixedArrayBase> ExtractFixedArray(
TNode<FixedArrayBase> source, TNode<Smi> first, TNode<Smi> count,
@@ -1562,6 +1635,68 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SMI_PARAMETERS);
}
+ // Copy a portion of an existing FixedArray or FixedDoubleArray into a new
+ // FixedArray, including appropriate special handling for COW arrays.
+ // * |source| is either a FixedArray or FixedDoubleArray from which to copy
+ // elements. |source| is assumed to be non-empty.
+ // * |first| is the starting element index to copy from.
+ // * |count| is the number of elements to copy out of the source array
+ // starting from and including the element indexed by |first|.
+ // * |capacity| determines the size of the allocated result array, with
+ // |capacity| >= |count|.
+ // * |source_map| is the map of the |source|.
+ // * |from_kind| is the elements kind that is consistent with |source| being
+ // a FixedArray or FixedDoubleArray. This function only cares about double vs.
+ // non-double, so as to distinguish FixedDoubleArray vs. FixedArray. It does
+ // not care about holeyness. For example, when |source| is a FixedArray,
+ // PACKED/HOLEY_ELEMENTS can be used, but not PACKED_DOUBLE_ELEMENTS.
+ // * |allocation_flags| and |extract_flags| influence how the target
+ // FixedArray is allocated.
+ // * |parameter_mode| determines the parameter mode of |first|, |count| and
+ // |capacity|.
+ // * |convert_holes| is used to signify that the target array should use
+ // undefined in place of holes.
+ // * If |convert_holes| is kConvertToUndefined and |var_holes_converted| is
+ // not nullptr, then |var_holes_converted| is used to signal whether any
+ // holes were found and
+ // converted. The caller should use this information to decide which map is
+ // compatible with the result array. For example, if the input was of
+ // HOLEY_SMI_ELEMENTS kind, and a conversion took place, the result will be
+ // compatible only with HOLEY_ELEMENTS and PACKED_ELEMENTS.
+ TNode<FixedArray> ExtractToFixedArray(
+ Node* source, Node* first, Node* count, Node* capacity, Node* source_map,
+ ElementsKind from_kind = PACKED_ELEMENTS,
+ AllocationFlags allocation_flags = AllocationFlag::kNone,
+ ExtractFixedArrayFlags extract_flags =
+ ExtractFixedArrayFlag::kAllFixedArrays,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS,
+ HoleConversionMode convert_holes = HoleConversionMode::kDontConvert,
+ TVariable<BoolT>* var_holes_converted = nullptr);
+
+ // Attempt to copy a FixedDoubleArray to another FixedDoubleArray. In the case
+ // where the source array has a hole, produce a FixedArray instead where holes
+ // are replaced with undefined.
+ // * |source| is a FixedDoubleArray from which to copy elements.
+ // * |first| is the starting element index to copy from.
+ // * |count| is the number of elements to copy out of the source array
+ // starting from and including the element indexed by |first|.
+ // * |capacity| determines the size of the allocated result array, with
+ // |capacity| >= |count|.
+ // * |source_map| is the map of |source|. It will be used as the map of the
+ // target array if the target can stay a FixedDoubleArray. Otherwise if the
+ // target array needs to be a FixedArray, the FixedArrayMap will be used.
+ // * |var_holes_converted| is used to signal whether a FixedArray
+ // is produced or not.
+ // * |allocation_flags| and |extract_flags| influence how the target array is
+ // allocated.
+ // * |parameter_mode| determines the parameter mode of |first|, |count| and
+ // |capacity|.
+ TNode<FixedArrayBase> ExtractFixedDoubleArrayFillingHoles(
+ Node* source, Node* first, Node* count, Node* capacity, Node* source_map,
+ TVariable<BoolT>* var_holes_converted, AllocationFlags allocation_flags,
+ ExtractFixedArrayFlags extract_flags =
+ ExtractFixedArrayFlag::kAllFixedArrays,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
+
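
A hypothetical caller might dispatch on the out-variable afterwards; a minimal
sketch, assuming |source|, |first|, |count| and friends are already in scope:

    TVARIABLE(BoolT, var_holes_converted, Int32FalseConstant());
    TNode<FixedArrayBase> target = ExtractFixedDoubleArrayFillingHoles(
        source, first, count, capacity, source_map, &var_holes_converted,
        AllocationFlag::kNone);
    // If var_holes_converted is true, |target| is a FixedArray whose holes
    // became undefined; otherwise it is still a FixedDoubleArray using
    // |source_map| as its map.
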
// Copy the entire contents of a FixedArray or FixedDoubleArray to a new
// array, with appropriate handling for empty arrays and COW
// arrays.
@@ -1657,6 +1792,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
+ TNode<Number> ChangeUintPtrToTagged(TNode<UintPtrT> value);
TNode<Uint32T> ChangeNumberToUint32(TNode<Number> value);
TNode<Float64T> ChangeNumberToFloat64(SloppyTNode<Number> value);
TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(TNode<Number> value);
@@ -1736,6 +1872,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsNameDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsGlobalDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsExtensibleMap(SloppyTNode<Map> map);
+ TNode<BoolT> IsExtensibleNonPrototypeMap(TNode<Map> map);
TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsFastJSArray(SloppyTNode<Object> object,
SloppyTNode<Context> context);
@@ -1756,6 +1893,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsHashTable(SloppyTNode<HeapObject> object);
TNode<BoolT> IsEphemeronHashTable(SloppyTNode<HeapObject> object);
TNode<BoolT> IsHeapNumber(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsHeapNumberInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsOddballInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSArrayBuffer(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
@@ -1788,6 +1927,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsMap(SloppyTNode<HeapObject> object);
TNode<BoolT> IsMutableHeapNumber(SloppyTNode<HeapObject> object);
TNode<BoolT> IsName(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsNameInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsNativeContext(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNullOrJSReceiver(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNullOrUndefined(SloppyTNode<Object> object);
@@ -1804,7 +1944,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SloppyTNode<Map> map);
TNode<BoolT> IsSequentialStringInstanceType(
SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsShortExternalStringInstanceType(
+ TNode<BoolT> IsUncachedExternalStringInstanceType(
SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsSpecialReceiverInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsCustomElementsReceiverInstanceType(
@@ -1929,8 +2069,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Number> StringToNumber(TNode<String> input);
// Convert a Number to a String.
TNode<String> NumberToString(TNode<Number> input);
- // Convert an object to a name.
- TNode<Name> ToName(SloppyTNode<Context> context, SloppyTNode<Object> value);
// Convert a Non-Number object to a Number.
TNode<Number> NonNumberToNumber(
SloppyTNode<Context> context, SloppyTNode<HeapObject> input,
@@ -2530,11 +2668,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Object> value,
TNode<Context> context,
Label* opt_if_neutered);
- // Part of the above, refactored out to reuse in another place
+ // Part of the above, refactored out to reuse in another place.
void EmitBigTypedArrayElementStore(TNode<FixedTypedArrayBase> elements,
TNode<RawPtrT> backing_store,
TNode<IntPtrT> offset,
TNode<BigInt> bigint_value);
+ // Implements the BigInt part of
+ // https://tc39.github.io/proposal-bigint/#sec-numbertorawbytes,
+ // including truncation to 64 bits (i.e. modulo 2^64).
+ // {var_high} is only used on 32-bit platforms.
+ void BigIntToRawBytes(TNode<BigInt> bigint, TVariable<UintPtrT>* var_low,
+ TVariable<UintPtrT>* var_high);
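
A minimal sketch of how a typed-array store path might consume this helper
(variable names are illustrative assumptions):

    TVARIABLE(UintPtrT, var_low);
    TVARIABLE(UintPtrT, var_high);
    BigIntToRawBytes(bigint_value, &var_low, &var_high);
    // On 64-bit targets var_low carries all 64 bits and var_high stays unused;
    // on 32-bit targets the truncated value is split across both variables.
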
void EmitElementStore(Node* object, Node* key, Node* value, bool is_jsarray,
ElementsKind elements_kind,
@@ -2553,6 +2697,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
ElementsKind to_kind, bool is_jsarray,
Label* bailout);
+ void TransitionElementsKind(TNode<JSReceiver> object, TNode<Map> map,
+ ElementsKind from_kind, ElementsKind to_kind,
+ Label* bailout) {
+ TransitionElementsKind(object, map, from_kind, to_kind, true, bailout);
+ }
+
void TrapAllocationMemento(Node* object, Label* memento_found);
TNode<IntPtrT> PageFromAddress(TNode<IntPtrT> address);
@@ -2635,7 +2785,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
ParameterMode mode);
void InitializeFieldsWithRoot(Node* object, Node* start_offset,
- Node* end_offset, Heap::RootListIndex root);
+ Node* end_offset, RootIndex root);
Node* RelationalComparison(Operation op, Node* left, Node* right,
Node* context,
@@ -2644,26 +2794,32 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void BranchIfNumberRelationalComparison(Operation op, Node* left, Node* right,
Label* if_true, Label* if_false);
- void BranchIfNumberLessThan(Node* left, Node* right, Label* if_true,
- Label* if_false) {
+ void BranchIfNumberEqual(TNode<Number> left, TNode<Number> right,
+ Label* if_true, Label* if_false) {
+ BranchIfNumberRelationalComparison(Operation::kEqual, left, right, if_true,
+ if_false);
+ }
+
+ void BranchIfNumberLessThan(TNode<Number> left, TNode<Number> right,
+ Label* if_true, Label* if_false) {
BranchIfNumberRelationalComparison(Operation::kLessThan, left, right,
if_true, if_false);
}
- void BranchIfNumberLessThanOrEqual(Node* left, Node* right, Label* if_true,
- Label* if_false) {
+ void BranchIfNumberLessThanOrEqual(TNode<Number> left, TNode<Number> right,
+ Label* if_true, Label* if_false) {
BranchIfNumberRelationalComparison(Operation::kLessThanOrEqual, left, right,
if_true, if_false);
}
- void BranchIfNumberGreaterThan(Node* left, Node* right, Label* if_true,
- Label* if_false) {
+ void BranchIfNumberGreaterThan(TNode<Number> left, TNode<Number> right,
+ Label* if_true, Label* if_false) {
BranchIfNumberRelationalComparison(Operation::kGreaterThan, left, right,
if_true, if_false);
}
- void BranchIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_true,
- Label* if_false) {
+ void BranchIfNumberGreaterThanOrEqual(TNode<Number> left, TNode<Number> right,
+ Label* if_true, Label* if_false) {
BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left,
right, if_true, if_false);
}
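
With the tightened signatures, the operands must now be TNode<Number> rather
than untyped Node*; a hypothetical comparison reads (sketch):

    Label if_less(this), if_not_less(this);
    BranchIfNumberLessThan(lhs, rhs, &if_less, &if_not_less);
    BIND(&if_less);
    // ... code for the lhs < rhs case ...
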
@@ -2694,6 +2850,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SloppyTNode<Object> key,
HasPropertyLookupMode mode);
+ // The _Inline suffix avoids a naming conflict with the builtin function
+ // namespace.
+ TNode<Oddball> HasProperty_Inline(TNode<Context> context,
+ TNode<JSReceiver> object,
+ TNode<Object> key) {
+ return HasProperty(context, object, key,
+ HasPropertyLookupMode::kHasProperty);
+ }
+
Node* Typeof(Node* value);
TNode<Object> GetSuperConstructor(SloppyTNode<Context> context,
@@ -2710,17 +2874,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsRuntimeCallStatsEnabled();
- // TypedArray/ArrayBuffer helpers
+ // JSArrayBuffer helpers
+ TNode<Uint32T> LoadJSArrayBufferBitField(TNode<JSArrayBuffer> array_buffer);
+ TNode<RawPtrT> LoadJSArrayBufferBackingStore(
+ TNode<JSArrayBuffer> array_buffer);
Node* IsDetachedBuffer(Node* buffer);
void ThrowIfArrayBufferIsDetached(SloppyTNode<Context> context,
TNode<JSArrayBuffer> array_buffer,
const char* method_name);
+
+ // JSArrayBufferView helpers
+ TNode<JSArrayBuffer> LoadJSArrayBufferViewBuffer(
+ TNode<JSArrayBufferView> array_buffer_view);
+ TNode<UintPtrT> LoadJSArrayBufferViewByteLength(
+ TNode<JSArrayBufferView> array_buffer_view);
+ TNode<UintPtrT> LoadJSArrayBufferViewByteOffset(
+ TNode<JSArrayBufferView> array_buffer_view);
void ThrowIfArrayBufferViewBufferIsDetached(
SloppyTNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
const char* method_name);
- TNode<JSArrayBuffer> LoadArrayBufferViewBuffer(
- TNode<JSArrayBufferView> array_buffer_view);
- TNode<RawPtrT> LoadArrayBufferBackingStore(TNode<JSArrayBuffer> array_buffer);
+
+ // JSTypedArray helpers
+ TNode<Smi> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
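
Taken together, the renamed helpers compose as in this sketch (everything other
than the helper names is an assumption):

    Label if_detached(this);
    TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(view);
    TNode<UintPtrT> byte_length = LoadJSArrayBufferViewByteLength(view);
    TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(buffer);
    GotoIf(IsDetachedBuffer(buffer), &if_detached);
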
TNode<IntPtrT> ElementOffsetFromIndex(Node* index, ElementsKind kind,
ParameterMode mode, int base_size = 0);
@@ -2851,6 +3026,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Uint32T> end_descriptor,
const ForEachDescriptorBodyFunction& body);
+ typedef std::function<void(TNode<Name> key, TNode<Object> value)>
+ ForEachKeyValueFunction;
+
+ // For each JSObject property (in DescriptorArray order), check if the key is
+ // enumerable, and if so, load the value from the receiver and evaluate the
+ // closure.
+ void ForEachEnumerableOwnProperty(TNode<Context> context, TNode<Map> map,
+ TNode<JSObject> object,
+ const ForEachKeyValueFunction& body,
+ Label* bailout);
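
The callback receives each enumerable own property in DescriptorArray order; a
minimal sketch (the lambda body is an assumption):

    ForEachEnumerableOwnProperty(
        context, map, object,
        [=](TNode<Name> key, TNode<Object> value) {
          // e.g. accumulate the |key|/|value| pair here.
        },
        &bailout);
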
+
TNode<Object> CallGetterIfAccessor(Node* value, Node* details, Node* context,
Node* receiver, Label* if_bailout,
GetOwnPropertyMode mode = kCallJSGetter);
@@ -2889,12 +3075,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
Label* bailout);
- TNode<String> AllocateSlicedString(Heap::RootListIndex map_root_index,
- TNode<Smi> length, TNode<String> parent,
- TNode<Smi> offset);
+ TNode<String> AllocateSlicedString(RootIndex map_root_index,
+ TNode<Uint32T> length,
+ TNode<String> parent, TNode<Smi> offset);
- TNode<String> AllocateConsString(Heap::RootListIndex map_root_index,
- TNode<Smi> length, TNode<String> first,
+ TNode<String> AllocateConsString(RootIndex map_root_index,
+ TNode<Uint32T> length, TNode<String> first,
TNode<String> second, AllocationFlags flags);
// Allocate a MutableHeapNumber without initializing its value.
@@ -2918,7 +3104,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<String> AllocAndCopyStringCharacters(Node* from,
Node* from_instance_type,
TNode<IntPtrT> from_index,
- TNode<Smi> character_count);
+ TNode<IntPtrT> character_count);
static const int kElementLoopUnrollThreshold = 8;
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 745aa1aa24..4630fe7639 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -12,6 +12,7 @@ namespace v8 {
namespace internal {
// Forward declarations.
+class CodeStubDescriptor;
class Isolate;
namespace compiler {
class CodeAssemblerState;
@@ -114,7 +115,7 @@ class CodeStub : public ZoneObject {
static const char* MajorName(Major major_key);
explicit CodeStub(Isolate* isolate) : minor_key_(0), isolate_(isolate) {}
- virtual ~CodeStub() {}
+ virtual ~CodeStub() = default;
static void GenerateStubsAheadOfTime(Isolate* isolate);
@@ -299,7 +300,9 @@ class CodeStubDescriptor {
DCHECK(!stack_parameter_count_.is_valid());
}
- void set_call_descriptor(CallInterfaceDescriptor d) { call_descriptor_ = d; }
+ void set_call_descriptor(CallInterfaceDescriptor d) {
+ call_descriptor_ = std::move(d);
+ }
CallInterfaceDescriptor call_descriptor() const { return call_descriptor_; }
int GetRegisterParameterCount() const {
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 10dfdbbd4a..198ee8f572 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -12,19 +12,17 @@
namespace v8 {
namespace internal {
-#define UNARY_MATH_FUNCTION(name, generator) \
- static UnaryMathFunctionWithIsolate fast_##name##_function = nullptr; \
- double std_##name(double x, Isolate* isolate) { return std::name(x); } \
- void init_fast_##name##_function(Isolate* isolate) { \
- if (FLAG_fast_math) fast_##name##_function = generator(isolate); \
- if (!fast_##name##_function) fast_##name##_function = std_##name; \
- } \
- void lazily_initialize_fast_##name(Isolate* isolate) { \
- if (!fast_##name##_function) init_fast_##name##_function(isolate); \
- } \
- double fast_##name(double x, Isolate* isolate) { \
- return (*fast_##name##_function)(x, isolate); \
- }
+#define UNARY_MATH_FUNCTION(name, generator) \
+ static UnaryMathFunction fast_##name##_function = nullptr; \
+ double std_##name(double x) { return std::name(x); } \
+ void init_fast_##name##_function() { \
+ if (FLAG_fast_math) fast_##name##_function = generator(); \
+ if (!fast_##name##_function) fast_##name##_function = std_##name; \
+ } \
+ void lazily_initialize_fast_##name() { \
+ if (!fast_##name##_function) init_fast_##name##_function(); \
+ } \
+ double fast_##name(double x) { return (*fast_##name##_function)(x); }
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction)
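
After this change, UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction) expands to
roughly the following, with no Isolate* threaded through (sketch of the macro
expansion):

    static UnaryMathFunction fast_sqrt_function = nullptr;
    double std_sqrt(double x) { return std::sqrt(x); }
    void init_fast_sqrt_function() {
      if (FLAG_fast_math) fast_sqrt_function = CreateSqrtFunction();
      if (!fast_sqrt_function) fast_sqrt_function = std_sqrt;
    }
    void lazily_initialize_fast_sqrt() {
      if (!fast_sqrt_function) init_fast_sqrt_function();
    }
    double fast_sqrt(double x) { return (*fast_sqrt_function)(x); }
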
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 1b57c74447..3e07c86fc2 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -5,21 +5,19 @@
#ifndef V8_CODEGEN_H_
#define V8_CODEGEN_H_
-#include "src/globals.h"
-
namespace v8 {
namespace internal {
// Results of the library implementation of transcendental functions may differ
// from the one we use in our generated code. Therefore we use the same
// generated code both in runtime and compiled code.
-typedef double (*UnaryMathFunctionWithIsolate)(double x, Isolate* isolate);
+typedef double (*UnaryMathFunction)(double x);
-UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate);
+UnaryMathFunction CreateSqrtFunction();
// Custom implementation of math functions.
-double fast_sqrt(double input, Isolate* isolate);
-void lazily_initialize_fast_sqrt(Isolate* isolate);
+double fast_sqrt(double input);
+void lazily_initialize_fast_sqrt();
} // namespace internal
} // namespace v8
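
Call sites simplify accordingly (sketch):

    lazily_initialize_fast_sqrt();
    double root = fast_sqrt(2.0);  // previously fast_sqrt(2.0, isolate)
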
diff --git a/deps/v8/src/collector.h b/deps/v8/src/collector.h
index a3e940663f..bfaa9d42ce 100644
--- a/deps/v8/src/collector.h
+++ b/deps/v8/src/collector.h
@@ -184,7 +184,7 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
: Collector<T, growth_factor, max_growth>(initial_capacity),
sequence_start_(kNoSequence) {}
- virtual ~SequenceCollector() {}
+ ~SequenceCollector() override = default;
void StartSequence() {
DCHECK_EQ(sequence_start_, kNoSequence);
@@ -208,7 +208,7 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
sequence_start_ = kNoSequence;
}
- virtual void Reset() {
+ void Reset() override {
sequence_start_ = kNoSequence;
this->Collector<T, growth_factor, max_growth>::Reset();
}
@@ -218,7 +218,7 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
int sequence_start_;
// Move the currently active sequence to the new chunk.
- virtual void NewChunk(int new_capacity) {
+ void NewChunk(int new_capacity) override {
if (sequence_start_ == kNoSequence) {
// Fall back on default behavior if no sequence has been started.
this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 61b83b1a18..0068c83362 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -34,8 +34,6 @@ CompilationCache::CompilationCache(Isolate* isolate)
}
}
-CompilationCache::~CompilationCache() {}
-
Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
DCHECK(generation < generations_);
Handle<CompilationCacheTable> result;
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 3a4fe2e7b5..ed3f1986b6 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -213,7 +213,7 @@ class CompilationCache {
private:
explicit CompilationCache(Isolate* isolate);
- ~CompilationCache();
+ ~CompilationCache() = default;
base::HashMap* EagerOptimizingSet();
diff --git a/deps/v8/src/compilation-statistics.h b/deps/v8/src/compilation-statistics.h
index cb66f86532..bfd9a5c66a 100644
--- a/deps/v8/src/compilation-statistics.h
+++ b/deps/v8/src/compilation-statistics.h
@@ -24,7 +24,7 @@ struct AsPrintableStatistics {
class CompilationStatistics final : public Malloced {
public:
- CompilationStatistics() {}
+ CompilationStatistics() = default;
class BasicStats {
public:
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
index aed4960119..827a2aa18d 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -21,16 +21,14 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
enum class Status {
kInitial,
- kPrepared,
- kCompiled,
- kHasErrorsToReport,
+ kReadyToFinalize,
kDone,
kFailed,
};
CompilerDispatcherJob(Type type) : type_(type), status_(Status::kInitial) {}
- virtual ~CompilerDispatcherJob() {}
+ virtual ~CompilerDispatcherJob() = default;
Type type() const { return type_; }
@@ -48,26 +46,19 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
// Return true if the next step can be run on any thread.
bool NextStepCanRunOnAnyThread() const {
- return status() == Status::kPrepared;
+ return status() == Status::kInitial;
}
// Casts to implementations.
const UnoptimizedCompileJob* AsUnoptimizedCompileJob() const;
- // Transition from kInitial to kPrepared. Must only be invoked on the
- // main thread.
- virtual void PrepareOnMainThread(Isolate* isolate) = 0;
-
- // Transition from kPrepared to kCompiled (or kReportErrors).
+ // Transition from kInitial to kReadyToFinalize.
virtual void Compile(bool on_background_thread) = 0;
- // Transition from kCompiled to kDone (or kFailed). Must only be invoked on
- // the main thread.
- virtual void FinalizeOnMainThread(Isolate* isolate) = 0;
-
- // Transition from kReportErrors to kFailed. Must only be invoked on the main
- // thread.
- virtual void ReportErrorsOnMainThread(Isolate* isolate) = 0;
+ // Transition from kReadyToFinalize to kDone (or kFailed). Must only be
+ // invoked on the main thread.
+ virtual void FinalizeOnMainThread(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) = 0;
// Free all resources. Must only be invoked on the main thread.
virtual void ResetOnMainThread(Isolate* isolate) = 0;
@@ -75,9 +66,6 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
// Estimate how long the next step will take using the tracer.
virtual double EstimateRuntimeOfNextStepInMs() const = 0;
- // Print short description of job. Must only be invoked on the main thread.
- virtual void ShortPrintOnMainThread() = 0;
-
protected:
void set_status(Status status) { status_ = status; }
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
index 862efda83e..ab8bc5adec 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
@@ -63,7 +63,7 @@ CompilerDispatcherTracer::CompilerDispatcherTracer(Isolate* isolate)
}
}
-CompilerDispatcherTracer::~CompilerDispatcherTracer() {}
+CompilerDispatcherTracer::~CompilerDispatcherTracer() = default;
void CompilerDispatcherTracer::RecordPrepare(double duration_ms) {
base::LockGuard<base::Mutex> lock(&mutex_);
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index 6bbcefa781..6148770385 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -4,8 +4,7 @@
#include "src/compiler-dispatcher/compiler-dispatcher.h"
-#include "include/v8-platform.h"
-#include "include/v8.h"
+#include "src/ast/ast.h"
#include "src/base/platform/time.h"
#include "src/base/template-utils.h"
#include "src/cancelable-task.h"
@@ -13,6 +12,7 @@
#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/compiler-dispatcher/unoptimized-compile-job.h"
#include "src/flags.h"
+#include "src/global-handles.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -22,157 +22,35 @@ namespace {
enum class ExceptionHandling { kSwallow, kThrow };
-bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
- ExceptionHandling exception_handling) {
+void FinalizeJobOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
+ Handle<SharedFunctionInfo> shared,
+ ExceptionHandling exception_handling) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherForgroundStep");
- switch (job->status()) {
- case CompilerDispatcherJob::Status::kInitial:
- job->PrepareOnMainThread(isolate);
- break;
- case CompilerDispatcherJob::Status::kPrepared:
- job->Compile(false);
- break;
- case CompilerDispatcherJob::Status::kCompiled:
- job->FinalizeOnMainThread(isolate);
- break;
- case CompilerDispatcherJob::Status::kHasErrorsToReport:
- job->ReportErrorsOnMainThread(isolate);
- break;
- case CompilerDispatcherJob::Status::kFailed:
- case CompilerDispatcherJob::Status::kDone:
- UNREACHABLE();
- }
+ DCHECK_EQ(job->status(), CompilerDispatcherJob::Status::kReadyToFinalize);
+ job->FinalizeOnMainThread(isolate, shared);
DCHECK_EQ(job->IsFailed(), isolate->has_pending_exception());
if (job->IsFailed() && exception_handling == ExceptionHandling::kSwallow) {
isolate->clear_pending_exception();
}
- return job->IsFailed();
-}
-
-void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
- DCHECK(job->NextStepCanRunOnAnyThread());
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherBackgroundStep");
- switch (job->status()) {
- case CompilerDispatcherJob::Status::kPrepared:
- job->Compile(true);
- break;
- default:
- UNREACHABLE();
- }
}
// Theoretically we get at most 50ms of idle time; however, it's unlikely
// that we'll get all of it, so try to be conservative.
const double kMaxIdleTimeToExpectInMs = 40;
-class MemoryPressureTask : public CancelableTask {
- public:
- MemoryPressureTask(CancelableTaskManager* task_manager,
- CompilerDispatcher* dispatcher);
- ~MemoryPressureTask() override;
-
- // CancelableTask implementation.
- void RunInternal() override;
-
- private:
- CompilerDispatcher* dispatcher_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryPressureTask);
-};
-
-MemoryPressureTask::MemoryPressureTask(CancelableTaskManager* task_manager,
- CompilerDispatcher* dispatcher)
- : CancelableTask(task_manager), dispatcher_(dispatcher) {}
-
-MemoryPressureTask::~MemoryPressureTask() {}
-
-void MemoryPressureTask::RunInternal() {
- dispatcher_->AbortAll(BlockingBehavior::kDontBlock);
-}
-
} // namespace
-class CompilerDispatcher::AbortTask : public CancelableTask {
- public:
- AbortTask(CancelableTaskManager* task_manager,
- CompilerDispatcher* dispatcher);
- ~AbortTask() override;
-
- // CancelableTask implementation.
- void RunInternal() override;
-
- private:
- CompilerDispatcher* dispatcher_;
-
- DISALLOW_COPY_AND_ASSIGN(AbortTask);
-};
-
-CompilerDispatcher::AbortTask::AbortTask(CancelableTaskManager* task_manager,
- CompilerDispatcher* dispatcher)
- : CancelableTask(task_manager), dispatcher_(dispatcher) {}
-
-CompilerDispatcher::AbortTask::~AbortTask() {}
-
-void CompilerDispatcher::AbortTask::RunInternal() {
- dispatcher_->AbortInactiveJobs();
-}
-
-class CompilerDispatcher::WorkerTask : public CancelableTask {
- public:
- WorkerTask(CancelableTaskManager* task_manager,
- CompilerDispatcher* dispatcher);
- ~WorkerTask() override;
-
- // CancelableTask implementation.
- void RunInternal() override;
-
- private:
- CompilerDispatcher* dispatcher_;
-
- DISALLOW_COPY_AND_ASSIGN(WorkerTask);
-};
-
-CompilerDispatcher::WorkerTask::WorkerTask(CancelableTaskManager* task_manager,
- CompilerDispatcher* dispatcher)
- : CancelableTask(task_manager), dispatcher_(dispatcher) {}
-
-CompilerDispatcher::WorkerTask::~WorkerTask() {}
-
-void CompilerDispatcher::WorkerTask::RunInternal() {
- dispatcher_->DoBackgroundWork();
-}
-
-class CompilerDispatcher::IdleTask : public CancelableIdleTask {
- public:
- IdleTask(CancelableTaskManager* task_manager, CompilerDispatcher* dispatcher);
- ~IdleTask() override;
-
- // CancelableIdleTask implementation.
- void RunInternal(double deadline_in_seconds) override;
-
- private:
- CompilerDispatcher* dispatcher_;
-
- DISALLOW_COPY_AND_ASSIGN(IdleTask);
-};
-
-CompilerDispatcher::IdleTask::IdleTask(CancelableTaskManager* task_manager,
- CompilerDispatcher* dispatcher)
- : CancelableIdleTask(task_manager), dispatcher_(dispatcher) {}
-
-CompilerDispatcher::IdleTask::~IdleTask() {}
-
-void CompilerDispatcher::IdleTask::RunInternal(double deadline_in_seconds) {
- dispatcher_->DoIdleWork(deadline_in_seconds);
-}
-
CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
size_t max_stack_size)
: isolate_(isolate),
+ allocator_(isolate->allocator()),
+ worker_thread_runtime_call_stats_(
+ isolate->counters()->worker_thread_runtime_call_stats()),
+ background_compile_timer_(
+ isolate->counters()->compile_function_on_background()),
+ taskrunner_(platform->GetForegroundTaskRunner(
+ reinterpret_cast<v8::Isolate*>(isolate))),
platform_(platform),
max_stack_size_(max_stack_size),
trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
@@ -201,6 +79,8 @@ CompilerDispatcher::~CompilerDispatcher() {
bool CompilerDispatcher::CanEnqueue() {
if (!IsEnabled()) return false;
+ // TODO(rmcilroy): Investigate if MemoryPressureLevel::kNone is ever sent on
+ // Android; if not, remove this check.
if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
return false;
}
@@ -213,86 +93,65 @@ bool CompilerDispatcher::CanEnqueue() {
return true;
}
-bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
- if (!CanEnqueue()) return false;
-
- // We only handle functions (no eval / top-level code / native) that are
- // attached to a script.
- if (!function->script()->IsScript() || function->is_toplevel() ||
- function->native()) {
- return false;
- }
+base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
+ const ParseInfo* outer_parse_info, const AstRawString* function_name,
+ const FunctionLiteral* function_literal) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherEnqueue");
+ RuntimeCallTimerScope runtimeTimer(
+ isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher);
- return true;
-}
+ if (!CanEnqueue()) return base::nullopt;
-CompilerDispatcher::JobId CompilerDispatcher::Enqueue(
- std::unique_ptr<CompilerDispatcherJob> job) {
- DCHECK(!job->IsFinished());
- JobMap::const_iterator it = InsertJob(std::move(job));
- ConsiderJobForBackgroundProcessing(it->second.get());
- ScheduleIdleTaskIfNeeded();
- return it->first;
-}
-
-CompilerDispatcher::JobId CompilerDispatcher::EnqueueAndStep(
- std::unique_ptr<CompilerDispatcherJob> job) {
- DCHECK(!job->IsFinished());
+ std::unique_ptr<CompilerDispatcherJob> job(new UnoptimizedCompileJob(
+ tracer_.get(), allocator_, outer_parse_info, function_name,
+ function_literal, worker_thread_runtime_call_stats_,
+ background_compile_timer_, max_stack_size_));
JobMap::const_iterator it = InsertJob(std::move(job));
+ JobId id = it->first;
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: stepping ");
- it->second->ShortPrintOnMainThread();
- PrintF("\n");
+ PrintF("CompilerDispatcher: enqueued job %zu for function literal id %d\n",
+ id, function_literal->function_literal_id());
}
- DoNextStepOnMainThread(isolate_, it->second.get(),
- ExceptionHandling::kSwallow);
+
+ // Post an idle task and a background worker task to perform the compilation
+ // either on the worker thread or during idle time (whichever comes first).
ConsiderJobForBackgroundProcessing(it->second.get());
- RemoveIfFinished(it);
ScheduleIdleTaskIfNeeded();
- return it->first;
+ return base::make_optional(id);
}
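
Callers now have to handle the possibility that enqueueing is rejected; a
hypothetical call site (sketch, not taken from this patch):

    base::Optional<CompilerDispatcher::JobId> maybe_id =
        dispatcher->Enqueue(outer_parse_info, function_name, function_literal);
    if (maybe_id) {
      // Later, once a SharedFunctionInfo exists for the literal:
      dispatcher->RegisterSharedFunctionInfo(*maybe_id, *shared_info);
    }
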
-bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherEnqueue");
- if (!CanEnqueue(function)) return false;
- if (IsEnqueued(function)) return true;
-
- if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: enqueuing ");
- function->ShortPrint();
- PrintF(" for parse and compile\n");
- }
+bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; }
- std::unique_ptr<CompilerDispatcherJob> job(new UnoptimizedCompileJob(
- isolate_, tracer_.get(), function, max_stack_size_));
- Enqueue(std::move(job));
- return true;
+bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
+ if (jobs_.empty()) return false;
+ return GetJobFor(function) != jobs_.end();
}
-bool CompilerDispatcher::EnqueueAndStep(Handle<SharedFunctionInfo> function) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherEnqueueAndStep");
- if (!CanEnqueue(function)) return false;
- if (IsEnqueued(function)) return true;
+bool CompilerDispatcher::IsEnqueued(JobId job_id) const {
+ return jobs_.find(job_id) != jobs_.end();
+}
+void CompilerDispatcher::RegisterSharedFunctionInfo(
+ JobId job_id, SharedFunctionInfo* function) {
+ DCHECK_NE(jobs_.find(job_id), jobs_.end());
+ DCHECK_EQ(job_id_to_shared_.find(job_id), job_id_to_shared_.end());
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: enqueuing ");
+ PrintF("CompilerDispatcher: registering ");
function->ShortPrint();
- PrintF(" for parse and compile\n");
+ PrintF(" with job id %zu\n", job_id);
}
- std::unique_ptr<CompilerDispatcherJob> job(new UnoptimizedCompileJob(
- isolate_, tracer_.get(), function, max_stack_size_));
- EnqueueAndStep(std::move(job));
- return true;
-}
+ // Make a global handle to the function.
+ Handle<SharedFunctionInfo> function_handle =
+ isolate_->global_handles()->Create(function);
-bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; }
+ // Register mapping.
+ job_id_to_shared_.insert(std::make_pair(job_id, function_handle));
+ shared_to_unoptimized_job_id_.Set(function_handle, job_id);
-bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
- if (jobs_.empty()) return false;
- return GetJobFor(function) != jobs_.end();
+ // Schedule an idle task to finalize the job if it is ready.
+ ScheduleIdleTaskIfNeeded();
}
void CompilerDispatcher::WaitForJobIfRunningOnBackground(
@@ -316,54 +175,41 @@ void CompilerDispatcher::WaitForJobIfRunningOnBackground(
DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end());
}
-bool CompilerDispatcher::FinishNow(CompilerDispatcherJob* job) {
+bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherFinishNow");
+ RuntimeCallTimerScope runtimeTimer(
+ isolate_, RuntimeCallCounterId::kCompileFinishNowOnDispatcher);
if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: finishing ");
- job->ShortPrintOnMainThread();
+ function->ShortPrint();
PrintF(" now\n");
}
+
+ JobMap::const_iterator it = GetJobFor(function);
+ CHECK(it != jobs_.end());
+ CompilerDispatcherJob* job = it->second.get();
WaitForJobIfRunningOnBackground(job);
while (!job->IsFinished()) {
- DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
- }
- return !job->IsFailed();
-}
-
-bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherFinishNow");
- JobMap::const_iterator job = GetJobFor(function);
- CHECK(job != jobs_.end());
- bool result = FinishNow(job->second.get());
- RemoveIfFinished(job);
- return result;
-}
-
-void CompilerDispatcher::FinishAllNow() {
- // First finish all jobs not running in background
- for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
- CompilerDispatcherJob* job = it->second.get();
- bool is_running_in_background;
- {
- base::LockGuard<base::Mutex> lock(&mutex_);
- is_running_in_background =
- running_background_jobs_.find(job) != running_background_jobs_.end();
- pending_background_jobs_.erase(job);
- }
- if (!is_running_in_background) {
- while (!job->IsFinished()) {
- DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
+ switch (job->status()) {
+ case CompilerDispatcherJob::Status::kInitial:
+ job->Compile(false);
+ break;
+ case CompilerDispatcherJob::Status::kReadyToFinalize: {
+ FinalizeJobOnMainThread(isolate_, job, function,
+ ExceptionHandling::kThrow);
+ break;
}
- it = RemoveIfFinished(it);
- } else {
- ++it;
+ case CompilerDispatcherJob::Status::kFailed:
+ case CompilerDispatcherJob::Status::kDone:
+ UNREACHABLE();
}
}
- // Potentially wait for jobs that were running in background
- for (auto it = jobs_.cbegin(); it != jobs_.cend();
- it = RemoveIfFinished(it)) {
- FinishNow(it->second.get());
- }
+ DCHECK_EQ(job->IsFailed(), isolate_->has_pending_exception());
+ DCHECK(job->IsFinished());
+ bool result = !job->IsFailed();
+ RemoveJob(it);
+ return result;
}
void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
@@ -373,9 +219,7 @@ void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
for (auto& it : jobs_) {
WaitForJobIfRunningOnBackground(it.second.get());
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: aborted ");
- it.second->ShortPrintOnMainThread();
- PrintF("\n");
+ PrintF("CompilerDispatcher: aborted job %zu\n", it.first);
}
it.second->ResetOnMainThread(isolate_);
}
@@ -394,6 +238,7 @@ void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
base::LockGuard<base::Mutex> lock(&mutex_);
abort_ = true;
pending_background_jobs_.clear();
+ idle_task_scheduled_ = false; // Idle task cancelled by TryAbortAll.
}
AbortInactiveJobs();
@@ -421,9 +266,7 @@ void CompilerDispatcher::AbortInactiveJobs() {
}
}
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: aborted ");
- job->second->ShortPrintOnMainThread();
- PrintF("\n");
+ PrintF("CompilerDispatcher: aborted job %zu\n", job->first);
}
it = RemoveJob(job);
}
@@ -458,9 +301,9 @@ void CompilerDispatcher::MemoryPressureNotification(
abort_ = true;
pending_background_jobs_.clear();
}
- platform_->CallOnForegroundThread(
- reinterpret_cast<v8::Isolate*>(isolate_),
- new MemoryPressureTask(task_manager_.get(), this));
+ taskrunner_->PostTask(MakeCancelableLambdaTask(task_manager_.get(), [this] {
+ AbortAll(BlockingBehavior::kDontBlock);
+ }));
}
}
@@ -470,22 +313,20 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
JobMap::const_iterator job = jobs_.end();
if (job_id_ptr) {
job = jobs_.find(*job_id_ptr);
- DCHECK(job == jobs_.end() ||
- job->second->AsUnoptimizedCompileJob()->IsAssociatedWith(shared));
}
return job;
}
void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- if (!platform_->IdleTasksEnabled(v8_isolate)) return;
+ if (!taskrunner_->IdleTasksEnabled()) return;
{
base::LockGuard<base::Mutex> lock(&mutex_);
- if (idle_task_scheduled_) return;
+ if (idle_task_scheduled_ || abort_) return;
idle_task_scheduled_ = true;
}
- platform_->CallIdleOnForegroundThread(
- v8_isolate, new IdleTask(task_manager_.get(), this));
+ taskrunner_->PostIdleTask(MakeCancelableIdleLambdaTask(
+ task_manager_.get(),
+ [this](double deadline_in_seconds) { DoIdleWork(deadline_in_seconds); }));
}
void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
@@ -494,9 +335,8 @@ void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
}
void CompilerDispatcher::ScheduleAbortTask() {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- platform_->CallOnForegroundThread(v8_isolate,
- new AbortTask(task_manager_.get(), this));
+ taskrunner_->PostTask(MakeCancelableLambdaTask(
+ task_manager_.get(), [this] { AbortInactiveJobs(); }));
}
void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
@@ -520,11 +360,13 @@ void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
}
++num_worker_tasks_;
}
- platform_->CallOnWorkerThread(
- base::make_unique<WorkerTask>(task_manager_.get(), this));
+ platform_->CallOnWorkerThread(MakeCancelableLambdaTask(
+ task_manager_.get(), [this] { DoBackgroundWork(); }));
}
void CompilerDispatcher::DoBackgroundWork() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherDoBackgroundWork");
for (;;) {
CompilerDispatcherJob* job = nullptr;
{
@@ -547,7 +389,10 @@ void CompilerDispatcher::DoBackgroundWork() {
PrintF("CompilerDispatcher: doing background work\n");
}
- DoNextStepOnBackgroundThread(job);
+ DCHECK(job->NextStepCanRunOnAnyThread());
+ DCHECK_EQ(job->status(), CompilerDispatcherJob::Status::kInitial);
+ job->Compile(true);
+
// Unconditionally schedule an idle task, as all background steps have to be
// followed by a main thread step.
ScheduleIdleTaskFromAnyThread();
@@ -579,6 +424,8 @@ void CompilerDispatcher::DoBackgroundWork() {
}
void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompilerDispatcherDoIdleWork");
bool aborted = false;
{
base::LockGuard<base::Mutex> lock(&mutex_);
@@ -593,11 +440,11 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
// Number of jobs that are unlikely to make progress during any idle callback
// due to their estimated duration.
- size_t too_long_jobs = 0;
+ size_t jobs_unlikely_to_progress = 0;
// Iterate over all available jobs & remaining time. For each job, decide
// whether to 1) skip it (if it would take too long), 2) erase it (if it's
- // finished), or 3) make progress on it.
+ // finished), or 3) make progress on it if possible.
double idle_time_in_seconds =
deadline_in_seconds - platform_->MonotonicallyIncreasingTime();
@@ -620,6 +467,7 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
++job;
continue;
}
+ DCHECK(!job->second->IsFinished());
auto it = pending_background_jobs_.find(job->second.get());
double estimate_in_ms = job->second->EstimateRuntimeOfNextStepInMs();
if (idle_time_in_seconds <
@@ -628,29 +476,44 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
// If there's not enough time left, try to estimate whether we would
// have managed to finish the job in a large idle task to assess
// whether we should ask for another idle callback.
- if (estimate_in_ms > kMaxIdleTimeToExpectInMs) ++too_long_jobs;
+ // TODO(rmcilroy): Consider running the job anyway when we have a long
+ // idle time since this would probably be the best time to run.
+ if (estimate_in_ms > kMaxIdleTimeToExpectInMs)
+ ++jobs_unlikely_to_progress;
if (it == pending_background_jobs_.end()) {
lock.reset();
ConsiderJobForBackgroundProcessing(job->second.get());
}
++job;
- } else if (job->second->IsFinished()) {
- DCHECK(it == pending_background_jobs_.end());
- lock.reset();
- job = RemoveJob(job);
- continue;
- } else {
- // Do one step, and keep processing the job (as we don't advance the
- // iterator).
+ } else if (job->second->status() ==
+ CompilerDispatcherJob::Status::kInitial) {
if (it != pending_background_jobs_.end()) {
pending_background_jobs_.erase(it);
}
lock.reset();
- DoNextStepOnMainThread(isolate_, job->second.get(),
- ExceptionHandling::kSwallow);
+ job->second->Compile(false);
+ // Don't advance |job|, so that it is finalized immediately on the next
+ // loop iteration.
+ } else {
+ DCHECK_EQ(job->second->status(),
+ CompilerDispatcherJob::Status::kReadyToFinalize);
+ DCHECK(it == pending_background_jobs_.end());
+ lock.reset();
+
+ auto shared_it = job_id_to_shared_.find(job->first);
+ if (shared_it != job_id_to_shared_.end()) {
+ Handle<SharedFunctionInfo> shared = shared_it->second;
+ FinalizeJobOnMainThread(isolate_, job->second.get(), shared,
+ ExceptionHandling::kSwallow);
+ DCHECK(job->second->IsFinished());
+ job = RemoveJob(job);
+ } else {
+ // If we can't step the job yet, go to the next job.
+ ++jobs_unlikely_to_progress;
+ ++job;
+ }
}
}
- if (jobs_.size() > too_long_jobs) ScheduleIdleTaskIfNeeded();
+ if (jobs_.size() > jobs_unlikely_to_progress) ScheduleIdleTaskIfNeeded();
}
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveIfFinished(
@@ -661,9 +524,8 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveIfFinished(
if (trace_compiler_dispatcher_) {
bool result = !job->second->IsFailed();
- PrintF("CompilerDispatcher: finished working on ");
- job->second->ShortPrintOnMainThread();
- PrintF(": %s\n", result ? "success" : "failure");
+ PrintF("CompilerDispatcher: finished working on job %zu: %s\n", job->first,
+ result ? "success" : "failure");
tracer_->DumpStatistics();
}
@@ -677,44 +539,34 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::InsertJob(
std::tie(it, added) =
jobs_.insert(std::make_pair(next_job_id_++, std::move(job)));
DCHECK(added);
-
- JobId id = it->first;
- CompilerDispatcherJob* inserted_job = it->second.get();
-
- // Maps unoptimized jobs' SFIs to their job id.
- if (inserted_job->type() ==
- CompilerDispatcherJob::Type::kUnoptimizedCompile) {
- Handle<SharedFunctionInfo> shared =
- inserted_job->AsUnoptimizedCompileJob()->shared();
- if (!shared.is_null()) {
- shared_to_unoptimized_job_id_.Set(shared, id);
- }
- }
-
return it;
}
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
CompilerDispatcher::JobMap::const_iterator it) {
CompilerDispatcherJob* job = it->second.get();
- job->ResetOnMainThread(isolate_);
- // Unmaps unoptimized jobs' SFIs to their job id.
- if (job->type() == CompilerDispatcherJob::Type::kUnoptimizedCompile) {
- Handle<SharedFunctionInfo> shared =
- job->AsUnoptimizedCompileJob()->shared();
- if (!shared.is_null()) {
- JobId deleted_id;
- shared_to_unoptimized_job_id_.Delete(shared, &deleted_id);
- DCHECK_EQ(it->first, deleted_id);
- }
+ // Delete the SFI associated with the job, if it has been registered.
+ auto shared_it = job_id_to_shared_.find(it->first);
+ if (shared_it != job_id_to_shared_.end()) {
+ Handle<SharedFunctionInfo> shared = shared_it->second;
+
+ JobId deleted_id;
+ shared_to_unoptimized_job_id_.Delete(shared, &deleted_id);
+ DCHECK_EQ(it->first, deleted_id);
+
+ job_id_to_shared_.erase(shared_it);
+ GlobalHandles::Destroy(Handle<Object>::cast(shared).location());
}
+ job->ResetOnMainThread(isolate_);
+
it = jobs_.erase(it);
if (jobs_.empty()) {
base::LockGuard<base::Mutex> lock(&mutex_);
if (num_worker_tasks_ == 0) abort_ = false;
}
+
return it;
}
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
index d7b2dc802f..dd024e297a 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
@@ -13,6 +13,7 @@
#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
@@ -27,6 +28,7 @@ enum class MemoryPressureLevel;
namespace internal {
+class AstRawString;
class AstValueFactory;
class CancelableTaskManager;
class CompilerDispatcherJob;
@@ -37,6 +39,8 @@ class FunctionLiteral;
class Isolate;
class ParseInfo;
class SharedFunctionInfo;
+class TimedHistogram;
+class WorkerThreadRuntimeCallStats;
class Zone;
template <typename T>
@@ -79,24 +83,23 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// Returns true if the compiler dispatcher is enabled.
bool IsEnabled() const;
- // Enqueue a job for parse and compile. Returns true if a job was enqueued.
- bool Enqueue(Handle<SharedFunctionInfo> function);
+ base::Optional<JobId> Enqueue(const ParseInfo* outer_parse_info,
+ const AstRawString* function_name,
+ const FunctionLiteral* function_literal);
- // Like Enqueue, but also advances the job so that it can potentially
- // continue running on a background thread (if at all possible). Returns
- // true if the job was enqueued.
- bool EnqueueAndStep(Handle<SharedFunctionInfo> function);
+ // Registers the given |function| with the compilation job |job_id|.
+ void RegisterSharedFunctionInfo(JobId job_id, SharedFunctionInfo* function);
- // Returns true if there is a pending job for the given function.
+ // Returns true if there is a pending job with the given id.
+ bool IsEnqueued(JobId job_id) const;
+
+ // Returns true if there is a pending job registered for the given function.
bool IsEnqueued(Handle<SharedFunctionInfo> function) const;
// Blocks until the given function is compiled (and does so as fast as
// possible). Returns true if the compile job was successful.
bool FinishNow(Handle<SharedFunctionInfo> function);
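
A compile path that needs the result synchronously might use it as follows
(sketch; |shared| is an assumption):

    if (dispatcher->IsEnqueued(shared)) {
      bool success = dispatcher->FinishNow(shared);
      // On failure, the pending exception is left on the isolate.
    }
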
- // Blocks until all jobs are finished.
- void FinishAllNow();
-
// Aborts a given job. Blocks if requested.
void Abort(Handle<SharedFunctionInfo> function, BlockingBehavior blocking);
@@ -124,15 +127,15 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
FRIEND_TEST(CompilerDispatcherTest, CompileMultipleOnBackgroundThread);
typedef std::map<JobId, std::unique_ptr<CompilerDispatcherJob>> JobMap;
+ typedef std::map<JobId, Handle<SharedFunctionInfo>> JobIdToSharedMap;
typedef IdentityMap<JobId, FreeStoreAllocationPolicy> SharedToJobIdMap;
class AbortTask;
class WorkerTask;
class IdleTask;
+ bool CanEnqueue();
void WaitForJobIfRunningOnBackground(CompilerDispatcherJob* job);
void AbortInactiveJobs();
- bool CanEnqueue();
- bool CanEnqueue(Handle<SharedFunctionInfo> function);
JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
void ConsiderJobForBackgroundProcessing(CompilerDispatcherJob* job);
void ScheduleMoreWorkerTasksIfNeeded();
@@ -141,17 +144,18 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
void ScheduleAbortTask();
void DoBackgroundWork();
void DoIdleWork(double deadline_in_seconds);
- JobId Enqueue(std::unique_ptr<CompilerDispatcherJob> job);
- JobId EnqueueAndStep(std::unique_ptr<CompilerDispatcherJob> job);
// Returns job if not removed otherwise iterator following the removed job.
JobMap::const_iterator RemoveIfFinished(JobMap::const_iterator job);
// Returns iterator to the inserted job.
JobMap::const_iterator InsertJob(std::unique_ptr<CompilerDispatcherJob> job);
// Returns iterator following the removed job.
JobMap::const_iterator RemoveJob(JobMap::const_iterator job);
- bool FinishNow(CompilerDispatcherJob* job);
Isolate* isolate_;
+ AccountingAllocator* allocator_;
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
+ TimedHistogram* background_compile_timer_;
+ std::shared_ptr<v8::TaskRunner> taskrunner_;
Platform* platform_;
size_t max_stack_size_;
@@ -168,6 +172,9 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// Mapping from job_id to job.
JobMap jobs_;
+ // Mapping from job_id to SharedFunctionInfo.
+ JobIdToSharedMap job_id_to_shared_;
+
// Mapping from SharedFunctionInfo to the corresponding unoptimized
// compilation's JobId;
SharedToJobIdMap shared_to_unoptimized_job_id_;
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 47b2181a88..492e80abe0 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -41,12 +41,16 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
public:
explicit CompileTask(Isolate* isolate,
OptimizingCompileDispatcher* dispatcher)
- : CancelableTask(isolate), isolate_(isolate), dispatcher_(dispatcher) {
+ : CancelableTask(isolate),
+ isolate_(isolate),
+ worker_thread_runtime_call_stats_(
+ isolate->counters()->worker_thread_runtime_call_stats()),
+ dispatcher_(dispatcher) {
base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
++dispatcher_->ref_count_;
}
- virtual ~CompileTask() {}
+ ~CompileTask() override = default;
private:
// v8::Task overrides.
@@ -56,8 +60,13 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
DisallowHandleDereference no_deref;
{
- TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
+ WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
+ worker_thread_runtime_call_stats_);
+ RuntimeCallTimerScope runtimeTimer(
+ runtime_call_stats_scope.Get(),
+ RuntimeCallCounterId::kRecompileConcurrent);
+ TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileConcurrent");
@@ -77,6 +86,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
}
Isolate* isolate_;
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
OptimizingCompileDispatcher* dispatcher_;
DISALLOW_COPY_AND_ASSIGN(CompileTask);
@@ -210,7 +220,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
}
DisposeCompilationJob(job, false);
} else {
- Compiler::FinalizeCompilationJob(job, isolate_);
+ Compiler::FinalizeOptimizedCompilationJob(job, isolate_);
}
}
}
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
index 2e8065ed11..59f4c3e8ff 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
@@ -5,11 +5,9 @@
#include "src/compiler-dispatcher/unoptimized-compile-job.h"
#include "src/assert-scope.h"
-#include "src/base/optional.h"
#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/compiler.h"
#include "src/flags.h"
-#include "src/global-handles.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -23,329 +21,51 @@
namespace v8 {
namespace internal {
-namespace {
-
-class OneByteWrapper : public v8::String::ExternalOneByteStringResource {
- public:
- OneByteWrapper(const void* data, int length) : data_(data), length_(length) {}
- ~OneByteWrapper() override = default;
-
- const char* data() const override {
- return reinterpret_cast<const char*>(data_);
- }
-
- size_t length() const override { return static_cast<size_t>(length_); }
-
- private:
- const void* data_;
- int length_;
-
- DISALLOW_COPY_AND_ASSIGN(OneByteWrapper);
-};
-
-class TwoByteWrapper : public v8::String::ExternalStringResource {
- public:
- TwoByteWrapper(const void* data, int length) : data_(data), length_(length) {}
- ~TwoByteWrapper() override = default;
-
- const uint16_t* data() const override {
- return reinterpret_cast<const uint16_t*>(data_);
- }
-
- size_t length() const override { return static_cast<size_t>(length_); }
-
- private:
- const void* data_;
- int length_;
-
- DISALLOW_COPY_AND_ASSIGN(TwoByteWrapper);
-};
-
-} // namespace
-
-UnoptimizedCompileJob::UnoptimizedCompileJob(Isolate* isolate,
- CompilerDispatcherTracer* tracer,
- Handle<SharedFunctionInfo> shared,
- size_t max_stack_size)
+UnoptimizedCompileJob::UnoptimizedCompileJob(
+ CompilerDispatcherTracer* tracer, AccountingAllocator* allocator,
+ const ParseInfo* outer_parse_info, const AstRawString* function_name,
+ const FunctionLiteral* function_literal,
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
+ TimedHistogram* timer, size_t max_stack_size)
: CompilerDispatcherJob(Type::kUnoptimizedCompile),
- main_thread_id_(isolate->thread_id().ToInteger()),
tracer_(tracer),
- allocator_(isolate->allocator()),
- context_(isolate->global_handles()->Create(isolate->context())),
- shared_(isolate->global_handles()->Create(*shared)),
- max_stack_size_(max_stack_size),
- trace_compiler_dispatcher_jobs_(FLAG_trace_compiler_dispatcher_jobs) {
- DCHECK(!shared_->is_toplevel());
- // TODO(rmcilroy): Handle functions with non-empty outer scope info.
- DCHECK(!shared_->HasOuterScopeInfo());
- HandleScope scope(isolate);
- Handle<Script> script(Script::cast(shared_->script()), isolate);
- Handle<String> source(String::cast(script->source()), isolate);
- if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p] created for ", static_cast<void*>(this));
- ShortPrintOnMainThread();
- PrintF(" in initial state.\n");
- }
-}
+ task_(new BackgroundCompileTask(allocator, outer_parse_info,
+ function_name, function_literal,
+ worker_thread_runtime_stats, timer,
+ static_cast<int>(max_stack_size))) {}
UnoptimizedCompileJob::~UnoptimizedCompileJob() {
DCHECK(status() == Status::kInitial || status() == Status::kDone);
- if (!shared_.is_null()) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- i::GlobalHandles::Destroy(Handle<Object>::cast(shared_).location());
- }
- if (!context_.is_null()) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- i::GlobalHandles::Destroy(Handle<Object>::cast(context_).location());
- }
-}
-
-bool UnoptimizedCompileJob::IsAssociatedWith(
- Handle<SharedFunctionInfo> shared) const {
- return *shared_ == *shared;
-}
-
-void UnoptimizedCompileJob::PrepareOnMainThread(Isolate* isolate) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
- DCHECK_EQ(status(), Status::kInitial);
- COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepare);
-
- if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p]: Preparing to parse\n",
- static_cast<void*>(this));
- }
-
- ParseInfo* parse_info = new ParseInfo(isolate, shared_);
- parse_info_.reset(parse_info);
-
- unicode_cache_.reset(new UnicodeCache());
- parse_info_->set_unicode_cache(unicode_cache_.get());
- parse_info_->set_function_literal_id(shared_->FunctionLiteralId(isolate));
- if (V8_UNLIKELY(FLAG_runtime_stats)) {
- parse_info_->set_runtime_call_stats(new (parse_info_->zone())
- RuntimeCallStats());
- }
-
- Handle<Script> script = parse_info->script();
- HandleScope scope(isolate);
-
- DCHECK(script->type() != Script::TYPE_NATIVE);
- Handle<String> source(String::cast(script->source()), isolate);
- if (source->IsExternalTwoByteString() || source->IsExternalOneByteString()) {
- std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
- isolate, source, shared_->StartPosition(), shared_->EndPosition()));
- parse_info_->set_character_stream(std::move(stream));
- } else {
- source = String::Flatten(isolate, source);
- const void* data;
- int offset = 0;
- int length = source->length();
-
- // Objects in lo_space don't move, so we can just read the contents from
- // any thread.
- if (isolate->heap()->lo_space()->Contains(*source)) {
- // We need to globalize the handle to the flattened string here, in
- // case it's not referenced from anywhere else.
- source_ = isolate->global_handles()->Create(*source);
- DisallowHeapAllocation no_allocation;
- String::FlatContent content = source->GetFlatContent();
- DCHECK(content.IsFlat());
- data =
- content.IsOneByte()
- ? reinterpret_cast<const void*>(content.ToOneByteVector().start())
- : reinterpret_cast<const void*>(content.ToUC16Vector().start());
- } else {
- // Otherwise, create a copy of the part of the string we'll parse in the
- // zone.
- length = (shared_->EndPosition() - shared_->StartPosition());
- offset = shared_->StartPosition();
-
- int byte_len = length * (source->IsOneByteRepresentation() ? 1 : 2);
- data = parse_info_->zone()->New(byte_len);
-
- DisallowHeapAllocation no_allocation;
- String::FlatContent content = source->GetFlatContent();
- DCHECK(content.IsFlat());
- if (content.IsOneByte()) {
- MemCopy(const_cast<void*>(data),
- &content.ToOneByteVector().at(shared_->StartPosition()),
- byte_len);
- } else {
- MemCopy(const_cast<void*>(data),
- &content.ToUC16Vector().at(shared_->StartPosition()), byte_len);
- }
- }
- Handle<String> wrapper;
- if (source->IsOneByteRepresentation()) {
- ExternalOneByteString::Resource* resource =
- new OneByteWrapper(data, length);
- wrapper = isolate->factory()
- ->NewExternalStringFromOneByte(resource)
- .ToHandleChecked();
- } else {
- ExternalTwoByteString::Resource* resource =
- new TwoByteWrapper(data, length);
- wrapper = isolate->factory()
- ->NewExternalStringFromTwoByte(resource)
- .ToHandleChecked();
- }
- wrapper_ = isolate->global_handles()->Create(*wrapper);
- std::unique_ptr<Utf16CharacterStream> stream(
- ScannerStream::For(isolate, wrapper_, shared_->StartPosition() - offset,
- shared_->EndPosition() - offset));
- parse_info_->set_character_stream(std::move(stream));
- }
-
- parser_.reset(new Parser(parse_info_.get()));
- parser_->DeserializeScopeChain(isolate, parse_info_.get(),
- parse_info_->maybe_outer_scope_info());
-
- // Initailize the name after setting up the ast_value_factory.
- Handle<String> name(shared_->Name(), isolate);
- parse_info_->set_function_name(
- parse_info_->ast_value_factory()->GetString(name));
-
- set_status(Status::kPrepared);
}
void UnoptimizedCompileJob::Compile(bool on_background_thread) {
- DCHECK_EQ(status(), Status::kPrepared);
+ DCHECK_EQ(status(), Status::kInitial);
COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
tracer_, kCompile,
- parse_info_->end_position() - parse_info_->start_position());
- if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p]: Compiling\n", static_cast<void*>(this));
- }
-
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
-
- parse_info_->set_on_background_thread(on_background_thread);
- uintptr_t stack_limit = GetCurrentStackPosition() - max_stack_size_ * KB;
- parser_->set_stack_limit(stack_limit);
- parse_info_->set_stack_limit(stack_limit);
- parser_->ParseOnBackground(parse_info_.get());
-
- if (parse_info_->literal() == nullptr) {
- // Parser sets error in pending error handler.
- set_status(Status::kHasErrorsToReport);
- return;
- }
-
- if (!Compiler::Analyze(parse_info_.get())) {
- parse_info_->pending_error_handler()->set_stack_overflow();
- set_status(Status::kHasErrorsToReport);
- return;
- }
-
- compilation_job_.reset(interpreter::Interpreter::NewCompilationJob(
- parse_info_.get(), parse_info_->literal(), allocator_, nullptr));
-
- if (!compilation_job_.get()) {
- parse_info_->pending_error_handler()->set_stack_overflow();
- set_status(Status::kHasErrorsToReport);
- return;
- }
-
- if (compilation_job_->ExecuteJob() != CompilationJob::SUCCEEDED) {
- parse_info_->pending_error_handler()->set_stack_overflow();
- set_status(Status::kHasErrorsToReport);
- return;
- }
-
- set_status(Status::kCompiled);
+ task_->info()->end_position() - task_->info()->start_position());
+ task_->Run();
+ set_status(Status::kReadyToFinalize);
}
-void UnoptimizedCompileJob::FinalizeOnMainThread(Isolate* isolate) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
- DCHECK_EQ(status(), Status::kCompiled);
- DCHECK_NOT_NULL(parse_info_->literal());
- DCHECK_NOT_NULL(compilation_job_.get());
+void UnoptimizedCompileJob::FinalizeOnMainThread(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared) {
+ DCHECK_EQ(ThreadId::Current().ToInteger(), isolate->thread_id().ToInteger());
+ DCHECK_EQ(status(), Status::kReadyToFinalize);
COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalize);
- if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p]: Finalizing compiling\n",
- static_cast<void*>(this));
- }
- Handle<Script> script(Script::cast(shared_->script()), isolate);
- DCHECK_EQ(*parse_info_->script(), shared_->script());
-
- parser_->UpdateStatistics(isolate, script);
- parse_info_->UpdateBackgroundParseStatisticsOnMainThread(isolate);
- parser_->HandleSourceURLComments(isolate, script);
-
- {
- HandleScope scope(isolate);
- // Internalize ast values onto the heap.
- parse_info_->ast_value_factory()->Internalize(isolate);
- // Allocate scope infos for the literal.
- DeclarationScope::AllocateScopeInfos(parse_info_.get(), isolate);
- if (compilation_job_->state() == CompilationJob::State::kFailed ||
- !Compiler::FinalizeCompilationJob(compilation_job_.release(), shared_,
- isolate)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- set_status(Status::kFailed);
- return;
- }
- }
-
- ResetDataOnMainThread(isolate);
- set_status(Status::kDone);
-}
-
-void UnoptimizedCompileJob::ReportErrorsOnMainThread(Isolate* isolate) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
- DCHECK_EQ(status(), Status::kHasErrorsToReport);
-
- if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p]: Reporting Errors\n",
- static_cast<void*>(this));
- }
-
- // Ensure we report errors in the correct context for the job.
- SaveContext save(isolate);
- isolate->set_context(context());
-
- Handle<Script> script(Script::cast(shared_->script()), isolate);
- parse_info_->pending_error_handler()->ReportErrors(
- isolate, script, parse_info_->ast_value_factory());
+ bool succeeded = Compiler::FinalizeBackgroundCompileTask(
+ task_.get(), shared, isolate, Compiler::KEEP_EXCEPTION);
ResetDataOnMainThread(isolate);
- set_status(Status::kFailed);
+ set_status(succeeded ? Status::kDone : Status::kFailed);
}
void UnoptimizedCompileJob::ResetDataOnMainThread(Isolate* isolate) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
-
- compilation_job_.reset();
- parser_.reset();
- unicode_cache_.reset();
- parse_info_.reset();
-
- if (!source_.is_null()) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
- i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
- source_ = Handle<String>::null();
- }
- if (!wrapper_.is_null()) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- DCHECK_EQ(isolate->thread_id().ToInteger(), main_thread_id_);
- i::GlobalHandles::Destroy(Handle<Object>::cast(wrapper_).location());
- wrapper_ = Handle<String>::null();
- }
+ DCHECK_EQ(ThreadId::Current().ToInteger(), isolate->thread_id().ToInteger());
+ task_.reset();
}
void UnoptimizedCompileJob::ResetOnMainThread(Isolate* isolate) {
- if (trace_compiler_dispatcher_jobs_) {
- PrintF("UnoptimizedCompileJob[%p]: Resetting\n", static_cast<void*>(this));
- }
-
ResetDataOnMainThread(isolate);
set_status(Status::kInitial);
}
@@ -353,14 +73,12 @@ void UnoptimizedCompileJob::ResetOnMainThread(Isolate* isolate) {
double UnoptimizedCompileJob::EstimateRuntimeOfNextStepInMs() const {
switch (status()) {
case Status::kInitial:
- return tracer_->EstimatePrepareInMs();
- case Status::kPrepared:
- return tracer_->EstimateCompileInMs(parse_info_->end_position() -
- parse_info_->start_position());
- case Status::kCompiled:
+ return tracer_->EstimateCompileInMs(task_->info()->end_position() -
+ task_->info()->start_position());
+ case Status::kReadyToFinalize:
+ // TODO(rmcilroy): Pass size of bytecode to tracer to get better estimate.
return tracer_->EstimateFinalizeInMs();
- case Status::kHasErrorsToReport:
case Status::kFailed:
case Status::kDone:
return 0.0;
@@ -369,11 +87,5 @@ double UnoptimizedCompileJob::EstimateRuntimeOfNextStepInMs() const {
UNREACHABLE();
}
-void UnoptimizedCompileJob::ShortPrintOnMainThread() {
- DCHECK_EQ(ThreadId::Current().ToInteger(), main_thread_id_);
- DCHECK(!shared_.is_null());
- shared_->ShortPrint();
-}
-
} // namespace internal
} // namespace v8
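// Illustrative sketch (editorial, not part of this change): after the rewrite
// above, UnoptimizedCompileJob is a thin wrapper over BackgroundCompileTask
// and its state machine shrinks to kInitial -> kReadyToFinalize ->
// kDone/kFailed. A minimal driver, using only the signatures shown in this
// diff (the caller is assumed to already hold the constructor arguments):
//
//   UnoptimizedCompileJob job(tracer, isolate->allocator(), outer_parse_info,
//                             function_name, function_literal,
//                             worker_thread_runtime_stats, timer,
//                             max_stack_size);
//   job.Compile(/*on_background_thread=*/true);  // off-thread: parse + compile
//   // ... back on the main thread ...
//   job.FinalizeOnMainThread(isolate, shared);   // kDone, or kFailed with the
//                                                // pending exception kept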
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
index 3e08388ca0..31a66e4eb4 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
@@ -15,8 +15,11 @@
namespace v8 {
namespace internal {
+class AccountingAllocator;
+class AstRawString;
class AstValueFactory;
class AstStringConstants;
+class BackgroundCompileTask;
class CompilerDispatcherTracer;
class DeferredHandles;
class FunctionLiteral;
@@ -25,32 +28,30 @@ class ParseInfo;
class Parser;
class SharedFunctionInfo;
class String;
+class TimedHistogram;
class UnicodeCache;
class UnoptimizedCompilationJob;
-class Utf16CharacterStream;
+class WorkerThreadRuntimeCallStats;
+// TODO(rmcilroy): Remove this class entirely and just have CompilerDispatcher
+// manage BackgroundCompileTasks.
class V8_EXPORT_PRIVATE UnoptimizedCompileJob : public CompilerDispatcherJob {
public:
// Creates a UnoptimizedCompileJob in the initial state.
- UnoptimizedCompileJob(Isolate* isolate, CompilerDispatcherTracer* tracer,
- Handle<SharedFunctionInfo> shared,
- size_t max_stack_size);
+ UnoptimizedCompileJob(
+ CompilerDispatcherTracer* tracer, AccountingAllocator* allocator,
+ const ParseInfo* outer_parse_info, const AstRawString* function_name,
+ const FunctionLiteral* function_literal,
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
+ TimedHistogram* timer, size_t max_stack_size);
~UnoptimizedCompileJob() override;
- Handle<SharedFunctionInfo> shared() const { return shared_; }
-
- // Returns true if this UnoptimizedCompileJob was created for the given
- // function.
- bool IsAssociatedWith(Handle<SharedFunctionInfo> shared) const;
-
// CompilerDispatcherJob implementation.
- void PrepareOnMainThread(Isolate* isolate) override;
void Compile(bool on_background_thread) override;
- void FinalizeOnMainThread(Isolate* isolate) override;
- void ReportErrorsOnMainThread(Isolate* isolate) override;
+ void FinalizeOnMainThread(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) override;
void ResetOnMainThread(Isolate* isolate) override;
double EstimateRuntimeOfNextStepInMs() const override;
- void ShortPrintOnMainThread() override;
private:
friend class CompilerDispatcherTest;
@@ -58,26 +59,8 @@ class V8_EXPORT_PRIVATE UnoptimizedCompileJob : public CompilerDispatcherJob {
void ResetDataOnMainThread(Isolate* isolate);
- Context* context() { return *context_; }
-
- int main_thread_id_;
CompilerDispatcherTracer* tracer_;
- AccountingAllocator* allocator_;
- Handle<Context> context_; // Global handle.
- Handle<SharedFunctionInfo> shared_; // Global handle.
- Handle<String> source_; // Global handle.
- Handle<String> wrapper_; // Global handle.
- size_t max_stack_size_;
-
- // Members required for parsing.
- std::unique_ptr<UnicodeCache> unicode_cache_;
- std::unique_ptr<ParseInfo> parse_info_;
- std::unique_ptr<Parser> parser_;
-
- // Members required for compiling.
- std::unique_ptr<UnoptimizedCompilationJob> compilation_job_;
-
- bool trace_compiler_dispatcher_jobs_;
+ std::unique_ptr<BackgroundCompileTask> task_;
DISALLOW_COPY_AND_ASSIGN(UnoptimizedCompileJob);
};
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 58c1099135..7cb8a45696 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -124,6 +124,14 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
shared->DebugName()));
}
+ScriptOriginOptions OriginOptionsForEval(Object* script) {
+ if (!script->IsScript()) return ScriptOriginOptions();
+
+ const auto outer_origin_options = Script::cast(script)->origin_options();
+ return ScriptOriginOptions(outer_origin_options.IsSharedCrossOrigin(),
+ outer_origin_options.IsOpaque());
+}
+
} // namespace
// ----------------------------------------------------------------------------
@@ -765,61 +773,6 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
-CompilationJob::Status FinalizeOptimizedCompilationJob(
- OptimizedCompilationJob* job, Isolate* isolate) {
- OptimizedCompilationInfo* compilation_info = job->compilation_info();
-
- TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kRecompileSynchronous);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.RecompileSynchronous");
-
- Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
-
- // Reset profiler ticks, function is no longer considered hot.
- compilation_info->closure()->feedback_vector()->set_profiler_ticks(0);
-
- DCHECK(!shared->HasBreakInfo());
-
- // 1) Optimization on the concurrent thread may have failed.
- // 2) The function may have already been optimized by OSR. Simply continue.
- // Except when OSR already disabled optimization for some reason.
- // 3) The code may have already been invalidated due to dependency change.
- // 4) Code generation may have failed.
- if (job->state() == CompilationJob::State::kReadyToFinalize) {
- if (shared->optimization_disabled()) {
- job->RetryOptimization(BailoutReason::kOptimizationDisabled);
- } else if (job->FinalizeJob(isolate) == CompilationJob::SUCCEEDED) {
- job->RecordCompilationStats();
- job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
- isolate);
- InsertCodeIntoOptimizedCodeCache(compilation_info);
- if (FLAG_trace_opt) {
- PrintF("[completed optimizing ");
- compilation_info->closure()->ShortPrint();
- PrintF("]\n");
- }
- compilation_info->closure()->set_code(*compilation_info->code());
- return CompilationJob::SUCCEEDED;
- }
- }
-
- DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
- if (FLAG_trace_opt) {
- PrintF("[aborted optimizing ");
- compilation_info->closure()->ShortPrint();
- PrintF(" because: %s]\n",
- GetBailoutReason(compilation_info->bailout_reason()));
- }
- compilation_info->closure()->set_code(shared->GetCode());
- // Clear the InOptimizationQueue marker, if it exists.
- if (compilation_info->closure()->IsInOptimizationQueue()) {
- compilation_info->closure()->ClearOptimizationMarker();
- }
- return CompilationJob::FAILED;
-}
-
bool FailWithPendingException(Isolate* isolate, ParseInfo* parse_info,
Compiler::ClearExceptionFlag flag) {
if (flag == Compiler::CLEAR_EXCEPTION) {
@@ -909,7 +862,7 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
&inner_function_jobs);
}
-std::unique_ptr<UnoptimizedCompilationJob> CompileTopLevelOnBackgroundThread(
+std::unique_ptr<UnoptimizedCompilationJob> CompileOnBackgroundThread(
ParseInfo* parse_info, AccountingAllocator* allocator,
UnoptimizedCompilationJobList* inner_function_jobs) {
DisallowHeapAccess no_heap_access;
@@ -917,15 +870,11 @@ std::unique_ptr<UnoptimizedCompilationJob> CompileTopLevelOnBackgroundThread(
"V8.CompileCodeBackground");
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
- parse_info->is_eval() ? RuntimeCallCounterId::kCompileBackgroundEval
- : RuntimeCallCounterId::kCompileBackgroundScript);
-
- LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
- parse_info->set_language_mode(
- stricter_language_mode(parse_info->language_mode(), language_mode));
-
- // Can't access scope info data off-main-thread.
- DCHECK(!parse_info->consumed_preparsed_scope_data()->HasData());
+ parse_info->is_toplevel()
+ ? parse_info->is_eval()
+ ? RuntimeCallCounterId::kCompileBackgroundEval
+ : RuntimeCallCounterId::kCompileBackgroundScript
+ : RuntimeCallCounterId::kCompileBackgroundFunction);
// Generate the unoptimized bytecode or asm-js data.
std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
@@ -933,89 +882,137 @@ std::unique_ptr<UnoptimizedCompilationJob> CompileTopLevelOnBackgroundThread(
return outer_function_job;
}
-class BackgroundCompileTask : public ScriptCompiler::ScriptStreamingTask {
- public:
- BackgroundCompileTask(ScriptStreamingData* source, Isolate* isolate);
-
- virtual void Run();
-
- private:
- ScriptStreamingData* source_; // Not owned.
- int stack_size_;
- AccountingAllocator* allocator_;
- TimedHistogram* timer_;
-
- DISALLOW_COPY_AND_ASSIGN(BackgroundCompileTask);
-};
+} // namespace
-BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* source,
+BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
Isolate* isolate)
- : source_(source),
+ : info_(new ParseInfo(isolate)),
stack_size_(i::FLAG_stack_size),
+ worker_thread_runtime_call_stats_(
+ isolate->counters()->worker_thread_runtime_call_stats()),
+ allocator_(isolate->allocator()),
timer_(isolate->counters()->compile_script_on_background()) {
VMState<PARSER> state(isolate);
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
- ParseInfo* info = new ParseInfo(isolate);
LOG(isolate, ScriptEvent(Logger::ScriptEventType::kStreamingCompile,
- info->script_id()));
- if (V8_UNLIKELY(FLAG_runtime_stats)) {
- info->set_runtime_call_stats(new (info->zone()) RuntimeCallStats());
- } else {
- info->set_runtime_call_stats(nullptr);
- }
- info->set_toplevel();
- std::unique_ptr<Utf16CharacterStream> stream(
- ScannerStream::For(source->source_stream.get(), source->encoding,
- info->runtime_call_stats()));
- info->set_character_stream(std::move(stream));
- info->set_unicode_cache(&source_->unicode_cache);
- info->set_allow_lazy_parsing();
- if (V8_UNLIKELY(info->block_coverage_enabled())) {
- info->AllocateSourceRangeMap();
+ info_->script_id()));
+ info_->set_toplevel();
+ info_->set_unicode_cache(&unicode_cache_);
+ info_->set_allow_lazy_parsing();
+ if (V8_UNLIKELY(info_->block_coverage_enabled())) {
+ info_->AllocateSourceRangeMap();
}
LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
- info->set_language_mode(
- stricter_language_mode(info->language_mode(), language_mode));
+ info_->set_language_mode(
+ stricter_language_mode(info_->language_mode(), language_mode));
- source->info.reset(info);
- allocator_ = isolate->allocator();
+ std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
+ streamed_data->source_stream.get(), streamed_data->encoding));
+ info_->set_character_stream(std::move(stream));
+}
- // Parser needs to stay alive for finalizing the parsing on the main
- // thread.
- source_->parser.reset(new Parser(source_->info.get()));
- source_->parser->DeserializeScopeChain(isolate, source_->info.get(),
- MaybeHandle<ScopeInfo>());
+BackgroundCompileTask::BackgroundCompileTask(
+ AccountingAllocator* allocator, const ParseInfo* outer_parse_info,
+ const AstRawString* function_name, const FunctionLiteral* function_literal,
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
+ TimedHistogram* timer, int max_stack_size)
+ : info_(ParseInfo::FromParent(outer_parse_info, allocator, function_literal,
+ function_name)),
+ stack_size_(max_stack_size),
+ worker_thread_runtime_call_stats_(worker_thread_runtime_stats),
+ allocator_(allocator),
+ timer_(timer) {
+ DCHECK(outer_parse_info->is_toplevel());
+ DCHECK(!function_literal->is_toplevel());
+
+ info_->set_unicode_cache(&unicode_cache_);
+
+ // Clone the character stream so both can be accessed independently.
+ std::unique_ptr<Utf16CharacterStream> character_stream =
+ outer_parse_info->character_stream()->Clone();
+ character_stream->Seek(function_literal->start_position());
+ info_->set_character_stream(std::move(character_stream));
+
+ // Get preparsed scope data from the function literal.
+ if (function_literal->produced_preparsed_scope_data()) {
+ DCHECK(FLAG_preparser_scope_analysis);
+ ZonePreParsedScopeData* serialized_data =
+ function_literal->produced_preparsed_scope_data()->Serialize(
+ info_->zone());
+ info_->set_consumed_preparsed_scope_data(
+ ConsumedPreParsedScopeData::For(info_->zone(), serialized_data));
+ }
}
+namespace {
+
+// A scope object that ensures a parse info's runtime call stats, stack limit
+// and on_background_thread fields are set correctly during a worker-thread
+// compile, and restores them when it goes out of scope.
+class OffThreadParseInfoScope {
+ public:
+ OffThreadParseInfoScope(
+ ParseInfo* parse_info,
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_stats, int stack_size)
+ : parse_info_(parse_info),
+ original_runtime_call_stats_(parse_info_->runtime_call_stats()),
+ original_stack_limit_(parse_info_->stack_limit()),
+ worker_thread_scope_(worker_thread_runtime_stats) {
+ parse_info_->set_on_background_thread(true);
+ parse_info_->set_runtime_call_stats(worker_thread_scope_.Get());
+ parse_info_->set_stack_limit(GetCurrentStackPosition() - stack_size * KB);
+ }
+
+ ~OffThreadParseInfoScope() {
+ parse_info_->set_stack_limit(original_stack_limit_);
+ parse_info_->set_runtime_call_stats(original_runtime_call_stats_);
+ parse_info_->set_on_background_thread(false);
+ }
+
+ private:
+ ParseInfo* parse_info_;
+ RuntimeCallStats* original_runtime_call_stats_;
+ uintptr_t original_stack_limit_;
+ WorkerThreadRuntimeCallStatsScope worker_thread_scope_;
+
+ DISALLOW_COPY_AND_ASSIGN(OffThreadParseInfoScope);
+};
+
+} // namespace
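// Editorial sketch (self-contained analogue, not from the V8 sources): the
// class above is the usual save/override/restore RAII pattern. In miniature:
//
//   class ValueOverride {
//    public:
//     ValueOverride(uintptr_t* slot, uintptr_t new_value)
//         : slot_(slot), saved_(*slot) { *slot_ = new_value; }
//     ~ValueOverride() { *slot_ = saved_; }  // restore on scope exit
//    private:
//     uintptr_t* slot_;
//     uintptr_t saved_;
//   };
//
// Stack-allocating the guard makes the override exception- and early-return-
// safe, which is why Run() below can simply construct one at the top.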
+
void BackgroundCompileTask::Run() {
- TimedHistogramScope timer(timer_);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
DisallowHeapAccess no_heap_access;
- source_->info->set_on_background_thread(true);
+ TimedHistogramScope timer(timer_);
+ OffThreadParseInfoScope off_thread_scope(
+ info_.get(), worker_thread_runtime_call_stats_, stack_size_);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "BackgroundCompileTask::Run");
+ RuntimeCallTimerScope runtimeTimer(
+ info_->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileBackgroundCompileTask);
+
+ // Update the character stream's runtime call stats.
+ info_->character_stream()->set_runtime_call_stats(
+ info_->runtime_call_stats());
- // Reset the stack limit of the parser to reflect correctly that we're on a
- // background thread.
- uintptr_t old_stack_limit = source_->info->stack_limit();
- uintptr_t stack_limit = GetCurrentStackPosition() - stack_size_ * KB;
- source_->info->set_stack_limit(stack_limit);
- source_->parser->set_stack_limit(stack_limit);
+ // Parser needs to stay alive for finalizing the parsing on the main
+ // thread.
+ parser_.reset(new Parser(info_.get()));
+ parser_->InitializeEmptyScopeChain(info_.get());
- source_->parser->ParseOnBackground(source_->info.get());
- if (source_->info->literal() != nullptr) {
+ parser_->ParseOnBackground(info_.get());
+ if (info_->literal() != nullptr) {
// Parsing has succeeded, compile.
- source_->outer_function_job = CompileTopLevelOnBackgroundThread(
- source_->info.get(), allocator_, &source_->inner_function_jobs);
+ outer_function_job_ = CompileOnBackgroundThread(info_.get(), allocator_,
+ &inner_function_jobs_);
}
-
- source_->info->EmitBackgroundParseStatisticsOnBackgroundThread();
-
- source_->info->set_on_background_thread(false);
- source_->info->set_stack_limit(old_stack_limit);
}
-} // namespace
// ----------------------------------------------------------------------------
// Implementation of Compiler
@@ -1074,10 +1071,12 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
if (FLAG_preparser_scope_analysis) {
if (shared_info->HasUncompiledDataWithPreParsedScope()) {
- parse_info.consumed_preparsed_scope_data()->SetData(
- isolate, handle(shared_info->uncompiled_data_with_pre_parsed_scope()
- ->pre_parsed_scope_data(),
- isolate));
+ parse_info.set_consumed_preparsed_scope_data(
+ ConsumedPreParsedScopeData::For(
+ isolate,
+ handle(shared_info->uncompiled_data_with_pre_parsed_scope()
+ ->pre_parsed_scope_data(),
+ isolate)));
}
}
@@ -1150,6 +1149,43 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
return true;
}
+bool Compiler::FinalizeBackgroundCompileTask(
+ BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate, ClearExceptionFlag flag) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.FinalizeBackgroundCompileTask");
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kCompileFinalizeBackgroundCompileTask);
+ HandleScope scope(isolate);
+ ParseInfo* parse_info = task->info();
+ DCHECK(!parse_info->is_toplevel());
+ DCHECK(!shared_info->is_compiled());
+
+ Handle<Script> script(Script::cast(shared_info->script()), isolate);
+ parse_info->set_script(script);
+
+ task->parser()->UpdateStatistics(isolate, script);
+ task->parser()->HandleSourceURLComments(isolate, script);
+
+ if (parse_info->literal() == nullptr || !task->outer_function_job()) {
+ // Parsing or compile failed on background thread - report error messages.
+ return FailWithPendingException(isolate, parse_info, flag);
+ }
+
+ // Parsing has succeeded - finalize compilation.
+ parse_info->ast_value_factory()->Internalize(isolate);
+ if (!FinalizeUnoptimizedCode(parse_info, isolate, shared_info,
+ task->outer_function_job(),
+ task->inner_function_jobs())) {
+ // Finalization failed - throw an exception.
+ return FailWithPendingException(isolate, parse_info, flag);
+ }
+
+ DCHECK(!isolate->has_pending_exception());
+ DCHECK(shared_info->is_compiled());
+ return true;
+}
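// Editorial sketch (not part of this change): how the two halves fit
// together, using only calls from this diff; the posting mechanism
// (`worker_pool->Post`) is hypothetical.
//
//   BackgroundCompileTask* task = ...;            // either constructor
//   worker_pool->Post([task] { task->Run(); });   // off-thread: parse, compile
//   // Later, on the main thread:
//   bool ok = Compiler::FinalizeBackgroundCompileTask(
//       task, shared_info, isolate, Compiler::KEEP_EXCEPTION);
//   if (!ok) { /* the parse/compile error is the pending exception */ }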
+
bool Compiler::CompileOptimized(Handle<JSFunction> function,
ConcurrencyMode mode) {
if (function->IsOptimized()) return true;
@@ -1192,9 +1228,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
ParseRestriction restriction, int parameters_end_pos,
- int eval_scope_position, int eval_position, int line_offset,
- int column_offset, Handle<Object> script_name,
- ScriptOriginOptions options) {
+ int eval_scope_position, int eval_position) {
Isolate* isolate = context->GetIsolate();
int source_length = source->length();
isolate->counters()->total_eval_size()->Increment(source_length);
@@ -1209,8 +1243,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
// is unused (just 0), which means it's an available field to use to indicate
// this separation. But to make sure we're not causing other false hits, we
// negate the scope position.
- if (FLAG_harmony_function_tostring &&
- restriction == ONLY_SINGLE_FUNCTION_LITERAL &&
+ if (restriction == ONLY_SINGLE_FUNCTION_LITERAL &&
parameters_end_pos != kNoSourcePosition) {
// use the parameters_end_pos as the eval_scope_position in the eval cache.
DCHECK_EQ(eval_scope_position, 0);
@@ -1233,14 +1266,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
allow_eval_cache = true;
} else {
ParseInfo parse_info(isolate);
- script = parse_info.CreateScript(isolate, source, options);
- if (!script_name.is_null()) {
- // TODO(cbruni): check whether we can store this data in options
- script->set_name(*script_name);
- script->set_line_offset(line_offset);
- script->set_column_offset(column_offset);
- LOG(isolate, ScriptDetails(*script));
- }
+ script = parse_info.CreateScript(
+ isolate, source, OriginOptionsForEval(outer_info->script()));
script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
script->set_eval_from_shared(*outer_info);
if (eval_position == kNoSourcePosition) {
@@ -1252,6 +1279,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
FrameSummary summary = FrameSummary::GetTop(it.javascript_frame());
script->set_eval_from_shared(
summary.AsJavaScript().function()->shared());
+ script->set_origin_options(OriginOptionsForEval(*summary.script()));
eval_position = -summary.code_offset();
} else {
eval_position = 0;
@@ -1754,11 +1782,6 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
NOT_TENURED);
}
-ScriptCompiler::ScriptStreamingTask* Compiler::NewBackgroundCompileTask(
- ScriptStreamingData* source, Isolate* isolate) {
- return new BackgroundCompileTask(source, isolate);
-}
-
MaybeHandle<SharedFunctionInfo>
Compiler::GetSharedFunctionInfoForStreamedScript(
Isolate* isolate, Handle<String> source,
@@ -1772,9 +1795,9 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
- ParseInfo* parse_info = streaming_data->info.get();
- parse_info->UpdateBackgroundParseStatisticsOnMainThread(isolate);
-
+ BackgroundCompileTask* task = streaming_data->task.get();
+ ParseInfo* parse_info = task->info();
+ DCHECK(parse_info->is_toplevel());
// Check if compile cache already holds the SFI, if so no need to finalize
// the code compiled on the background thread.
CompilationCache* compilation_cache = isolate->compilation_cache();
@@ -1793,21 +1816,20 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
Handle<Script> script =
NewScript(isolate, parse_info, source, script_details, origin_options,
NOT_NATIVES_CODE);
- streaming_data->parser->UpdateStatistics(isolate, script);
- streaming_data->parser->HandleSourceURLComments(isolate, script);
+ task->parser()->UpdateStatistics(isolate, script);
+ task->parser()->HandleSourceURLComments(isolate, script);
- if (parse_info->literal() == nullptr) {
+ if (parse_info->literal() == nullptr || !task->outer_function_job()) {
// Parsing has failed - report error messages.
- parse_info->pending_error_handler()->ReportErrors(
- isolate, script, parse_info->ast_value_factory());
+ FailWithPendingException(isolate, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
} else {
// Parsing has succeeded - finalize compilation.
- if (streaming_data->outer_function_job) {
- maybe_result = FinalizeTopLevel(
- parse_info, isolate, streaming_data->outer_function_job.get(),
- &streaming_data->inner_function_jobs);
- } else {
- // Compilation failed on background thread - throw an exception.
+ maybe_result =
+ FinalizeTopLevel(parse_info, isolate, task->outer_function_job(),
+ task->inner_function_jobs());
+ if (maybe_result.is_null()) {
+ // Finalization failed - throw an exception.
FailWithPendingException(isolate, parse_info,
Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
}
@@ -1856,23 +1878,62 @@ MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
osr_frame);
}
-bool Compiler::FinalizeCompilationJob(OptimizedCompilationJob* raw_job,
- Isolate* isolate) {
+bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
+ Isolate* isolate) {
VMState<COMPILER> state(isolate);
// Take ownership of compilation job. Deleting job also tears down the zone.
- std::unique_ptr<OptimizedCompilationJob> job(raw_job);
- return FinalizeOptimizedCompilationJob(job.get(), isolate) ==
- CompilationJob::SUCCEEDED;
-}
+ std::unique_ptr<OptimizedCompilationJob> job_scope(job);
+ OptimizedCompilationInfo* compilation_info = job->compilation_info();
-bool Compiler::FinalizeCompilationJob(UnoptimizedCompilationJob* raw_job,
- Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate) {
- VMState<BYTECODE_COMPILER> state(isolate);
- // Take ownership of compilation job. Deleting job also tears down the zone.
- std::unique_ptr<UnoptimizedCompilationJob> job(raw_job);
- return FinalizeUnoptimizedCompilationJob(job.get(), shared_info, isolate) ==
- CompilationJob::SUCCEEDED;
+ TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kRecompileSynchronous);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.RecompileSynchronous");
+
+ Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
+
+ // Reset profiler ticks, function is no longer considered hot.
+ compilation_info->closure()->feedback_vector()->set_profiler_ticks(0);
+
+ DCHECK(!shared->HasBreakInfo());
+
+ // 1) Optimization on the concurrent thread may have failed.
+ // 2) The function may have already been optimized by OSR. Simply continue.
+ // Except when OSR already disabled optimization for some reason.
+ // 3) The code may have already been invalidated due to dependency change.
+ // 4) Code generation may have failed.
+ if (job->state() == CompilationJob::State::kReadyToFinalize) {
+ if (shared->optimization_disabled()) {
+ job->RetryOptimization(BailoutReason::kOptimizationDisabled);
+ } else if (job->FinalizeJob(isolate) == CompilationJob::SUCCEEDED) {
+ job->RecordCompilationStats();
+ job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
+ isolate);
+ InsertCodeIntoOptimizedCodeCache(compilation_info);
+ if (FLAG_trace_opt) {
+ PrintF("[completed optimizing ");
+ compilation_info->closure()->ShortPrint();
+ PrintF("]\n");
+ }
+ compilation_info->closure()->set_code(*compilation_info->code());
+ return CompilationJob::SUCCEEDED;
+ }
+ }
+
+ DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
+ if (FLAG_trace_opt) {
+ PrintF("[aborted optimizing ");
+ compilation_info->closure()->ShortPrint();
+ PrintF(" because: %s]\n",
+ GetBailoutReason(compilation_info->bailout_reason()));
+ }
+ compilation_info->closure()->set_code(shared->GetCode());
+ // Clear the InOptimizationQueue marker, if it exists.
+ if (compilation_info->closure()->IsInOptimizationQueue()) {
+ compilation_info->closure()->ClearOptimizationMarker();
+ }
+ return CompilationJob::FAILED;
}
void Compiler::PostInstantiation(Handle<JSFunction> function,
@@ -1920,14 +1981,9 @@ ScriptStreamingData::ScriptStreamingData(
ScriptCompiler::StreamedSource::Encoding encoding)
: source_stream(source_stream), encoding(encoding) {}
-ScriptStreamingData::~ScriptStreamingData() {}
+ScriptStreamingData::~ScriptStreamingData() = default;
-void ScriptStreamingData::Release() {
- parser.reset();
- info.reset();
- outer_function_job.reset();
- inner_function_jobs.clear();
-}
+void ScriptStreamingData::Release() { task.reset(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index b3a5b73997..f32d771266 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -20,6 +20,8 @@ namespace v8 {
namespace internal {
// Forward declarations.
+class AstRawString;
+class BackgroundCompileTask;
class JavaScriptFrame;
class OptimizedCompilationInfo;
class OptimizedCompilationJob;
@@ -27,8 +29,10 @@ class ParseInfo;
class Parser;
class ScriptData;
struct ScriptStreamingData;
+class TimedHistogram;
class UnoptimizedCompilationInfo;
class UnoptimizedCompilationJob;
+class WorkerThreadRuntimeCallStats;
typedef std::forward_list<std::unique_ptr<UnoptimizedCompilationJob>>
UnoptimizedCompilationJobList;
@@ -61,19 +65,14 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
CompileForLiveEdit(ParseInfo* parse_info, Isolate* isolate);
- // Creates a new task that when run will parse and compile the streamed
- // script associated with |streaming_data| and can be finalized with
- // Compiler::GetSharedFunctionInfoForStreamedScript.
- // Note: does not take ownership of streaming_data.
- static ScriptCompiler::ScriptStreamingTask* NewBackgroundCompileTask(
- ScriptStreamingData* streaming_data, Isolate* isolate);
+ // Finalize and install code from previously run background compile task.
+ static bool FinalizeBackgroundCompileTask(
+ BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate, ClearExceptionFlag flag);
- // Generate and install code from previously queued compilation job.
- static bool FinalizeCompilationJob(UnoptimizedCompilationJob* job,
- Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate);
- static bool FinalizeCompilationJob(OptimizedCompilationJob* job,
- Isolate* isolate);
+ // Finalize and install optimized code from previously run job.
+ static bool FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
+ Isolate* isolate);
// Give the compiler a chance to perform low-latency initialization tasks of
// the given {function} on its instantiation. Note that only the runtime will
@@ -101,9 +100,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
ParseRestriction restriction, int parameters_end_pos,
- int eval_scope_position, int eval_position, int line_offset = 0,
- int column_offset = 0, Handle<Object> script_name = Handle<Object>(),
- ScriptOriginOptions options = ScriptOriginOptions());
+ int eval_scope_position, int eval_position);
struct ScriptDetails {
ScriptDetails() : line_offset(0), column_offset(0) {}
@@ -192,7 +189,7 @@ class V8_EXPORT_PRIVATE CompilationJob {
CompilationJob(uintptr_t stack_limit, State initial_state)
: state_(initial_state), stack_limit_(stack_limit) {}
- virtual ~CompilationJob() {}
+ virtual ~CompilationJob() = default;
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
uintptr_t stack_limit() const { return stack_limit_; }
@@ -319,6 +316,57 @@ class OptimizedCompilationJob : public CompilationJob {
const char* compiler_name_;
};
+class BackgroundCompileTask {
+ public:
+ // Creates a new task that, when run, will parse and compile the streamed
+ // script associated with |data| and can be finalized with
+ // Compiler::GetSharedFunctionInfoForStreamedScript.
+ // Note: does not take ownership of |data|.
+ BackgroundCompileTask(ScriptStreamingData* data, Isolate* isolate);
+
+ // Creates a new task that, when run, will parse and compile the
+ // |function_literal| and can be finalized with
+ // Compiler::FinalizeBackgroundCompileTask.
+ BackgroundCompileTask(
+ AccountingAllocator* allocator, const ParseInfo* outer_parse_info,
+ const AstRawString* function_name,
+ const FunctionLiteral* function_literal,
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
+ TimedHistogram* timer, int max_stack_size);
+
+ void Run();
+
+ ParseInfo* info() { return info_.get(); }
+ Parser* parser() { return parser_.get(); }
+ UnoptimizedCompilationJob* outer_function_job() {
+ return outer_function_job_.get();
+ }
+ UnoptimizedCompilationJobList* inner_function_jobs() {
+ return &inner_function_jobs_;
+ }
+
+ private:
+ // Data needed for parsing, and data needed to be passed between threads
+ // between parsing and compilation. These need to be initialized before the
+ // compilation starts.
+ std::unique_ptr<ParseInfo> info_;
+ std::unique_ptr<Parser> parser_;
+ // TODO(rmcilroy): Consider having thread-local unicode-caches rather than
+ // creating a new one each time.
+ UnicodeCache unicode_cache_;
+
+ // Data needed for finalizing compilation after background compilation.
+ std::unique_ptr<UnoptimizedCompilationJob> outer_function_job_;
+ UnoptimizedCompilationJobList inner_function_jobs_;
+
+ int stack_size_;
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
+ AccountingAllocator* allocator_;
+ TimedHistogram* timer_;
+
+ DISALLOW_COPY_AND_ASSIGN(BackgroundCompileTask);
+};
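// Editorial sketch (not from the V8 sources): the two construction paths
// declared above. The first wraps a whole streamed script; the second
// compiles a single inner FunctionLiteral found by an outer top-level parse.
//
//   // Streaming a top-level script:
//   BackgroundCompileTask streaming_task(&streaming_data, isolate);
//
//   // Compiling one inner function off-thread:
//   BackgroundCompileTask function_task(
//       isolate->allocator(), outer_parse_info, function_name,
//       function_literal, worker_thread_runtime_stats, timer,
//       i::FLAG_stack_size);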
+
// Contains all data which needs to be transmitted between threads for
// background parsing and compiling and finalizing it on the main thread.
struct ScriptStreamingData {
@@ -331,18 +379,9 @@ struct ScriptStreamingData {
// Internal implementation of v8::ScriptCompiler::StreamedSource.
std::unique_ptr<ScriptCompiler::ExternalSourceStream> source_stream;
ScriptCompiler::StreamedSource::Encoding encoding;
- std::unique_ptr<ScriptCompiler::CachedData> cached_data;
- // Data needed for parsing, and data needed to to be passed between thread
- // between parsing and compilation. These need to be initialized before the
- // compilation starts.
- UnicodeCache unicode_cache;
- std::unique_ptr<ParseInfo> info;
- std::unique_ptr<Parser> parser;
-
- // Data needed for finalizing compilation after background compilation.
- std::unique_ptr<UnoptimizedCompilationJob> outer_function_job;
- UnoptimizedCompilationJobList inner_function_jobs;
+ // Task that performs background parsing and compilation.
+ std::unique_ptr<BackgroundCompileTask> task;
DISALLOW_COPY_AND_ASSIGN(ScriptStreamingData);
};
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 0342a9c950..a0648d0257 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -67,20 +67,20 @@ FieldAccess AccessBuilder::ForBigIntBitfield() {
// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kPointerWriteBarrier, LoadSensitivity::kCritical};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier, LoadSensitivity::kCritical};
return access;
}
@@ -357,9 +357,9 @@ FieldAccess AccessBuilder::ForJSArrayBufferViewByteLength() {
JSArrayBufferView::kByteLengthOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kPositiveInteger,
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ TypeCache::Get().kJSArrayBufferViewByteLengthType,
+ MachineType::UintPtr(),
+ kNoWriteBarrier};
return access;
}
@@ -369,9 +369,9 @@ FieldAccess AccessBuilder::ForJSArrayBufferViewByteOffset() {
JSArrayBufferView::kByteOffsetOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kPositiveInteger,
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ TypeCache::Get().kJSArrayBufferViewByteOffsetType,
+ MachineType::UintPtr(),
+ kNoWriteBarrier};
return access;
}
@@ -494,7 +494,7 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseBasePointer() {
kTaggedBase, FixedTypedArrayBase::kBasePointerOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::AnyTagged(),
- kPointerWriteBarrier};
+ kPointerWriteBarrier, LoadSensitivity::kCritical};
return access;
}
@@ -506,7 +506,8 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
MaybeHandle<Map>(),
Type::ExternalPointer(),
MachineType::Pointer(),
- kNoWriteBarrier};
+ kNoWriteBarrier,
+ LoadSensitivity::kCritical};
return access;
}
@@ -611,7 +612,7 @@ FieldAccess AccessBuilder::ForStringLength() {
Handle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get().kStringLengthType,
- MachineType::TaggedSigned(),
+ MachineType::Uint32(),
kNoWriteBarrier};
return access;
}
@@ -801,10 +802,11 @@ FieldAccess AccessBuilder::ForValue() {
// static
FieldAccess AccessBuilder::ForArgumentsLength() {
- FieldAccess access = {kTaggedBase, JSArgumentsObject::kLengthOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSArgumentsObjectWithLength::kLengthOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -834,10 +836,10 @@ FieldAccess AccessBuilder::ForFixedArraySlot(
// static
FieldAccess AccessBuilder::ForCellValue() {
- FieldAccess access = {kTaggedBase, Cell::kValueOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, Cell::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier, LoadSensitivity::kCritical};
return access;
}
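// Editorial note (hedged): the LoadSensitivity::kCritical member threaded
// through the FieldAccess initializers above appears to feed V8's
// untrusted-code (Spectre) mitigations; critical loads have their results
// poisoned/masked when those mitigations are enabled. The descriptor shape,
// as used in this file:
//
//   FieldAccess access = {kTaggedBase,         offset,
//                         MaybeHandle<Name>(), MaybeHandle<Map>(),
//                         type,                machine_type,
//                         write_barrier_kind,  LoadSensitivity::kCritical};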
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 945edf3014..889a139a38 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -9,6 +9,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/elements-kind.h"
#include "src/globals.h"
+#include "src/objects/js-objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 0b7d1a18a1..5bf515f654 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -65,7 +65,7 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
UNREACHABLE();
}
-ElementAccessInfo::ElementAccessInfo() {}
+ElementAccessInfo::ElementAccessInfo() = default;
ElementAccessInfo::ElementAccessInfo(MapHandles const& receiver_maps,
ElementsKind elements_kind)
@@ -74,7 +74,7 @@ ElementAccessInfo::ElementAccessInfo(MapHandles const& receiver_maps,
// static
PropertyAccessInfo PropertyAccessInfo::NotFound(MapHandles const& receiver_maps,
MaybeHandle<JSObject> holder) {
- return PropertyAccessInfo(holder, receiver_maps);
+ return PropertyAccessInfo(kNotFound, holder, receiver_maps);
}
// static
@@ -111,14 +111,21 @@ PropertyAccessInfo PropertyAccessInfo::ModuleExport(
receiver_maps);
}
+// static
+PropertyAccessInfo PropertyAccessInfo::StringLength(
+ MapHandles const& receiver_maps) {
+ return PropertyAccessInfo(kStringLength, MaybeHandle<JSObject>(),
+ receiver_maps);
+}
+
PropertyAccessInfo::PropertyAccessInfo()
: kind_(kInvalid),
field_representation_(MachineRepresentation::kNone),
field_type_(Type::None()) {}
-PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
MapHandles const& receiver_maps)
- : kind_(kNotFound),
+ : kind_(kind),
receiver_maps_(receiver_maps),
holder_(holder),
field_representation_(MachineRepresentation::kNone),
@@ -218,7 +225,8 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
return false;
}
- case kNotFound: {
+ case kNotFound:
+ case kStringLength: {
this->receiver_maps_.insert(this->receiver_maps_.end(),
that->receiver_maps_.begin(),
that->receiver_maps_.end());
@@ -396,11 +404,12 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// The field type was cleared by the GC, so we don't know anything
// about the contents now.
} else if (descriptors_field_type->IsClass()) {
- dependencies()->DependOnFieldType(MapRef(js_heap_broker(), map),
- number);
+ MapRef map_ref(js_heap_broker(), map);
+ map_ref.SerializeOwnDescriptors(); // TODO(neis): Remove later.
+ dependencies()->DependOnFieldType(map_ref, number);
// Remember the field map, and try to infer a useful type.
Handle<Map> map(descriptors_field_type->AsClass(), isolate());
- field_type = Type::For(js_heap_broker(), map);
+ field_type = Type::For(MapRef(js_heap_broker(), map));
field_map = MaybeHandle<Map>(map);
}
}
@@ -620,18 +629,20 @@ bool AccessInfoFactory::ConsolidateElementLoad(MapHandles const& maps,
bool AccessInfoFactory::LookupSpecialFieldAccessor(
Handle<Map> map, Handle<Name> name, PropertyAccessInfo* access_info) {
+ // Check for String::length field accessor.
+ if (map->IsStringMap()) {
+ if (Name::Equals(isolate(), name, factory()->length_string())) {
+ *access_info = PropertyAccessInfo::StringLength(MapHandles{map});
+ return true;
+ }
+ return false;
+ }
// Check for special JSObject field accessors.
FieldIndex field_index;
if (Accessors::IsJSObjectFieldAccessor(isolate(), map, name, &field_index)) {
Type field_type = Type::NonInternal();
MachineRepresentation field_representation = MachineRepresentation::kTagged;
- if (map->IsStringMap()) {
- DCHECK(Name::Equals(isolate(), factory()->length_string(), name));
- // The String::length property is always a smi in the range
- // [0, String::kMaxLength].
- field_type = type_cache_.kStringLengthType;
- field_representation = MachineRepresentation::kTaggedSigned;
- } else if (map->IsJSArrayMap()) {
+ if (map->IsJSArrayMap()) {
DCHECK(Name::Equals(isolate(), factory()->length_string(), name));
// The JSArray::length property is a smi in the range
// [0, FixedDoubleArray::kMaxLength] in case of fast double
@@ -698,11 +709,13 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
// Store is not safe if the field type was cleared.
return false;
} else if (descriptors_field_type->IsClass()) {
- dependencies()->DependOnFieldType(
- MapRef(js_heap_broker(), transition_map), number);
+ MapRef transition_map_ref(js_heap_broker(), transition_map);
+ transition_map_ref
+ .SerializeOwnDescriptors(); // TODO(neis): Remove later.
+ dependencies()->DependOnFieldType(transition_map_ref, number);
// Remember the field map, and try to infer a useful type.
Handle<Map> map(descriptors_field_type->AsClass(), isolate());
- field_type = Type::For(js_heap_broker(), map);
+ field_type = Type::For(MapRef(js_heap_broker(), map));
field_map = MaybeHandle<Map>(map);
}
}
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index e9890bbb7a..9d6828ee69 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -65,7 +65,8 @@ class PropertyAccessInfo final {
kDataField,
kDataConstantField,
kAccessorConstant,
- kModuleExport
+ kModuleExport,
+ kStringLength
};
static PropertyAccessInfo NotFound(MapHandles const& receiver_maps,
@@ -84,6 +85,7 @@ class PropertyAccessInfo final {
MaybeHandle<JSObject> holder);
static PropertyAccessInfo ModuleExport(MapHandles const& receiver_maps,
Handle<Cell> cell);
+ static PropertyAccessInfo StringLength(MapHandles const& receiver_maps);
PropertyAccessInfo();
@@ -98,6 +100,7 @@ class PropertyAccessInfo final {
bool IsDataConstantField() const { return kind() == kDataConstantField; }
bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
bool IsModuleExport() const { return kind() == kModuleExport; }
+ bool IsStringLength() const { return kind() == kStringLength; }
bool HasTransitionMap() const { return !transition_map().is_null(); }
@@ -115,7 +118,7 @@ class PropertyAccessInfo final {
Handle<Cell> export_cell() const;
private:
- PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
MapHandles const& receiver_maps);
PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
Handle<Object> constant, MapHandles const& receiver_maps);
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index 4aabac1c11..6943e3ae78 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -50,7 +50,7 @@ class AllocationBuilder final {
// Compound allocation of a context.
void AllocateContext(int length, Handle<Map> map) {
- DCHECK(map->instance_type() >= BLOCK_CONTEXT_TYPE &&
+ DCHECK(map->instance_type() >= AWAIT_CONTEXT_TYPE &&
map->instance_type() <= WITH_CONTEXT_TYPE);
int size = FixedArray::SizeFor(length);
Allocate(size, NOT_TENURED, Type::OtherInternal());
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 8e1c1ab8f4..718272b2cc 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -125,6 +125,9 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
return Operand(constant.ToExternalReference());
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
case Constant::kInt64:
case Constant::kHeapObject:
// TODO(dcarney): loading RPO constants on arm.
@@ -416,48 +419,39 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
__ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC64_ARITH_BINOP(instr1, instr2) \
- do { \
- Label binop; \
- __ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3)); \
- __ dmb(ISH); \
- __ bind(&binop); \
- __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0)); \
- __ instr1(i.TempRegister(1), i.OutputRegister(0), i.InputRegister(0), \
- SBit::SetCC); \
- __ instr2(i.TempRegister(2), i.OutputRegister(1), \
- Operand(i.InputRegister(1))); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- __ strexd(i.TempRegister(3), i.TempRegister(1), i.TempRegister(2), \
- i.TempRegister(0)); \
- __ teq(i.TempRegister(3), Operand(0)); \
- __ b(ne, &binop); \
- __ dmb(ISH); \
+#define ASSEMBLE_ATOMIC64_ARITH_BINOP(instr1, instr2) \
+ do { \
+ Label binop; \
+ __ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3)); \
+ __ dmb(ISH); \
+ __ bind(&binop); \
+ __ ldrexd(r2, r3, i.TempRegister(0)); \
+ __ instr1(i.TempRegister(1), r2, i.InputRegister(0), SBit::SetCC); \
+ __ instr2(i.TempRegister(2), r3, Operand(i.InputRegister(1))); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ __ strexd(i.TempRegister(3), i.TempRegister(1), i.TempRegister(2), \
+ i.TempRegister(0)); \
+ __ teq(i.TempRegister(3), Operand(0)); \
+ __ b(ne, &binop); \
+ __ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr) \
- do { \
- Label binop; \
- __ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3)); \
- __ dmb(ISH); \
- __ bind(&binop); \
- __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0)); \
- __ instr(i.TempRegister(1), i.OutputRegister(0), \
- Operand(i.InputRegister(0))); \
- __ instr(i.TempRegister(2), i.OutputRegister(1), \
- Operand(i.InputRegister(1))); \
- __ strexd(i.TempRegister(3), i.TempRegister(1), i.TempRegister(2), \
- i.TempRegister(0)); \
- __ teq(i.TempRegister(3), Operand(0)); \
- __ b(ne, &binop); \
- __ dmb(ISH); \
+#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr) \
+ do { \
+ Label binop; \
+ __ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3)); \
+ __ dmb(ISH); \
+ __ bind(&binop); \
+ __ ldrexd(r2, r3, i.TempRegister(0)); \
+ __ instr(i.TempRegister(1), r2, Operand(i.InputRegister(0))); \
+ __ instr(i.TempRegister(2), r3, Operand(i.InputRegister(1))); \
+ __ strexd(i.TempRegister(3), i.TempRegister(1), i.TempRegister(2), \
+ i.TempRegister(0)); \
+ __ teq(i.TempRegister(3), Operand(0)); \
+ __ b(ne, &binop); \
+ __ dmb(ISH); \
} while (0)
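// Editorial note (hedged): the macros now load through the fixed pair r2/r3
// rather than i.OutputRegister(0)/(1). On 32-bit ARM, ldrexd/strexd require
// an even-numbered register and its odd successor, so an arbitrary pair
// picked by the register allocator is not guaranteed to be encodable:
//
//   __ ldrexd(r2, r3, addr);  // OK: even/odd consecutive pair
//   __ ldrexd(r1, r2, addr);  // architecturally UNPREDICTABLE
//
// VerifyOutputOfAtomicPairInstr (added below) DCHECKs that any declared
// outputs of a pair instruction match the fixed registers the code expects.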
-#define ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op) \
- if (arch_opcode == kArmWord64AtomicNarrow##op) { \
- __ mov(i.OutputRegister(1), Operand(0)); \
- }
-
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
@@ -607,6 +601,19 @@ void AdjustStackPointerForTailCall(
}
}
+#if DEBUG
+bool VerifyOutputOfAtomicPairInstr(ArmOperandConverter* converter,
+ const Instruction* instr, Register low,
+ Register high) {
+ if (instr->OutputCount() > 0) {
+ if (converter->OutputRegister(0) != low) return false;
+ if (instr->OutputCount() == 2 && converter->OutputRegister(1) != high)
+ return false;
+ }
+ return true;
+}
+#endif
+
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
@@ -2684,23 +2691,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicExchangeUint8:
- case kArmWord64AtomicNarrowExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
- ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint8);
break;
case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicExchangeUint16:
- case kArmWord64AtomicNarrowExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
- ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint16);
break;
case kWord32AtomicExchangeWord32:
- case kArmWord64AtomicNarrowExchangeUint32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
- ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint32);
break;
case kWord32AtomicCompareExchangeInt8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
@@ -2710,12 +2711,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicCompareExchangeUint8:
- case kArmWord64AtomicNarrowCompareExchangeUint8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
- ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint8);
break;
case kWord32AtomicCompareExchangeInt16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
@@ -2725,19 +2724,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicCompareExchangeUint16:
- case kArmWord64AtomicNarrowCompareExchangeUint16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
- ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint16);
break;
case kWord32AtomicCompareExchangeWord32:
- case kArmWord64AtomicNarrowCompareExchangeUint32:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
i.InputRegister(2));
- ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint32);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
case kWord32Atomic##op##Int8: \
@@ -2745,23 +2740,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
case kWord32Atomic##op##Uint8: \
- case kArmWord64AtomicNarrow##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
- ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint8); \
break; \
case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
__ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
case kWord32Atomic##op##Uint16: \
- case kArmWord64AtomicNarrow##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
- ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint16); \
break; \
case kWord32Atomic##op##Word32: \
- case kArmWord64AtomicNarrow##op##Uint32: \
ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
- ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint32); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -2769,11 +2758,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orr)
ATOMIC_BINOP_CASE(Xor, eor)
#undef ATOMIC_BINOP_CASE
- case kArmWord32AtomicPairLoad:
+ case kArmWord32AtomicPairLoad: {
+ DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr, r0, r1));
__ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
- __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
+ __ ldrexd(r0, r1, i.TempRegister(0));
__ dmb(ISH);
break;
+ }
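The pair load is now pinned to r0/r1, presumably because the A32 encodings of ldrexd/strexd want an even/odd consecutive register pair, which the selector guarantees by fixing the projections (or temps) to those registers. Semantically the case is just a sequentially consistent 64-bit load; a hedged C++ equivalent:

#include <atomic>
#include <cstdint>

// On 32-bit ARM a lock-free 64-bit load is itself implemented with
// ldrexd + dmb, the same shape this case emits (sketch only).
uint64_t AtomicPairLoad(const std::atomic<uint64_t>& cell) {
  return cell.load(std::memory_order_seq_cst);
}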
case kArmWord32AtomicPairStore: {
Label store;
__ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
@@ -2787,28 +2778,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ dmb(ISH);
break;
}
-#define ATOMIC_ARITH_BINOP_CASE(op, instr1, instr2) \
- case kArmWord32AtomicPair##op: { \
- ASSEMBLE_ATOMIC64_ARITH_BINOP(instr1, instr2); \
- break; \
+#define ATOMIC_ARITH_BINOP_CASE(op, instr1, instr2) \
+ case kArmWord32AtomicPair##op: { \
+ DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr, r2, r3)); \
+ ASSEMBLE_ATOMIC64_ARITH_BINOP(instr1, instr2); \
+ break; \
}
ATOMIC_ARITH_BINOP_CASE(Add, add, adc)
ATOMIC_ARITH_BINOP_CASE(Sub, sub, sbc)
#undef ATOMIC_ARITH_BINOP_CASE
-#define ATOMIC_LOGIC_BINOP_CASE(op, instr) \
- case kArmWord32AtomicPair##op: { \
- ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr); \
- break; \
+#define ATOMIC_LOGIC_BINOP_CASE(op, instr1) \
+ case kArmWord32AtomicPair##op: { \
+ DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr, r2, r3)); \
+ ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr1); \
+ break; \
}
ATOMIC_LOGIC_BINOP_CASE(And, and_)
ATOMIC_LOGIC_BINOP_CASE(Or, orr)
ATOMIC_LOGIC_BINOP_CASE(Xor, eor)
+#undef ATOMIC_LOGIC_BINOP_CASE
case kArmWord32AtomicPairExchange: {
+ DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr, r6, r7));
Label exchange;
__ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3));
__ dmb(ISH);
__ bind(&exchange);
- __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
+ __ ldrexd(r6, r7, i.TempRegister(0));
__ strexd(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1),
i.TempRegister(0));
__ teq(i.TempRegister(1), Operand(0));
@@ -2817,15 +2812,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmWord32AtomicPairCompareExchange: {
+ DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr, r2, r3));
__ add(i.TempRegister(0), i.InputRegister(4), i.InputRegister(5));
Label compareExchange;
Label exit;
__ dmb(ISH);
__ bind(&compareExchange);
- __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
- __ teq(i.InputRegister(0), Operand(i.OutputRegister(0)));
+ __ ldrexd(r2, r3, i.TempRegister(0));
+ __ teq(i.InputRegister(0), Operand(r2));
__ b(ne, &exit);
- __ teq(i.InputRegister(1), Operand(i.OutputRegister(1)));
+ __ teq(i.InputRegister(1), Operand(r3));
__ b(ne, &exit);
__ strexd(i.TempRegister(1), i.InputRegister(2), i.InputRegister(3),
i.TempRegister(0));
@@ -2835,8 +2831,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ dmb(ISH);
break;
}
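The compare-exchange loop checks the low and high words separately (two teq instructions) and branches to the exit before attempting strexd if either half differs. Collapsed into portable C++, the case computes the equivalent of a strong 64-bit CAS; a sketch assuming std::atomic<uint64_t> is lock-free on the target:

#include <atomic>
#include <cstdint>

// Returns true when the stored pair matched {expected} and was replaced
// by {desired}; otherwise {expected} is updated with the observed value,
// mirroring the r2/r3 outputs of the emitted loop.
bool AtomicPairCompareExchange(std::atomic<uint64_t>& cell,
                               uint64_t& expected, uint64_t desired) {
  return cell.compare_exchange_strong(expected, desired,
                                      std::memory_order_seq_cst);
}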
-#undef ATOMIC_LOGIC_BINOP_CASE
-#undef ATOMIC_NARROW_OP_CLEAR_HIGH_WORD
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
@@ -3192,7 +3186,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
auto MoveConstantToRegister = [&](Register dst, Constant src) {
if (src.type() == Constant::kHeapObject) {
Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
+ RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index ca8684a375..751530e206 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -11,295 +11,274 @@ namespace compiler {
// ARM-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(ArmAdd) \
- V(ArmAnd) \
- V(ArmBic) \
- V(ArmClz) \
- V(ArmCmp) \
- V(ArmCmn) \
- V(ArmTst) \
- V(ArmTeq) \
- V(ArmOrr) \
- V(ArmEor) \
- V(ArmSub) \
- V(ArmRsb) \
- V(ArmMul) \
- V(ArmMla) \
- V(ArmMls) \
- V(ArmSmull) \
- V(ArmSmmul) \
- V(ArmSmmla) \
- V(ArmUmull) \
- V(ArmSdiv) \
- V(ArmUdiv) \
- V(ArmMov) \
- V(ArmMvn) \
- V(ArmBfc) \
- V(ArmUbfx) \
- V(ArmSbfx) \
- V(ArmSxtb) \
- V(ArmSxth) \
- V(ArmSxtab) \
- V(ArmSxtah) \
- V(ArmUxtb) \
- V(ArmUxth) \
- V(ArmUxtab) \
- V(ArmRbit) \
- V(ArmRev) \
- V(ArmUxtah) \
- V(ArmAddPair) \
- V(ArmSubPair) \
- V(ArmMulPair) \
- V(ArmLslPair) \
- V(ArmLsrPair) \
- V(ArmAsrPair) \
- V(ArmVcmpF32) \
- V(ArmVaddF32) \
- V(ArmVsubF32) \
- V(ArmVmulF32) \
- V(ArmVmlaF32) \
- V(ArmVmlsF32) \
- V(ArmVdivF32) \
- V(ArmVabsF32) \
- V(ArmVnegF32) \
- V(ArmVsqrtF32) \
- V(ArmVcmpF64) \
- V(ArmVaddF64) \
- V(ArmVsubF64) \
- V(ArmVmulF64) \
- V(ArmVmlaF64) \
- V(ArmVmlsF64) \
- V(ArmVdivF64) \
- V(ArmVmodF64) \
- V(ArmVabsF64) \
- V(ArmVnegF64) \
- V(ArmVsqrtF64) \
- V(ArmVrintmF32) \
- V(ArmVrintmF64) \
- V(ArmVrintpF32) \
- V(ArmVrintpF64) \
- V(ArmVrintzF32) \
- V(ArmVrintzF64) \
- V(ArmVrintaF64) \
- V(ArmVrintnF32) \
- V(ArmVrintnF64) \
- V(ArmVcvtF32F64) \
- V(ArmVcvtF64F32) \
- V(ArmVcvtF32S32) \
- V(ArmVcvtF32U32) \
- V(ArmVcvtF64S32) \
- V(ArmVcvtF64U32) \
- V(ArmVcvtS32F32) \
- V(ArmVcvtU32F32) \
- V(ArmVcvtS32F64) \
- V(ArmVcvtU32F64) \
- V(ArmVmovU32F32) \
- V(ArmVmovF32U32) \
- V(ArmVmovLowU32F64) \
- V(ArmVmovLowF64U32) \
- V(ArmVmovHighU32F64) \
- V(ArmVmovHighF64U32) \
- V(ArmVmovF64U32U32) \
- V(ArmVmovU32U32F64) \
- V(ArmVldrF32) \
- V(ArmVstrF32) \
- V(ArmVldrF64) \
- V(ArmVld1F64) \
- V(ArmVstrF64) \
- V(ArmVst1F64) \
- V(ArmVld1S128) \
- V(ArmVst1S128) \
- V(ArmFloat32Max) \
- V(ArmFloat64Max) \
- V(ArmFloat32Min) \
- V(ArmFloat64Min) \
- V(ArmFloat64SilenceNaN) \
- V(ArmLdrb) \
- V(ArmLdrsb) \
- V(ArmStrb) \
- V(ArmLdrh) \
- V(ArmLdrsh) \
- V(ArmStrh) \
- V(ArmLdr) \
- V(ArmStr) \
- V(ArmPush) \
- V(ArmPoke) \
- V(ArmPeek) \
- V(ArmDsbIsb) \
- V(ArmF32x4Splat) \
- V(ArmF32x4ExtractLane) \
- V(ArmF32x4ReplaceLane) \
- V(ArmF32x4SConvertI32x4) \
- V(ArmF32x4UConvertI32x4) \
- V(ArmF32x4Abs) \
- V(ArmF32x4Neg) \
- V(ArmF32x4RecipApprox) \
- V(ArmF32x4RecipSqrtApprox) \
- V(ArmF32x4Add) \
- V(ArmF32x4AddHoriz) \
- V(ArmF32x4Sub) \
- V(ArmF32x4Mul) \
- V(ArmF32x4Min) \
- V(ArmF32x4Max) \
- V(ArmF32x4Eq) \
- V(ArmF32x4Ne) \
- V(ArmF32x4Lt) \
- V(ArmF32x4Le) \
- V(ArmI32x4Splat) \
- V(ArmI32x4ExtractLane) \
- V(ArmI32x4ReplaceLane) \
- V(ArmI32x4SConvertF32x4) \
- V(ArmI32x4SConvertI16x8Low) \
- V(ArmI32x4SConvertI16x8High) \
- V(ArmI32x4Neg) \
- V(ArmI32x4Shl) \
- V(ArmI32x4ShrS) \
- V(ArmI32x4Add) \
- V(ArmI32x4AddHoriz) \
- V(ArmI32x4Sub) \
- V(ArmI32x4Mul) \
- V(ArmI32x4MinS) \
- V(ArmI32x4MaxS) \
- V(ArmI32x4Eq) \
- V(ArmI32x4Ne) \
- V(ArmI32x4GtS) \
- V(ArmI32x4GeS) \
- V(ArmI32x4UConvertF32x4) \
- V(ArmI32x4UConvertI16x8Low) \
- V(ArmI32x4UConvertI16x8High) \
- V(ArmI32x4ShrU) \
- V(ArmI32x4MinU) \
- V(ArmI32x4MaxU) \
- V(ArmI32x4GtU) \
- V(ArmI32x4GeU) \
- V(ArmI16x8Splat) \
- V(ArmI16x8ExtractLane) \
- V(ArmI16x8ReplaceLane) \
- V(ArmI16x8SConvertI8x16Low) \
- V(ArmI16x8SConvertI8x16High) \
- V(ArmI16x8Neg) \
- V(ArmI16x8Shl) \
- V(ArmI16x8ShrS) \
- V(ArmI16x8SConvertI32x4) \
- V(ArmI16x8Add) \
- V(ArmI16x8AddSaturateS) \
- V(ArmI16x8AddHoriz) \
- V(ArmI16x8Sub) \
- V(ArmI16x8SubSaturateS) \
- V(ArmI16x8Mul) \
- V(ArmI16x8MinS) \
- V(ArmI16x8MaxS) \
- V(ArmI16x8Eq) \
- V(ArmI16x8Ne) \
- V(ArmI16x8GtS) \
- V(ArmI16x8GeS) \
- V(ArmI16x8UConvertI8x16Low) \
- V(ArmI16x8UConvertI8x16High) \
- V(ArmI16x8ShrU) \
- V(ArmI16x8UConvertI32x4) \
- V(ArmI16x8AddSaturateU) \
- V(ArmI16x8SubSaturateU) \
- V(ArmI16x8MinU) \
- V(ArmI16x8MaxU) \
- V(ArmI16x8GtU) \
- V(ArmI16x8GeU) \
- V(ArmI8x16Splat) \
- V(ArmI8x16ExtractLane) \
- V(ArmI8x16ReplaceLane) \
- V(ArmI8x16Neg) \
- V(ArmI8x16Shl) \
- V(ArmI8x16ShrS) \
- V(ArmI8x16SConvertI16x8) \
- V(ArmI8x16Add) \
- V(ArmI8x16AddSaturateS) \
- V(ArmI8x16Sub) \
- V(ArmI8x16SubSaturateS) \
- V(ArmI8x16Mul) \
- V(ArmI8x16MinS) \
- V(ArmI8x16MaxS) \
- V(ArmI8x16Eq) \
- V(ArmI8x16Ne) \
- V(ArmI8x16GtS) \
- V(ArmI8x16GeS) \
- V(ArmI8x16ShrU) \
- V(ArmI8x16UConvertI16x8) \
- V(ArmI8x16AddSaturateU) \
- V(ArmI8x16SubSaturateU) \
- V(ArmI8x16MinU) \
- V(ArmI8x16MaxU) \
- V(ArmI8x16GtU) \
- V(ArmI8x16GeU) \
- V(ArmS128Zero) \
- V(ArmS128Dup) \
- V(ArmS128And) \
- V(ArmS128Or) \
- V(ArmS128Xor) \
- V(ArmS128Not) \
- V(ArmS128Select) \
- V(ArmS32x4ZipLeft) \
- V(ArmS32x4ZipRight) \
- V(ArmS32x4UnzipLeft) \
- V(ArmS32x4UnzipRight) \
- V(ArmS32x4TransposeLeft) \
- V(ArmS32x4TransposeRight) \
- V(ArmS32x4Shuffle) \
- V(ArmS16x8ZipLeft) \
- V(ArmS16x8ZipRight) \
- V(ArmS16x8UnzipLeft) \
- V(ArmS16x8UnzipRight) \
- V(ArmS16x8TransposeLeft) \
- V(ArmS16x8TransposeRight) \
- V(ArmS8x16ZipLeft) \
- V(ArmS8x16ZipRight) \
- V(ArmS8x16UnzipLeft) \
- V(ArmS8x16UnzipRight) \
- V(ArmS8x16TransposeLeft) \
- V(ArmS8x16TransposeRight) \
- V(ArmS8x16Concat) \
- V(ArmS8x16Shuffle) \
- V(ArmS32x2Reverse) \
- V(ArmS16x4Reverse) \
- V(ArmS16x2Reverse) \
- V(ArmS8x8Reverse) \
- V(ArmS8x4Reverse) \
- V(ArmS8x2Reverse) \
- V(ArmS1x4AnyTrue) \
- V(ArmS1x4AllTrue) \
- V(ArmS1x8AnyTrue) \
- V(ArmS1x8AllTrue) \
- V(ArmS1x16AnyTrue) \
- V(ArmS1x16AllTrue) \
- V(ArmWord32AtomicPairLoad) \
- V(ArmWord32AtomicPairStore) \
- V(ArmWord32AtomicPairAdd) \
- V(ArmWord32AtomicPairSub) \
- V(ArmWord32AtomicPairAnd) \
- V(ArmWord32AtomicPairOr) \
- V(ArmWord32AtomicPairXor) \
- V(ArmWord32AtomicPairExchange) \
- V(ArmWord32AtomicPairCompareExchange) \
- V(ArmWord64AtomicNarrowAddUint8) \
- V(ArmWord64AtomicNarrowAddUint16) \
- V(ArmWord64AtomicNarrowAddUint32) \
- V(ArmWord64AtomicNarrowSubUint8) \
- V(ArmWord64AtomicNarrowSubUint16) \
- V(ArmWord64AtomicNarrowSubUint32) \
- V(ArmWord64AtomicNarrowAndUint8) \
- V(ArmWord64AtomicNarrowAndUint16) \
- V(ArmWord64AtomicNarrowAndUint32) \
- V(ArmWord64AtomicNarrowOrUint8) \
- V(ArmWord64AtomicNarrowOrUint16) \
- V(ArmWord64AtomicNarrowOrUint32) \
- V(ArmWord64AtomicNarrowXorUint8) \
- V(ArmWord64AtomicNarrowXorUint16) \
- V(ArmWord64AtomicNarrowXorUint32) \
- V(ArmWord64AtomicNarrowExchangeUint8) \
- V(ArmWord64AtomicNarrowExchangeUint16) \
- V(ArmWord64AtomicNarrowExchangeUint32) \
- V(ArmWord64AtomicNarrowCompareExchangeUint8) \
- V(ArmWord64AtomicNarrowCompareExchangeUint16) \
- V(ArmWord64AtomicNarrowCompareExchangeUint32)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(ArmAdd) \
+ V(ArmAnd) \
+ V(ArmBic) \
+ V(ArmClz) \
+ V(ArmCmp) \
+ V(ArmCmn) \
+ V(ArmTst) \
+ V(ArmTeq) \
+ V(ArmOrr) \
+ V(ArmEor) \
+ V(ArmSub) \
+ V(ArmRsb) \
+ V(ArmMul) \
+ V(ArmMla) \
+ V(ArmMls) \
+ V(ArmSmull) \
+ V(ArmSmmul) \
+ V(ArmSmmla) \
+ V(ArmUmull) \
+ V(ArmSdiv) \
+ V(ArmUdiv) \
+ V(ArmMov) \
+ V(ArmMvn) \
+ V(ArmBfc) \
+ V(ArmUbfx) \
+ V(ArmSbfx) \
+ V(ArmSxtb) \
+ V(ArmSxth) \
+ V(ArmSxtab) \
+ V(ArmSxtah) \
+ V(ArmUxtb) \
+ V(ArmUxth) \
+ V(ArmUxtab) \
+ V(ArmRbit) \
+ V(ArmRev) \
+ V(ArmUxtah) \
+ V(ArmAddPair) \
+ V(ArmSubPair) \
+ V(ArmMulPair) \
+ V(ArmLslPair) \
+ V(ArmLsrPair) \
+ V(ArmAsrPair) \
+ V(ArmVcmpF32) \
+ V(ArmVaddF32) \
+ V(ArmVsubF32) \
+ V(ArmVmulF32) \
+ V(ArmVmlaF32) \
+ V(ArmVmlsF32) \
+ V(ArmVdivF32) \
+ V(ArmVabsF32) \
+ V(ArmVnegF32) \
+ V(ArmVsqrtF32) \
+ V(ArmVcmpF64) \
+ V(ArmVaddF64) \
+ V(ArmVsubF64) \
+ V(ArmVmulF64) \
+ V(ArmVmlaF64) \
+ V(ArmVmlsF64) \
+ V(ArmVdivF64) \
+ V(ArmVmodF64) \
+ V(ArmVabsF64) \
+ V(ArmVnegF64) \
+ V(ArmVsqrtF64) \
+ V(ArmVrintmF32) \
+ V(ArmVrintmF64) \
+ V(ArmVrintpF32) \
+ V(ArmVrintpF64) \
+ V(ArmVrintzF32) \
+ V(ArmVrintzF64) \
+ V(ArmVrintaF64) \
+ V(ArmVrintnF32) \
+ V(ArmVrintnF64) \
+ V(ArmVcvtF32F64) \
+ V(ArmVcvtF64F32) \
+ V(ArmVcvtF32S32) \
+ V(ArmVcvtF32U32) \
+ V(ArmVcvtF64S32) \
+ V(ArmVcvtF64U32) \
+ V(ArmVcvtS32F32) \
+ V(ArmVcvtU32F32) \
+ V(ArmVcvtS32F64) \
+ V(ArmVcvtU32F64) \
+ V(ArmVmovU32F32) \
+ V(ArmVmovF32U32) \
+ V(ArmVmovLowU32F64) \
+ V(ArmVmovLowF64U32) \
+ V(ArmVmovHighU32F64) \
+ V(ArmVmovHighF64U32) \
+ V(ArmVmovF64U32U32) \
+ V(ArmVmovU32U32F64) \
+ V(ArmVldrF32) \
+ V(ArmVstrF32) \
+ V(ArmVldrF64) \
+ V(ArmVld1F64) \
+ V(ArmVstrF64) \
+ V(ArmVst1F64) \
+ V(ArmVld1S128) \
+ V(ArmVst1S128) \
+ V(ArmFloat32Max) \
+ V(ArmFloat64Max) \
+ V(ArmFloat32Min) \
+ V(ArmFloat64Min) \
+ V(ArmFloat64SilenceNaN) \
+ V(ArmLdrb) \
+ V(ArmLdrsb) \
+ V(ArmStrb) \
+ V(ArmLdrh) \
+ V(ArmLdrsh) \
+ V(ArmStrh) \
+ V(ArmLdr) \
+ V(ArmStr) \
+ V(ArmPush) \
+ V(ArmPoke) \
+ V(ArmPeek) \
+ V(ArmDsbIsb) \
+ V(ArmF32x4Splat) \
+ V(ArmF32x4ExtractLane) \
+ V(ArmF32x4ReplaceLane) \
+ V(ArmF32x4SConvertI32x4) \
+ V(ArmF32x4UConvertI32x4) \
+ V(ArmF32x4Abs) \
+ V(ArmF32x4Neg) \
+ V(ArmF32x4RecipApprox) \
+ V(ArmF32x4RecipSqrtApprox) \
+ V(ArmF32x4Add) \
+ V(ArmF32x4AddHoriz) \
+ V(ArmF32x4Sub) \
+ V(ArmF32x4Mul) \
+ V(ArmF32x4Min) \
+ V(ArmF32x4Max) \
+ V(ArmF32x4Eq) \
+ V(ArmF32x4Ne) \
+ V(ArmF32x4Lt) \
+ V(ArmF32x4Le) \
+ V(ArmI32x4Splat) \
+ V(ArmI32x4ExtractLane) \
+ V(ArmI32x4ReplaceLane) \
+ V(ArmI32x4SConvertF32x4) \
+ V(ArmI32x4SConvertI16x8Low) \
+ V(ArmI32x4SConvertI16x8High) \
+ V(ArmI32x4Neg) \
+ V(ArmI32x4Shl) \
+ V(ArmI32x4ShrS) \
+ V(ArmI32x4Add) \
+ V(ArmI32x4AddHoriz) \
+ V(ArmI32x4Sub) \
+ V(ArmI32x4Mul) \
+ V(ArmI32x4MinS) \
+ V(ArmI32x4MaxS) \
+ V(ArmI32x4Eq) \
+ V(ArmI32x4Ne) \
+ V(ArmI32x4GtS) \
+ V(ArmI32x4GeS) \
+ V(ArmI32x4UConvertF32x4) \
+ V(ArmI32x4UConvertI16x8Low) \
+ V(ArmI32x4UConvertI16x8High) \
+ V(ArmI32x4ShrU) \
+ V(ArmI32x4MinU) \
+ V(ArmI32x4MaxU) \
+ V(ArmI32x4GtU) \
+ V(ArmI32x4GeU) \
+ V(ArmI16x8Splat) \
+ V(ArmI16x8ExtractLane) \
+ V(ArmI16x8ReplaceLane) \
+ V(ArmI16x8SConvertI8x16Low) \
+ V(ArmI16x8SConvertI8x16High) \
+ V(ArmI16x8Neg) \
+ V(ArmI16x8Shl) \
+ V(ArmI16x8ShrS) \
+ V(ArmI16x8SConvertI32x4) \
+ V(ArmI16x8Add) \
+ V(ArmI16x8AddSaturateS) \
+ V(ArmI16x8AddHoriz) \
+ V(ArmI16x8Sub) \
+ V(ArmI16x8SubSaturateS) \
+ V(ArmI16x8Mul) \
+ V(ArmI16x8MinS) \
+ V(ArmI16x8MaxS) \
+ V(ArmI16x8Eq) \
+ V(ArmI16x8Ne) \
+ V(ArmI16x8GtS) \
+ V(ArmI16x8GeS) \
+ V(ArmI16x8UConvertI8x16Low) \
+ V(ArmI16x8UConvertI8x16High) \
+ V(ArmI16x8ShrU) \
+ V(ArmI16x8UConvertI32x4) \
+ V(ArmI16x8AddSaturateU) \
+ V(ArmI16x8SubSaturateU) \
+ V(ArmI16x8MinU) \
+ V(ArmI16x8MaxU) \
+ V(ArmI16x8GtU) \
+ V(ArmI16x8GeU) \
+ V(ArmI8x16Splat) \
+ V(ArmI8x16ExtractLane) \
+ V(ArmI8x16ReplaceLane) \
+ V(ArmI8x16Neg) \
+ V(ArmI8x16Shl) \
+ V(ArmI8x16ShrS) \
+ V(ArmI8x16SConvertI16x8) \
+ V(ArmI8x16Add) \
+ V(ArmI8x16AddSaturateS) \
+ V(ArmI8x16Sub) \
+ V(ArmI8x16SubSaturateS) \
+ V(ArmI8x16Mul) \
+ V(ArmI8x16MinS) \
+ V(ArmI8x16MaxS) \
+ V(ArmI8x16Eq) \
+ V(ArmI8x16Ne) \
+ V(ArmI8x16GtS) \
+ V(ArmI8x16GeS) \
+ V(ArmI8x16ShrU) \
+ V(ArmI8x16UConvertI16x8) \
+ V(ArmI8x16AddSaturateU) \
+ V(ArmI8x16SubSaturateU) \
+ V(ArmI8x16MinU) \
+ V(ArmI8x16MaxU) \
+ V(ArmI8x16GtU) \
+ V(ArmI8x16GeU) \
+ V(ArmS128Zero) \
+ V(ArmS128Dup) \
+ V(ArmS128And) \
+ V(ArmS128Or) \
+ V(ArmS128Xor) \
+ V(ArmS128Not) \
+ V(ArmS128Select) \
+ V(ArmS32x4ZipLeft) \
+ V(ArmS32x4ZipRight) \
+ V(ArmS32x4UnzipLeft) \
+ V(ArmS32x4UnzipRight) \
+ V(ArmS32x4TransposeLeft) \
+ V(ArmS32x4TransposeRight) \
+ V(ArmS32x4Shuffle) \
+ V(ArmS16x8ZipLeft) \
+ V(ArmS16x8ZipRight) \
+ V(ArmS16x8UnzipLeft) \
+ V(ArmS16x8UnzipRight) \
+ V(ArmS16x8TransposeLeft) \
+ V(ArmS16x8TransposeRight) \
+ V(ArmS8x16ZipLeft) \
+ V(ArmS8x16ZipRight) \
+ V(ArmS8x16UnzipLeft) \
+ V(ArmS8x16UnzipRight) \
+ V(ArmS8x16TransposeLeft) \
+ V(ArmS8x16TransposeRight) \
+ V(ArmS8x16Concat) \
+ V(ArmS8x16Shuffle) \
+ V(ArmS32x2Reverse) \
+ V(ArmS16x4Reverse) \
+ V(ArmS16x2Reverse) \
+ V(ArmS8x8Reverse) \
+ V(ArmS8x4Reverse) \
+ V(ArmS8x2Reverse) \
+ V(ArmS1x4AnyTrue) \
+ V(ArmS1x4AllTrue) \
+ V(ArmS1x8AnyTrue) \
+ V(ArmS1x8AllTrue) \
+ V(ArmS1x16AnyTrue) \
+ V(ArmS1x16AllTrue) \
+ V(ArmWord32AtomicPairLoad) \
+ V(ArmWord32AtomicPairStore) \
+ V(ArmWord32AtomicPairAdd) \
+ V(ArmWord32AtomicPairSub) \
+ V(ArmWord32AtomicPairAnd) \
+ V(ArmWord32AtomicPairOr) \
+ V(ArmWord32AtomicPairXor) \
+ V(ArmWord32AtomicPairExchange) \
+ V(ArmWord32AtomicPairCompareExchange)
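The opcode list is a standard X macro: each backend opcode is declared once here and expanded into enum values, switch cases, and name tables elsewhere, so dropping the Word64AtomicNarrow entries removes them from every consumer at once. A self-contained illustration of the pattern (hypothetical opcodes, not V8's):

#define OPCODE_LIST(V) \
  V(Add)               \
  V(Sub)

// One expansion produces the enum...
enum Opcode {
#define DECLARE(Name) k##Name,
  OPCODE_LIST(DECLARE)
#undef DECLARE
};

// ...and another produces the name table, always in sync with it.
const char* OpcodeName(Opcode op) {
  switch (op) {
#define CASE(Name) \
  case k##Name:    \
    return #Name;
    OPCODE_LIST(CASE)
#undef CASE
  }
  return "unknown";
}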
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index 56ff02689a..3de063b3fe 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -285,27 +285,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmWord32AtomicPairXor:
case kArmWord32AtomicPairExchange:
case kArmWord32AtomicPairCompareExchange:
- case kArmWord64AtomicNarrowAddUint8:
- case kArmWord64AtomicNarrowAddUint16:
- case kArmWord64AtomicNarrowAddUint32:
- case kArmWord64AtomicNarrowSubUint8:
- case kArmWord64AtomicNarrowSubUint16:
- case kArmWord64AtomicNarrowSubUint32:
- case kArmWord64AtomicNarrowAndUint8:
- case kArmWord64AtomicNarrowAndUint16:
- case kArmWord64AtomicNarrowAndUint32:
- case kArmWord64AtomicNarrowOrUint8:
- case kArmWord64AtomicNarrowOrUint16:
- case kArmWord64AtomicNarrowOrUint32:
- case kArmWord64AtomicNarrowXorUint8:
- case kArmWord64AtomicNarrowXorUint16:
- case kArmWord64AtomicNarrowXorUint32:
- case kArmWord64AtomicNarrowExchangeUint8:
- case kArmWord64AtomicNarrowExchangeUint16:
- case kArmWord64AtomicNarrowExchangeUint32:
- case kArmWord64AtomicNarrowCompareExchangeUint8:
- case kArmWord64AtomicNarrowCompareExchangeUint16:
- case kArmWord64AtomicNarrowCompareExchangeUint32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 277d9779c0..28d7a7fcd0 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -411,36 +411,33 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
AddressingMode addressing_mode = kMode_Offset_RR;
- InstructionOperand inputs[] = {g.UseUniqueRegister(value),
- g.UseUniqueRegister(value_high),
- g.UseRegister(base), g.UseRegister(index)};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r3)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r6),
- g.TempRegister(r7), g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
-}
-
-void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- ArmOperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- AddressingMode addressing_mode = kMode_Offset_RR;
- InstructionOperand inputs[3] = {g.UseRegister(base), g.UseRegister(index),
- g.UseUniqueRegister(value)};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r4),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r5)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
- g.TempRegister()};
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(value_high),
+ g.UseUniqueRegister(base), g.UseUniqueRegister(index)};
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
+ if (projection1) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r2),
+ g.DefineAsFixed(projection1, r3)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r6),
+ g.TempRegister(r7), g.TempRegister()};
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else if (projection0) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r2)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r6),
+ g.TempRegister(r7), g.TempRegister(),
+ g.TempRegister(r3)};
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else {
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r6),
+ g.TempRegister(r7), g.TempRegister(),
+ g.TempRegister(r2), g.TempRegister(r3)};
+ selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ }
}
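VisitPairAtomicBinOp now branches on which result projections actually exist in the graph: both words used, only the low word used, or the result dead. Unused fixed registers are demoted from outputs to temps so the register allocator still reserves them for the hardcoded code-generation templates. The shape of that decision, with hypothetical std-only types standing in for InstructionOperand:

#include <optional>
#include <vector>

struct Operand {
  int fixed_reg;
  bool is_temp;  // temp = reserved for codegen but not a defined output
};

std::vector<Operand> OutputsOrTemps(std::optional<int> proj0,
                                    std::optional<int> proj1) {
  if (proj1) return {{2, false}, {3, false}};  // both words used
  if (proj0) return {{2, false}, {3, true}};   // high word dead
  return {{2, true}, {3, true}};               // result entirely unused
}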
} // namespace
@@ -2265,16 +2262,29 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r0),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r1)};
- InstructionOperand temps[] = {g.TempRegister()};
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionCode code =
kArmWord32AtomicPairLoad | AddressingModeField::encode(addressing_mode);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
+ InstructionOperand inputs[] = {g.UseUniqueRegister(base),
+ g.UseUniqueRegister(index)};
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r0),
+ g.DefineAsFixed(projection1, r1)};
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else if (projection0) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r0)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r1)};
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else {
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r0),
+ g.TempRegister(r1)};
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+ }
}
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
@@ -2314,39 +2324,6 @@ void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairXor);
}
-void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
- ArchOpcode uint8_op,
- ArchOpcode uint16_op,
- ArchOpcode uint32_op) {
- MachineType type = AtomicOpType(node->op());
- DCHECK(type != MachineType::Uint64());
- ArchOpcode opcode = kArchNop;
- if (type == MachineType::Uint32()) {
- opcode = uint32_op;
- } else if (type == MachineType::Uint16()) {
- opcode = uint16_op;
- } else if (type == MachineType::Uint8()) {
- opcode = uint8_op;
- } else {
- UNREACHABLE();
- return;
- }
- VisitNarrowAtomicBinOp(this, node, opcode);
-}
-
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) { \
- VisitWord64AtomicNarrowBinop(node, kArmWord64AtomicNarrow##op##Uint8, \
- kArmWord64AtomicNarrow##op##Uint16, \
- kArmWord64AtomicNarrow##op##Uint32); \
- }
-VISIT_ATOMIC_BINOP(Add)
-VISIT_ATOMIC_BINOP(Sub)
-VISIT_ATOMIC_BINOP(And)
-VISIT_ATOMIC_BINOP(Or)
-VISIT_ATOMIC_BINOP(Xor)
-#undef VISIT_ATOMIC_BINOP
-
void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2354,95 +2331,63 @@ void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
AddressingMode addressing_mode = kMode_Offset_RR;
- InstructionOperand inputs[] = {g.UseFixed(value, r0),
- g.UseFixed(value_high, r1),
- g.UseRegister(base), g.UseRegister(index)};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r6),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r7)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionOperand inputs[] = {
+ g.UseFixed(value, r0), g.UseFixed(value_high, r1),
+ g.UseUniqueRegister(base), g.UseUniqueRegister(index)};
InstructionCode code = kArmWord32AtomicPairExchange |
AddressingModeField::encode(addressing_mode);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
- ArmOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpType(node->op());
- if (type == MachineType::Uint8()) {
- opcode = kArmWord64AtomicNarrowExchangeUint8;
- } else if (type == MachineType::Uint16()) {
- opcode = kArmWord64AtomicNarrowExchangeUint16;
- } else if (type == MachineType::Uint32()) {
- opcode = kArmWord64AtomicNarrowExchangeUint32;
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r6),
+ g.DefineAsFixed(projection1, r7)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else if (projection0) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r6)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister(r7)};
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
} else {
- UNREACHABLE();
- return;
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister(r6), g.TempRegister(r7)};
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
}
- AddressingMode addressing_mode = kMode_Offset_RR;
- InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
- g.UseUniqueRegister(value)};
- InstructionOperand outputs[] = {
- g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
}
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
ArmOperandGenerator g(this);
AddressingMode addressing_mode = kMode_Offset_RR;
- InstructionOperand inputs[] = {
- g.UseFixed(node->InputAt(2), r4), g.UseFixed(node->InputAt(3), r5),
- g.UseFixed(node->InputAt(4), r8), g.UseFixed(node->InputAt(5), r9),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r3)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionOperand inputs[] = {g.UseFixed(node->InputAt(2), r4),
+ g.UseFixed(node->InputAt(3), r5),
+ g.UseFixed(node->InputAt(4), r8),
+ g.UseFixed(node->InputAt(5), r9),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1))};
InstructionCode code = kArmWord32AtomicPairCompareExchange |
AddressingModeField::encode(addressing_mode);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
- ArmOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* old_value = node->InputAt(2);
- Node* new_value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpType(node->op());
- if (type == MachineType::Uint8()) {
- opcode = kArmWord64AtomicNarrowCompareExchangeUint8;
- } else if (type == MachineType::Uint16()) {
- opcode = kArmWord64AtomicNarrowCompareExchangeUint16;
- } else if (type == MachineType::Uint32()) {
- opcode = kArmWord64AtomicNarrowCompareExchangeUint32;
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r2),
+ g.DefineAsFixed(projection1, r3)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else if (projection0) {
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister(r3)};
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
} else {
- UNREACHABLE();
- return;
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister(r2), g.TempRegister(r3)};
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
}
- AddressingMode addressing_mode = kMode_Offset_RR;
- InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
- g.UseUniqueRegister(old_value),
- g.UseUniqueRegister(new_value)};
- InstructionOperand outputs[] = {
- g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
- g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
}
#define SIMD_TYPE_LIST(V) \
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 867c3687a1..128ed9ffee 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -225,6 +225,9 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(constant.ToExternalReference());
case Constant::kHeapObject:
return Operand(constant.ToHeapObject());
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): RPO immediates on arm64.
break;
@@ -2578,7 +2581,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
auto MoveConstantToRegister = [&](Register dst, Constant src) {
if (src.type() == Constant::kHeapObject) {
Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
+ RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index b2e8b4b205..dd9914b8bc 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -1195,9 +1195,11 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(RoundInt32ToFloat32, kArm64Int32ToFloat32) \
V(RoundUint32ToFloat32, kArm64Uint32ToFloat32) \
V(ChangeInt32ToFloat64, kArm64Int32ToFloat64) \
+ V(ChangeInt64ToFloat64, kArm64Int64ToFloat64) \
V(ChangeUint32ToFloat64, kArm64Uint32ToFloat64) \
V(TruncateFloat32ToInt32, kArm64Float32ToInt32) \
V(ChangeFloat64ToInt32, kArm64Float64ToInt32) \
+ V(ChangeFloat64ToInt64, kArm64Float64ToInt64) \
V(TruncateFloat32ToUint32, kArm64Float32ToUint32) \
V(ChangeFloat64ToUint32, kArm64Float64ToUint32) \
V(ChangeFloat64ToUint64, kArm64Float64ToUint64) \
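The two added arm64 conversions map ChangeInt64ToFloat64 and ChangeFloat64ToInt64 onto single scvtf/fcvtzs instructions. In C++ terms they are the plain casts below (a sketch; the float-to-int direction assumes an in-range value, as the TurboFan operator does):

#include <cstdint>

double ChangeInt64ToFloat64(int64_t v) {
  return static_cast<double>(v);  // scvtf on arm64
}

int64_t ChangeFloat64ToInt64(double v) {
  // fcvtzs on arm64; assumes v fits in int64_t (the C++ cast is
  // undefined out of range, while the machine instruction saturates).
  return static_cast<int64_t>(v);
}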
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 741de6f264..90ad85fb50 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -21,7 +21,7 @@ BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
zone_(zone),
dead_(js_graph->Dead()) {}
-BranchElimination::~BranchElimination() {}
+BranchElimination::~BranchElimination() = default;
Reduction BranchElimination::Reduce(Node* node) {
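Several empty out-of-line destructors in this patch become `= default`. The defaulted form states explicitly that the compiler-generated destructor is intended and lets the compiler classify the destructor more precisely than a user-provided empty body would. In miniature:

class Reducer {
 public:
  virtual ~Reducer() = default;  // preferred over `~Reducer() {}`
};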
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 980869ccd3..255a4f3926 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -666,7 +666,7 @@ bool BytecodeAnalysis::ResumeJumpTargetsAreValid() {
valid = false;
}
// Check loops.
- for (const std::pair<int, LoopInfo>& loop_info : header_to_info_) {
+ for (const std::pair<const int, LoopInfo>& loop_info : header_to_info_) {
if (!loop_info.second.resume_jump_targets().empty()) {
PrintF(stderr,
"Found %zu resume targets at loop at offset %d, but no resume "
@@ -700,7 +700,7 @@ bool BytecodeAnalysis::ResumeJumpTargetsAreValid() {
valid = false;
}
// Check loops.
- for (const std::pair<int, LoopInfo>& loop_info : header_to_info_) {
+ for (const std::pair<const int, LoopInfo>& loop_info : header_to_info_) {
if (!ResumeJumpTargetLeavesResolveSuspendIds(
loop_info.first, loop_info.second.resume_jump_targets(),
&unresolved_suspend_ids)) {
@@ -714,7 +714,7 @@ bool BytecodeAnalysis::ResumeJumpTargetsAreValid() {
"Found suspend ids that are not resolved by a final leaf resume "
"jump:\n");
- for (const std::pair<int, int>& target : unresolved_suspend_ids) {
+ for (const std::pair<const int, int>& target : unresolved_suspend_ids) {
PrintF(stderr, " %d -> %d\n", target.first, target.second);
}
valid = false;
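The pair fixes in bytecode-analysis.cc matter because a std::map's value_type is std::pair<const Key, T>. Binding a const std::pair<int, LoopInfo>& to map elements forces a converting temporary (copying the LoopInfo) on every iteration; spelling the key const, or using const auto&, binds directly. A minimal reproduction:

#include <map>

int SumValues(const std::map<int, int>& m) {
  int sum = 0;
  // value_type is std::pair<const int, int>; this binds by reference.
  for (const std::pair<const int, int>& kv : m) sum += kv.second;
  // Writing std::pair<int, int> here would copy each entry instead.
  return sum;
}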
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index 6ff9ed021a..bc788943d7 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -92,7 +92,7 @@ struct V8_EXPORT_PRIVATE LoopInfo {
ZoneVector<ResumeJumpTarget> resume_jump_targets_;
};
-class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE BytecodeAnalysis {
public:
BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
bool do_liveness_analysis);
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 5fad7bc920..4405aff207 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -571,7 +571,9 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
}
VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
- return VectorSlotPair(feedback_vector(), FeedbackVector::ToSlot(slot_id));
+ FeedbackSlot slot = FeedbackVector::ToSlot(slot_id);
+ FeedbackNexus nexus(feedback_vector(), slot);
+ return VectorSlotPair(feedback_vector(), slot, nexus.ic_state());
}
void BytecodeGraphBuilder::CreateGraph() {
@@ -801,7 +803,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops(
int current_parent_offset =
analysis.GetLoopInfoFor(osr_offset).parent_offset();
while (current_parent_offset != -1) {
- LoopInfo current_parent_loop =
+ const LoopInfo& current_parent_loop =
analysis.GetLoopInfoFor(current_parent_offset);
// We iterate until the back edge of the parent loop, which we detect by
// the offset that the JumpLoop targets.
@@ -1337,6 +1339,17 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() {
+ PrepareEagerCheckpoint();
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Handle<Name> name(
+ Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ const Operator* op = javascript()->LoadNamed(name, VectorSlotPair());
+ Node* node = NewNode(op, object);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
PrepareEagerCheckpoint();
Node* key = environment()->LookupAccumulator();
@@ -1400,6 +1413,21 @@ void BytecodeGraphBuilder::VisitStaNamedProperty() {
BuildNamedStore(StoreMode::kNormal);
}
+void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() {
+ PrepareEagerCheckpoint();
+ Node* value = environment()->LookupAccumulator();
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Handle<Name> name(
+ Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ LanguageMode language_mode =
+ static_cast<LanguageMode>(bytecode_iterator().GetFlagOperand(2));
+ const Operator* op =
+ javascript()->StoreNamed(language_mode, name, VectorSlotPair());
+ Node* node = NewNode(op, object, value);
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitStaNamedOwnProperty() {
BuildNamedStore(StoreMode::kOwn);
}
@@ -1601,6 +1629,12 @@ void BytecodeGraphBuilder::VisitCreateEmptyArrayLiteral() {
environment()->BindAccumulator(literal);
}
+void BytecodeGraphBuilder::VisitCreateArrayFromIterable() {
+ Node* iterable = NewNode(javascript()->CreateArrayFromIterable(),
+ environment()->LookupAccumulator());
+ environment()->BindAccumulator(iterable, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
Handle<ObjectBoilerplateDescription> constant_properties(
ObjectBoilerplateDescription::cast(
@@ -1655,8 +1689,9 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() {
TemplateObjectDescription::CreateTemplateObject(isolate(), description);
nexus.vector()->Set(slot, *cached_value);
} else {
- cached_value = handle(
- JSArray::cast(nexus.GetFeedback()->ToStrongHeapObject()), isolate());
+ cached_value =
+ handle(JSArray::cast(nexus.GetFeedback()->GetHeapObjectAssumeStrong()),
+ isolate());
}
Node* template_object = jsgraph()->HeapConstant(cached_value);
@@ -1781,6 +1816,36 @@ void BytecodeGraphBuilder::VisitCallAnyReceiver() {
BuildCallVarArgs(ConvertReceiverMode::kAny);
}
+void BytecodeGraphBuilder::VisitCallNoFeedback() {
+ DCHECK_EQ(interpreter::Bytecodes::GetReceiverMode(
+ bytecode_iterator().current_bytecode()),
+ ConvertReceiverMode::kAny);
+
+ PrepareEagerCheckpoint();
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
+
+  // The receiver is in the first register; the arguments follow in
+  // consecutive registers.
+ int arg_count = static_cast<int>(reg_count) - 1;
+  // The arity of the Call node includes the callee, the receiver, and the
+  // function arguments.
+ int arity = 2 + arg_count;
+
+  // Set the call frequency to a value below the minimum inlining frequency
+  // to prevent inlining of this one-shot call node.
+ DCHECK(CallFrequency::kNoFeedbackCallFrequency < FLAG_min_inlining_frequency);
+ const Operator* call = javascript()->Call(
+ arity, CallFrequency(CallFrequency::kNoFeedbackCallFrequency));
+ Node* const* call_args = ProcessCallVarArgs(ConvertReceiverMode::kAny, callee,
+ first_reg, arg_count);
+ Node* value = ProcessCallArguments(call, call_args, arity);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitCallProperty() {
BuildCallVarArgs(ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -3403,7 +3468,9 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
memcpy(buffer, value_inputs, kPointerSize * value_input_count);
Node** current_input = buffer + value_input_count;
if (has_context) {
- *current_input++ = environment()->Context();
+ *current_input++ = OperatorProperties::NeedsExactContext(op)
+ ? environment()->Context()
+ : jsgraph()->HeapConstant(native_context());
}
if (has_frame_state) {
// The frame state will be inserted later. Here we misuse the {Dead} node
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 02b6f5fb3d..1300cd258d 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -99,10 +99,15 @@ namespace {
#define CALLEE_SAVE_FP_REGISTERS \
f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
-#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+#elif V8_TARGET_ARCH_PPC64
// ===========================================================================
// == ppc & ppc64 ============================================================
// ===========================================================================
+#ifdef V8_TARGET_LITTLE_ENDIAN // ppc64le linux
+#define STACK_SHADOW_WORDS 12
+#else // AIX
+#define STACK_SHADOW_WORDS 14
+#endif
#define PARAM_REGISTERS r3, r4, r5, r6, r7, r8, r9, r10
#define CALLEE_SAVE_REGISTERS \
r14.bit() | r15.bit() | r16.bit() | r17.bit() | r18.bit() | r19.bit() | \
@@ -117,6 +122,7 @@ namespace {
// ===========================================================================
// == s390x ==================================================================
// ===========================================================================
+#define STACK_SHADOW_WORDS 20
#define PARAM_REGISTERS r2, r3, r4, r5, r6
#define CALLEE_SAVE_REGISTERS \
r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
@@ -124,15 +130,6 @@ namespace {
d8.bit() | d9.bit() | d10.bit() | d11.bit() | d12.bit() | d13.bit() | \
d14.bit() | d15.bit()
-#elif V8_TARGET_ARCH_S390
-// ===========================================================================
-// == s390 ===================================================================
-// ===========================================================================
-#define PARAM_REGISTERS r2, r3, r4, r5, r6
-#define CALLEE_SAVE_REGISTERS \
- r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
-#define CALLEE_SAVE_FP_REGISTERS (d4.bit() | d6.bit())
-
#else
// ===========================================================================
// == unknown ================================================================
diff --git a/deps/v8/src/compiler/checkpoint-elimination.h b/deps/v8/src/compiler/checkpoint-elimination.h
index 87f14c27a6..97e05c130d 100644
--- a/deps/v8/src/compiler/checkpoint-elimination.h
+++ b/deps/v8/src/compiler/checkpoint-elimination.h
@@ -18,7 +18,7 @@ class V8_EXPORT_PRIVATE CheckpointElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
explicit CheckpointElimination(Editor* editor);
- ~CheckpointElimination() final {}
+ ~CheckpointElimination() final = default;
const char* reducer_name() const override { return "CheckpointElimination"; }
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 4f400846d4..93e384444e 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -61,10 +61,11 @@ CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
int32_t builtin_index)
: CodeAssemblerState(
isolate, zone,
- Linkage::GetJSCallDescriptor(zone, false, parameter_count,
- kind == Code::BUILTIN
- ? CallDescriptor::kPushArgumentCount
- : CallDescriptor::kNoFlags),
+ Linkage::GetJSCallDescriptor(
+ zone, false, parameter_count,
+ (kind == Code::BUILTIN ? CallDescriptor::kPushArgumentCount
+ : CallDescriptor::kNoFlags) |
+ CallDescriptor::kCanUseRoots),
kind, name, poisoning_level, 0, builtin_index) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
@@ -84,13 +85,13 @@ CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
code_generated_(false),
variables_(zone) {}
-CodeAssemblerState::~CodeAssemblerState() {}
+CodeAssemblerState::~CodeAssemblerState() = default;
int CodeAssemblerState::parameter_count() const {
return static_cast<int>(raw_assembler_->call_descriptor()->ParameterCount());
}
-CodeAssembler::~CodeAssembler() {}
+CodeAssembler::~CodeAssembler() = default;
#if DEBUG
void CodeAssemblerState::PrintCurrentBlock(std::ostream& os) {
@@ -309,7 +310,7 @@ TNode<Float64T> CodeAssembler::Float64Constant(double value) {
}
TNode<HeapNumber> CodeAssembler::NaNConstant() {
- return UncheckedCast<HeapNumber>(LoadRoot(Heap::kNanValueRootIndex));
+ return UncheckedCast<HeapNumber>(LoadRoot(RootIndex::kNanValue));
}
bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) {
@@ -924,6 +925,17 @@ TNode<UintPtrT> CodeAssembler::ChangeFloat64ToUintPtr(
raw_assembler()->ChangeFloat64ToUint32(value));
}
+TNode<Float64T> CodeAssembler::ChangeUintPtrToFloat64(TNode<UintPtrT> value) {
+ if (raw_assembler()->machine()->Is64()) {
+ // TODO(turbofan): Maybe we should introduce a ChangeUint64ToFloat64
+ // machine operator to TurboFan here?
+ return ReinterpretCast<Float64T>(
+ raw_assembler()->RoundUint64ToFloat64(value));
+ }
+ return ReinterpretCast<Float64T>(
+ raw_assembler()->ChangeUint32ToFloat64(value));
+}
+
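ChangeUintPtrToFloat64 mirrors the existing ChangeFloat64ToUintPtr: on 64-bit targets it goes through the 64-bit rounding path and on 32-bit targets through the exact 32-bit conversion. A portable sketch of the same dispatch:

#include <cstdint>

double UintPtrToFloat64(uintptr_t v) {
  if constexpr (sizeof(uintptr_t) == 8) {
    return static_cast<double>(static_cast<uint64_t>(v));  // may round
  } else {
    return static_cast<double>(static_cast<uint32_t>(v));  // exact
  }
}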
Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
if (raw_assembler()->machine()->Is64()) {
return raw_assembler()->RoundInt64ToFloat64(value);
@@ -952,7 +964,7 @@ Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
return raw_assembler()->AtomicLoad(rep, base, offset);
}
-TNode<Object> CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
+TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
Handle<Object> root = isolate()->heap()->root_handle(root_index);
if (root->IsSmi()) {
@@ -967,8 +979,9 @@ TNode<Object> CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
// cases, it would boil down to loading from a fixed kRootRegister offset.
Node* roots_array_start =
ExternalConstant(ExternalReference::roots_array_start(isolate()));
+ size_t offset = static_cast<size_t>(root_index) * kPointerSize;
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), roots_array_start,
- IntPtrConstant(root_index * kPointerSize)));
+ IntPtrConstant(offset)));
}
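LoadRoot (and StoreRoot below) now take the scoped RootIndex enum instead of Heap::RootListIndex. Because a scoped enum does not convert implicitly to an integer, the offset into the roots array is computed with an explicit cast. The arithmetic in isolation (kNanValue's numeric value here is made up):

#include <cstddef>

enum class RootIndex : unsigned { kNanValue = 3 };  // hypothetical value
constexpr std::size_t kPointerSize = sizeof(void*);

constexpr std::size_t RootArrayOffset(RootIndex index) {
  // enum class forbids implicit conversion, hence the static_cast.
  return static_cast<std::size_t>(index) * kPointerSize;
}

static_assert(RootArrayOffset(RootIndex::kNanValue) == 3 * sizeof(void*),
              "offset math");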
Node* CodeAssembler::Store(Node* base, Node* value) {
@@ -998,14 +1011,16 @@ Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
}
Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
- Node* offset, Node* value) {
- return raw_assembler()->AtomicStore(rep, base, offset, value);
+ Node* offset, Node* value, Node* value_high) {
+ return raw_assembler()->AtomicStore(rep, base, offset, value, value_high);
}
-#define ATOMIC_FUNCTION(name) \
- Node* CodeAssembler::Atomic##name(MachineType type, Node* base, \
- Node* offset, Node* value) { \
- return raw_assembler()->Atomic##name(type, base, offset, value); \
+#define ATOMIC_FUNCTION(name) \
+ Node* CodeAssembler::Atomic##name(MachineType type, Node* base, \
+ Node* offset, Node* value, \
+ Node* value_high) { \
+ return raw_assembler()->Atomic##name(type, base, offset, value, \
+ value_high); \
}
ATOMIC_FUNCTION(Exchange);
ATOMIC_FUNCTION(Add);
@@ -1017,17 +1032,20 @@ ATOMIC_FUNCTION(Xor);
Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base,
Node* offset, Node* old_value,
- Node* new_value) {
- return raw_assembler()->AtomicCompareExchange(type, base, offset, old_value,
- new_value);
+ Node* new_value,
+ Node* old_value_high,
+ Node* new_value_high) {
+ return raw_assembler()->AtomicCompareExchange(
+ type, base, offset, old_value, old_value_high, new_value, new_value_high);
}
-Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
+Node* CodeAssembler::StoreRoot(RootIndex root_index, Node* value) {
DCHECK(Heap::RootCanBeWrittenAfterInitialization(root_index));
Node* roots_array_start =
ExternalConstant(ExternalReference::roots_array_start(isolate()));
+ size_t offset = static_cast<size_t>(root_index) * kPointerSize;
return StoreNoWriteBarrier(MachineRepresentation::kTagged, roots_array_start,
- IntPtrConstant(root_index * kPointerSize), value);
+ IntPtrConstant(offset), value);
}
Node* CodeAssembler::Retain(Node* value) {
@@ -1035,6 +1053,7 @@ Node* CodeAssembler::Retain(Node* value) {
}
Node* CodeAssembler::Projection(int index, Node* value) {
+ DCHECK(index < value->op()->ValueOutputCount());
return raw_assembler()->Projection(index, value);
}
@@ -1392,8 +1411,8 @@ void CodeAssembler::Branch(SloppyTNode<IntegralT> condition, Label* true_label,
}
void CodeAssembler::Branch(TNode<BoolT> condition,
- std::function<void()> true_body,
- std::function<void()> false_body) {
+ const std::function<void()>& true_body,
+ const std::function<void()>& false_body) {
int32_t constant;
if (ToInt32Constant(condition, constant)) {
return constant ? true_body() : false_body();
@@ -1410,7 +1429,7 @@ void CodeAssembler::Branch(TNode<BoolT> condition,
}
void CodeAssembler::Branch(TNode<BoolT> condition, Label* true_label,
- std::function<void()> false_body) {
+ const std::function<void()>& false_body) {
int32_t constant;
if (ToInt32Constant(condition, constant)) {
return constant ? Goto(true_label) : false_body();
@@ -1423,7 +1442,7 @@ void CodeAssembler::Branch(TNode<BoolT> condition, Label* true_label,
}
void CodeAssembler::Branch(TNode<BoolT> condition,
- std::function<void()> true_body,
+ const std::function<void()>& true_body,
Label* false_label) {
int32_t constant;
if (ToInt32Constant(condition, constant)) {
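The three Branch overloads now take their std::function bodies by const reference. Passing std::function by value copies the callable target (often a heap allocation) on every call; a const reference avoids that when the callee only invokes the function. The shape of the fix:

#include <functional>

// Before: pass-by-value copied both callables on every call.
// After: borrow them instead and just invoke.
void RunBranch(bool condition, const std::function<void()>& true_body,
               const std::function<void()>& false_body) {
  condition ? true_body() : false_body();
}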
@@ -1735,6 +1754,43 @@ void CodeAssemblerLabel::UpdateVariablesAfterBind() {
bound_ = true;
}
+void CodeAssemblerParameterizedLabelBase::AddInputs(std::vector<Node*> inputs) {
+ if (!phi_nodes_.empty()) {
+ DCHECK_EQ(inputs.size(), phi_nodes_.size());
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ state_->raw_assembler_->AppendPhiInput(phi_nodes_[i], inputs[i]);
+ }
+ } else {
+ DCHECK_EQ(inputs.size(), phi_inputs_.size());
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ phi_inputs_[i].push_back(inputs[i]);
+ }
+ }
+}
+
+Node* CodeAssemblerParameterizedLabelBase::CreatePhi(
+ MachineRepresentation rep, const std::vector<Node*>& inputs) {
+ for (Node* input : inputs) {
+ // We use {nullptr} as a sentinel for an uninitialized value. We must not
+ // create phi nodes for these.
+ if (input == nullptr) return nullptr;
+ }
+ return state_->raw_assembler_->Phi(rep, static_cast<int>(inputs.size()),
+ &inputs.front());
+}
+
+const std::vector<Node*>& CodeAssemblerParameterizedLabelBase::CreatePhis(
+ std::vector<MachineRepresentation> representations) {
+ DCHECK(is_used());
+ DCHECK(phi_nodes_.empty());
+ phi_nodes_.reserve(phi_inputs_.size());
+ DCHECK_EQ(representations.size(), phi_inputs_.size());
+ for (size_t i = 0; i < phi_inputs_.size(); ++i) {
+ phi_nodes_.push_back(CreatePhi(representations[i], phi_inputs_[i]));
+ }
+ return phi_nodes_;
+}
+
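CodeAssemblerParameterizedLabelBase buffers one value per label parameter for every incoming Goto and only materializes phis when the label is bound: CreatePhis turns each buffered column into a single phi, skipping columns that contain the {nullptr} sentinel. The bookkeeping, sketched with std-only types:

#include <cstddef>
#include <vector>

class DeferredPhis {
 public:
  explicit DeferredPhis(std::size_t arity) : columns_(arity) {}

  // Called once per incoming edge, one value per label parameter.
  void AddInputs(const std::vector<int>& values) {
    for (std::size_t i = 0; i < values.size(); ++i) {
      columns_[i].push_back(values[i]);
    }
  }

  // At bind time each column collapses into one phi (here: its inputs).
  const std::vector<std::vector<int>>& Columns() const { return columns_; }

 private:
  std::vector<std::vector<int>> columns_;
};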
} // namespace compiler
Smi* CheckObjectType(Object* value, Smi* type, String* location) {
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 6b9089da6b..3a5f06bb95 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -27,11 +27,23 @@
namespace v8 {
namespace internal {
-class Callable;
class CallInterfaceDescriptor;
+class Callable;
+class Factory;
+class InterpreterData;
class Isolate;
+class JSAsyncGeneratorObject;
+class JSCollator;
class JSCollection;
+class JSDateTimeFormat;
+class JSListFormat;
+class JSLocale;
+class JSNumberFormat;
+class JSPluralRules;
class JSRegExpStringIterator;
+class JSRelativeTimeFormat;
+class JSSegmenter;
+class JSV8BreakIterator;
class JSWeakCollection;
class JSWeakMap;
class JSWeakSet;
@@ -41,8 +53,7 @@ class PromiseFulfillReactionJobTask;
class PromiseReaction;
class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
-class InterpreterData;
-class Factory;
+class TorqueAssembler;
class Zone;
template <typename T>
@@ -245,6 +256,7 @@ class StringWrapper;
class SymbolWrapper;
class Undetectable;
class UniqueName;
+class WasmExceptionObject;
class WasmExportedFunctionData;
class WasmGlobalObject;
class WasmMemoryObject;
@@ -697,6 +709,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<Int32T> Int32Constant(int32_t value);
TNode<Int64T> Int64Constant(int64_t value);
TNode<IntPtrT> IntPtrConstant(intptr_t value);
+ TNode<Uint32T> Uint32Constant(uint32_t value) {
+ return Unsigned(Int32Constant(bit_cast<int32_t>(value)));
+ }
+ TNode<UintPtrT> UintPtrConstant(uintptr_t value) {
+ return Unsigned(IntPtrConstant(bit_cast<intptr_t>(value)));
+ }
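Uint32Constant and UintPtrConstant reuse the signed constant nodes by reinterpreting the bit pattern: bit_cast preserves all 32 (or pointer-width) bits while flipping signedness, so Uint32Constant(0xFFFFFFFFu) yields the same node as Int32Constant(-1). A standalone equivalent of that cast (pre-C++20, where std::bit_cast is unavailable):

#include <cstdint>
#include <cstring>

int32_t BitCastToInt32(uint32_t v) {
  int32_t out;
  std::memcpy(&out, &v, sizeof out);  // value-preserving reinterpretation
  return out;
}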
TNode<Number> NumberConstant(double value);
TNode<Smi> SmiConstant(Smi* value);
TNode<Smi> SmiConstant(int value);
@@ -773,11 +791,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void Branch(SloppyTNode<IntegralT> condition, Label* true_label,
Label* false_label);
- void Branch(TNode<BoolT> condition, std::function<void()> true_body,
- std::function<void()> false_body);
+ void Branch(TNode<BoolT> condition, const std::function<void()>& true_body,
+ const std::function<void()>& false_body);
void Branch(TNode<BoolT> condition, Label* true_label,
- std::function<void()> false_body);
- void Branch(TNode<BoolT> condition, std::function<void()> true_body,
+ const std::function<void()>& false_body);
+ void Branch(TNode<BoolT> condition, const std::function<void()>& true_body,
Label* false_label);
void Switch(Node* index, Label* default_label, const int32_t* case_values,
@@ -808,7 +826,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* AtomicLoad(MachineType rep, Node* base, Node* offset);
// Load a value from the root array.
- TNode<Object> LoadRoot(Heap::RootListIndex root_index);
+ TNode<Object> LoadRoot(RootIndex root_index);
// Store value to raw memory location.
Node* Store(Node* base, Node* value);
@@ -817,28 +835,38 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset,
Node* value);
+ // {value_high} is used for 64-bit stores on 32-bit platforms; it must be
+ // nullptr in other cases.
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* offset,
- Node* value);
+ Node* value, Node* value_high = nullptr);
// Exchange value at raw memory location
- Node* AtomicExchange(MachineType type, Node* base, Node* offset, Node* value);
+ Node* AtomicExchange(MachineType type, Node* base, Node* offset, Node* value,
+ Node* value_high = nullptr);
// Compare and exchange value at raw memory location
Node* AtomicCompareExchange(MachineType type, Node* base, Node* offset,
- Node* old_value, Node* new_value);
+ Node* old_value, Node* new_value,
+ Node* old_value_high = nullptr,
+ Node* new_value_high = nullptr);
- Node* AtomicAdd(MachineType type, Node* base, Node* offset, Node* value);
+ Node* AtomicAdd(MachineType type, Node* base, Node* offset, Node* value,
+ Node* value_high = nullptr);
- Node* AtomicSub(MachineType type, Node* base, Node* offset, Node* value);
+ Node* AtomicSub(MachineType type, Node* base, Node* offset, Node* value,
+ Node* value_high = nullptr);
- Node* AtomicAnd(MachineType type, Node* base, Node* offset, Node* value);
+ Node* AtomicAnd(MachineType type, Node* base, Node* offset, Node* value,
+ Node* value_high = nullptr);
- Node* AtomicOr(MachineType type, Node* base, Node* offset, Node* value);
+ Node* AtomicOr(MachineType type, Node* base, Node* offset, Node* value,
+ Node* value_high = nullptr);
- Node* AtomicXor(MachineType type, Node* base, Node* offset, Node* value);
+ Node* AtomicXor(MachineType type, Node* base, Node* offset, Node* value,
+ Node* value_high = nullptr);
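// Sketch of how a caller on a 32-bit platform would produce the
// {value, value_high} pair taken by the atomics above; the helper is
// hypothetical, not V8 API.
#include <cstdint>
#include <utility>
std::pair<uint32_t, uint32_t> SplitWord64Sketch(uint64_t v) {
  uint32_t low = static_cast<uint32_t>(v);         // passed as {value}
  uint32_t high = static_cast<uint32_t>(v >> 32);  // passed as {value_high}
  return {low, high};
}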
// Store a value to the root array.
- Node* StoreRoot(Heap::RootListIndex root_index, Node* value);
+ Node* StoreRoot(RootIndex root_index, Node* value);
// Basic arithmetic operations.
#define DECLARE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
@@ -906,6 +934,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<Uint32T> Uint32Add(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
TNode<WordT> IntPtrAdd(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
TNode<WordT> IntPtrSub(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
TNode<WordT> IntPtrMul(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
@@ -921,6 +954,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return Signed(
IntPtrMul(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<UintPtrT> UintPtrAdd(TNode<UintPtrT> left, TNode<UintPtrT> right) {
+ return Unsigned(
+ IntPtrAdd(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<UintPtrT> UintPtrSub(TNode<UintPtrT> left, TNode<UintPtrT> right) {
+ return Unsigned(
+ IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
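// Why the unsigned wrappers above can reuse the signed operations: on
// two's-complement hardware, add and sub are the same instruction for both
// signednesses, so only the static type changes. A scalar sketch:
#include <cstdint>
uint32_t Uint32AddSketch(uint32_t a, uint32_t b) {
  return a + b;  // bit-identical to the result an Int32Add would produce
}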
TNode<WordT> WordShl(SloppyTNode<WordT> value, int shift);
TNode<WordT> WordShr(SloppyTNode<WordT> value, int shift);
@@ -970,6 +1011,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Changes a double to an intptr_t for pointer arithmetic outside of Smi range.
// Assumes that the double can be exactly represented as an int.
TNode<UintPtrT> ChangeFloat64ToUintPtr(SloppyTNode<Float64T> value);
+ // Same in the opposite direction.
+ TNode<Float64T> ChangeUintPtrToFloat64(TNode<UintPtrT> value);
// Changes an intptr_t to a double, e.g. for storing an element index
// outside Smi range in a HeapNumber. Lossless on 32-bit,
@@ -1117,7 +1160,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
Node* arity = Int32Constant(argc);
- Node* receiver = LoadRoot(Heap::kUndefinedValueRootIndex);
+ Node* receiver = LoadRoot(RootIndex::kUndefinedValue);
// Construct(target, new_target, arity, receiver, arguments...)
return CallStub(callable, context, new_target, new_target, arity, receiver,
@@ -1380,6 +1423,60 @@ class CodeAssemblerLabel {
std::map<CodeAssemblerVariable::Impl*, std::vector<Node*>> variable_merges_;
};
+class CodeAssemblerParameterizedLabelBase {
+ public:
+ bool is_used() const { return plain_label_.is_used(); }
+ explicit CodeAssemblerParameterizedLabelBase(CodeAssembler* assembler,
+ size_t arity,
+ CodeAssemblerLabel::Type type)
+ : state_(assembler->state()),
+ phi_inputs_(arity),
+ plain_label_(assembler, type) {}
+
+ protected:
+ CodeAssemblerLabel* plain_label() { return &plain_label_; }
+ void AddInputs(std::vector<Node*> inputs);
+ Node* CreatePhi(MachineRepresentation rep, const std::vector<Node*>& inputs);
+ const std::vector<Node*>& CreatePhis(
+ std::vector<MachineRepresentation> representations);
+
+ private:
+ CodeAssemblerState* state_;
+ std::vector<std::vector<Node*>> phi_inputs_;
+ std::vector<Node*> phi_nodes_;
+ CodeAssemblerLabel plain_label_;
+};
+
+template <class... Types>
+class CodeAssemblerParameterizedLabel
+ : public CodeAssemblerParameterizedLabelBase {
+ public:
+ static constexpr size_t kArity = sizeof...(Types);
+ explicit CodeAssemblerParameterizedLabel(CodeAssembler* assembler,
+ CodeAssemblerLabel::Type type)
+ : CodeAssemblerParameterizedLabelBase(assembler, kArity, type) {}
+
+ private:
+ friend class internal::TorqueAssembler;
+
+ void AddInputs(TNode<Types>... inputs) {
+ CodeAssemblerParameterizedLabelBase::AddInputs(
+ std::vector<Node*>{inputs...});
+ }
+ void CreatePhis(TNode<Types>*... results) {
+ const std::vector<Node*>& phi_nodes =
+ CodeAssemblerParameterizedLabelBase::CreatePhis(
+ {MachineRepresentationOf<Types>::value...});
+ auto it = phi_nodes.begin();
+ USE(it);
+ ITERATE_PACK(AssignPhi(results, *(it++)));
+ }
+ template <class T>
+ static void AssignPhi(TNode<T>* result, Node* phi) {
+ if (phi != nullptr) *result = TNode<T>::UncheckedCast(phi);
+ }
+};
+
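// Sketch of the parameter-pack iteration CreatePhis performs: a C++17 fold
// expression stands in for V8's ITERATE_PACK macro, assigning one phi node
// to each typed result pointer in order. Names here are illustrative only.
#include <vector>
template <class... Ts>
void AssignPhisSketch(const std::vector<int>& phis, Ts*... results) {
  auto it = phis.begin();
  (void)it;  // silence "unused" when the pack is empty, like USE(it) above
  ((*results = static_cast<Ts>(*it++)), ...);  // one assignment per element
}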
class V8_EXPORT_PRIVATE CodeAssemblerState {
public:
// Create with CallStub linkage.
@@ -1413,6 +1510,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
friend class CodeAssemblerLabel;
friend class CodeAssemblerVariable;
friend class CodeAssemblerTester;
+ friend class CodeAssemblerParameterizedLabelBase;
CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, Code::Kind kind,
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 83060f9e38..b6d782d96a 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -16,6 +16,7 @@
#include "src/lsan.h"
#include "src/macro-assembler-inl.h"
#include "src/optimized-compilation-info.h"
+#include "src/string-constants.h"
namespace v8 {
namespace internal {
@@ -69,7 +70,7 @@ CodeGenerator::CodeGenerator(
caller_registers_saved_(false),
jump_tables_(nullptr),
ools_(nullptr),
- osr_helper_(osr_helper),
+ osr_helper_(std::move(osr_helper)),
osr_pc_offset_(-1),
optimized_out_literal_id_(-1),
source_position_table_builder_(
@@ -453,8 +454,8 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
}
}
-bool CodeGenerator::IsMaterializableFromRoot(
- Handle<HeapObject> object, Heap::RootListIndex* index_return) {
+bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
+ RootIndex* index_return) {
const CallDescriptor* incoming_descriptor =
linkage()->GetIncomingDescriptor();
if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
@@ -1033,7 +1034,7 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
DCHECK(descriptor->bailout_id().IsValidForConstructStub());
translation->BeginConstructStubFrame(
descriptor->bailout_id(), shared_info_id,
- static_cast<unsigned int>(descriptor->parameters_count()));
+ static_cast<unsigned int>(descriptor->parameters_count() + 1));
break;
case FrameStateType::kBuiltinContinuation: {
BailoutId bailout_id = descriptor->bailout_id();
@@ -1111,6 +1112,8 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
} else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
type == MachineType::Uint32()) {
translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
+ } else if (type == MachineType::Int64()) {
+ translation->StoreInt64StackSlot(LocationOperand::cast(op)->index());
} else {
CHECK_EQ(MachineRepresentation::kTagged, type.representation());
translation->StoreStackSlot(LocationOperand::cast(op)->index());
@@ -1132,6 +1135,8 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
} else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
type == MachineType::Uint32()) {
translation->StoreUint32Register(converter.ToRegister(op));
+ } else if (type == MachineType::Int64()) {
+ translation->StoreInt64Register(converter.ToRegister(op));
} else {
CHECK_EQ(MachineRepresentation::kTagged, type.representation());
translation->StoreRegister(converter.ToRegister(op));
@@ -1182,12 +1187,14 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
}
break;
case Constant::kInt64:
- // When pointers are 8 bytes, we can use int64 constants to represent
- // Smis.
- DCHECK(type.representation() == MachineRepresentation::kWord64 ||
- type.representation() == MachineRepresentation::kTagged);
DCHECK_EQ(8, kPointerSize);
- {
+ if (type.representation() == MachineRepresentation::kWord64) {
+ literal =
+ DeoptimizationLiteral(static_cast<double>(constant.ToInt64()));
+ } else {
+ // When pointers are 8 bytes, we can use int64 constants to represent
+ // Smis.
+ DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
Smi* smi = reinterpret_cast<Smi*>(constant.ToInt64());
DCHECK(smi->IsSmi());
literal = DeoptimizationLiteral(smi->value());
@@ -1207,6 +1214,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
literal = DeoptimizationLiteral(constant.ToHeapObject());
break;
+ case Constant::kDelayedStringConstant:
+ DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
+ literal = DeoptimizationLiteral(constant.ToDelayedStringConstant());
+ break;
default:
UNREACHABLE();
}
@@ -1262,10 +1273,21 @@ OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
gen->ools_ = this;
}
-OutOfLineCode::~OutOfLineCode() {}
+OutOfLineCode::~OutOfLineCode() = default;
Handle<Object> DeoptimizationLiteral::Reify(Isolate* isolate) const {
- return object_.is_null() ? isolate->factory()->NewNumber(number_) : object_;
+ switch (kind_) {
+ case DeoptimizationLiteralKind::kObject: {
+ return object_;
+ }
+ case DeoptimizationLiteralKind::kNumber: {
+ return isolate->factory()->NewNumber(number_);
+ }
+ case DeoptimizationLiteralKind::kString: {
+ return string_->AllocateStringConstant(isolate);
+ }
+ }
+ UNREACHABLE();
}
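// The Reify() switch above is a discriminated-union dispatch: exactly one of
// {object_, number_, string_} is meaningful, selected by kind_. A compact
// sketch of the same shape using std::variant (illustrative; V8 keeps the
// explicit kind_ field):
#include <string>
#include <variant>
using LiteralSketch = std::variant<double, std::string>;
std::string ReifySketch(const LiteralSketch& lit) {
  if (const double* num = std::get_if<double>(&lit)) {
    return std::to_string(*num);  // the kNumber case materializes a value
  }
  return std::get<std::string>(lit);  // the kString case is already a string
}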
} // namespace compiler
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 5d4941f825..1ba0e32ce6 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -50,28 +50,40 @@ class InstructionOperandIterator {
size_t pos_;
};
-// Either a non-null Handle<Object> or a double.
+enum class DeoptimizationLiteralKind { kObject, kNumber, kString };
+
+// Either a non-null Handle<Object>, a double or a StringConstantBase.
class DeoptimizationLiteral {
public:
- DeoptimizationLiteral() : object_(), number_(0) {}
+ DeoptimizationLiteral() : object_(), number_(0), string_(nullptr) {}
explicit DeoptimizationLiteral(Handle<Object> object)
- : object_(object), number_(0) {
+ : kind_(DeoptimizationLiteralKind::kObject), object_(object) {
DCHECK(!object_.is_null());
}
- explicit DeoptimizationLiteral(double number) : object_(), number_(number) {}
+ explicit DeoptimizationLiteral(double number)
+ : kind_(DeoptimizationLiteralKind::kNumber), number_(number) {}
+ explicit DeoptimizationLiteral(const StringConstantBase* string)
+ : kind_(DeoptimizationLiteralKind::kString), string_(string) {}
Handle<Object> object() const { return object_; }
+ const StringConstantBase* string() const { return string_; }
bool operator==(const DeoptimizationLiteral& other) const {
- return object_.equals(other.object_) &&
- bit_cast<uint64_t>(number_) == bit_cast<uint64_t>(other.number_);
+ return kind_ == other.kind_ && object_.equals(other.object_) &&
+ bit_cast<uint64_t>(number_) == bit_cast<uint64_t>(other.number_) &&
+ bit_cast<intptr_t>(string_) == bit_cast<intptr_t>(other.string_);
}
Handle<Object> Reify(Isolate* isolate) const;
+ DeoptimizationLiteralKind kind() const { return kind_; }
+
private:
+ DeoptimizationLiteralKind kind_;
+
Handle<Object> object_;
- double number_;
+ double number_ = 0;
+ const StringConstantBase* string_ = nullptr;
};
// Generates native code for a sequence of instructions.
@@ -151,7 +163,7 @@ class CodeGenerator final : public GapResolver::Assembler {
// which is cheaper on some platforms than materializing the actual heap
// object constant.
bool IsMaterializableFromRoot(Handle<HeapObject> object,
- Heap::RootListIndex* index_return);
+ RootIndex* index_return);
enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };
diff --git a/deps/v8/src/compiler/common-node-cache.h b/deps/v8/src/compiler/common-node-cache.h
index bce8d0f62e..6a36c979a1 100644
--- a/deps/v8/src/compiler/common-node-cache.h
+++ b/deps/v8/src/compiler/common-node-cache.h
@@ -23,7 +23,7 @@ namespace compiler {
class CommonNodeCache final {
public:
explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
- ~CommonNodeCache() {}
+ ~CommonNodeCache() = default;
Node** FindInt32Constant(int32_t value) {
return int32_constants_.Find(zone(), value);
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index f1b29eaf76..32b3181b7a 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -28,7 +28,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
JSHeapBroker* js_heap_broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine, Zone* temp_zone);
- ~CommonOperatorReducer() final {}
+ ~CommonOperatorReducer() final = default;
const char* reducer_name() const override { return "CommonOperatorReducer"; }
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 253a92eb84..9ed6943367 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -136,6 +136,13 @@ const Operator* CommonOperatorBuilder::MarkAsSafetyCheck(
}
}
+const Operator* CommonOperatorBuilder::DelayedStringConstant(
+ const StringConstantBase* str) {
+ return new (zone()) Operator1<const StringConstantBase*>(
+ IrOpcode::kDelayedStringConstant, Operator::kPure,
+ "DelayedStringConstant", 0, 0, 0, 1, 0, 0, str);
+}
+
bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
return lhs.representation() == rhs.representation() &&
lhs.hint() == rhs.hint();
@@ -1194,6 +1201,11 @@ Handle<HeapObject> HeapConstantOf(const Operator* op) {
return OpParameter<Handle<HeapObject>>(op);
}
+const StringConstantBase* StringConstantBaseOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kDelayedStringConstant, op->opcode());
+ return OpParameter<const StringConstantBase*>(op);
+}
+
const Operator* CommonOperatorBuilder::RelocatableInt32Constant(
int32_t value, RelocInfo::Mode rmode) {
return new (zone()) Operator1<RelocatablePtrConstantInfo>( // --
@@ -1431,7 +1443,8 @@ const Operator* CommonOperatorBuilder::Call(
Operator::ZeroIfNoThrow(call_descriptor->properties()),
call_descriptor) {}
- void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
+ void PrintParameter(std::ostream& os,
+ PrintVerbosity verbose) const override {
os << "[" << *parameter() << "]";
}
};
@@ -1455,7 +1468,8 @@ const Operator* CommonOperatorBuilder::CallWithCallerSavedRegisters(
Operator::ZeroIfNoThrow(call_descriptor->properties()),
call_descriptor) {}
- void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
+ void PrintParameter(std::ostream& os,
+ PrintVerbosity verbose) const override {
os << "[" << *parameter() << "]";
}
};
@@ -1474,7 +1488,8 @@ const Operator* CommonOperatorBuilder::TailCall(
call_descriptor->FrameStateCount(),
1, 1, 0, 0, 1, call_descriptor) {}
- void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
+ void PrintParameter(std::ostream& os,
+ PrintVerbosity verbose) const override {
os << "[" << *parameter() << "]";
}
};
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 9bdaedea20..609dfc8c1b 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -11,12 +11,16 @@
#include "src/globals.h"
#include "src/machine-type.h"
#include "src/reloc-info.h"
+#include "src/string-constants.h"
#include "src/vector-slot-pair.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
+
+class StringConstantBase;
+
namespace compiler {
// Forward declarations.
@@ -255,7 +259,7 @@ class SparseInputMask final {
// An iterator over a node's sparse inputs.
class InputIterator final {
public:
- InputIterator() {}
+ InputIterator() = default;
InputIterator(BitMaskType bit_mask, Node* parent);
Node* parent() const { return parent_; }
@@ -433,6 +437,9 @@ const FrameStateInfo& FrameStateInfoOf(const Operator* op)
Handle<HeapObject> HeapConstantOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+const StringConstantBase* StringConstantBaseOf(const Operator* op)
+ V8_WARN_UNUSED_RESULT;
+
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
class V8_EXPORT_PRIVATE CommonOperatorBuilder final
@@ -535,6 +542,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* MarkAsSafetyCheck(const Operator* op,
IsSafetyCheck safety_check);
+ const Operator* DelayedStringConstant(const StringConstantBase* str);
+
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index b67adbd7ca..d5eb8b54be 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -17,7 +17,7 @@ CompilationDependencies::CompilationDependencies(Isolate* isolate, Zone* zone)
class CompilationDependencies::Dependency : public ZoneObject {
public:
virtual bool IsValid() const = 0;
- virtual void Install(MaybeObjectHandle code) = 0;
+ virtual void Install(const MaybeObjectHandle& code) = 0;
};
class InitialMapDependency final : public CompilationDependencies::Dependency {
@@ -36,7 +36,7 @@ class InitialMapDependency final : public CompilationDependencies::Dependency {
function->initial_map() == *initial_map_.object<Map>();
}
- void Install(MaybeObjectHandle code) override {
+ void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(function_.isolate(), code,
initial_map_.object<Map>(),
@@ -68,7 +68,7 @@ class PrototypePropertyDependency final
function->prototype() == *prototype_.object();
}
- void Install(MaybeObjectHandle code) override {
+ void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
Handle<JSFunction> function = function_.object<JSFunction>();
if (!function->has_initial_map()) JSFunction::EnsureHasInitialMap(function);
@@ -90,7 +90,7 @@ class StableMapDependency final : public CompilationDependencies::Dependency {
bool IsValid() const override { return map_.object<Map>()->is_stable(); }
- void Install(MaybeObjectHandle code) override {
+ void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(map_.isolate(), code, map_.object<Map>(),
DependentCode::kPrototypeCheckGroup);
@@ -108,7 +108,7 @@ class TransitionDependency final : public CompilationDependencies::Dependency {
bool IsValid() const override { return !map_.object<Map>()->is_deprecated(); }
- void Install(MaybeObjectHandle code) override {
+ void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(map_.isolate(), code, map_.object<Map>(),
DependentCode::kTransitionGroup);
@@ -132,7 +132,7 @@ class PretenureModeDependency final
return mode_ == site_.object<AllocationSite>()->GetPretenureMode();
}
- void Install(MaybeObjectHandle code) override {
+ void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(
site_.isolate(), code, site_.object<AllocationSite>(),
@@ -162,7 +162,7 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
return *type == owner->instance_descriptors()->GetFieldType(descriptor_);
}
- void Install(MaybeObjectHandle code) override {
+ void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(owner_.isolate(), code,
owner_.object<Map>(),
@@ -193,7 +193,7 @@ class GlobalPropertyDependency final
read_only_ == cell->property_details().IsReadOnly();
}
- void Install(MaybeObjectHandle code) override {
+ void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(cell_.isolate(), code,
cell_.object<PropertyCell>(),
@@ -217,7 +217,7 @@ class ProtectorDependency final : public CompilationDependencies::Dependency {
return cell->value() == Smi::FromInt(Isolate::kProtectorValid);
}
- void Install(MaybeObjectHandle code) override {
+ void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(cell_.isolate(), code,
cell_.object<PropertyCell>(),
@@ -249,7 +249,7 @@ class ElementsKindDependency final
return kind_ == kind;
}
- void Install(MaybeObjectHandle code) override {
+ void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(
site_.isolate(), code, site_.object<AllocationSite>(),
@@ -271,13 +271,14 @@ class InitialMapInstanceSizePredictionDependency final
bool IsValid() const override {
// The dependency is valid if the prediction is the same as the current
// slack tracking result.
+ if (!function_.object<JSFunction>()->has_initial_map()) return false;
int instance_size =
function_.object<JSFunction>()->ComputeInstanceSizeWithMinSlack(
function_.isolate());
return instance_size == instance_size_;
}
- void Install(MaybeObjectHandle code) override {
+ void Install(const MaybeObjectHandle& code) override {
DCHECK(IsValid());
// Finish the slack tracking.
function_.object<JSFunction>()->CompleteInobjectSlackTrackingIfActive();
diff --git a/deps/v8/src/compiler/constant-folding-reducer.cc b/deps/v8/src/compiler/constant-folding-reducer.cc
index a447b2a07c..4508d90b71 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.cc
+++ b/deps/v8/src/compiler/constant-folding-reducer.cc
@@ -17,7 +17,7 @@ ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker) {}
-ConstantFoldingReducer::~ConstantFoldingReducer() {}
+ConstantFoldingReducer::~ConstantFoldingReducer() = default;
Reduction ConstantFoldingReducer::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index 217d58ef31..a1cab2f0f0 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -41,7 +41,7 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
public:
DeadCodeElimination(Editor* editor, Graph* graph,
CommonOperatorBuilder* common, Zone* temp_zone);
- ~DeadCodeElimination() final {}
+ ~DeadCodeElimination() final = default;
const char* reducer_name() const override { return "DeadCodeElimination"; }
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 4bb4f8df77..97f78418d0 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -628,9 +628,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeInt32ToTagged:
result = LowerChangeInt32ToTagged(node);
break;
+ case IrOpcode::kChangeInt64ToTagged:
+ result = LowerChangeInt64ToTagged(node);
+ break;
case IrOpcode::kChangeUint32ToTagged:
result = LowerChangeUint32ToTagged(node);
break;
+ case IrOpcode::kChangeUint64ToTagged:
+ result = LowerChangeUint64ToTagged(node);
+ break;
case IrOpcode::kChangeFloat64ToTagged:
result = LowerChangeFloat64ToTagged(node);
break;
@@ -640,6 +646,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeTaggedSignedToInt32:
result = LowerChangeTaggedSignedToInt32(node);
break;
+ case IrOpcode::kChangeTaggedSignedToInt64:
+ result = LowerChangeTaggedSignedToInt64(node);
+ break;
case IrOpcode::kChangeTaggedToBit:
result = LowerChangeTaggedToBit(node);
break;
@@ -649,6 +658,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeTaggedToUint32:
result = LowerChangeTaggedToUint32(node);
break;
+ case IrOpcode::kChangeTaggedToInt64:
+ result = LowerChangeTaggedToInt64(node);
+ break;
case IrOpcode::kChangeTaggedToFloat64:
result = LowerChangeTaggedToFloat64(node);
break;
@@ -718,12 +730,24 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedInt32ToTaggedSigned:
result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
break;
+ case IrOpcode::kCheckedInt64ToInt32:
+ result = LowerCheckedInt64ToInt32(node, frame_state);
+ break;
+ case IrOpcode::kCheckedInt64ToTaggedSigned:
+ result = LowerCheckedInt64ToTaggedSigned(node, frame_state);
+ break;
case IrOpcode::kCheckedUint32ToInt32:
result = LowerCheckedUint32ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedUint32ToTaggedSigned:
result = LowerCheckedUint32ToTaggedSigned(node, frame_state);
break;
+ case IrOpcode::kCheckedUint64ToInt32:
+ result = LowerCheckedUint64ToInt32(node, frame_state);
+ break;
+ case IrOpcode::kCheckedUint64ToTaggedSigned:
+ result = LowerCheckedUint64ToTaggedSigned(node, frame_state);
+ break;
case IrOpcode::kCheckedFloat64ToInt32:
result = LowerCheckedFloat64ToInt32(node, frame_state);
break;
@@ -824,15 +848,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kNewConsString:
result = LowerNewConsString(node);
break;
- case IrOpcode::kArrayBufferWasNeutered:
- result = LowerArrayBufferWasNeutered(node);
- break;
case IrOpcode::kSameValue:
result = LowerSameValue(node);
break;
case IrOpcode::kDeadValue:
result = LowerDeadValue(node);
break;
+ case IrOpcode::kStringConcat:
+ result = LowerStringConcat(node);
+ break;
case IrOpcode::kStringFromSingleCharCode:
result = LowerStringFromSingleCharCode(node);
break;
@@ -1120,6 +1144,35 @@ Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerChangeInt64ToTagged(Node* node) {
+ Node* value = node->InputAt(0);
+
+ auto if_not_in_smi_range = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTagged);
+
+ Node* value32 = __ TruncateInt64ToInt32(value);
+ __ GotoIfNot(__ Word64Equal(__ ChangeInt32ToInt64(value32), value),
+ &if_not_in_smi_range);
+
+ if (SmiValuesAre32Bits()) {
+ Node* value_smi = ChangeInt64ToSmi(value);
+ __ Goto(&done, value_smi);
+ } else {
+ Node* add = __ Int32AddWithOverflow(value32, value32);
+ Node* ovf = __ Projection(1, add);
+ __ GotoIf(ovf, &if_not_in_smi_range);
+ Node* value_smi = ChangeInt32ToIntPtr(__ Projection(0, add));
+ __ Goto(&done, value_smi);
+ }
+
+ __ Bind(&if_not_in_smi_range);
+ Node* number = AllocateHeapNumberWithValue(__ ChangeInt64ToFloat64(value));
+ __ Goto(&done, number);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
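// Scalar model of the Smi-range test above, using nothing beyond standard
// C++: an int64 fits in 32 bits exactly when truncating and sign-extending
// round-trips losslessly, which is what TruncateInt64ToInt32 followed by
// Word64Equal checks.
#include <cstdint>
bool FitsInInt32Sketch(int64_t value) {
  int32_t value32 = static_cast<int32_t>(value);   // TruncateInt64ToInt32
  return static_cast<int64_t>(value32) == value;   // sign-extend and compare
}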
Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
Node* value = node->InputAt(0);
@@ -1139,11 +1192,36 @@ Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerChangeUint64ToTagged(Node* node) {
+ Node* value = node->InputAt(0);
+
+ auto if_not_in_smi_range = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTagged);
+
+ Node* check =
+ __ Uint64LessThanOrEqual(value, __ Int64Constant(Smi::kMaxValue));
+ __ GotoIfNot(check, &if_not_in_smi_range);
+ __ Goto(&done, ChangeInt64ToSmi(value));
+
+ __ Bind(&if_not_in_smi_range);
+ Node* number = AllocateHeapNumberWithValue(__ ChangeInt64ToFloat64(value));
+
+ __ Goto(&done, number);
+ __ Bind(&done);
+
+ return done.PhiAt(0);
+}
+
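// The unsigned variant needs only an upper-bound test, since a uint64 is
// never negative. A scalar sketch, assuming the 64-bit configuration where
// Smi payloads are 32 bits wide (so Smi::kMaxValue is 2^31 - 1):
#include <cstdint>
bool FitsInSmiUnsignedSketch(uint64_t value) {
  const uint64_t kSmiMaxValueSketch = (uint64_t{1} << 31) - 1;
  return value <= kSmiMaxValueSketch;
}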
Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
Node* value = node->InputAt(0);
return ChangeSmiToInt32(value);
}
+Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) {
+ Node* value = node->InputAt(0);
+ return ChangeSmiToInt64(value);
+}
+
Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
Node* value = node->InputAt(0);
return __ WordEqual(value, __ TrueConstant());
@@ -1280,6 +1358,26 @@ Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerChangeTaggedToInt64(Node* node) {
+ Node* value = node->InputAt(0);
+
+ auto if_not_smi = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kWord64);
+
+ Node* check = ObjectIsSmi(value);
+ __ GotoIfNot(check, &if_not_smi);
+ __ Goto(&done, ChangeSmiToInt64(value));
+
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = __ ChangeFloat64ToInt64(vfalse);
+ __ Goto(&done, vfalse);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
return LowerTruncateTaggedToFloat64(node);
}
@@ -1353,7 +1451,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
size_t const map_count = maps.size();
if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
- auto done = __ MakeDeferredLabel();
+ auto done = __ MakeLabel();
auto migrate = __ MakeDeferredLabel();
// Load the current map of the {value}.
@@ -1364,10 +1462,11 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* map = __ HeapConstant(maps[i]);
Node* check = __ WordEqual(value_map, map);
if (i == map_count - 1) {
- __ GotoIfNot(check, &migrate);
- __ Goto(&done);
+ __ Branch(check, &done, &migrate, IsSafetyCheck::kCriticalSafetyCheck);
} else {
- __ GotoIf(check, &done);
+ auto next_map = __ MakeLabel();
+ __ Branch(check, &done, &next_map, IsSafetyCheck::kCriticalSafetyCheck);
+ __ Bind(&next_map);
}
}
@@ -1382,7 +1481,8 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
__ Int32Constant(Map::IsDeprecatedBit::kMask)),
__ Int32Constant(0));
__ DeoptimizeIf(DeoptimizeReason::kWrongMap, p.feedback(),
- if_not_deprecated, frame_state);
+ if_not_deprecated, frame_state,
+ IsSafetyCheck::kCriticalSafetyCheck);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
@@ -1393,7 +1493,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
__ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, p.feedback(),
- check, frame_state);
+ check, frame_state, IsSafetyCheck::kCriticalSafetyCheck);
}
// Reload the current map of the {value}.
@@ -1405,9 +1505,11 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* check = __ WordEqual(value_map, map);
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
- frame_state);
+ frame_state, IsSafetyCheck::kCriticalSafetyCheck);
} else {
- __ GotoIf(check, &done);
+ auto next_map = __ MakeLabel();
+ __ Branch(check, &done, &next_map, IsSafetyCheck::kCriticalSafetyCheck);
+ __ Bind(&next_map);
}
}
@@ -1424,9 +1526,11 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* check = __ WordEqual(value_map, map);
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
- frame_state);
+ frame_state, IsSafetyCheck::kCriticalSafetyCheck);
} else {
- __ GotoIf(check, &done);
+ auto next_map = __ MakeLabel();
+ __ Branch(check, &done, &next_map, IsSafetyCheck::kCriticalSafetyCheck);
+ __ Bind(&next_map);
}
}
__ Goto(&done);
@@ -1447,7 +1551,14 @@ Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
for (size_t i = 0; i < map_count; ++i) {
Node* map = __ HeapConstant(maps[i]);
Node* check = __ WordEqual(value_map, map);
- __ GotoIf(check, &done, __ Int32Constant(1));
+ auto next_map = __ MakeLabel();
+ auto passed = __ MakeLabel();
+ __ Branch(check, &passed, &next_map, IsSafetyCheck::kCriticalSafetyCheck);
+
+ __ Bind(&passed);
+ __ Goto(&done, __ Int32Constant(1));
+
+ __ Bind(&next_map);
}
__ Goto(&done, __ Int32Constant(0));
@@ -1544,6 +1655,24 @@ void EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
__ DeoptimizeIfNot(p.reason(), p.feedback(), value, frame_state);
}
+Node* EffectControlLinearizer::LowerStringConcat(Node* node) {
+ Node* lhs = node->InputAt(1);
+ Node* rhs = node->InputAt(2);
+
+ Callable const callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kNoDeopt | Operator::kNoWrite | Operator::kNoThrow);
+
+ Node* value =
+ __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs,
+ rhs, __ NoContextConstant());
+
+ return value;
+}
+
Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
Node* frame_state) {
Node* lhs = node->InputAt(0);
@@ -1572,63 +1701,87 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
Node* frame_state) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
-
- auto if_not_positive = __ MakeDeferredLabel();
- auto if_is_minint = __ MakeDeferredLabel();
- auto done = __ MakeLabel(MachineRepresentation::kWord32);
- auto minint_check_done = __ MakeLabel();
-
Node* zero = __ Int32Constant(0);
- // Check if {rhs} is positive (and not zero).
- Node* check0 = __ Int32LessThan(zero, rhs);
- __ GotoIfNot(check0, &if_not_positive);
+ // Check if the {rhs} is a known power of two.
+ Int32Matcher m(rhs);
+ if (m.IsPowerOf2()) {
+ // Since we know that {rhs} is a power of two, we can perform a fast
+ // check to see if the relevant least significant bits of {lhs} are all
+ // zero, and if so we know that the division is exact and can be done
+ // safely (and fast) with an arithmetic, i.e. sign-preserving, right
+ // shift on {lhs}.
+ int32_t divisor = m.Value();
+ Node* mask = __ Int32Constant(divisor - 1);
+ Node* shift = __ Int32Constant(WhichPowerOf2(divisor));
+ Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(),
+ check, frame_state);
+ return __ Word32Sar(lhs, shift);
+ } else {
+ auto if_rhs_positive = __ MakeLabel();
+ auto if_rhs_negative = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kWord32);
- // Fast case, no additional checking required.
- __ Goto(&done, __ Int32Div(lhs, rhs));
+ // Check if {rhs} is positive (and not zero).
+ Node* check_rhs_positive = __ Int32LessThan(zero, rhs);
+ __ Branch(check_rhs_positive, &if_rhs_positive, &if_rhs_negative);
- {
- __ Bind(&if_not_positive);
+ __ Bind(&if_rhs_positive);
+ {
+ // Fast case, no additional checking required.
+ __ Goto(&done, __ Int32Div(lhs, rhs));
+ }
- // Check if {rhs} is zero.
- Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
- frame_state);
+ __ Bind(&if_rhs_negative);
+ {
+ auto if_lhs_minint = __ MakeDeferredLabel();
+ auto if_lhs_notminint = __ MakeLabel();
+
+ // Check if {rhs} is zero.
+ Node* check_rhs_zero = __ Word32Equal(rhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(),
+ check_rhs_zero, frame_state);
+
+ // Check if {lhs} is zero, as that would produce minus zero.
+ Node* check_lhs_zero = __ Word32Equal(lhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(),
+ check_lhs_zero, frame_state);
+
+ // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
+ // to return -kMinInt, which is not representable as Word32.
+ Node* check_lhs_minint = graph()->NewNode(machine()->Word32Equal(), lhs,
+ __ Int32Constant(kMinInt));
+ __ Branch(check_lhs_minint, &if_lhs_minint, &if_lhs_notminint);
+
+ __ Bind(&if_lhs_minint);
+ {
+ // Check that {rhs} is not -1, otherwise result would be -kMinInt.
+ Node* check_rhs_minusone = __ Word32Equal(rhs, __ Int32Constant(-1));
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(),
+ check_rhs_minusone, frame_state);
- // Check if {lhs} is zero, as that would produce minus zero.
- check = __ Word32Equal(lhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check,
- frame_state);
+ // Perform the actual integer division.
+ __ Goto(&done, __ Int32Div(lhs, rhs));
+ }
- // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
- // to return -kMinInt, which is not representable.
- Node* minint = __ Int32Constant(std::numeric_limits<int32_t>::min());
- Node* check1 = graph()->NewNode(machine()->Word32Equal(), lhs, minint);
- __ GotoIf(check1, &if_is_minint);
- __ Goto(&minint_check_done);
-
- __ Bind(&if_is_minint);
- // Check if {rhs} is -1.
- Node* minusone = __ Int32Constant(-1);
- Node* is_minus_one = __ Word32Equal(rhs, minusone);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), is_minus_one,
- frame_state);
- __ Goto(&minint_check_done);
+ __ Bind(&if_lhs_notminint);
+ {
+ // Perform the actual integer division.
+ __ Goto(&done, __ Int32Div(lhs, rhs));
+ }
+ }
- __ Bind(&minint_check_done);
- // Perform the actual integer division.
- __ Goto(&done, __ Int32Div(lhs, rhs));
- }
+ __ Bind(&done);
+ Node* value = done.PhiAt(0);
- __ Bind(&done);
- Node* value = done.PhiAt(0);
+ // Check if the remainder is non-zero.
+ Node* check = __ Word32Equal(lhs, __ Int32Mul(value, rhs));
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(),
+ check, frame_state);
- // Check if the remainder is non-zero.
- Node* check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), check,
- frame_state);
-
- return value;
+ return value;
+ }
}
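// Scalar model of the power-of-two fast path above: when the low bits of
// {lhs} selected by (divisor - 1) are all zero, the division is exact and
// reduces to a sign-preserving shift. GCC/Clang's __builtin_ctz stands in
// for V8's WhichPowerOf2; the unsigned lowering below uses a logical shift
// (Word32Shr) instead.
#include <cstdint>
bool TryExactDivByPowerOf2Sketch(int32_t lhs, int32_t divisor, int32_t* out) {
  // Precondition: divisor is a positive power of two.
  int shift = __builtin_ctz(static_cast<uint32_t>(divisor));
  if ((lhs & (divisor - 1)) != 0) return false;  // inexact: the deopt path
  *out = lhs >> shift;  // arithmetic shift keeps the sign
  return true;
}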
Node* EffectControlLinearizer::BuildUint32Mod(Node* lhs, Node* rhs) {
@@ -1722,8 +1875,11 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
__ Bind(&if_lhs_negative);
{
- // The {lhs} is a negative integer.
- Node* res = BuildUint32Mod(__ Int32Sub(zero, lhs), rhs);
+ // The {lhs} is a negative integer. This case is very unlikely, so we
+ // intentionally don't use BuildUint32Mod() here, which would try to
+ // figure out whether {rhs} is a power of two: this is intended to be
+ // a slow path.
+ Node* res = __ Uint32Mod(__ Int32Sub(zero, lhs), rhs);
// Check if we would have to return -0.
__ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(),
@@ -1739,22 +1895,38 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
Node* frame_state) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
-
Node* zero = __ Int32Constant(0);
- // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
- Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
- frame_state);
+ // Check if the {rhs} is a known power of two.
+ Uint32Matcher m(rhs);
+ if (m.IsPowerOf2()) {
+ // Since we know that {rhs} is a power of two, we can perform a fast
+ // check to see if the relevant least significant bits of {lhs} are all
+ // zero, and if so we know that the division is exact and can be done
+ // safely (and fast) with a logical, i.e. zero-extending, right shift
+ // on {lhs}.
+ uint32_t divisor = m.Value();
+ Node* mask = __ Uint32Constant(divisor - 1);
+ Node* shift = __ Uint32Constant(WhichPowerOf2(divisor));
+ Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(),
+ check, frame_state);
+ return __ Word32Shr(lhs, shift);
+ } else {
+ // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+ Node* check = __ Word32Equal(rhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
- // Perform the actual unsigned integer division.
- Node* value = __ Uint32Div(lhs, rhs);
+ // Perform the actual unsigned integer division.
+ Node* value = __ Uint32Div(lhs, rhs);
- // Check if the remainder is non-zero.
- check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), check,
- frame_state);
- return value;
+ // Check if the remainder is non-zero.
+ check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(),
+ check, frame_state);
+ return value;
+ }
}
Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
@@ -1815,13 +1987,48 @@ Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* add = __ Int32AddWithOverflow(value, value);
Node* check = __ Projection(1, add);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, params.feedback(), check,
+ __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), check,
frame_state);
Node* result = __ Projection(0, add);
result = ChangeInt32ToIntPtr(result);
return result;
}
+Node* EffectControlLinearizer::LowerCheckedInt64ToInt32(Node* node,
+ Node* frame_state) {
+ Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
+ Node* value32 = __ TruncateInt64ToInt32(value);
+ Node* check = __ Word64Equal(__ ChangeInt32ToInt64(value32), value);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
+ return value32;
+}
+
+Node* EffectControlLinearizer::LowerCheckedInt64ToTaggedSigned(
+ Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
+ Node* value32 = __ TruncateInt64ToInt32(value);
+ Node* check = __ Word64Equal(__ ChangeInt32ToInt64(value32), value);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
+
+ if (SmiValuesAre32Bits()) {
+ return ChangeInt64ToSmi(value);
+ } else {
+ Node* add = __ Int32AddWithOverflow(value32, value32);
+ Node* check = __ Projection(1, add);
+ __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
+ Node* result = __ Projection(0, add);
+ result = ChangeInt32ToIntPtr(result);
+ return result;
+ }
+}
+
Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
@@ -1842,6 +2049,29 @@ Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
return ChangeUint32ToSmi(value);
}
+Node* EffectControlLinearizer::LowerCheckedUint64ToInt32(Node* node,
+ Node* frame_state) {
+ Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
+ Node* check = __ Uint64LessThanOrEqual(value, __ Int64Constant(kMaxInt));
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
+ return __ TruncateInt64ToInt32(value);
+}
+
+Node* EffectControlLinearizer::LowerCheckedUint64ToTaggedSigned(
+ Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
+ Node* check =
+ __ Uint64LessThanOrEqual(value, __ Int64Constant(Smi::kMaxValue));
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
+ return ChangeInt64ToSmi(value);
+}
+
Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
CheckForMinusZeroMode mode, const VectorSlotPair& feedback, Node* value,
Node* frame_state) {
@@ -2065,7 +2295,8 @@ Node* EffectControlLinearizer::LowerNumberToString(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), argument,
__ NoContextConstant());
}
@@ -2521,7 +2752,8 @@ Node* EffectControlLinearizer::LowerTypeOf(Node* node) {
Operator::Properties const properties = Operator::kEliminatable;
CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
__ NoContextConstant());
}
@@ -2533,7 +2765,8 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
Operator::Properties const properties = Operator::kEliminatable;
CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
__ NoContextConstant());
}
@@ -2721,7 +2954,8 @@ Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), frame,
length, __ SmiConstant(mapped_count), __ NoContextConstant());
}
@@ -2765,26 +2999,13 @@ Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
Node* result = __ Allocate(NOT_TENURED, __ Int32Constant(ConsString::kSize));
__ StoreField(AccessBuilder::ForMap(), result, result_map);
__ StoreField(AccessBuilder::ForNameHashField(), result,
- jsgraph()->Int32Constant(Name::kEmptyHashField));
+ __ Int32Constant(Name::kEmptyHashField));
__ StoreField(AccessBuilder::ForStringLength(), result, length);
__ StoreField(AccessBuilder::ForConsStringFirst(), result, first);
__ StoreField(AccessBuilder::ForConsStringSecond(), result, second);
return result;
}
-Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
- Node* value = node->InputAt(0);
-
- Node* value_bit_field =
- __ LoadField(AccessBuilder::ForJSArrayBufferBitField(), value);
- return __ Word32Equal(
- __ Word32Equal(
- __ Word32And(value_bit_field,
- __ Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
- __ Int32Constant(0)),
- __ Int32Constant(0));
-}
-
Node* EffectControlLinearizer::LowerSameValue(Node* node) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
@@ -2794,7 +3015,8 @@ Node* EffectControlLinearizer::LowerSameValue(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
__ NoContextConstant());
}
@@ -2816,7 +3038,8 @@ Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
__ NoContextConstant());
}
@@ -2828,9 +3051,9 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
// We need a loop here to properly deal with indirect strings
// (SlicedString, ConsString and ThinString).
auto loop = __ MakeLoopLabel(MachineRepresentation::kTagged,
- MachineRepresentation::kWord32);
+ MachineType::PointerRepresentation());
auto loop_next = __ MakeLabel(MachineRepresentation::kTagged,
- MachineRepresentation::kWord32);
+ MachineType::PointerRepresentation());
auto loop_done = __ MakeLabel(MachineRepresentation::kWord32);
__ Goto(&loop, receiver, position);
__ Bind(&loop);
@@ -2897,11 +3120,11 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
__ Bind(&if_externalstring);
{
- // We need to bailout to the runtime for short external strings.
+ // We need to bail out to the runtime for uncached external strings.
__ GotoIf(__ Word32Equal(
__ Word32And(receiver_instance_type,
- __ Int32Constant(kShortExternalStringMask)),
- __ Int32Constant(kShortExternalStringTag)),
+ __ Int32Constant(kUncachedExternalStringMask)),
+ __ Int32Constant(kUncachedExternalStringTag)),
&if_runtime);
Node* receiver_data = __ LoadField(
@@ -2917,16 +3140,14 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
__ Bind(&if_onebyte);
{
- Node* result = __ Load(MachineType::Uint8(), receiver_data,
- ChangeInt32ToIntPtr(position));
+ Node* result = __ Load(MachineType::Uint8(), receiver_data, position);
__ Goto(&loop_done, result);
}
__ Bind(&if_twobyte);
{
- Node* result = __ Load(
- MachineType::Uint16(), receiver_data,
- __ Word32Shl(ChangeInt32ToIntPtr(position), __ Int32Constant(1)));
+ Node* result = __ Load(MachineType::Uint16(), receiver_data,
+ __ WordShl(position, __ IntPtrConstant(1)));
__ Goto(&loop_done, result);
}
}
@@ -2938,7 +3159,7 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
Node* receiver_parent =
__ LoadField(AccessBuilder::ForSlicedStringParent(), receiver);
__ Goto(&loop_next, receiver_parent,
- __ Int32Add(position, ChangeSmiToInt32(receiver_offset)));
+ __ IntAdd(position, ChangeSmiToIntPtr(receiver_offset)));
}
__ Bind(&if_runtime);
@@ -2948,7 +3169,7 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
Node* result = __ Call(call_descriptor, __ CEntryStubConstant(1),
- receiver, ChangeInt32ToSmi(position),
+ receiver, ChangeIntPtrToSmi(position),
__ ExternalConstant(ExternalReference::Create(id)),
__ Int32Constant(2), __ NoContextConstant());
__ Goto(&loop_done, ChangeSmiToInt32(result));
@@ -2974,7 +3195,8 @@ Node* EffectControlLinearizer::LowerStringCodePointAt(
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
position, __ NoContextConstant());
}
@@ -3035,9 +3257,9 @@ Node* EffectControlLinearizer::LowerStringFromSingleCharCode(Node* node) {
__ StoreField(AccessBuilder::ForMap(), vtrue2,
__ HeapConstant(factory()->one_byte_string_map()));
__ StoreField(AccessBuilder::ForNameHashField(), vtrue2,
- __ IntPtrConstant(Name::kEmptyHashField));
+ __ Int32Constant(Name::kEmptyHashField));
__ StoreField(AccessBuilder::ForStringLength(), vtrue2,
- __ SmiConstant(1));
+ __ Int32Constant(1));
__ Store(
StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
vtrue2,
@@ -3059,8 +3281,9 @@ Node* EffectControlLinearizer::LowerStringFromSingleCharCode(Node* node) {
__ StoreField(AccessBuilder::ForMap(), vfalse1,
__ HeapConstant(factory()->string_map()));
__ StoreField(AccessBuilder::ForNameHashField(), vfalse1,
- __ IntPtrConstant(Name::kEmptyHashField));
- __ StoreField(AccessBuilder::ForStringLength(), vfalse1, __ SmiConstant(1));
+ __ Int32Constant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), vfalse1,
+ __ Int32Constant(1));
__ Store(
StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
vfalse1,
@@ -3083,7 +3306,8 @@ Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
__ NoContextConstant());
}
@@ -3157,9 +3381,9 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
__ StoreField(AccessBuilder::ForMap(), vtrue2,
__ HeapConstant(factory()->one_byte_string_map()));
__ StoreField(AccessBuilder::ForNameHashField(), vtrue2,
- __ IntPtrConstant(Name::kEmptyHashField));
+ __ Int32Constant(Name::kEmptyHashField));
__ StoreField(AccessBuilder::ForStringLength(), vtrue2,
- __ SmiConstant(1));
+ __ Int32Constant(1));
__ Store(
StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
vtrue2,
@@ -3183,7 +3407,7 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
__ StoreField(AccessBuilder::ForNameHashField(), vfalse1,
__ IntPtrConstant(Name::kEmptyHashField));
__ StoreField(AccessBuilder::ForStringLength(), vfalse1,
- __ SmiConstant(1));
+ __ Int32Constant(1));
__ Store(
StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
vfalse1,
@@ -3228,8 +3452,9 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
__ StoreField(AccessBuilder::ForMap(), vfalse0,
__ HeapConstant(factory()->string_map()));
__ StoreField(AccessBuilder::ForNameHashField(), vfalse0,
- __ IntPtrConstant(Name::kEmptyHashField));
- __ StoreField(AccessBuilder::ForStringLength(), vfalse0, __ SmiConstant(2));
+ __ Int32Constant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), vfalse0,
+ __ Int32Constant(2));
__ Store(
StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
vfalse0,
@@ -3252,7 +3477,8 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), subject,
search_string, position, __ NoContextConstant());
}
@@ -3271,7 +3497,8 @@ Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
__ NoContextConstant());
}
@@ -3286,7 +3513,8 @@ Node* EffectControlLinearizer::LowerStringSubstring(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
start, end, __ NoContextConstant());
}
@@ -3314,10 +3542,25 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
CheckFloat64HoleParameters const& params =
CheckFloat64HoleParametersOf(node->op());
Node* value = node->InputAt(0);
- Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
- __ Int32Constant(kHoleNanUpper32));
- __ DeoptimizeIf(DeoptimizeReason::kHole, params.feedback(), check,
- frame_state);
+
+ auto if_nan = __ MakeDeferredLabel();
+ auto done = __ MakeLabel();
+
+ // First check whether {value} is a NaN at all...
+ __ Branch(__ Float64Equal(value, value), &done, &if_nan);
+
+ __ Bind(&if_nan);
+ {
+ // ...and only if {value} is a NaN, perform the expensive bit
+ // check. See http://crbug.com/v8/8264 for details.
+ Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
+ __ Int32Constant(kHoleNanUpper32));
+ __ DeoptimizeIf(DeoptimizeReason::kHole, params.feedback(), check,
+ frame_state);
+ __ Goto(&done);
+ }
+
+ __ Bind(&done);
return value;
}
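
The hole is a NaN with a distinguished bit pattern, so only NaN values can possibly be the hole; the self-equality test filters out the common non-NaN case before the high-word extraction. A standalone sketch of the same two-step test, treating kHoleNanUpper32 as an illustrative constant:

    #include <cstdint>
    #include <cstring>

    // Illustrative stand-in for V8's kHoleNanUpper32: the hole is encoded
    // as a NaN whose upper 32 bits carry a distinguished pattern.
    constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;

    bool IsHole(double value) {
      // Cheap filter: (v == v) is false only for NaNs, so ordinary values
      // never reach the bit inspection below.
      if (value == value) return false;
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }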
@@ -3440,8 +3683,8 @@ Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
return result;
}
-Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
- return __ WordShl(ChangeInt32ToIntPtr(value), SmiShiftBitsConstant());
+Node* EffectControlLinearizer::ChangeIntPtrToSmi(Node* value) {
+ return __ WordShl(value, SmiShiftBitsConstant());
}
Node* EffectControlLinearizer::ChangeInt32ToIntPtr(Node* value) {
@@ -3458,6 +3701,15 @@ Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
return value;
}
+Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
+ return ChangeIntPtrToSmi(ChangeInt32ToIntPtr(value));
+}
+
+Node* EffectControlLinearizer::ChangeInt64ToSmi(Node* value) {
+ DCHECK(machine()->Is64());
+ return ChangeIntPtrToSmi(value);
+}
+
Node* EffectControlLinearizer::ChangeUint32ToUintPtr(Node* value) {
if (machine()->Is64()) {
value = __ ChangeUint32ToUint64(value);
@@ -3482,6 +3734,11 @@ Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
return value;
}
+Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) {
+ CHECK(machine()->Is64());
+ return ChangeSmiToIntPtr(value);
+}
+
Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
return __ WordEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)),
__ IntPtrConstant(kSmiTag));
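
The new helpers factor all Smi tagging through a single shift. A sketch assuming the 64-bit configuration where the 32-bit payload sits in the upper half of the word (the concrete shift amount is an assumption here):

    #include <cstdint>

    // Assumed shift for a 64-bit build with the 32-bit Smi payload in the
    // upper word; V8 derives this from kSmiTagSize + kSmiShiftSize.
    constexpr int kSmiShiftBits = 32;

    int64_t ChangeIntPtrToSmi(int64_t value) {
      // Shift through uint64_t to keep the left shift well defined for
      // negative payloads.
      return static_cast<int64_t>(static_cast<uint64_t>(value)
                                  << kSmiShiftBits);
    }

    int64_t ChangeInt32ToSmi(int32_t value) {
      // Widen to pointer width first (ChangeInt32ToIntPtr), then tag.
      return ChangeIntPtrToSmi(static_cast<int64_t>(value));
    }

    int64_t ChangeSmiToInt64(int64_t smi) {
      // Untagging is the arithmetic shift back down.
      return smi >> kSmiShiftBits;
    }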
@@ -3578,7 +3835,8 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kCopyFastSmiOrObjectElements);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
object, __ NoContextConstant());
__ Goto(&done, result);
@@ -3614,7 +3872,8 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
Builtins::kGrowFastSmiOrObjectElements);
CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, call_flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), call_flags, properties);
Node* new_elements =
__ Call(call_descriptor, __ HeapConstant(callable.code()), object,
ChangeInt32ToSmi(index), __ NoContextConstant());
@@ -3830,12 +4089,6 @@ Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
Node* index = node->InputAt(2);
Node* is_little_endian = node->InputAt(3);
- // On 64-bit platforms, we need to feed a Word64 index to the Load and
- // Store operators.
- if (machine()->Is64()) {
- index = __ ChangeUint32ToUint64(index);
- }
-
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
@@ -3878,12 +4131,6 @@ void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
Node* value = node->InputAt(3);
Node* is_little_endian = node->InputAt(4);
- // On 64-bit platforms, we need to feed a Word64 index to the Load and
- // Store operators.
- if (machine()->Is64()) {
- index = __ ChangeUint32ToUint64(index);
- }
-
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
@@ -4366,7 +4613,8 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
Node* native_context = __ LoadField(
AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
@@ -4401,7 +4649,8 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
Node* native_context = __ LoadField(
AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
@@ -4762,7 +5011,8 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, flags, properties);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), table,
key, __ NoContextConstant());
}
@@ -4799,14 +5049,14 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
kHeapObjectTag))));
auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
- auto done = __ MakeLabel(MachineRepresentation::kWord32);
+ auto done = __ MakeLabel(MachineType::PointerRepresentation());
__ Goto(&loop, first_entry);
__ Bind(&loop);
{
Node* entry = loop.PhiAt(0);
Node* check =
__ WordEqual(entry, __ IntPtrConstant(OrderedHashMap::kNotFound));
- __ GotoIf(check, &done, __ Int32Constant(-1));
+ __ GotoIf(check, &done, entry);
entry = __ IntAdd(
__ IntMul(entry, __ IntPtrConstant(OrderedHashMap::kEntrySize)),
number_of_buckets);
@@ -4835,10 +5085,7 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
&if_match, &if_notmatch);
__ Bind(&if_match);
- {
- Node* index = ChangeIntPtrToInt32(entry);
- __ Goto(&done, index);
- }
+ __ Goto(&done, entry);
__ Bind(&if_notmatch);
{
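
The done label now carries the raw pointer-width entry index on every exit instead of converting to int32, with -1 (kNotFound) flowing through unchanged. A hypothetical flat-array sketch of the probe loop being lowered; the real OrderedHashMap interleaves chain links and keys in one backing store, offset by the bucket count:

    #include <cstdint>

    constexpr intptr_t kNotFound = -1;  // OrderedHashMap::kNotFound

    intptr_t FindEntryForInt32Key(const intptr_t* chain, const int32_t* keys,
                                  intptr_t first_entry, int32_t key) {
      for (intptr_t entry = first_entry; entry != kNotFound;
           entry = chain[entry]) {
        if (keys[entry] == key) return entry;  // entry stays pointer-width
      }
      return kNotFound;
    }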
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 272bc44599..fcc4cad728 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -49,13 +49,17 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerChangeBitToTagged(Node* node);
Node* LowerChangeInt31ToTaggedSigned(Node* node);
Node* LowerChangeInt32ToTagged(Node* node);
+ Node* LowerChangeInt64ToTagged(Node* node);
Node* LowerChangeUint32ToTagged(Node* node);
+ Node* LowerChangeUint64ToTagged(Node* node);
Node* LowerChangeFloat64ToTagged(Node* node);
Node* LowerChangeFloat64ToTaggedPointer(Node* node);
Node* LowerChangeTaggedSignedToInt32(Node* node);
+ Node* LowerChangeTaggedSignedToInt64(Node* node);
Node* LowerChangeTaggedToBit(Node* node);
Node* LowerChangeTaggedToInt32(Node* node);
Node* LowerChangeTaggedToUint32(Node* node);
+ Node* LowerChangeTaggedToInt64(Node* node);
Node* LowerChangeTaggedToTaggedSigned(Node* node);
Node* LowerCheckBounds(Node* node, Node* frame_state);
Node* LowerPoisonIndex(Node* node);
@@ -75,8 +79,12 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedUint64ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedUint64ToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
@@ -120,9 +128,9 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerNewSmiOrObjectElements(Node* node);
Node* LowerNewArgumentsElements(Node* node);
Node* LowerNewConsString(Node* node);
- Node* LowerArrayBufferWasNeutered(Node* node);
Node* LowerSameValue(Node* node);
Node* LowerDeadValue(Node* node);
+ Node* LowerStringConcat(Node* node);
Node* LowerStringToNumber(Node* node);
Node* LowerStringCharCodeAt(Node* node);
Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding);
@@ -188,11 +196,14 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeInt32ToIntPtr(Node* value);
+ Node* ChangeInt64ToSmi(Node* value);
Node* ChangeIntPtrToInt32(Node* value);
+ Node* ChangeIntPtrToSmi(Node* value);
Node* ChangeUint32ToUintPtr(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* ChangeSmiToIntPtr(Node* value);
Node* ChangeSmiToInt32(Node* value);
+ Node* ChangeSmiToInt64(Node* value);
Node* ObjectIsSmi(Node* value);
Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 0e6822a9ca..9b1ef8d907 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -9,6 +9,8 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/handles-inl.h"
+#include "src/objects/map-inl.h"
#ifdef DEBUG
#define TRACE(...) \
@@ -282,7 +284,7 @@ EffectGraphReducer::EffectGraphReducer(
state_(graph, kNumStates),
revisit_(zone),
stack_(zone),
- reduce_(reduce) {}
+ reduce_(std::move(reduce)) {}
void EffectGraphReducer::ReduceFrom(Node* node) {
// Perform DFS and eagerly trigger revisitation as soon as possible.
@@ -498,6 +500,14 @@ int OffsetOfFieldAccess(const Operator* op) {
return access.offset;
}
+int OffsetOfElementAt(ElementAccess const& access, int index) {
+ DCHECK_GE(index, 0);
+ DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+ kPointerSizeLog2);
+ return access.header_size +
+ (index << ElementSizeLog2Of(access.machine_type.representation()));
+}
+
Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
DCHECK(op->opcode() == IrOpcode::kLoadElement ||
op->opcode() == IrOpcode::kStoreElement);
@@ -507,11 +517,7 @@ Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
double min = index_type.Min();
int index = static_cast<int>(min);
if (!(index == min && index == max)) return Nothing<int>();
- ElementAccess access = ElementAccessOf(op);
- DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kPointerSizeLog2);
- return Just(access.header_size + (index << ElementSizeLog2Of(
- access.machine_type.representation())));
+ return Just(OffsetOfElementAt(ElementAccessOf(op), index));
}
Node* LowerCompareMapsWithoutLoad(Node* checked_map,
@@ -616,9 +622,62 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
OffsetOfElementsAccess(op, index).To(&offset) &&
vobject->FieldAt(offset).To(&var) && current->Get(var).To(&value)) {
current->SetReplacement(value);
- } else {
- current->SetEscaped(object);
+ } else if (vobject && !vobject->HasEscaped()) {
+ // Compute the known length (aka the number of elements) of {object}
+ // based on the virtual object information.
+ ElementAccess const& access = ElementAccessOf(op);
+ int const length =
+ (vobject->size() - access.header_size) >>
+ ElementSizeLog2Of(access.machine_type.representation());
+ Variable var0, var1;
+ Node* value0;
+ Node* value1;
+ if (length == 1 &&
+ vobject->FieldAt(OffsetOfElementAt(access, 0)).To(&var) &&
+ current->Get(var).To(&value) &&
+ (value == nullptr ||
+ NodeProperties::GetType(value).Is(access.type))) {
+            // The {object} has exactly one element, and we know that the
+            // LoadElement {index} must be within bounds, thus it must
+            // always yield this one element of {object}.
+ current->SetReplacement(value);
+ break;
+ } else if (length == 2 &&
+ vobject->FieldAt(OffsetOfElementAt(access, 0)).To(&var0) &&
+ current->Get(var0).To(&value0) &&
+ (value0 == nullptr ||
+ NodeProperties::GetType(value0).Is(access.type)) &&
+ vobject->FieldAt(OffsetOfElementAt(access, 1)).To(&var1) &&
+ current->Get(var1).To(&value1) &&
+ (value1 == nullptr ||
+ NodeProperties::GetType(value1).Is(access.type))) {
+ if (value0 && value1) {
+ // The {object} has exactly two elements, so the LoadElement
+ // must return one of them (i.e. either the element at index
+ // 0 or the one at index 1). So we can turn the LoadElement
+ // into a Select operation instead (still allowing the {object}
+ // to be scalar replaced). We must however mark the elements
+ // of the {object} itself as escaping.
+ Node* check =
+ jsgraph->graph()->NewNode(jsgraph->simplified()->NumberEqual(),
+ index, jsgraph->ZeroConstant());
+ NodeProperties::SetType(check, Type::Boolean());
+ Node* select = jsgraph->graph()->NewNode(
+ jsgraph->common()->Select(access.machine_type.representation()),
+ check, value0, value1);
+ NodeProperties::SetType(select, access.type);
+ current->SetReplacement(select);
+ current->SetEscaped(value0);
+ current->SetEscaped(value1);
+ break;
+ } else {
+ // If the variables have no values, we have
+ // not reached the fixed-point yet.
+ break;
+ }
+ }
}
+ current->SetEscaped(object);
break;
}
case IrOpcode::kTypeGuard: {
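
At the value level, the two-element case rewrites a bounds-checked LoadElement on a non-escaping backing store into a Select on the index, which keeps the allocation itself scalar-replaceable. A tiny sketch of that equivalence:

    // Value-level picture of the two-element rewrite: a bounds-checked
    // LoadElement on a non-escaping two-element store becomes
    // Select(NumberEqual(index, 0), value0, value1).
    template <typename T>
    T LoadElementOfTwo(int index, T value0, T value1) {
      return index == 0 ? value0 : value1;
    }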
@@ -669,9 +728,10 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
current->Get(map_field).To(&map)) {
if (map) {
Type const map_type = NodeProperties::GetType(map);
+ AllowHandleDereference handle_dereference;
if (map_type.IsHeapConstant() &&
params.maps().contains(
- bit_cast<Handle<Map>>(map_type.AsHeapConstant()->Value()))) {
+ Handle<Map>::cast(map_type.AsHeapConstant()->Value()))) {
current->MarkForDeletion();
break;
}
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index 71aae6b2a4..c3d4e5978d 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -121,13 +121,7 @@ class VirtualObject : public Dependable {
typedef ZoneVector<Variable>::const_iterator const_iterator;
VirtualObject(VariableTracker* var_states, Id id, int size);
Maybe<Variable> FieldAt(int offset) const {
- if (offset % kPointerSize != 0) {
- // We do not support fields that are not word-aligned. Bail out by
- // treating the object as escaping. This can only happen for
- // {Name::kHashFieldOffset} on 64bit big endian architectures.
- DCHECK_EQ(Name::kHashFieldOffset, offset);
- return Nothing<Variable>();
- }
+ CHECK_EQ(0, offset % kPointerSize);
CHECK(!HasEscaped());
if (offset >= size()) {
// TODO(tebbi): Reading out-of-bounds can only happen in unreachable
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 15ca8367b0..bd210d714d 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -145,6 +145,12 @@ Node* CreateStubBuiltinContinuationFrameState(
// by the deoptimizer and aren't explicitly passed in the frame state.
int stack_parameter_count = descriptor.GetRegisterParameterCount() -
DeoptimizerParameterCountFor(mode);
+  // Reserve space in the vector, except when stack_parameter_count is
+  // negative.
+ actual_parameters.reserve(stack_parameter_count >= 0
+ ? stack_parameter_count +
+ descriptor.GetRegisterParameterCount()
+ : 0);
for (int i = 0; i < stack_parameter_count; ++i) {
actual_parameters.push_back(
parameters[descriptor.GetRegisterParameterCount() + i]);
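
std::vector::reserve takes an unsigned count and stack_parameter_count can be negative here, so the guard has to precede the call. A self-contained sketch of the guarded reserve:

    #include <cstddef>
    #include <vector>

    // Guarded reserve: stack_parameter_count can be negative (register
    // parameters consumed by the deoptimizer), and std::vector::reserve
    // takes an unsigned count, so clamp the negative case to zero.
    void ReserveParameters(std::vector<int>& actual_parameters,
                           int stack_parameter_count,
                           int register_parameter_count) {
      actual_parameters.reserve(
          stack_parameter_count >= 0
              ? static_cast<std::size_t>(stack_parameter_count +
                                         register_parameter_count)
              : 0);
    }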
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index 4542a73685..c102c62ad4 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -13,11 +13,6 @@ namespace compiler {
namespace {
-#define REP_BIT(rep) (1 << static_cast<int>(rep))
-
-const int kFloat32Bit = REP_BIT(MachineRepresentation::kFloat32);
-const int kFloat64Bit = REP_BIT(MachineRepresentation::kFloat64);
-
// Splits a FP move between two location operands into the equivalent series of
// moves between smaller sub-operands, e.g. a double move to two single moves.
// This helps reduce the number of cycles that would normally occur under FP
@@ -91,8 +86,8 @@ void GapResolver::Resolve(ParallelMove* moves) {
}
i++;
if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
- reps |=
- REP_BIT(LocationOperand::cast(move->destination()).representation());
+ reps |= RepresentationBit(
+ LocationOperand::cast(move->destination()).representation());
}
}
@@ -100,7 +95,7 @@ void GapResolver::Resolve(ParallelMove* moves) {
if (reps && !base::bits::IsPowerOfTwo(reps)) {
// Start with the smallest FP moves, so we never encounter smaller moves
// in the middle of a cycle of larger moves.
- if ((reps & kFloat32Bit) != 0) {
+ if ((reps & RepresentationBit(MachineRepresentation::kFloat32)) != 0) {
split_rep_ = MachineRepresentation::kFloat32;
for (size_t i = 0; i < moves->size(); ++i) {
auto move = (*moves)[i];
@@ -108,7 +103,7 @@ void GapResolver::Resolve(ParallelMove* moves) {
PerformMove(moves, move);
}
}
- if ((reps & kFloat64Bit) != 0) {
+ if ((reps & RepresentationBit(MachineRepresentation::kFloat64)) != 0) {
split_rep_ = MachineRepresentation::kFloat64;
for (size_t i = 0; i < moves->size(); ++i) {
auto move = (*moves)[i];
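
RepresentationBit produces the same one-bit mask the deleted REP_BIT macro did, so sets of representations remain plain bitsets. A sketch of the idiom on a toy two-member enum:

    // Toy two-member enum, just to show the bitset idiom that replaces
    // the deleted REP_BIT macro and its kFloat32Bit/kFloat64Bit constants.
    enum class MachineRepresentation : int { kFloat32 = 0, kFloat64 = 1 };

    constexpr int RepresentationBit(MachineRepresentation rep) {
      return 1 << static_cast<int>(rep);
    }

    bool HasMixedFPSizes(int reps) {
      // More than one representation bit set means FP moves of different
      // widths may alias, so the resolver splits the smaller ones first.
      return (reps & RepresentationBit(MachineRepresentation::kFloat32)) != 0 &&
             (reps & RepresentationBit(MachineRepresentation::kFloat64)) != 0;
    }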
diff --git a/deps/v8/src/compiler/gap-resolver.h b/deps/v8/src/compiler/gap-resolver.h
index d4c402587f..9a4fe4e6d6 100644
--- a/deps/v8/src/compiler/gap-resolver.h
+++ b/deps/v8/src/compiler/gap-resolver.h
@@ -16,7 +16,7 @@ class GapResolver final {
// Interface used by the gap resolver to emit moves and swaps.
class Assembler {
public:
- virtual ~Assembler() {}
+ virtual ~Assembler() = default;
// Assemble move.
virtual void AssembleMove(InstructionOperand* source,
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 496f322106..de02f941be 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -26,8 +26,14 @@ Node* GraphAssembler::Int32Constant(int32_t value) {
return jsgraph()->Int32Constant(value);
}
-Node* GraphAssembler::UniqueInt32Constant(int32_t value) {
- return graph()->NewNode(common()->Int32Constant(value));
+Node* GraphAssembler::Int64Constant(int64_t value) {
+ return jsgraph()->Int64Constant(value);
+}
+
+Node* GraphAssembler::UniqueIntPtrConstant(intptr_t value) {
+ return graph()->NewNode(
+ machine()->Is64() ? common()->Int64Constant(value)
+ : common()->Int32Constant(static_cast<int32_t>(value)));
}
Node* GraphAssembler::SmiConstant(int32_t value) {
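
UniqueIntPtrConstant picks the constant width from the target word size. A sketch of the dispatch, with Is64() standing in for machine()->Is64():

    #include <cstdint>

    // Is64() stands in for machine()->Is64(); on 32-bit targets the value
    // is truncated to 32 bits, matching the Int32Constant branch above.
    constexpr bool Is64() { return sizeof(void*) == 8; }

    int64_t UniqueIntPtrConstantValue(intptr_t value) {
      return Is64() ? static_cast<int64_t>(value)
                    : static_cast<int64_t>(static_cast<int32_t>(value));
    }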
@@ -208,9 +214,11 @@ Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
VectorSlotPair const& feedback,
- Node* condition, Node* frame_state) {
+ Node* condition, Node* frame_state,
+ IsSafetyCheck is_safety_check) {
return current_control_ = current_effect_ = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback),
+ common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback,
+ is_safety_check),
condition, frame_state, current_effect_, current_control_);
}
@@ -225,7 +233,8 @@ Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
}
void GraphAssembler::Branch(Node* condition, GraphAssemblerLabel<0u>* if_true,
- GraphAssemblerLabel<0u>* if_false) {
+ GraphAssemblerLabel<0u>* if_false,
+ IsSafetyCheck is_safety_check) {
DCHECK_NOT_NULL(current_control_);
BranchHint hint = BranchHint::kNone;
@@ -233,8 +242,8 @@ void GraphAssembler::Branch(Node* condition, GraphAssemblerLabel<0u>* if_true,
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
- Node* branch =
- graph()->NewNode(common()->Branch(hint), condition, current_control_);
+ Node* branch = graph()->NewNode(common()->Branch(hint, is_safety_check),
+ condition, current_control_);
current_control_ = graph()->NewNode(common()->IfTrue(), branch);
MergeState(if_true);
@@ -269,9 +278,10 @@ Operator const* GraphAssembler::ToNumberOperator() {
Callable callable =
Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- auto call_descriptor =
- Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(),
- 0, flags, Operator::kEliminatable);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags,
+ Operator::kEliminatable);
to_number_operator_.set(common()->Call(call_descriptor));
}
return to_number_operator_.get();
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 79eb493608..fa527a8bb0 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -21,9 +21,11 @@ namespace compiler {
#define PURE_ASSEMBLER_MACH_UNOP_LIST(V) \
V(ChangeInt32ToInt64) \
V(ChangeInt32ToFloat64) \
+ V(ChangeInt64ToFloat64) \
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToInt64) \
V(ChangeFloat64ToUint32) \
V(TruncateInt64ToInt32) \
V(RoundFloat64ToInt32) \
@@ -57,8 +59,10 @@ namespace compiler {
V(Int32Sub) \
V(Int32Mul) \
V(Int32LessThanOrEqual) \
- V(Uint32LessThanOrEqual) \
V(Uint32LessThan) \
+ V(Uint32LessThanOrEqual) \
+ V(Uint64LessThan) \
+ V(Uint64LessThanOrEqual) \
V(Int32LessThan) \
V(Float64Add) \
V(Float64Sub) \
@@ -70,6 +74,7 @@ namespace compiler {
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(Word32Equal) \
+ V(Word64Equal) \
V(WordEqual)
#define CHECKED_ASSEMBLER_MACH_BINOP_LIST(V) \
@@ -176,7 +181,8 @@ class GraphAssembler {
Node* IntPtrConstant(intptr_t value);
Node* Uint32Constant(int32_t value);
Node* Int32Constant(int32_t value);
- Node* UniqueInt32Constant(int32_t value);
+ Node* Int64Constant(int64_t value);
+ Node* UniqueIntPtrConstant(intptr_t value);
Node* SmiConstant(int32_t value);
Node* Float64Constant(double value);
Node* Projection(int index, Node* value);
@@ -228,8 +234,10 @@ class GraphAssembler {
Node* Word32PoisonOnSpeculation(Node* value);
- Node* DeoptimizeIf(DeoptimizeReason reason, VectorSlotPair const& feedback,
- Node* condition, Node* frame_state);
+ Node* DeoptimizeIf(
+ DeoptimizeReason reason, VectorSlotPair const& feedback, Node* condition,
+ Node* frame_state,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
Node* DeoptimizeIfNot(
DeoptimizeReason reason, VectorSlotPair const& feedback, Node* condition,
Node* frame_state,
@@ -247,7 +255,8 @@ class GraphAssembler {
void Goto(GraphAssemblerLabel<sizeof...(Vars)>* label, Vars...);
void Branch(Node* condition, GraphAssemblerLabel<0u>* if_true,
- GraphAssemblerLabel<0u>* if_false);
+ GraphAssemblerLabel<0u>* if_false,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kNoSafetyCheck);
// Control helpers.
// {GotoIf(c, l)} is equivalent to {Branch(c, l, templ);Bind(templ)}.
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index dc7b23521f..fafa322d87 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -37,7 +37,7 @@ GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
}
}
-GraphReducer::~GraphReducer() {}
+GraphReducer::~GraphReducer() = default;
void GraphReducer::AddReducer(Reducer* reducer) {
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index adb97ddf4d..b9fd455b9b 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -46,7 +46,7 @@ class Reduction final {
// phase.
class V8_EXPORT_PRIVATE Reducer {
public:
- virtual ~Reducer() {}
+ virtual ~Reducer() = default;
// Only used for tracing, when using the --trace_turbo_reduction flag.
virtual const char* reducer_name() const = 0;
@@ -73,7 +73,7 @@ class AdvancedReducer : public Reducer {
// Observe the actions of this reducer.
class Editor {
public:
- virtual ~Editor() {}
+ virtual ~Editor() = default;
// Replace {node} with {replacement}.
virtual void Replace(Node* node, Node* replacement) = 0;
@@ -130,7 +130,7 @@ class V8_EXPORT_PRIVATE GraphReducer
: public NON_EXPORTED_BASE(AdvancedReducer::Editor) {
public:
GraphReducer(Zone* zone, Graph* graph, Node* dead = nullptr);
- ~GraphReducer();
+ ~GraphReducer() override;
Graph* graph() const { return graph_; }
diff --git a/deps/v8/src/compiler/graph-trimmer.cc b/deps/v8/src/compiler/graph-trimmer.cc
index c3de2cd809..e1dbfffff5 100644
--- a/deps/v8/src/compiler/graph-trimmer.cc
+++ b/deps/v8/src/compiler/graph-trimmer.cc
@@ -16,7 +16,7 @@ GraphTrimmer::GraphTrimmer(Zone* zone, Graph* graph)
}
-GraphTrimmer::~GraphTrimmer() {}
+GraphTrimmer::~GraphTrimmer() = default;
void GraphTrimmer::TrimGraph() {
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index feb0a8e9d3..cbb7188993 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -46,6 +46,12 @@ TurboJsonFile::TurboJsonFile(OptimizedCompilationInfo* info,
TurboJsonFile::~TurboJsonFile() { flush(); }
+TurboCfgFile::TurboCfgFile(Isolate* isolate)
+ : std::ofstream(Isolate::GetTurboCfgFileName(isolate).c_str(),
+ std::ios_base::app) {}
+
+TurboCfgFile::~TurboCfgFile() { flush(); }
+
std::ostream& operator<<(std::ostream& out,
const SourcePositionAsJSON& asJSON) {
asJSON.sp.PrintJson(out);
@@ -302,9 +308,11 @@ class JSONGraphNodeWriter {
if (opcode == IrOpcode::kBranch) {
os_ << ",\"rankInputs\":[0]";
}
- SourcePosition position = positions_->GetSourcePosition(node);
- if (position.IsKnown()) {
- os_ << ", \"sourcePosition\" : " << AsJSON(position);
+ if (positions_ != nullptr) {
+ SourcePosition position = positions_->GetSourcePosition(node);
+ if (position.IsKnown()) {
+ os_ << ", \"sourcePosition\" : " << AsJSON(position);
+ }
}
if (origins_) {
NodeOrigin origin = origins_->GetNodeOrigin(node);
@@ -432,7 +440,7 @@ class GraphC1Visualizer {
void PrintLiveRange(const LiveRange* range, const char* type, int vreg);
void PrintLiveRangeChain(const TopLevelLiveRange* range, const char* type);
- class Tag final BASE_EMBEDDED {
+ class Tag final {
public:
Tag(GraphC1Visualizer* visualizer, const char* name) {
name_ = name;
@@ -766,7 +774,12 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
}
}
- os_ << " " << vreg;
+ // The toplevel range is always suffixed with :0. Use that as parent.
+ os_ << " " << vreg << ":0";
+
+  // TODO(herhut): Find something useful to print for the hint field.
+ os_ << " unknown";
+
for (const UseInterval* interval = range->first_interval();
interval != nullptr; interval = interval->next()) {
os_ << " [" << interval->start().value() << ", "
@@ -950,6 +963,290 @@ std::ostream& operator<<(std::ostream& os, const AsScheduledGraph& scheduled) {
return os;
}
+std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) {
+ const RegisterConfiguration* conf = o.register_configuration_;
+ const InstructionOperand* op = o.op_;
+ const InstructionSequence* code = o.code_;
+ os << "{";
+ switch (op->kind()) {
+ case InstructionOperand::UNALLOCATED: {
+ const UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
+ os << "\"type\": \"unallocated\", ";
+ os << "\"text\": \"v" << unalloc->virtual_register() << "\"";
+ if (unalloc->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
+ os << ",\"tooltip\": \"FIXED_SLOT: " << unalloc->fixed_slot_index()
+ << "\"";
+ break;
+ }
+ switch (unalloc->extended_policy()) {
+ case UnallocatedOperand::NONE:
+ break;
+ case UnallocatedOperand::FIXED_REGISTER: {
+ os << ",\"tooltip\": \"FIXED_REGISTER: "
+ << conf->GetGeneralRegisterName(unalloc->fixed_register_index())
+ << "\"";
+ break;
+ }
+ case UnallocatedOperand::FIXED_FP_REGISTER: {
+ os << ",\"tooltip\": \"FIXED_FP_REGISTER: "
+ << conf->GetDoubleRegisterName(unalloc->fixed_register_index())
+ << "\"";
+ break;
+ }
+ case UnallocatedOperand::MUST_HAVE_REGISTER: {
+ os << ",\"tooltip\": \"MUST_HAVE_REGISTER\"";
+ break;
+ }
+ case UnallocatedOperand::MUST_HAVE_SLOT: {
+ os << ",\"tooltip\": \"MUST_HAVE_SLOT\"";
+ break;
+ }
+ case UnallocatedOperand::SAME_AS_FIRST_INPUT: {
+ os << ",\"tooltip\": \"SAME_AS_FIRST_INPUT\"";
+ break;
+ }
+ case UnallocatedOperand::REGISTER_OR_SLOT: {
+ os << ",\"tooltip\": \"REGISTER_OR_SLOT\"";
+ break;
+ }
+ case UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT: {
+ os << ",\"tooltip\": \"REGISTER_OR_SLOT_OR_CONSTANT\"";
+ break;
+ }
+ }
+ break;
+ }
+ case InstructionOperand::CONSTANT: {
+ int vreg = ConstantOperand::cast(op)->virtual_register();
+ os << "\"type\": \"constant\", ";
+ os << "\"text\": \"v" << vreg << "\",";
+ os << "\"tooltip\": \"";
+ std::stringstream tooltip;
+ tooltip << code->GetConstant(vreg);
+ for (const auto& c : tooltip.str()) {
+ os << AsEscapedUC16ForJSON(c);
+ }
+ os << "\"";
+ break;
+ }
+ case InstructionOperand::IMMEDIATE: {
+ os << "\"type\": \"immediate\", ";
+ const ImmediateOperand* imm = ImmediateOperand::cast(op);
+ switch (imm->type()) {
+ case ImmediateOperand::INLINE: {
+ os << "\"text\": \"#" << imm->inline_value() << "\"";
+ break;
+ }
+ case ImmediateOperand::INDEXED: {
+ int index = imm->indexed_value();
+ os << "\"text\": \"imm:" << index << "\",";
+ os << "\"tooltip\": \"";
+ std::stringstream tooltip;
+ tooltip << code->GetImmediate(imm);
+ for (const auto& c : tooltip.str()) {
+ os << AsEscapedUC16ForJSON(c);
+ }
+ os << "\"";
+ break;
+ }
+ }
+ break;
+ }
+ case InstructionOperand::EXPLICIT:
+ case InstructionOperand::ALLOCATED: {
+ const LocationOperand* allocated = LocationOperand::cast(op);
+ os << "\"type\": ";
+ if (allocated->IsExplicit()) {
+ os << "\"explicit\", ";
+ } else {
+ os << "\"allocated\", ";
+ }
+ os << "\"text\": \"";
+ if (op->IsStackSlot()) {
+ os << "stack:" << allocated->index();
+ } else if (op->IsFPStackSlot()) {
+ os << "fp_stack:" << allocated->index();
+ } else if (op->IsRegister()) {
+ os << conf->GetGeneralOrSpecialRegisterName(allocated->register_code());
+ } else if (op->IsDoubleRegister()) {
+ os << conf->GetDoubleRegisterName(allocated->register_code());
+ } else if (op->IsFloatRegister()) {
+ os << conf->GetFloatRegisterName(allocated->register_code());
+ } else {
+ DCHECK(op->IsSimd128Register());
+ os << conf->GetSimd128RegisterName(allocated->register_code());
+ }
+ os << "\",";
+ os << "\"tooltip\": \""
+ << MachineReprToString(allocated->representation()) << "\"";
+ break;
+ }
+ case InstructionOperand::INVALID:
+ UNREACHABLE();
+ }
+ os << "}";
+ return os;
+}
+
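For orientation, each operand is emitted as one small JSON object; hand-written samples of the shape this writer produces (illustrative, not captured output) might look like:

    {"type": "constant", "text": "v7", "tooltip": "42"}
    {"type": "allocated", "text": "eax", "tooltip": "kRepWord32"}
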
+std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i) {
+ const Instruction* instr = i.instr_;
+ InstructionOperandAsJSON json_op = {i.register_configuration_, nullptr,
+ i.code_};
+
+ os << "{";
+ os << "\"id\": " << i.index_ << ",";
+ os << "\"opcode\": \"" << ArchOpcodeField::decode(instr->opcode()) << "\",";
+ os << "\"flags\": \"";
+ FlagsMode fm = FlagsModeField::decode(instr->opcode());
+ AddressingMode am = AddressingModeField::decode(instr->opcode());
+ if (am != kMode_None) {
+ os << " : " << AddressingModeField::decode(instr->opcode());
+ }
+ if (fm != kFlags_none) {
+ os << " && " << fm << " if "
+ << FlagsConditionField::decode(instr->opcode());
+ }
+ os << "\",";
+
+ os << "\"gaps\": [";
+ for (int i = Instruction::FIRST_GAP_POSITION;
+ i <= Instruction::LAST_GAP_POSITION; i++) {
+ if (i != Instruction::FIRST_GAP_POSITION) os << ",";
+ os << "[";
+ const ParallelMove* pm = instr->parallel_moves()[i];
+ if (pm == nullptr) {
+ os << "]";
+ continue;
+ }
+ bool first = true;
+ for (MoveOperands* move : *pm) {
+ if (move->IsEliminated()) continue;
+ if (!first) os << ",";
+ first = false;
+ json_op.op_ = &move->destination();
+ os << "[" << json_op << ",";
+ json_op.op_ = &move->source();
+ os << json_op << "]";
+ }
+ os << "]";
+ }
+ os << "],";
+
+ os << "\"outputs\": [";
+ bool need_comma = false;
+ for (size_t i = 0; i < instr->OutputCount(); i++) {
+ if (need_comma) os << ",";
+ need_comma = true;
+ json_op.op_ = instr->OutputAt(i);
+ os << json_op;
+ }
+ os << "],";
+
+ os << "\"inputs\": [";
+ need_comma = false;
+ for (size_t i = 0; i < instr->InputCount(); i++) {
+ if (need_comma) os << ",";
+ need_comma = true;
+ json_op.op_ = instr->InputAt(i);
+ os << json_op;
+ }
+ os << "],";
+
+ os << "\"temps\": [";
+ need_comma = false;
+ for (size_t i = 0; i < instr->TempCount(); i++) {
+ if (need_comma) os << ",";
+ need_comma = true;
+ json_op.op_ = instr->TempAt(i);
+ os << json_op;
+ }
+ os << "]";
+ os << "}";
+
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const InstructionBlockAsJSON& b) {
+ const InstructionBlock* block = b.block_;
+ const InstructionSequence* code = b.code_;
+ os << "{";
+ os << "\"id\": " << block->rpo_number() << ",";
+ os << "\"deferred\": " << block->IsDeferred() << ",";
+ os << "\"loop_header\": " << block->IsLoopHeader() << ",";
+ if (block->IsLoopHeader()) {
+ os << "\"loop_end\": " << block->loop_end() << ",";
+ }
+ os << "\"predecessors\": [";
+ bool need_comma = false;
+ for (RpoNumber pred : block->predecessors()) {
+ if (need_comma) os << ",";
+ need_comma = true;
+ os << pred.ToInt();
+ }
+ os << "],";
+ os << "\"successors\": [";
+ need_comma = false;
+ for (RpoNumber succ : block->successors()) {
+ if (need_comma) os << ",";
+ need_comma = true;
+ os << succ.ToInt();
+ }
+ os << "],";
+ os << "\"phis\": [";
+ bool needs_comma = false;
+ InstructionOperandAsJSON json_op = {b.register_configuration_, nullptr, code};
+ for (const PhiInstruction* phi : block->phis()) {
+ if (needs_comma) os << ",";
+ needs_comma = true;
+ json_op.op_ = &phi->output();
+ os << "{\"output\" : " << json_op << ",";
+ os << "\"operands\": [";
+ bool op_needs_comma = false;
+ for (int input : phi->operands()) {
+ if (op_needs_comma) os << ",";
+ op_needs_comma = true;
+ os << "\"v" << input << "\"";
+ }
+ os << "]}";
+ }
+ os << "],";
+
+ os << "\"instructions\": [";
+ InstructionAsJSON json_instr = {b.register_configuration_, -1, nullptr, code};
+ need_comma = false;
+ for (int j = block->first_instruction_index();
+ j <= block->last_instruction_index(); j++) {
+ if (need_comma) os << ",";
+ need_comma = true;
+ json_instr.index_ = j;
+ json_instr.instr_ = code->InstructionAt(j);
+ os << json_instr;
+ }
+ os << "]";
+ os << "}";
+
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const InstructionSequenceAsJSON& s) {
+ const InstructionSequence* code = s.sequence_;
+
+ os << "\"blocks\": [";
+ InstructionBlockAsJSON json_block = {s.register_configuration_, nullptr,
+ code};
+
+ bool need_comma = false;
+ for (int i = 0; i < code->InstructionBlockCount(); i++) {
+ if (need_comma) os << ",";
+ need_comma = true;
+ json_block.block_ = code->InstructionBlockAt(RpoNumber::FromInt(i));
+ os << json_block;
+ }
+ os << "]";
+
+ return os;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 5573c346ee..93524d74cd 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -17,11 +17,15 @@ namespace v8 {
namespace internal {
class OptimizedCompilationInfo;
+class RegisterConfiguration;
class SharedFunctionInfo;
class SourcePosition;
namespace compiler {
class Graph;
+class Instruction;
+class InstructionBlock;
+class InstructionOperand;
class InstructionSequence;
class NodeOrigin;
class NodeOriginTable;
@@ -31,7 +35,12 @@ class SourcePositionTable;
struct TurboJsonFile : public std::ofstream {
TurboJsonFile(OptimizedCompilationInfo* info, std::ios_base::openmode mode);
- ~TurboJsonFile();
+ ~TurboJsonFile() override;
+};
+
+struct TurboCfgFile : public std::ofstream {
+ explicit TurboCfgFile(Isolate* isolate = nullptr);
+ ~TurboCfgFile() override;
};
struct SourcePositionAsJSON {
@@ -147,6 +156,36 @@ std::ostream& operator<<(std::ostream& os, const AsC1V& ac);
std::ostream& operator<<(std::ostream& os,
const AsC1VRegisterAllocationData& ac);
+struct InstructionOperandAsJSON {
+ const RegisterConfiguration* register_configuration_;
+ const InstructionOperand* op_;
+ const InstructionSequence* code_;
+};
+
+std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o);
+
+struct InstructionAsJSON {
+ const RegisterConfiguration* register_configuration_;
+ int index_;
+ const Instruction* instr_;
+ const InstructionSequence* code_;
+};
+std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i);
+
+struct InstructionBlockAsJSON {
+ const RegisterConfiguration* register_configuration_;
+ const InstructionBlock* block_;
+ const InstructionSequence* code_;
+};
+
+std::ostream& operator<<(std::ostream& os, const InstructionBlockAsJSON& b);
+
+struct InstructionSequenceAsJSON {
+ const RegisterConfiguration* register_configuration_;
+ const InstructionSequence* sequence_;
+};
+std::ostream& operator<<(std::ostream& os, const InstructionSequenceAsJSON& s);
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 3c5c9c4de8..eafb043992 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -110,7 +110,7 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
// in a graph.
class GraphDecorator : public ZoneObject {
public:
- virtual ~GraphDecorator() {}
+ virtual ~GraphDecorator() = default;
virtual void Decorate(Node* node) = 0;
};
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 9d54eaeb90..c73ad99ad1 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -80,6 +80,9 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Immediate(constant.ToExternalReference());
case Constant::kHeapObject:
return Immediate(constant.ToHeapObject());
+ case Constant::kDelayedStringConstant:
+ return Immediate::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
case Constant::kInt64:
break;
case Constant::kRpoNumber:
@@ -427,21 +430,30 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ j(not_equal, &binop); \
} while (false)
-#define ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2) \
- do { \
- Label binop; \
- __ bind(&binop); \
- __ mov(i.OutputRegister(0), i.MemoryOperand(2)); \
- __ mov(i.OutputRegister(1), i.NextMemoryOperand(2)); \
- __ push(i.InputRegister(0)); \
- __ push(i.InputRegister(1)); \
- __ instr1(i.InputRegister(0), i.OutputRegister(0)); \
- __ instr2(i.InputRegister(1), i.OutputRegister(1)); \
- __ lock(); \
- __ cmpxchg8b(i.MemoryOperand(2)); \
- __ pop(i.InputRegister(1)); \
- __ pop(i.InputRegister(0)); \
- __ j(not_equal, &binop); \
+#define ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2) \
+ do { \
+ Label binop; \
+ __ bind(&binop); \
+ TurboAssembler::AllowExplicitEbxAccessScope spill_register(tasm()); \
+ __ mov(eax, i.MemoryOperand(2)); \
+ __ mov(edx, i.NextMemoryOperand(2)); \
+ __ push(ebx); \
+ frame_access_state()->IncreaseSPDelta(1); \
+ InstructionOperand* op = instr->InputAt(0); \
+ if (op->IsImmediate() || op->IsConstant()) { \
+ __ mov(ebx, i.ToImmediate(op)); \
+ } else { \
+ __ mov(ebx, i.ToOperand(op)); \
+ } \
+ __ push(i.InputRegister(1)); \
+ __ instr1(ebx, eax); \
+ __ instr2(i.InputRegister(1), edx); \
+ __ lock(); \
+ __ cmpxchg8b(i.MemoryOperand(2)); \
+ __ pop(i.InputRegister(1)); \
+ __ pop(ebx); \
+ frame_access_state()->IncreaseSPDelta(-1); \
+ __ j(not_equal, &binop); \
} while (false);
#define ASSEMBLE_MOVX(mov_instr) \
@@ -497,7 +509,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
// There are not enough temp registers left on ia32 for a call instruction
// so we pick some scratch registers and save/restore them manually here.
int scratch_count = 3;
- Register scratch1 = ebx;
+ Register scratch1 = esi;
Register scratch2 = ecx;
Register scratch3 = edx;
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
@@ -547,6 +559,18 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
}
}
+#ifdef DEBUG
+bool VerifyOutputOfAtomicPairInstr(IA32OperandConverter* converter,
+ const Instruction* instr) {
+ if (instr->OutputCount() > 0) {
+ if (converter->OutputRegister(0) != eax) return false;
+ if (instr->OutputCount() == 2 && converter->OutputRegister(1) != edx)
+ return false;
+ }
+ return true;
+}
+#endif
+
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
@@ -609,9 +633,11 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ mov(ebx, Operand(kJavaScriptCallCodeStartRegister, offset));
- __ test(FieldOperand(ebx, CodeDataContainer::kKindSpecificFlagsOffset),
+ __ push(eax); // Push eax so we can use it as a scratch register.
+ __ mov(eax, Operand(kJavaScriptCallCodeStartRegister, offset));
+ __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ pop(eax); // Restore eax.
// Ensure we're not serializing (otherwise we'd need to use an indirection to
// access the builtin below).
DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
@@ -1369,8 +1395,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEFloat64Mod: {
- // TODO(dcarney): alignment is wrong.
+ Register tmp = i.TempRegister(1);
+ __ mov(tmp, esp);
__ sub(esp, Immediate(kDoubleSize));
+    __ and_(esp, -8);  // align to an 8-byte boundary.
// Move values to st(0) and st(1).
__ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
__ fld_d(Operand(esp, 0));
@@ -1379,10 +1407,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Loop while fprem isn't done.
Label mod_loop;
__ bind(&mod_loop);
- // This instructions traps on all kinds inputs, but we are assuming the
+ // This instruction traps on all kinds of inputs, but we are assuming the
// floating point control word is set to ignore them all.
__ fprem();
- // The following 2 instruction implicitly use eax.
+ // fnstsw_ax clobbers eax.
+ DCHECK_EQ(eax, i.TempRegister(0));
__ fnstsw_ax();
__ sahf();
__ j(parity_even, &mod_loop);
@@ -1390,7 +1419,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ fstp(1);
__ fstp_d(Operand(esp, 0));
__ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
+ __ mov(esp, tmp);
break;
}
case kSSEFloat64Abs: {
@@ -1463,10 +1492,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kSSEFloat64InsertLowWord32:
- __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0, true);
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
break;
case kSSEFloat64InsertHighWord32:
- __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1, true);
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
break;
case kSSEFloat64LoadLowWord32:
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
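
The kSSEFloat64Mod fix replaces the fixed sub/add pair with save-align-restore, since fld_d/fstp_d want an 8-byte-aligned slot. The pointer arithmetic, sketched in plain C++:

    #include <cstdint>

    // Sketch of the save/align/restore pattern: the old esp is kept in a
    // scratch register and restored afterwards instead of undoing the
    // adjustment with an add, which would be wrong after the alignment.
    uintptr_t CarveAlignedDoubleSlot(uintptr_t esp, uintptr_t* saved_esp) {
      *saved_esp = esp;        // mov tmp, esp
      esp -= sizeof(double);   // sub esp, kDoubleSize
      esp &= ~uintptr_t{7};    // and esp, -8
      return esp;              // ... later: mov esp, tmp
    }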
@@ -3639,15 +3668,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Word32AtomicPairLoad: {
XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
__ movq(tmp, i.MemoryOperand());
- __ Pextrd(i.OutputRegister(0), tmp, 0);
- __ Pextrd(i.OutputRegister(1), tmp, 1);
+ if (instr->OutputCount() == 2) {
+ __ Pextrd(i.OutputRegister(0), tmp, 0);
+ __ Pextrd(i.OutputRegister(1), tmp, 1);
+ } else if (instr->OutputCount() == 1) {
+ __ Pextrd(i.OutputRegister(0), tmp, 0);
+ __ Pextrd(i.TempRegister(1), tmp, 1);
+ }
break;
}
case kIA32Word32AtomicPairStore: {
+ TurboAssembler::AllowExplicitEbxAccessScope spill_register(tasm());
__ mov(i.TempRegister(0), i.MemoryOperand(2));
__ mov(i.TempRegister(1), i.NextMemoryOperand(2));
+ __ push(ebx);
+ frame_access_state()->IncreaseSPDelta(1);
+ InstructionOperand* op = instr->InputAt(0);
+ if (op->IsImmediate() || op->IsConstant()) {
+ __ mov(ebx, i.ToImmediate(op));
+ } else {
+ __ mov(ebx, i.ToOperand(op));
+ }
__ lock();
__ cmpxchg8b(i.MemoryOperand(2));
+ __ pop(ebx);
+ frame_access_state()->IncreaseSPDelta(-1);
break;
}
case kWord32AtomicExchangeInt8: {
@@ -3674,32 +3719,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- // For the narrow Word64 operations below, i.OutputRegister(1) contains
- // the high-order 32 bits for the 64bit operation. As the data exchange
- // fits in one register, the i.OutputRegister(1) needs to be cleared for
- // the correct return value to be propagated back.
- case kIA32Word64AtomicNarrowExchangeUint8: {
- __ xchg_b(i.OutputRegister(0), i.MemoryOperand(1));
- __ movzx_b(i.OutputRegister(0), i.OutputRegister(0));
- __ xor_(i.OutputRegister(1), i.OutputRegister(1));
- break;
- }
- case kIA32Word64AtomicNarrowExchangeUint16: {
- __ xchg_w(i.OutputRegister(0), i.MemoryOperand(1));
- __ movzx_w(i.OutputRegister(0), i.OutputRegister(0));
- __ xor_(i.OutputRegister(1), i.OutputRegister(1));
- break;
- }
- case kIA32Word64AtomicNarrowExchangeUint32: {
- __ xchg(i.OutputRegister(0), i.MemoryOperand(1));
- __ xor_(i.OutputRegister(1), i.OutputRegister(1));
- break;
- }
case kIA32Word32AtomicPairExchange: {
- __ mov(i.OutputRegister(0), i.MemoryOperand(2));
- __ mov(i.OutputRegister(1), i.NextMemoryOperand(2));
+ DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr));
+ TurboAssembler::AllowExplicitEbxAccessScope spill_ebx(tasm());
+ __ mov(eax, i.MemoryOperand(2));
+ __ mov(edx, i.NextMemoryOperand(2));
+ __ push(ebx);
+ frame_access_state()->IncreaseSPDelta(1);
+ InstructionOperand* op = instr->InputAt(0);
+ if (op->IsImmediate() || op->IsConstant()) {
+ __ mov(ebx, i.ToImmediate(op));
+ } else {
+ __ mov(ebx, i.ToOperand(op));
+ }
__ lock();
__ cmpxchg8b(i.MemoryOperand(2));
+ __ pop(ebx);
+ frame_access_state()->IncreaseSPDelta(-1);
break;
}
case kWord32AtomicCompareExchangeInt8: {
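
All of these pair sequences revolve around lock cmpxchg8b, which compares edx:eax against the 64-bit memory operand and stores ecx:ebx on a match; ebx doubles as the root register in this configuration, hence the push/pop spills around each use. Its semantics, sketched without the register constraints:

    #include <atomic>
    #include <cstdint>

    // Semantics of `lock cmpxchg8b [mem]`: compare edx:eax with the memory
    // operand and, if equal, store ecx:ebx; either way edx:eax ends up
    // holding the value observed in memory.
    uint64_t Cmpxchg8b(std::atomic<uint64_t>* mem,
                       uint64_t expected /* edx:eax */,
                       uint64_t desired /* ecx:ebx */) {
      // On failure, compare_exchange_strong writes the observed value back
      // into `expected`, mirroring the edx:eax update.
      mem->compare_exchange_strong(expected, desired);
      return expected;
    }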
@@ -3731,29 +3767,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
}
- case kIA32Word64AtomicNarrowCompareExchangeUint8: {
- __ lock();
- __ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
- __ movzx_b(i.OutputRegister(0), i.OutputRegister(0));
- __ xor_(i.OutputRegister(1), i.OutputRegister(1));
- break;
- }
- case kIA32Word64AtomicNarrowCompareExchangeUint16: {
- __ lock();
- __ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
- __ movzx_w(i.OutputRegister(0), i.OutputRegister(0));
- __ xor_(i.OutputRegister(1), i.OutputRegister(1));
- break;
- }
- case kIA32Word64AtomicNarrowCompareExchangeUint32: {
- __ lock();
- __ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
- __ xor_(i.OutputRegister(1), i.OutputRegister(1));
- break;
- }
case kIA32Word32AtomicPairCompareExchange: {
+ TurboAssembler::AllowExplicitEbxAccessScope spill_ebx(tasm());
+ __ push(ebx);
+ frame_access_state()->IncreaseSPDelta(1);
+ InstructionOperand* op = instr->InputAt(2);
+ if (op->IsImmediate() || op->IsConstant()) {
+ __ mov(ebx, i.ToImmediate(op));
+ } else {
+ __ mov(ebx, i.ToOperand(op));
+ }
__ lock();
__ cmpxchg8b(i.MemoryOperand(4));
+ __ pop(ebx);
+ frame_access_state()->IncreaseSPDelta(-1);
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
@@ -3762,12 +3789,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movsx_b(eax, eax); \
break; \
} \
- case kIA32Word64AtomicNarrow##op##Uint8: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
- __ movzx_b(i.OutputRegister(0), i.OutputRegister(0)); \
- __ xor_(i.OutputRegister(1), i.OutputRegister(1)); \
- break; \
- } \
case kWord32Atomic##op##Uint8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movzx_b(eax, eax); \
@@ -3778,22 +3799,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movsx_w(eax, eax); \
break; \
} \
- case kIA32Word64AtomicNarrow##op##Uint16: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
- __ movzx_w(i.OutputRegister(0), i.OutputRegister(0)); \
- __ xor_(i.OutputRegister(1), i.OutputRegister(1)); \
- break; \
- } \
case kWord32Atomic##op##Uint16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movzx_w(eax, eax); \
break; \
} \
- case kIA32Word64AtomicNarrow##op##Uint32: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
- __ xor_(i.OutputRegister(1), i.OutputRegister(1)); \
- break; \
- } \
case kWord32Atomic##op##Word32: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
break; \
@@ -3804,10 +3814,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, or_)
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
-#define ATOMIC_BINOP_CASE(op, instr1, instr2) \
- case kIA32Word32AtomicPair##op: { \
- ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2) \
- break; \
+#define ATOMIC_BINOP_CASE(op, instr1, instr2) \
+ case kIA32Word32AtomicPair##op: { \
+ DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr)); \
+ ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2) \
+ break; \
}
ATOMIC_BINOP_CASE(Add, add, adc)
ATOMIC_BINOP_CASE(And, and_, and_)
@@ -3815,26 +3826,36 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Xor, xor_, xor_)
#undef ATOMIC_BINOP_CASE
case kIA32Word32AtomicPairSub: {
+ DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr));
Label binop;
__ bind(&binop);
+ TurboAssembler::AllowExplicitEbxAccessScope spill_register(tasm());
// Move memory operand into edx:eax
- __ mov(i.OutputRegister(0), i.MemoryOperand(2));
- __ mov(i.OutputRegister(1), i.NextMemoryOperand(2));
+ __ mov(eax, i.MemoryOperand(2));
+ __ mov(edx, i.NextMemoryOperand(2));
// Save input registers temporarily on the stack.
- __ push(i.InputRegister(0));
+ __ push(ebx);
+ frame_access_state()->IncreaseSPDelta(1);
+ InstructionOperand* op = instr->InputAt(0);
+ if (op->IsImmediate() || op->IsConstant()) {
+ __ mov(ebx, i.ToImmediate(op));
+ } else {
+ __ mov(ebx, i.ToOperand(op));
+ }
__ push(i.InputRegister(1));
// Negate input in place
- __ neg(i.InputRegister(0));
+ __ neg(ebx);
__ adc(i.InputRegister(1), 0);
__ neg(i.InputRegister(1));
// Add memory operand, negated input.
- __ add(i.InputRegister(0), i.OutputRegister(0));
- __ adc(i.InputRegister(1), i.OutputRegister(1));
+ __ add(ebx, eax);
+ __ adc(i.InputRegister(1), edx);
__ lock();
__ cmpxchg8b(i.MemoryOperand(2));
// Restore input registers
__ pop(i.InputRegister(1));
- __ pop(i.InputRegister(0));
+ __ pop(ebx);
+ frame_access_state()->IncreaseSPDelta(-1);
__ j(not_equal, &binop);
break;
}
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 97f3763cf5..e157a29e13 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -11,377 +11,356 @@ namespace compiler {
// IA32-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(IA32Add) \
- V(IA32And) \
- V(IA32Cmp) \
- V(IA32Cmp16) \
- V(IA32Cmp8) \
- V(IA32Test) \
- V(IA32Test16) \
- V(IA32Test8) \
- V(IA32Or) \
- V(IA32Xor) \
- V(IA32Sub) \
- V(IA32Imul) \
- V(IA32ImulHigh) \
- V(IA32UmulHigh) \
- V(IA32Idiv) \
- V(IA32Udiv) \
- V(IA32Not) \
- V(IA32Neg) \
- V(IA32Shl) \
- V(IA32Shr) \
- V(IA32Sar) \
- V(IA32AddPair) \
- V(IA32SubPair) \
- V(IA32MulPair) \
- V(IA32ShlPair) \
- V(IA32ShrPair) \
- V(IA32SarPair) \
- V(IA32Ror) \
- V(IA32Lzcnt) \
- V(IA32Tzcnt) \
- V(IA32Popcnt) \
- V(IA32Bswap) \
- V(LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEInt32ToFloat32) \
- V(SSEUint32ToFloat32) \
- V(SSEInt32ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
- V(IA32Movsxbl) \
- V(IA32Movzxbl) \
- V(IA32Movb) \
- V(IA32Movsxwl) \
- V(IA32Movzxwl) \
- V(IA32Movw) \
- V(IA32Movl) \
- V(IA32Movss) \
- V(IA32Movsd) \
- V(IA32Movdqu) \
- V(IA32BitcastFI) \
- V(IA32BitcastIF) \
- V(IA32Lea) \
- V(IA32Push) \
- V(IA32PushFloat32) \
- V(IA32PushFloat64) \
- V(IA32PushSimd128) \
- V(IA32Poke) \
- V(IA32Peek) \
- V(IA32StackCheck) \
- V(SSEF32x4Splat) \
- V(AVXF32x4Splat) \
- V(SSEF32x4ExtractLane) \
- V(AVXF32x4ExtractLane) \
- V(SSEF32x4ReplaceLane) \
- V(AVXF32x4ReplaceLane) \
- V(IA32F32x4SConvertI32x4) \
- V(SSEF32x4UConvertI32x4) \
- V(AVXF32x4UConvertI32x4) \
- V(SSEF32x4Abs) \
- V(AVXF32x4Abs) \
- V(SSEF32x4Neg) \
- V(AVXF32x4Neg) \
- V(IA32F32x4RecipApprox) \
- V(IA32F32x4RecipSqrtApprox) \
- V(SSEF32x4Add) \
- V(AVXF32x4Add) \
- V(SSEF32x4AddHoriz) \
- V(AVXF32x4AddHoriz) \
- V(SSEF32x4Sub) \
- V(AVXF32x4Sub) \
- V(SSEF32x4Mul) \
- V(AVXF32x4Mul) \
- V(SSEF32x4Min) \
- V(AVXF32x4Min) \
- V(SSEF32x4Max) \
- V(AVXF32x4Max) \
- V(SSEF32x4Eq) \
- V(AVXF32x4Eq) \
- V(SSEF32x4Ne) \
- V(AVXF32x4Ne) \
- V(SSEF32x4Lt) \
- V(AVXF32x4Lt) \
- V(SSEF32x4Le) \
- V(AVXF32x4Le) \
- V(IA32I32x4Splat) \
- V(IA32I32x4ExtractLane) \
- V(SSEI32x4ReplaceLane) \
- V(AVXI32x4ReplaceLane) \
- V(SSEI32x4SConvertF32x4) \
- V(AVXI32x4SConvertF32x4) \
- V(IA32I32x4SConvertI16x8Low) \
- V(IA32I32x4SConvertI16x8High) \
- V(IA32I32x4Neg) \
- V(SSEI32x4Shl) \
- V(AVXI32x4Shl) \
- V(SSEI32x4ShrS) \
- V(AVXI32x4ShrS) \
- V(SSEI32x4Add) \
- V(AVXI32x4Add) \
- V(SSEI32x4AddHoriz) \
- V(AVXI32x4AddHoriz) \
- V(SSEI32x4Sub) \
- V(AVXI32x4Sub) \
- V(SSEI32x4Mul) \
- V(AVXI32x4Mul) \
- V(SSEI32x4MinS) \
- V(AVXI32x4MinS) \
- V(SSEI32x4MaxS) \
- V(AVXI32x4MaxS) \
- V(SSEI32x4Eq) \
- V(AVXI32x4Eq) \
- V(SSEI32x4Ne) \
- V(AVXI32x4Ne) \
- V(SSEI32x4GtS) \
- V(AVXI32x4GtS) \
- V(SSEI32x4GeS) \
- V(AVXI32x4GeS) \
- V(SSEI32x4UConvertF32x4) \
- V(AVXI32x4UConvertF32x4) \
- V(IA32I32x4UConvertI16x8Low) \
- V(IA32I32x4UConvertI16x8High) \
- V(SSEI32x4ShrU) \
- V(AVXI32x4ShrU) \
- V(SSEI32x4MinU) \
- V(AVXI32x4MinU) \
- V(SSEI32x4MaxU) \
- V(AVXI32x4MaxU) \
- V(SSEI32x4GtU) \
- V(AVXI32x4GtU) \
- V(SSEI32x4GeU) \
- V(AVXI32x4GeU) \
- V(IA32I16x8Splat) \
- V(IA32I16x8ExtractLane) \
- V(SSEI16x8ReplaceLane) \
- V(AVXI16x8ReplaceLane) \
- V(IA32I16x8SConvertI8x16Low) \
- V(IA32I16x8SConvertI8x16High) \
- V(IA32I16x8Neg) \
- V(SSEI16x8Shl) \
- V(AVXI16x8Shl) \
- V(SSEI16x8ShrS) \
- V(AVXI16x8ShrS) \
- V(SSEI16x8SConvertI32x4) \
- V(AVXI16x8SConvertI32x4) \
- V(SSEI16x8Add) \
- V(AVXI16x8Add) \
- V(SSEI16x8AddSaturateS) \
- V(AVXI16x8AddSaturateS) \
- V(SSEI16x8AddHoriz) \
- V(AVXI16x8AddHoriz) \
- V(SSEI16x8Sub) \
- V(AVXI16x8Sub) \
- V(SSEI16x8SubSaturateS) \
- V(AVXI16x8SubSaturateS) \
- V(SSEI16x8Mul) \
- V(AVXI16x8Mul) \
- V(SSEI16x8MinS) \
- V(AVXI16x8MinS) \
- V(SSEI16x8MaxS) \
- V(AVXI16x8MaxS) \
- V(SSEI16x8Eq) \
- V(AVXI16x8Eq) \
- V(SSEI16x8Ne) \
- V(AVXI16x8Ne) \
- V(SSEI16x8GtS) \
- V(AVXI16x8GtS) \
- V(SSEI16x8GeS) \
- V(AVXI16x8GeS) \
- V(IA32I16x8UConvertI8x16Low) \
- V(IA32I16x8UConvertI8x16High) \
- V(SSEI16x8ShrU) \
- V(AVXI16x8ShrU) \
- V(SSEI16x8UConvertI32x4) \
- V(AVXI16x8UConvertI32x4) \
- V(SSEI16x8AddSaturateU) \
- V(AVXI16x8AddSaturateU) \
- V(SSEI16x8SubSaturateU) \
- V(AVXI16x8SubSaturateU) \
- V(SSEI16x8MinU) \
- V(AVXI16x8MinU) \
- V(SSEI16x8MaxU) \
- V(AVXI16x8MaxU) \
- V(SSEI16x8GtU) \
- V(AVXI16x8GtU) \
- V(SSEI16x8GeU) \
- V(AVXI16x8GeU) \
- V(IA32I8x16Splat) \
- V(IA32I8x16ExtractLane) \
- V(SSEI8x16ReplaceLane) \
- V(AVXI8x16ReplaceLane) \
- V(SSEI8x16SConvertI16x8) \
- V(AVXI8x16SConvertI16x8) \
- V(IA32I8x16Neg) \
- V(SSEI8x16Shl) \
- V(AVXI8x16Shl) \
- V(IA32I8x16ShrS) \
- V(SSEI8x16Add) \
- V(AVXI8x16Add) \
- V(SSEI8x16AddSaturateS) \
- V(AVXI8x16AddSaturateS) \
- V(SSEI8x16Sub) \
- V(AVXI8x16Sub) \
- V(SSEI8x16SubSaturateS) \
- V(AVXI8x16SubSaturateS) \
- V(SSEI8x16Mul) \
- V(AVXI8x16Mul) \
- V(SSEI8x16MinS) \
- V(AVXI8x16MinS) \
- V(SSEI8x16MaxS) \
- V(AVXI8x16MaxS) \
- V(SSEI8x16Eq) \
- V(AVXI8x16Eq) \
- V(SSEI8x16Ne) \
- V(AVXI8x16Ne) \
- V(SSEI8x16GtS) \
- V(AVXI8x16GtS) \
- V(SSEI8x16GeS) \
- V(AVXI8x16GeS) \
- V(SSEI8x16UConvertI16x8) \
- V(AVXI8x16UConvertI16x8) \
- V(SSEI8x16AddSaturateU) \
- V(AVXI8x16AddSaturateU) \
- V(SSEI8x16SubSaturateU) \
- V(AVXI8x16SubSaturateU) \
- V(IA32I8x16ShrU) \
- V(SSEI8x16MinU) \
- V(AVXI8x16MinU) \
- V(SSEI8x16MaxU) \
- V(AVXI8x16MaxU) \
- V(SSEI8x16GtU) \
- V(AVXI8x16GtU) \
- V(SSEI8x16GeU) \
- V(AVXI8x16GeU) \
- V(IA32S128Zero) \
- V(SSES128Not) \
- V(AVXS128Not) \
- V(SSES128And) \
- V(AVXS128And) \
- V(SSES128Or) \
- V(AVXS128Or) \
- V(SSES128Xor) \
- V(AVXS128Xor) \
- V(SSES128Select) \
- V(AVXS128Select) \
- V(IA32S8x16Shuffle) \
- V(IA32S32x4Swizzle) \
- V(IA32S32x4Shuffle) \
- V(IA32S16x8Blend) \
- V(IA32S16x8HalfShuffle1) \
- V(IA32S16x8HalfShuffle2) \
- V(IA32S8x16Alignr) \
- V(IA32S16x8Dup) \
- V(IA32S8x16Dup) \
- V(SSES16x8UnzipHigh) \
- V(AVXS16x8UnzipHigh) \
- V(SSES16x8UnzipLow) \
- V(AVXS16x8UnzipLow) \
- V(SSES8x16UnzipHigh) \
- V(AVXS8x16UnzipHigh) \
- V(SSES8x16UnzipLow) \
- V(AVXS8x16UnzipLow) \
- V(IA32S64x2UnpackHigh) \
- V(IA32S32x4UnpackHigh) \
- V(IA32S16x8UnpackHigh) \
- V(IA32S8x16UnpackHigh) \
- V(IA32S64x2UnpackLow) \
- V(IA32S32x4UnpackLow) \
- V(IA32S16x8UnpackLow) \
- V(IA32S8x16UnpackLow) \
- V(SSES8x16TransposeLow) \
- V(AVXS8x16TransposeLow) \
- V(SSES8x16TransposeHigh) \
- V(AVXS8x16TransposeHigh) \
- V(SSES8x8Reverse) \
- V(AVXS8x8Reverse) \
- V(SSES8x4Reverse) \
- V(AVXS8x4Reverse) \
- V(SSES8x2Reverse) \
- V(AVXS8x2Reverse) \
- V(IA32S1x4AnyTrue) \
- V(IA32S1x4AllTrue) \
- V(IA32S1x8AnyTrue) \
- V(IA32S1x8AllTrue) \
- V(IA32S1x16AnyTrue) \
- V(IA32S1x16AllTrue) \
- V(IA32Word32AtomicPairLoad) \
- V(IA32Word32AtomicPairStore) \
- V(IA32Word32AtomicPairAdd) \
- V(IA32Word32AtomicPairSub) \
- V(IA32Word32AtomicPairAnd) \
- V(IA32Word32AtomicPairOr) \
- V(IA32Word32AtomicPairXor) \
- V(IA32Word32AtomicPairExchange) \
- V(IA32Word32AtomicPairCompareExchange) \
- V(IA32Word64AtomicNarrowAddUint8) \
- V(IA32Word64AtomicNarrowAddUint16) \
- V(IA32Word64AtomicNarrowAddUint32) \
- V(IA32Word64AtomicNarrowSubUint8) \
- V(IA32Word64AtomicNarrowSubUint16) \
- V(IA32Word64AtomicNarrowSubUint32) \
- V(IA32Word64AtomicNarrowAndUint8) \
- V(IA32Word64AtomicNarrowAndUint16) \
- V(IA32Word64AtomicNarrowAndUint32) \
- V(IA32Word64AtomicNarrowOrUint8) \
- V(IA32Word64AtomicNarrowOrUint16) \
- V(IA32Word64AtomicNarrowOrUint32) \
- V(IA32Word64AtomicNarrowXorUint8) \
- V(IA32Word64AtomicNarrowXorUint16) \
- V(IA32Word64AtomicNarrowXorUint32) \
- V(IA32Word64AtomicNarrowExchangeUint8) \
- V(IA32Word64AtomicNarrowExchangeUint16) \
- V(IA32Word64AtomicNarrowExchangeUint32) \
- V(IA32Word64AtomicNarrowCompareExchangeUint8) \
- V(IA32Word64AtomicNarrowCompareExchangeUint16) \
- V(IA32Word64AtomicNarrowCompareExchangeUint32)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(IA32Add) \
+ V(IA32And) \
+ V(IA32Cmp) \
+ V(IA32Cmp16) \
+ V(IA32Cmp8) \
+ V(IA32Test) \
+ V(IA32Test16) \
+ V(IA32Test8) \
+ V(IA32Or) \
+ V(IA32Xor) \
+ V(IA32Sub) \
+ V(IA32Imul) \
+ V(IA32ImulHigh) \
+ V(IA32UmulHigh) \
+ V(IA32Idiv) \
+ V(IA32Udiv) \
+ V(IA32Not) \
+ V(IA32Neg) \
+ V(IA32Shl) \
+ V(IA32Shr) \
+ V(IA32Sar) \
+ V(IA32AddPair) \
+ V(IA32SubPair) \
+ V(IA32MulPair) \
+ V(IA32ShlPair) \
+ V(IA32ShrPair) \
+ V(IA32SarPair) \
+ V(IA32Ror) \
+ V(IA32Lzcnt) \
+ V(IA32Tzcnt) \
+ V(IA32Popcnt) \
+ V(IA32Bswap) \
+ V(LFence) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Abs) \
+ V(SSEFloat32Neg) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32Round) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat32Max) \
+ V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64Abs) \
+ V(SSEFloat64Neg) \
+ V(SSEFloat64Sqrt) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat64ToFloat32) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEInt32ToFloat32) \
+ V(SSEUint32ToFloat32) \
+ V(SSEInt32ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat64Add) \
+ V(AVXFloat64Sub) \
+ V(AVXFloat64Mul) \
+ V(AVXFloat64Div) \
+ V(AVXFloat64Abs) \
+ V(AVXFloat64Neg) \
+ V(AVXFloat32Abs) \
+ V(AVXFloat32Neg) \
+ V(IA32Movsxbl) \
+ V(IA32Movzxbl) \
+ V(IA32Movb) \
+ V(IA32Movsxwl) \
+ V(IA32Movzxwl) \
+ V(IA32Movw) \
+ V(IA32Movl) \
+ V(IA32Movss) \
+ V(IA32Movsd) \
+ V(IA32Movdqu) \
+ V(IA32BitcastFI) \
+ V(IA32BitcastIF) \
+ V(IA32Lea) \
+ V(IA32Push) \
+ V(IA32PushFloat32) \
+ V(IA32PushFloat64) \
+ V(IA32PushSimd128) \
+ V(IA32Poke) \
+ V(IA32Peek) \
+ V(IA32StackCheck) \
+ V(SSEF32x4Splat) \
+ V(AVXF32x4Splat) \
+ V(SSEF32x4ExtractLane) \
+ V(AVXF32x4ExtractLane) \
+ V(SSEF32x4ReplaceLane) \
+ V(AVXF32x4ReplaceLane) \
+ V(IA32F32x4SConvertI32x4) \
+ V(SSEF32x4UConvertI32x4) \
+ V(AVXF32x4UConvertI32x4) \
+ V(SSEF32x4Abs) \
+ V(AVXF32x4Abs) \
+ V(SSEF32x4Neg) \
+ V(AVXF32x4Neg) \
+ V(IA32F32x4RecipApprox) \
+ V(IA32F32x4RecipSqrtApprox) \
+ V(SSEF32x4Add) \
+ V(AVXF32x4Add) \
+ V(SSEF32x4AddHoriz) \
+ V(AVXF32x4AddHoriz) \
+ V(SSEF32x4Sub) \
+ V(AVXF32x4Sub) \
+ V(SSEF32x4Mul) \
+ V(AVXF32x4Mul) \
+ V(SSEF32x4Min) \
+ V(AVXF32x4Min) \
+ V(SSEF32x4Max) \
+ V(AVXF32x4Max) \
+ V(SSEF32x4Eq) \
+ V(AVXF32x4Eq) \
+ V(SSEF32x4Ne) \
+ V(AVXF32x4Ne) \
+ V(SSEF32x4Lt) \
+ V(AVXF32x4Lt) \
+ V(SSEF32x4Le) \
+ V(AVXF32x4Le) \
+ V(IA32I32x4Splat) \
+ V(IA32I32x4ExtractLane) \
+ V(SSEI32x4ReplaceLane) \
+ V(AVXI32x4ReplaceLane) \
+ V(SSEI32x4SConvertF32x4) \
+ V(AVXI32x4SConvertF32x4) \
+ V(IA32I32x4SConvertI16x8Low) \
+ V(IA32I32x4SConvertI16x8High) \
+ V(IA32I32x4Neg) \
+ V(SSEI32x4Shl) \
+ V(AVXI32x4Shl) \
+ V(SSEI32x4ShrS) \
+ V(AVXI32x4ShrS) \
+ V(SSEI32x4Add) \
+ V(AVXI32x4Add) \
+ V(SSEI32x4AddHoriz) \
+ V(AVXI32x4AddHoriz) \
+ V(SSEI32x4Sub) \
+ V(AVXI32x4Sub) \
+ V(SSEI32x4Mul) \
+ V(AVXI32x4Mul) \
+ V(SSEI32x4MinS) \
+ V(AVXI32x4MinS) \
+ V(SSEI32x4MaxS) \
+ V(AVXI32x4MaxS) \
+ V(SSEI32x4Eq) \
+ V(AVXI32x4Eq) \
+ V(SSEI32x4Ne) \
+ V(AVXI32x4Ne) \
+ V(SSEI32x4GtS) \
+ V(AVXI32x4GtS) \
+ V(SSEI32x4GeS) \
+ V(AVXI32x4GeS) \
+ V(SSEI32x4UConvertF32x4) \
+ V(AVXI32x4UConvertF32x4) \
+ V(IA32I32x4UConvertI16x8Low) \
+ V(IA32I32x4UConvertI16x8High) \
+ V(SSEI32x4ShrU) \
+ V(AVXI32x4ShrU) \
+ V(SSEI32x4MinU) \
+ V(AVXI32x4MinU) \
+ V(SSEI32x4MaxU) \
+ V(AVXI32x4MaxU) \
+ V(SSEI32x4GtU) \
+ V(AVXI32x4GtU) \
+ V(SSEI32x4GeU) \
+ V(AVXI32x4GeU) \
+ V(IA32I16x8Splat) \
+ V(IA32I16x8ExtractLane) \
+ V(SSEI16x8ReplaceLane) \
+ V(AVXI16x8ReplaceLane) \
+ V(IA32I16x8SConvertI8x16Low) \
+ V(IA32I16x8SConvertI8x16High) \
+ V(IA32I16x8Neg) \
+ V(SSEI16x8Shl) \
+ V(AVXI16x8Shl) \
+ V(SSEI16x8ShrS) \
+ V(AVXI16x8ShrS) \
+ V(SSEI16x8SConvertI32x4) \
+ V(AVXI16x8SConvertI32x4) \
+ V(SSEI16x8Add) \
+ V(AVXI16x8Add) \
+ V(SSEI16x8AddSaturateS) \
+ V(AVXI16x8AddSaturateS) \
+ V(SSEI16x8AddHoriz) \
+ V(AVXI16x8AddHoriz) \
+ V(SSEI16x8Sub) \
+ V(AVXI16x8Sub) \
+ V(SSEI16x8SubSaturateS) \
+ V(AVXI16x8SubSaturateS) \
+ V(SSEI16x8Mul) \
+ V(AVXI16x8Mul) \
+ V(SSEI16x8MinS) \
+ V(AVXI16x8MinS) \
+ V(SSEI16x8MaxS) \
+ V(AVXI16x8MaxS) \
+ V(SSEI16x8Eq) \
+ V(AVXI16x8Eq) \
+ V(SSEI16x8Ne) \
+ V(AVXI16x8Ne) \
+ V(SSEI16x8GtS) \
+ V(AVXI16x8GtS) \
+ V(SSEI16x8GeS) \
+ V(AVXI16x8GeS) \
+ V(IA32I16x8UConvertI8x16Low) \
+ V(IA32I16x8UConvertI8x16High) \
+ V(SSEI16x8ShrU) \
+ V(AVXI16x8ShrU) \
+ V(SSEI16x8UConvertI32x4) \
+ V(AVXI16x8UConvertI32x4) \
+ V(SSEI16x8AddSaturateU) \
+ V(AVXI16x8AddSaturateU) \
+ V(SSEI16x8SubSaturateU) \
+ V(AVXI16x8SubSaturateU) \
+ V(SSEI16x8MinU) \
+ V(AVXI16x8MinU) \
+ V(SSEI16x8MaxU) \
+ V(AVXI16x8MaxU) \
+ V(SSEI16x8GtU) \
+ V(AVXI16x8GtU) \
+ V(SSEI16x8GeU) \
+ V(AVXI16x8GeU) \
+ V(IA32I8x16Splat) \
+ V(IA32I8x16ExtractLane) \
+ V(SSEI8x16ReplaceLane) \
+ V(AVXI8x16ReplaceLane) \
+ V(SSEI8x16SConvertI16x8) \
+ V(AVXI8x16SConvertI16x8) \
+ V(IA32I8x16Neg) \
+ V(SSEI8x16Shl) \
+ V(AVXI8x16Shl) \
+ V(IA32I8x16ShrS) \
+ V(SSEI8x16Add) \
+ V(AVXI8x16Add) \
+ V(SSEI8x16AddSaturateS) \
+ V(AVXI8x16AddSaturateS) \
+ V(SSEI8x16Sub) \
+ V(AVXI8x16Sub) \
+ V(SSEI8x16SubSaturateS) \
+ V(AVXI8x16SubSaturateS) \
+ V(SSEI8x16Mul) \
+ V(AVXI8x16Mul) \
+ V(SSEI8x16MinS) \
+ V(AVXI8x16MinS) \
+ V(SSEI8x16MaxS) \
+ V(AVXI8x16MaxS) \
+ V(SSEI8x16Eq) \
+ V(AVXI8x16Eq) \
+ V(SSEI8x16Ne) \
+ V(AVXI8x16Ne) \
+ V(SSEI8x16GtS) \
+ V(AVXI8x16GtS) \
+ V(SSEI8x16GeS) \
+ V(AVXI8x16GeS) \
+ V(SSEI8x16UConvertI16x8) \
+ V(AVXI8x16UConvertI16x8) \
+ V(SSEI8x16AddSaturateU) \
+ V(AVXI8x16AddSaturateU) \
+ V(SSEI8x16SubSaturateU) \
+ V(AVXI8x16SubSaturateU) \
+ V(IA32I8x16ShrU) \
+ V(SSEI8x16MinU) \
+ V(AVXI8x16MinU) \
+ V(SSEI8x16MaxU) \
+ V(AVXI8x16MaxU) \
+ V(SSEI8x16GtU) \
+ V(AVXI8x16GtU) \
+ V(SSEI8x16GeU) \
+ V(AVXI8x16GeU) \
+ V(IA32S128Zero) \
+ V(SSES128Not) \
+ V(AVXS128Not) \
+ V(SSES128And) \
+ V(AVXS128And) \
+ V(SSES128Or) \
+ V(AVXS128Or) \
+ V(SSES128Xor) \
+ V(AVXS128Xor) \
+ V(SSES128Select) \
+ V(AVXS128Select) \
+ V(IA32S8x16Shuffle) \
+ V(IA32S32x4Swizzle) \
+ V(IA32S32x4Shuffle) \
+ V(IA32S16x8Blend) \
+ V(IA32S16x8HalfShuffle1) \
+ V(IA32S16x8HalfShuffle2) \
+ V(IA32S8x16Alignr) \
+ V(IA32S16x8Dup) \
+ V(IA32S8x16Dup) \
+ V(SSES16x8UnzipHigh) \
+ V(AVXS16x8UnzipHigh) \
+ V(SSES16x8UnzipLow) \
+ V(AVXS16x8UnzipLow) \
+ V(SSES8x16UnzipHigh) \
+ V(AVXS8x16UnzipHigh) \
+ V(SSES8x16UnzipLow) \
+ V(AVXS8x16UnzipLow) \
+ V(IA32S64x2UnpackHigh) \
+ V(IA32S32x4UnpackHigh) \
+ V(IA32S16x8UnpackHigh) \
+ V(IA32S8x16UnpackHigh) \
+ V(IA32S64x2UnpackLow) \
+ V(IA32S32x4UnpackLow) \
+ V(IA32S16x8UnpackLow) \
+ V(IA32S8x16UnpackLow) \
+ V(SSES8x16TransposeLow) \
+ V(AVXS8x16TransposeLow) \
+ V(SSES8x16TransposeHigh) \
+ V(AVXS8x16TransposeHigh) \
+ V(SSES8x8Reverse) \
+ V(AVXS8x8Reverse) \
+ V(SSES8x4Reverse) \
+ V(AVXS8x4Reverse) \
+ V(SSES8x2Reverse) \
+ V(AVXS8x2Reverse) \
+ V(IA32S1x4AnyTrue) \
+ V(IA32S1x4AllTrue) \
+ V(IA32S1x8AnyTrue) \
+ V(IA32S1x8AllTrue) \
+ V(IA32S1x16AnyTrue) \
+ V(IA32S1x16AllTrue) \
+ V(IA32Word32AtomicPairLoad) \
+ V(IA32Word32AtomicPairStore) \
+ V(IA32Word32AtomicPairAdd) \
+ V(IA32Word32AtomicPairSub) \
+ V(IA32Word32AtomicPairAnd) \
+ V(IA32Word32AtomicPairOr) \
+ V(IA32Word32AtomicPairXor) \
+ V(IA32Word32AtomicPairExchange) \
+ V(IA32Word32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
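A note on the pattern: TARGET_ARCH_OPCODE_LIST above is an X-macro, expanded with different definitions of V() to generate the opcode enum and related tables in lockstep (the scheduler hunk below does exactly this with "#define CASE(Name) case k##Name:"). A minimal, self-contained sketch of that mechanism, using a made-up three-entry list:

#include <cstdio>

#define DEMO_OPCODE_LIST(V) \
  V(IA32Add)                \
  V(IA32Sub)                \
  V(IA32Lea)

// First expansion: enum constants kIA32Add, kIA32Sub, kIA32Lea.
enum DemoOpcode {
#define DECLARE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE)
#undef DECLARE
};

// Second expansion over the same list: a name table that can never fall
// out of sync with the enum.
const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define CASE(Name) \
  case k##Name:    \
    return #Name;
    DEMO_OPCODE_LIST(CASE)
#undef CASE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", DemoOpcodeName(kIA32Lea));  // prints "IA32Lea"
  return 0;
}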
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 07d42bc614..54454e41cb 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -380,27 +380,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Word32AtomicPairXor:
case kIA32Word32AtomicPairExchange:
case kIA32Word32AtomicPairCompareExchange:
- case kIA32Word64AtomicNarrowAddUint8:
- case kIA32Word64AtomicNarrowAddUint16:
- case kIA32Word64AtomicNarrowAddUint32:
- case kIA32Word64AtomicNarrowSubUint8:
- case kIA32Word64AtomicNarrowSubUint16:
- case kIA32Word64AtomicNarrowSubUint32:
- case kIA32Word64AtomicNarrowAndUint8:
- case kIA32Word64AtomicNarrowAndUint16:
- case kIA32Word64AtomicNarrowAndUint32:
- case kIA32Word64AtomicNarrowOrUint8:
- case kIA32Word64AtomicNarrowOrUint16:
- case kIA32Word64AtomicNarrowOrUint32:
- case kIA32Word64AtomicNarrowXorUint8:
- case kIA32Word64AtomicNarrowXorUint16:
- case kIA32Word64AtomicNarrowXorUint32:
- case kIA32Word64AtomicNarrowExchangeUint8:
- case kIA32Word64AtomicNarrowExchangeUint16:
- case kIA32Word64AtomicNarrowExchangeUint32:
- case kIA32Word64AtomicNarrowCompareExchangeUint8:
- case kIA32Word64AtomicNarrowCompareExchangeUint16:
- case kIA32Word64AtomicNarrowCompareExchangeUint32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index ce2f14e97f..43b572170f 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -935,10 +935,10 @@ void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
void InstructionSelector::VisitFloat64Mod(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempRegister(eax)};
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister()};
Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
- temps);
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ arraysize(temps), temps);
}
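The fix above replaces a hardcoded temp count of 1 with arraysize(temps), so adding the second scratch register cannot silently disagree with the count passed to Emit. A standalone sketch of the idiom (V8 keeps a similar helper in its base macros; this simplified version is only for illustration):

#include <cstddef>

// Deduces the element count of a C array at compile time.
template <typename T, std::size_t N>
constexpr std::size_t arraysize(const T (&)[N]) {
  return N;
}

int temps[] = {1, 2};
static_assert(arraysize(temps) == 2, "count tracks the initializer list");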
void InstructionSelector::VisitFloat32Max(Node* node) {
@@ -1343,43 +1343,42 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
// For Word64 operations, the value input is split into a high node
// and a low node in the int64-lowering phase.
Node* value_high = node->InputAt(3);
+#if defined(V8_EMBEDDED_BUILTINS)
+ bool block_root_register = !selector->CanUseRootsRegister();
+#else
+ bool block_root_register = true;
+#endif
// Wasm lives in 32-bit address space, so we do not need to worry about
// base/index lowering. This will need to be fixed for Wasm64.
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
- g.UseFixed(value, ebx), g.UseFixed(value_high, ecx),
+ g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
-}
-
-void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
- ArchOpcode opcode, MachineType type) {
- IA32OperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- // Wasm lives in 32-bit address space, so we do not need to worry about
- // base/index lowering. This will need to be fixed for Wasm64.
- AddressingMode addressing_mode;
- InstructionOperand inputs[] = {
- g.UseUniqueRegister(value), g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
- InstructionOperand temp[] = {(type == MachineType::Uint8())
- ? g.UseByteRegister(node)
- : g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temp), temp);
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ InstructionOperand temps[] = {g.TempRegister(ebx)};
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
+ g.DefineAsFixed(projection1, edx)};
+ const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ num_temps, temps);
+ } else if (projection0) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
+ InstructionOperand temps[] = {g.TempRegister(edx), g.TempRegister(ebx)};
+ const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ num_temps, temps);
+ } else {
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
+ g.TempRegister(ebx)};
+ const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps,
+ temps);
+ }
}
} // namespace
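Throughout the pair-atomic visitors above, ebx is deliberately the last entry of each temps array: num_temps = arraysize(temps) - (block_root_register ? 0 : 1) then either keeps it, blocking ebx as instruction scratch, or drops it, presumably because ebx may be serving as the roots register under embedded builtins and must then be left to the code generator to save and restore. A toy model of the count arithmetic, with stand-in types:

#include <cstddef>

struct TempReg { const char* name; };  // stand-in for InstructionOperand

std::size_t TempCount(const TempReg (&temps)[3], bool block_root_register) {
  // ebx sits last, so excluding it is just a shorter count over the same
  // array; the earlier entries are untouched.
  return (sizeof(temps) / sizeof(temps[0])) - (block_root_register ? 0 : 1);
}

// Usage:
//   TempReg temps[] = {{"eax"}, {"edx"}, {"ebx"}};
//   TempCount(temps, true)  == 3  (ebx blocked as an instruction temp)
//   TempCount(temps, false) == 2  (ebx left out of the temp set)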
@@ -1769,14 +1768,27 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
Node* index = node->InputAt(1);
InstructionOperand inputs[] = {g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &mode)};
- InstructionOperand temps[] = {g.TempDoubleRegister()};
- InstructionOperand outputs[] = {
- g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
InstructionCode code =
kIA32Word32AtomicPairLoad | AddressingModeField::encode(mode);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
+
+ if (projection1) {
+ InstructionOperand temps[] = {g.TempDoubleRegister()};
+ InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
+ g.DefineAsRegister(projection1)};
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else if (projection0) {
+ InstructionOperand temps[] = {g.TempDoubleRegister(), g.TempRegister()};
+ InstructionOperand outputs[] = {g.DefineAsRegister(projection0)};
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else {
+ InstructionOperand temps[] = {g.TempDoubleRegister(), g.TempRegister(),
+ g.TempRegister()};
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+ }
}
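VisitWord32AtomicPairLoad above now shapes the instruction around which halves of the 64-bit result are actually used: a projection that exists becomes a register-allocated output, and a missing one frees its register to serve as a plain temp. A simplified model of that three-way selection, with hypothetical names:

#include <cstdio>

// Models NodeProperties::FindProjection(node, i): non-null only when the
// i-th half of the pair result has uses. Types here are hypothetical.
struct Projection { int id; };

void EmitPairLoadSketch(const Projection* p0, const Projection* p1) {
  if (p1) {
    std::puts("outputs: {p0, p1}, temps: {xmm}");
  } else if (p0) {
    std::puts("outputs: {p0}, temps: {xmm, gp}");
  } else {
    std::puts("outputs: {}, temps: {xmm, gp, gp}");
  }
}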
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
@@ -1785,19 +1797,26 @@ void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
+#if defined(V8_EMBEDDED_BUILTINS)
+ bool block_root_register = !CanUseRootsRegister();
+#else
+ bool block_root_register = true;
+#endif
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
- g.UseFixed(value, ebx), g.UseFixed(value_high, ecx),
+ g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
  // Allocate temp registers here because stores are performed using an atomic
  // exchange: its output lands in edx:eax, which must be saved and restored
  // at the end of the instruction.
- InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
+ g.TempRegister(ebx)};
+ const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
InstructionCode code =
kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
}
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
@@ -1828,125 +1847,45 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
IA32OperandGenerator g(this);
Node* index = node->InputAt(1);
AddressingMode addressing_mode;
+#if defined(V8_EMBEDDED_BUILTINS)
+ bool block_root_register = !CanUseRootsRegister();
+#else
+ bool block_root_register = true;
+#endif
+
InstructionOperand inputs[] = {
// High, Low values of old value
g.UseFixed(node->InputAt(2), eax), g.UseFixed(node->InputAt(3), edx),
// High, Low values of new value
- g.UseFixed(node->InputAt(4), ebx), g.UseFixed(node->InputAt(5), ecx),
+ g.UseUniqueRegisterOrSlotOrConstant(node->InputAt(4)),
+ g.UseFixed(node->InputAt(5), ecx),
// InputAt(0) => base
g.UseUniqueRegister(node->InputAt(0)),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
InstructionCode code = kIA32Word32AtomicPairCompareExchange |
AddressingModeField::encode(addressing_mode);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
-}
-void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
- ArchOpcode uint8_op,
- ArchOpcode uint16_op,
- ArchOpcode uint32_op) {
- MachineType type = AtomicOpType(node->op());
- DCHECK(type != MachineType::Uint64());
- ArchOpcode opcode = kArchNop;
- if (type == MachineType::Uint32()) {
- opcode = uint32_op;
- } else if (type == MachineType::Uint16()) {
- opcode = uint16_op;
- } else if (type == MachineType::Uint8()) {
- opcode = uint8_op;
- } else {
- UNREACHABLE();
- return;
- }
- VisitNarrowAtomicBinOp(this, node, opcode, type);
-}
-
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) { \
- VisitWord64AtomicNarrowBinop(node, kIA32Word64AtomicNarrow##op##Uint8, \
- kIA32Word64AtomicNarrow##op##Uint16, \
- kIA32Word64AtomicNarrow##op##Uint32); \
- }
-VISIT_ATOMIC_BINOP(Add)
-VISIT_ATOMIC_BINOP(Sub)
-VISIT_ATOMIC_BINOP(And)
-VISIT_ATOMIC_BINOP(Or)
-VISIT_ATOMIC_BINOP(Xor)
-#undef VISIT_ATOMIC_BINOP
-
-void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
- MachineType type = AtomicOpType(node->op());
- DCHECK(type != MachineType::Uint64());
- ArchOpcode opcode = kArchNop;
- if (type == MachineType::Uint32()) {
- opcode = kIA32Word64AtomicNarrowExchangeUint32;
- } else if (type == MachineType::Uint16()) {
- opcode = kIA32Word64AtomicNarrowExchangeUint16;
- } else if (type == MachineType::Uint8()) {
- opcode = kIA32Word64AtomicNarrowExchangeUint8;
- } else {
- UNREACHABLE();
- return;
- }
- IA32OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- AddressingMode addressing_mode;
- InstructionOperand value_operand =
- (type.representation() == MachineRepresentation::kWord8)
- ? g.UseFixed(value, edx)
- : g.UseUniqueRegister(value);
- InstructionOperand inputs[] = {
- value_operand, g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[2];
- if (type.representation() == MachineRepresentation::kWord8) {
- // Using DefineSameAsFirst requires the register to be unallocated.
- outputs[0] = g.DefineAsFixed(NodeProperties::FindProjection(node, 0), edx);
- } else {
- outputs[0] = g.DefineSameAsFirst(NodeProperties::FindProjection(node, 0));
- }
- outputs[1] = g.DefineAsRegister(NodeProperties::FindProjection(node, 1));
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
- MachineType type = AtomicOpType(node->op());
- DCHECK(type != MachineType::Uint64());
- ArchOpcode opcode = kArchNop;
- if (type == MachineType::Uint32()) {
- opcode = kIA32Word64AtomicNarrowCompareExchangeUint32;
- } else if (type == MachineType::Uint16()) {
- opcode = kIA32Word64AtomicNarrowCompareExchangeUint16;
- } else if (type == MachineType::Uint8()) {
- opcode = kIA32Word64AtomicNarrowCompareExchangeUint8;
+ if (projection1) {
+ InstructionOperand temps[] = {g.TempRegister(ebx)};
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
+ g.DefineAsFixed(projection1, edx)};
+ const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ num_temps, temps);
+ } else if (projection0) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
+ InstructionOperand temps[] = {g.TempRegister(edx), g.TempRegister(ebx)};
+ const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ num_temps, temps);
} else {
- UNREACHABLE();
- return;
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
+ g.TempRegister(ebx)};
+ const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
}
- IA32OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* old_value = node->InputAt(2);
- Node* new_value = node->InputAt(3);
- AddressingMode addressing_mode;
- InstructionOperand new_value_operand =
- (type.representation() == MachineRepresentation::kWord8)
- ? g.UseByteRegister(new_value)
- : g.UseUniqueRegister(new_value);
- InstructionOperand inputs[] = {
- g.UseFixed(old_value, eax), new_value_operand, g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
#define SIMD_INT_TYPES(V) \
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 27f37215df..bff70d5edf 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+#include "src/compiler/common-operator.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
@@ -154,6 +155,12 @@ class OperandGenerator {
UnallocatedOperand::USED_AT_START, GetVReg(node)));
}
+ InstructionOperand UseUniqueRegisterOrSlotOrConstant(Node* node) {
+ return Use(node, UnallocatedOperand(
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
+ GetVReg(node)));
+ }
+
InstructionOperand UseRegister(Node* node) {
return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START,
@@ -319,6 +326,8 @@ class OperandGenerator {
}
case IrOpcode::kHeapConstant:
return Constant(HeapConstantOf(node->op()));
+ case IrOpcode::kDelayedStringConstant:
+ return Constant(StringConstantBaseOf(node->op()));
case IrOpcode::kDeadValue: {
switch (DeadValueRepresentationOf(node->op())) {
case MachineRepresentation::kBit:
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index d15a633257..4bd4dc18fe 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -456,6 +456,7 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
case IrOpcode::kNumberConstant:
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
+ case IrOpcode::kDelayedStringConstant:
return g->UseImmediate(input);
case IrOpcode::kHeapConstant: {
if (!CanBeTaggedPointer(rep)) {
@@ -470,9 +471,9 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
}
Handle<HeapObject> constant = HeapConstantOf(input->op());
- Heap::RootListIndex root_index;
+ RootIndex root_index;
if (isolate->heap()->IsRootHandle(constant, &root_index) &&
- root_index == Heap::kOptimizedOutRootIndex) {
+ root_index == RootIndex::kOptimizedOut) {
// For an optimized-out object we return an invalid instruction
// operand, so that we take the fast path for optimized-out values.
return InstructionOperand();
@@ -1081,6 +1082,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
std::reverse(instructions_.begin() + instruction_start,
instructions_.end());
if (!node) return true;
+ if (!source_positions_) return true;
SourcePosition source_position = source_positions_->GetSourcePosition(node);
if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
sequence()->SetSourcePosition(instructions_[instruction_start],
@@ -1293,6 +1295,8 @@ void InstructionSelector::VisitNode(Node* node) {
if (!IsSmiDouble(value)) MarkAsReference(node);
return VisitConstant(node);
}
+ case IrOpcode::kDelayedStringConstant:
+ return MarkAsReference(node), VisitConstant(node);
case IrOpcode::kCall:
return VisitCall(node);
case IrOpcode::kCallWithCallerSavedRegisters:
@@ -1467,10 +1471,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
+ case IrOpcode::kChangeInt64ToFloat64:
+ return MarkAsFloat64(node), VisitChangeInt64ToFloat64(node);
case IrOpcode::kChangeUint32ToFloat64:
return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
case IrOpcode::kChangeFloat64ToInt32:
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
+ case IrOpcode::kChangeFloat64ToInt64:
+ return MarkAsWord64(node), VisitChangeFloat64ToInt64(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
case IrOpcode::kChangeFloat64ToUint64:
@@ -1747,21 +1755,6 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(Exchange)
ATOMIC_CASE(CompareExchange)
#undef ATOMIC_CASE
-#define ATOMIC_CASE(name) \
- case IrOpcode::kWord64AtomicNarrow##name: { \
- MachineType type = AtomicOpType(node->op()); \
- MarkAsRepresentation(type.representation(), node); \
- MarkPairProjectionsAsWord32(node); \
- return VisitWord64AtomicNarrow##name(node); \
- }
- ATOMIC_CASE(Add)
- ATOMIC_CASE(Sub)
- ATOMIC_CASE(And)
- ATOMIC_CASE(Or)
- ATOMIC_CASE(Xor)
- ATOMIC_CASE(Exchange)
- ATOMIC_CASE(CompareExchange)
-#undef ATOMIC_CASE
case IrOpcode::kSpeculationFence:
return VisitSpeculationFence(node);
case IrOpcode::kProtectedLoad: {
@@ -2299,11 +2292,18 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
UNIMPLEMENTED();
}
@@ -2389,7 +2389,7 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
-#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
UNIMPLEMENTED();
}
@@ -2425,49 +2425,10 @@ void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
-void InstructionSelector::VisitWord64AtomicNarrowAdd(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowSub(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowAnd(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowOr(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowXor(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
+ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -2491,65 +2452,11 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC
+ // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
- UNIMPLEMENTED();
-}
-
void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
@@ -2559,18 +2466,6 @@ void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
@@ -2807,7 +2702,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
buffer.instruction_args.push_back(g.TempImmediate(optional_padding_slot));
int first_unused_stack_slot =
- (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
+ (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? true : false) +
stack_param_delta;
buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 83ed28fb53..04a2bd9581 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -593,6 +593,13 @@ Handle<Code> Constant::ToCode() const {
return value;
}
+const StringConstantBase* Constant::ToDelayedStringConstant() const {
+ DCHECK_EQ(kDelayedStringConstant, type());
+ const StringConstantBase* value =
+ bit_cast<StringConstantBase*>(static_cast<intptr_t>(value_));
+ return value;
+}
+
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
case Constant::kInt32:
@@ -609,6 +616,9 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
return os << Brief(*constant.ToHeapObject());
case Constant::kRpoNumber:
return os << "RPO" << constant.ToRpoNumber().ToInt();
+ case Constant::kDelayedStringConstant:
+ return os << "DelayedStringConstant: "
+ << constant.ToDelayedStringConstant();
}
UNREACHABLE();
}
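ToDelayedStringConstant above recovers a pointer that Constant stored in its integer value_ slot. The tagged-union shape, reduced to the two relevant pieces (stand-in types, and reinterpret_cast standing in for V8's bit_cast):

#include <cassert>
#include <cstdint>

struct StringConstantBase {};  // stand-in for the real class

class ConstantSketch {
 public:
  enum Type { kInt32, kDelayedStringConstant };

  explicit ConstantSketch(const StringConstantBase* str)
      : type_(kDelayedStringConstant),
        value_(reinterpret_cast<intptr_t>(str)) {}

  const StringConstantBase* ToDelayedStringConstant() const {
    assert(type_ == kDelayedStringConstant);  // DCHECK_EQ in the real code
    return reinterpret_cast<const StringConstantBase*>(
        static_cast<intptr_t>(value_));
  }

 private:
  Type type_;
  int64_t value_;  // wide enough for any payload, pointers included
};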
@@ -942,7 +952,7 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
DCHECK_IMPLIES(representations_[virtual_register] != rep,
representations_[virtual_register] == DefaultRepresentation());
representations_[virtual_register] = rep;
- representation_mask_ |= 1 << static_cast<int>(rep);
+ representation_mask_ |= RepresentationBit(rep);
}
int InstructionSequence::AddDeoptimizationEntry(
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 1991e309d3..39d083c2de 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -1039,7 +1039,8 @@ class V8_EXPORT_PRIVATE Constant final {
kFloat64,
kExternalReference,
kHeapObject,
- kRpoNumber
+ kRpoNumber,
+ kDelayedStringConstant
};
explicit Constant(int32_t v);
@@ -1047,10 +1048,12 @@ class V8_EXPORT_PRIVATE Constant final {
explicit Constant(float v) : type_(kFloat32), value_(bit_cast<int32_t>(v)) {}
explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
explicit Constant(ExternalReference ref)
- : type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
+ : type_(kExternalReference), value_(bit_cast<intptr_t>(ref.address())) {}
explicit Constant(Handle<HeapObject> obj)
: type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
+ explicit Constant(const StringConstantBase* str)
+ : type_(kDelayedStringConstant), value_(bit_cast<intptr_t>(str)) {}
explicit Constant(RelocatablePtrConstantInfo info);
Type type() const { return type_; }
@@ -1090,7 +1093,7 @@ class V8_EXPORT_PRIVATE Constant final {
ExternalReference ToExternalReference() const {
DCHECK_EQ(kExternalReference, type());
- return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
+ return ExternalReference::FromRawAddress(static_cast<Address>(value_));
}
RpoNumber ToRpoNumber() const {
@@ -1100,6 +1103,7 @@ class V8_EXPORT_PRIVATE Constant final {
Handle<HeapObject> ToHeapObject() const;
Handle<Code> ToCode() const;
+ const StringConstantBase* ToDelayedStringConstant() const;
private:
Type type_;
@@ -1293,7 +1297,8 @@ class FrameStateDescriptor : public ZoneObject {
FrameStateDescriptor* outer_state() const { return outer_state_; }
bool HasContext() const {
return FrameStateFunctionInfo::IsJSFunctionType(type_) ||
- type_ == FrameStateType::kBuiltinContinuation;
+ type_ == FrameStateType::kBuiltinContinuation ||
+ type_ == FrameStateType::kConstructStub;
}
size_t GetSize() const;
@@ -1321,7 +1326,7 @@ class FrameStateDescriptor : public ZoneObject {
// frame state descriptor that we have to go back to.
class DeoptimizationEntry final {
public:
- DeoptimizationEntry() {}
+ DeoptimizationEntry() = default;
DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
DeoptimizeReason reason, VectorSlotPair const& feedback)
: descriptor_(descriptor),
@@ -1520,7 +1525,6 @@ class V8_EXPORT_PRIVATE InstructionSequence final
}
MachineRepresentation GetRepresentation(int virtual_register) const;
void MarkAsRepresentation(MachineRepresentation rep, int virtual_register);
- int representation_mask() const { return representation_mask_; }
bool IsReference(int virtual_register) const {
return CanBeTaggedPointer(GetRepresentation(virtual_register));
@@ -1528,6 +1532,14 @@ class V8_EXPORT_PRIVATE InstructionSequence final
bool IsFP(int virtual_register) const {
return IsFloatingPoint(GetRepresentation(virtual_register));
}
+ int representation_mask() const { return representation_mask_; }
+ bool HasFPVirtualRegisters() const {
+ constexpr int kFPRepMask =
+ RepresentationBit(MachineRepresentation::kFloat32) |
+ RepresentationBit(MachineRepresentation::kFloat64) |
+ RepresentationBit(MachineRepresentation::kSimd128);
+ return (representation_mask() & kFPRepMask) != 0;
+ }
Instruction* GetBlockStart(RpoNumber rpo) const;
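The new HasFPVirtualRegisters query works because MarkAsRepresentation (previous hunk) now ORs RepresentationBit(rep), one bit per MachineRepresentation, into representation_mask_, turning "does the sequence contain any FP virtual registers" into a single mask test. A self-contained sketch with an abbreviated enum:

#include <cstdint>

enum class MachineRep : uint8_t { kWord32, kFloat32, kFloat64, kSimd128 };

constexpr int RepresentationBit(MachineRep rep) {
  return 1 << static_cast<int>(rep);
}

struct SequenceSketch {
  int representation_mask = 0;

  void MarkAsRepresentation(MachineRep rep) {
    representation_mask |= RepresentationBit(rep);
  }

  bool HasFPVirtualRegisters() const {
    constexpr int kFPRepMask = RepresentationBit(MachineRep::kFloat32) |
                               RepresentationBit(MachineRep::kFloat64) |
                               RepresentationBit(MachineRep::kSimd128);
    return (representation_mask & kFPRepMask) != 0;
  }
};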
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 8066ce5dca..41a5098081 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -127,9 +127,10 @@ void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
}
void Int64Lowering::LowerWord64AtomicNarrowOp(Node* node, const Operator* op) {
- DefaultLowering(node, true);
+ Node* value = node->InputAt(2);
+ node->ReplaceInput(2, GetReplacementLow(value));
NodeProperties::ChangeOp(node, op);
- ReplaceNodeWithProjections(node);
+ ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
// static
@@ -915,8 +916,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (type == MachineType::Uint64()) { \
LowerWord64AtomicBinop(node, machine()->Word32AtomicPair##name()); \
} else { \
- LowerWord64AtomicNarrowOp(node, \
- machine()->Word64AtomicNarrow##name(type)); \
+ LowerWord64AtomicNarrowOp(node, machine()->Word32Atomic##name(type)); \
} \
break; \
}
@@ -940,8 +940,15 @@ void Int64Lowering::LowerNode(Node* node) {
machine()->Word32AtomicPairCompareExchange());
ReplaceNodeWithProjections(node);
} else {
- LowerWord64AtomicNarrowOp(
- node, machine()->Word64AtomicNarrowCompareExchange(type));
+ DCHECK(type == MachineType::Uint32() || type == MachineType::Uint16() ||
+ type == MachineType::Uint8());
+ Node* old_value = node->InputAt(2);
+ node->ReplaceInput(2, GetReplacementLow(old_value));
+ Node* new_value = node->InputAt(3);
+ node->ReplaceInput(3, GetReplacementLow(new_value));
+ NodeProperties::ChangeOp(node,
+ machine()->Word32AtomicCompareExchange(type));
+ ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
break;
}
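The lowering above can replace the high word of a narrow (sub-64-bit) atomic result with Int32Constant(0) because the operation only ever touches the narrow cell: zero-extending its old value leaves the upper bits zero by construction. A concrete check of that invariant using std::atomic:

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uint8_t> cell{200};
  // A "64-bit" atomic add on a uint8_t cell: the old value zero-extends,
  // so bits 8..63 of the result are guaranteed zero.
  uint64_t old_value = cell.fetch_add(100);
  assert(old_value == 200);
  assert(cell.load() == static_cast<uint8_t>(300));  // wraps to 44
  return 0;
}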
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index a06f4490a6..5b04731a64 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -25,6 +25,7 @@
#include "src/objects/arguments-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/js-objects.h"
#include "src/vector-slot-pair.h"
namespace v8 {
@@ -865,7 +866,8 @@ Reduction JSCallReducer::ReduceReflectGet(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kGetProperty);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0,
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(),
CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
vtrue = etrue = if_true =
@@ -2160,12 +2162,8 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
Node* callback_value) {
Node* boolean_result =
graph()->NewNode(simplified()->ToBoolean(), callback_value);
-
- Node* check_boolean_result =
- graph()->NewNode(simplified()->ReferenceEqual(), boolean_result,
- jsgraph()->TrueConstant());
Node* boolean_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check_boolean_result, *control);
+ boolean_result, *control);
Node* if_true = graph()->NewNode(common()->IfTrue(), boolean_branch);
Node* etrue = *effect;
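The change above (repeated for the every() and some() reductions below) drops a ReferenceEqual against TrueConstant: ToBoolean already yields a boolean, so the branch can consume it directly. The same redundancy in ordinary C++ terms:

#include <cstdio>

bool ToBooleanSketch(int value) { return value != 0; }

int main() {
  bool b = ToBooleanSketch(7);
  if (b == true) std::puts("before: branch on an extra compare");
  if (b) std::puts("after: branch on the boolean itself");
  return 0;
}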
@@ -2465,11 +2463,8 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
{
Node* boolean_result =
graph()->NewNode(simplified()->ToBoolean(), callback_value);
- Node* check_boolean_result =
- graph()->NewNode(simplified()->ReferenceEqual(), boolean_result,
- jsgraph()->TrueConstant());
Node* boolean_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check_boolean_result, control);
+ boolean_result, control);
if_false_callback = graph()->NewNode(common()->IfFalse(), boolean_branch);
efalse_callback = effect;
@@ -2585,7 +2580,8 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
: GetCallableForArrayIncludes(receiver_map->elements_kind(),
isolate());
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kEliminatable);
// The stub expects the following arguments: the receiver array, its elements,
// the search_element, the array length, and the index to start searching
@@ -2821,11 +2817,8 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
{
Node* boolean_result =
graph()->NewNode(simplified()->ToBoolean(), callback_value);
- Node* check_boolean_result =
- graph()->NewNode(simplified()->ReferenceEqual(), boolean_result,
- jsgraph()->TrueConstant());
Node* boolean_branch = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), check_boolean_result, control);
+ common()->Branch(BranchHint::kFalse), boolean_result, control);
if_true_callback = graph()->NewNode(common()->IfTrue(), boolean_branch);
etrue_callback = effect;
@@ -3026,7 +3019,7 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
if (access.offset == JSArray::kLengthOffset) {
// Ignore uses for arguments#length.
STATIC_ASSERT(JSArray::kLengthOffset ==
- JSArgumentsObject::kLengthOffset);
+ JSArgumentsObjectWithLength::kLengthOffset);
continue;
} else if (access.offset == JSObject::kElementsOffset) {
// Ignore safe uses for arguments#elements.
@@ -3332,7 +3325,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
}
HeapObject* heap_object;
- if (nexus.GetFeedback()->ToWeakHeapObject(&heap_object)) {
+ if (nexus.GetFeedback()->GetHeapObjectIfWeak(&heap_object)) {
Handle<HeapObject> feedback(heap_object, isolate());
// Check if we want to use CallIC feedback here.
if (!ShouldUseCallICFeedback(target)) return NoChange();
@@ -3468,53 +3461,53 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
node, JS_DATA_VIEW_TYPE,
AccessBuilder::ForJSArrayBufferViewByteOffset());
case Builtins::kDataViewPrototypeGetUint8:
- return ReduceDataViewPrototypeGet(node,
- ExternalArrayType::kExternalUint8Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kGet,
+ ExternalArrayType::kExternalUint8Array);
case Builtins::kDataViewPrototypeGetInt8:
- return ReduceDataViewPrototypeGet(node,
- ExternalArrayType::kExternalInt8Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kGet,
+ ExternalArrayType::kExternalInt8Array);
case Builtins::kDataViewPrototypeGetUint16:
- return ReduceDataViewPrototypeGet(
- node, ExternalArrayType::kExternalUint16Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kGet,
+ ExternalArrayType::kExternalUint16Array);
case Builtins::kDataViewPrototypeGetInt16:
- return ReduceDataViewPrototypeGet(node,
- ExternalArrayType::kExternalInt16Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kGet,
+ ExternalArrayType::kExternalInt16Array);
case Builtins::kDataViewPrototypeGetUint32:
- return ReduceDataViewPrototypeGet(
- node, ExternalArrayType::kExternalUint32Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kGet,
+ ExternalArrayType::kExternalUint32Array);
case Builtins::kDataViewPrototypeGetInt32:
- return ReduceDataViewPrototypeGet(node,
- ExternalArrayType::kExternalInt32Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kGet,
+ ExternalArrayType::kExternalInt32Array);
case Builtins::kDataViewPrototypeGetFloat32:
- return ReduceDataViewPrototypeGet(
- node, ExternalArrayType::kExternalFloat32Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kGet,
+ ExternalArrayType::kExternalFloat32Array);
case Builtins::kDataViewPrototypeGetFloat64:
- return ReduceDataViewPrototypeGet(
- node, ExternalArrayType::kExternalFloat64Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kGet,
+ ExternalArrayType::kExternalFloat64Array);
case Builtins::kDataViewPrototypeSetUint8:
- return ReduceDataViewPrototypeSet(node,
- ExternalArrayType::kExternalUint8Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kSet,
+ ExternalArrayType::kExternalUint8Array);
case Builtins::kDataViewPrototypeSetInt8:
- return ReduceDataViewPrototypeSet(node,
- ExternalArrayType::kExternalInt8Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kSet,
+ ExternalArrayType::kExternalInt8Array);
case Builtins::kDataViewPrototypeSetUint16:
- return ReduceDataViewPrototypeSet(
- node, ExternalArrayType::kExternalUint16Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kSet,
+ ExternalArrayType::kExternalUint16Array);
case Builtins::kDataViewPrototypeSetInt16:
- return ReduceDataViewPrototypeSet(node,
- ExternalArrayType::kExternalInt16Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kSet,
+ ExternalArrayType::kExternalInt16Array);
case Builtins::kDataViewPrototypeSetUint32:
- return ReduceDataViewPrototypeSet(
- node, ExternalArrayType::kExternalUint32Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kSet,
+ ExternalArrayType::kExternalUint32Array);
case Builtins::kDataViewPrototypeSetInt32:
- return ReduceDataViewPrototypeSet(node,
- ExternalArrayType::kExternalInt32Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kSet,
+ ExternalArrayType::kExternalInt32Array);
case Builtins::kDataViewPrototypeSetFloat32:
- return ReduceDataViewPrototypeSet(
- node, ExternalArrayType::kExternalFloat32Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kSet,
+ ExternalArrayType::kExternalFloat32Array);
case Builtins::kDataViewPrototypeSetFloat64:
- return ReduceDataViewPrototypeSet(
- node, ExternalArrayType::kExternalFloat64Array);
+ return ReduceDataViewAccess(node, DataViewAccess::kSet,
+ ExternalArrayType::kExternalFloat64Array);
case Builtins::kTypedArrayPrototypeByteLength:
return ReduceArrayBufferViewAccessor(
node, JS_TYPED_ARRAY_TYPE,
@@ -3758,7 +3751,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
}
HeapObject* feedback_object;
- if (nexus.GetFeedback()->ToStrongHeapObject(&feedback_object) &&
+ if (nexus.GetFeedback()->GetHeapObjectIfStrong(&feedback_object) &&
feedback_object->IsAllocationSite()) {
// The feedback is an AllocationSite, which means we have called the
// Array function and collected transition (and pretenuring) feedback
@@ -3787,7 +3780,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
NodeProperties::ReplaceValueInput(node, array_function, 1);
NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
return Changed(node);
- } else if (nexus.GetFeedback()->ToWeakHeapObject(&feedback_object) &&
+ } else if (nexus.GetFeedback()->GetHeapObjectIfWeak(&feedback_object) &&
!HeapObjectMatcher(new_target).HasValue()) {
Handle<HeapObject> object(feedback_object, isolate());
if (object->IsConstructor()) {
@@ -4835,7 +4828,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCloneFastJSArray);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kNoThrow | Operator::kNoDeopt);
// Calls to Builtins::kCloneFastJSArray produce COW arrays
@@ -4993,18 +4987,22 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
dependencies()->DependOnProtector(PropertyCellRef(
js_heap_broker(), factory()->array_buffer_neutering_protector()));
} else {
- // Deoptimize if the array buffer was neutered.
+ // Bail out if the {iterated_object}s JSArrayBuffer was neutered.
Node* buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
iterated_object, effect, control);
-
- Node* check = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- check = graph()->NewNode(simplified()->BooleanNot(), check);
- // TODO(bmeurer): Pass p.feedback(), or better introduce
- // CheckArrayBufferNotNeutered?
+ Node* buffer_bit_field = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+ buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), buffer_bit_field,
+ jsgraph()->Constant(JSArrayBuffer::WasNeuteredBit::kMask)),
+ jsgraph()->ZeroConstant());
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered),
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered,
+ p.feedback()),
check, effect, control);
}
}
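The rewritten guard above loads the buffer's bit field and tests the WasNeutered bit with a mask: the check holds when the masked value is zero, and the CheckIf deopts otherwise. The mask test in isolation, with an illustrative bit position:

#include <cstdint>

constexpr uint32_t kWasNeuteredMask = uint32_t{1} << 3;  // position illustrative

// Mirrors NumberEqual(NumberBitwiseAnd(bit_field, mask), 0): true means the
// buffer is still intact, false triggers the deopt above.
constexpr bool CheckNotNeutered(uint32_t bit_field) {
  return (bit_field & kWasNeuteredMask) == 0;
}

static_assert(CheckNotNeutered(0b0000), "untouched buffer passes");
static_assert(!CheckNotNeutered(0b1000), "neutered buffer fails the check");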
@@ -5341,9 +5339,6 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* input = NodeProperties::GetValueInput(node, 2);
- input = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- input, effect, control);
-
input = effect =
graph()->NewNode(simplified()->CheckBounds(p.feedback()), input,
jsgraph()->Constant(0x10FFFF + 1), effect, control);
@@ -5452,9 +5447,9 @@ Reduction JSCallReducer::ReduceStringPrototypeConcat(
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
+
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
Node* receiver = effect =
graph()->NewNode(simplified()->CheckString(p.feedback()),
NodeProperties::GetValueInput(node, 1), effect, control);
@@ -5463,26 +5458,22 @@ Reduction JSCallReducer::ReduceStringPrototypeConcat(
ReplaceWithValue(node, receiver, effect, control);
return Replace(receiver);
}
+
Node* argument = effect =
graph()->NewNode(simplified()->CheckString(p.feedback()),
NodeProperties::GetValueInput(node, 2), effect, control);
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
+ Node* argument_length =
+ graph()->NewNode(simplified()->StringLength(), argument);
+ Node* length = graph()->NewNode(simplified()->NumberAdd(), receiver_length,
+ argument_length);
+ length = effect = graph()->NewNode(
+ simplified()->CheckBounds(p.feedback()), length,
+ jsgraph()->Constant(String::kMaxLength + 1), effect, control);
- Callable const callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
- auto call_descriptor =
- Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState,
- Operator::kNoDeopt | Operator::kNoWrite);
-
- // TODO(turbofan): Massage the FrameState of the {node} here once we
- // have an artificial builtin frame type, so that it looks like the
- // exception from StringAdd overflow came from String.prototype.concat
- // builtin instead of the calling function.
- Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
-
- Node* value = effect = control = graph()->NewNode(
- common()->Call(call_descriptor), jsgraph()->HeapConstant(callable.code()),
- receiver, argument, context, outer_frame_state, effect, control);
+ Node* value = graph()->NewNode(simplified()->StringConcat(), length, receiver,
+ argument);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
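
Rather than calling the StringAdd stub (and massaging a frame state for its potential range error), the reducer now measures both strings, adds the lengths, and guards the sum with CheckBounds before emitting a pure StringConcat. A minimal model of that guard, where the value of kMaxLength is an assumption standing in for String::kMaxLength:

#include <cstddef>
#include <optional>

constexpr size_t kMaxLength = (size_t{1} << 30) - 25;  // assumed value

// CheckBounds(length, kMaxLength + 1) succeeds iff length <= kMaxLength;
// otherwise the optimized code deoptimizes, modeled here as nullopt.
std::optional<size_t> CheckedConcatLength(size_t lhs, size_t rhs) {
  size_t length = lhs + rhs;
  if (length > kMaxLength) return std::nullopt;
  return length;
}
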
@@ -5524,7 +5515,7 @@ Reduction JSCallReducer::ReduceAsyncFunctionPromiseRelease(Node* node) {
Node* JSCallReducer::CreateArtificialFrameState(
Node* node, Node* outer_frame_state, int parameter_count,
BailoutId bailout_id, FrameStateType frame_state_type,
- Handle<SharedFunctionInfo> shared) {
+ Handle<SharedFunctionInfo> shared, Node* context) {
const FrameStateFunctionInfo* state_info =
common()->CreateFrameStateFunctionInfo(frame_state_type,
parameter_count + 1, 0, shared);
@@ -5534,6 +5525,7 @@ Node* JSCallReducer::CreateArtificialFrameState(
const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
Node* node0 = graph()->NewNode(op0);
std::vector<Node*> params;
+ params.reserve(parameter_count + 1);
for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
params.push_back(node->InputAt(1 + parameter));
}
@@ -5541,9 +5533,11 @@ Node* JSCallReducer::CreateArtificialFrameState(
static_cast<int>(params.size()), SparseInputMask::Dense());
Node* params_node = graph()->NewNode(
op_param, static_cast<int>(params.size()), &params.front());
- return graph()->NewNode(op, params_node, node0, node0,
- jsgraph()->UndefinedConstant(), node->InputAt(0),
- outer_frame_state);
+ if (!context) {
+ context = jsgraph()->UndefinedConstant();
+ }
+ return graph()->NewNode(op, params_node, node0, node0, context,
+ node->InputAt(0), outer_frame_state);
}
Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
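
CreateArtificialFrameState now takes an optional context and falls back to the undefined constant only when the caller passes none; the promise and typed-array constructor reductions below thread the real context through so the artificial frame state can reconstruct it. The fallback is the plain null-default idiom, sketched here with Node as an opaque type:

// A null context means "use the undefined constant" (sketch; Node is any
// opaque pointer type, not necessarily V8's).
template <typename Node>
Node* ContextOrUndefined(Node* context, Node* undefined_constant) {
  return context ? context : undefined_constant;
}
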
@@ -5580,7 +5574,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
DCHECK_EQ(1, promise_shared->internal_formal_parameter_count());
Node* constructor_frame_state = CreateArtificialFrameState(
node, outer_frame_state, 1, BailoutId::ConstructStubInvoke(),
- FrameStateType::kConstructStub, promise_shared);
+ FrameStateType::kConstructStub, promise_shared, context);
// The deopt continuation of this frame state is never called; the frame state
// is only necessary to obtain the right stack trace.
@@ -6150,7 +6144,7 @@ Reduction JSCallReducer::ReduceTypedArrayConstructor(
// reconstruct the proper frame when deoptimizing within the constructor.
frame_state = CreateArtificialFrameState(
node, frame_state, arity, BailoutId::ConstructStubInvoke(),
- FrameStateType::kConstructStub, shared);
+ FrameStateType::kConstructStub, shared, context);
// This continuation just returns the newly created JSTypedArray. We
// pass the_hole as the receiver, just like the builtin construct stub
@@ -6497,8 +6491,9 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
Callable const callable =
Builtins::CallableFor(isolate(), Builtins::kOrderedHashTableHealIndex);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
- Operator::kEliminatable);
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kEliminatable);
index = effect =
graph()->NewNode(common()->Call(call_descriptor),
jsgraph()->HeapConstant(callable.code()), table, index,
@@ -6720,6 +6715,7 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+
if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
instance_type)) {
// Load the {receiver}s field.
@@ -6733,17 +6729,28 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
dependencies()->DependOnProtector(PropertyCellRef(
js_heap_broker(), factory()->array_buffer_neutering_protector()));
} else {
- // Check if the {receiver}s buffer was neutered.
+ // Check whether {receiver}s JSArrayBuffer was neutered.
Node* buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- Node* check = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
-
- // Default to zero if the {receiver}s buffer was neutered.
+ Node* buffer_bit_field = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+ buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), buffer_bit_field,
+ jsgraph()->Constant(JSArrayBuffer::WasNeuteredBit::kMask)),
+ jsgraph()->ZeroConstant());
+
+ // TODO(turbofan): Ideally we would bail out here if the {receiver}s
+ // JSArrayBuffer was neutered, but there's no way to guard against
+ // deoptimization loops right now, since the JSCall {node} is usually
+ // created from a LOAD_IC inlining, and so there's no CALL_IC slot
+ // from which we could use the speculation bit.
value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, jsgraph()->ZeroConstant(), value);
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
+ check, value, jsgraph()->ZeroConstant());
}
ReplaceWithValue(node, value, effect, control);
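
Swapping the Select operands flips the encoding from "neutered ? 0 : value" to "intact ? value : 0", and BranchHint::kTrue marks the intact case as the expected one; per the TODO, deopting is not an option here because there is no CALL_IC speculation bit to consume. The selected semantics, sketched:

// Select(check, value, 0) with BranchHint::kTrue: return the loaded field
// while the buffer is intact, and 0 once it has been neutered (sketch).
double ArrayBufferViewField(bool buffer_intact, double field_value) {
  return buffer_intact ? field_value : 0.0;
}
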
@@ -6767,160 +6774,33 @@ uint32_t ExternalArrayElementSize(const ExternalArrayType element_type) {
}
} // namespace
-Reduction JSCallReducer::ReduceDataViewPrototypeGet(
- Node* node, ExternalArrayType element_type) {
- uint32_t const element_size = ExternalArrayElementSize(element_type);
+Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
+ ExternalArrayType element_type) {
+ size_t const element_size = ExternalArrayElementSize(element_type);
CallParameters const& p = CallParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* receiver = NodeProperties::GetValueInput(node, 1);
-
- if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
- return NoChange();
- }
-
Node* offset = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
: jsgraph()->ZeroConstant();
-
- Node* is_little_endian = node->op()->ValueInputCount() > 3
- ? NodeProperties::GetValueInput(node, 3)
- : jsgraph()->FalseConstant();
-
- // Only do stuff if the {receiver} is really a DataView.
- if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
- JS_DATA_VIEW_TYPE)) {
- // Check that the {offset} is within range for the {receiver}.
- HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
- // We only deal with DataViews here whose [[ByteLength]] is at least
- // {element_size} and less than 2^31-{element_size}.
- Handle<JSDataView> dataview = Handle<JSDataView>::cast(m.Value());
- if (dataview->byte_length()->Number() < element_size ||
- dataview->byte_length()->Number() - element_size > kMaxInt) {
- return NoChange();
- }
-
- // The {receiver}s [[ByteOffset]] must be within Unsigned31 range.
- if (dataview->byte_offset()->Number() > kMaxInt) {
- return NoChange();
- }
-
- // Check that the {offset} is within range of the {byte_length}.
- Node* byte_length = jsgraph()->Constant(
- dataview->byte_length()->Number() - (element_size - 1));
- offset = effect =
- graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
- byte_length, effect, control);
-
- // Add the [[ByteOffset]] to compute the effective offset.
- Node* byte_offset =
- jsgraph()->Constant(dataview->byte_offset()->Number());
- offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
- } else {
- // We only deal with DataViews here that have Smi [[ByteLength]]s.
- Node* byte_length = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteLength()),
- receiver, effect, control);
- byte_length = effect = graph()->NewNode(
- simplified()->CheckSmi(p.feedback()), byte_length, effect, control);
-
- // Check that the {offset} is within range of the {byte_length}.
- offset = effect =
- graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
- byte_length, effect, control);
-
- if (element_size > 0) {
- // For non-byte accesses we also need to check that the {offset}
- // plus the {element_size}-1 fits within the given {byte_length}.
- Node* end_offset =
- graph()->NewNode(simplified()->NumberAdd(), offset,
- jsgraph()->Constant(element_size - 1));
- effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
- end_offset, byte_length, effect, control);
- }
-
- // The {receiver}s [[ByteOffset]] also needs to be a (positive) Smi.
- Node* byte_offset = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteOffset()),
- receiver, effect, control);
- byte_offset = effect = graph()->NewNode(
- simplified()->CheckSmi(p.feedback()), byte_offset, effect, control);
-
- // Compute the buffer index at which we'll read.
- offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
- }
-
- // Coerce {is_little_endian} to boolean.
- is_little_endian =
- graph()->NewNode(simplified()->ToBoolean(), is_little_endian);
-
- // Get the underlying buffer and check that it has not been neutered.
- Node* buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
-
- if (isolate()->IsArrayBufferNeuteringIntact()) {
- // Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets neutered.
- dependencies()->DependOnProtector(PropertyCellRef(
- js_heap_broker(), factory()->array_buffer_neutering_protector()));
- } else {
- // If the buffer was neutered, deopt and let the unoptimized code throw.
- Node* check_neutered = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- check_neutered =
- graph()->NewNode(simplified()->BooleanNot(), check_neutered);
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered,
- p.feedback()),
- check_neutered, effect, control);
- }
-
- // Get the buffer's backing store.
- Node* backing_store = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferBackingStore()),
- buffer, effect, control);
-
- // Perform the load.
- Node* value = effect = graph()->NewNode(
- simplified()->LoadDataViewElement(element_type), buffer, backing_store,
- offset, is_little_endian, effect, control);
-
- // Continue on the regular path.
- ReplaceWithValue(node, value, effect, control);
- return Changed(value);
- }
-
- return NoChange();
-}
-
-Reduction JSCallReducer::ReduceDataViewPrototypeSet(
- Node* node, ExternalArrayType element_type) {
- uint32_t const element_size = ExternalArrayElementSize(element_type);
- CallParameters const& p = CallParametersOf(node->op());
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* value = (access == DataViewAccess::kGet)
+ ? nullptr
+ : (node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->ZeroConstant());
+ Node* is_little_endian = (access == DataViewAccess::kGet)
+ ? (node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->FalseConstant())
+ : (node->op()->ValueInputCount() > 4
+ ? NodeProperties::GetValueInput(node, 4)
+ : jsgraph()->FalseConstant());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
- Node* offset = node->op()->ValueInputCount() > 2
- ? NodeProperties::GetValueInput(node, 2)
- : jsgraph()->ZeroConstant();
-
- Node* value = node->op()->ValueInputCount() > 3
- ? NodeProperties::GetValueInput(node, 3)
- : jsgraph()->ZeroConstant();
-
- Node* is_little_endian = node->op()->ValueInputCount() > 4
- ? NodeProperties::GetValueInput(node, 4)
- : jsgraph()->FalseConstant();
-
// Only do stuff if the {receiver} is really a DataView.
if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
JS_DATA_VIEW_TYPE)) {
@@ -6930,26 +6810,25 @@ Reduction JSCallReducer::ReduceDataViewPrototypeSet(
// We only deal with DataViews here whose [[ByteLength]] is at least
// {element_size} and less than 2^31-{element_size}.
Handle<JSDataView> dataview = Handle<JSDataView>::cast(m.Value());
- if (dataview->byte_length()->Number() < element_size ||
- dataview->byte_length()->Number() - element_size > kMaxInt) {
+ if (dataview->byte_length() < element_size ||
+ dataview->byte_length() - element_size > kMaxInt) {
return NoChange();
}
// The {receiver}s [[ByteOffset]] must be within Unsigned31 range.
- if (dataview->byte_offset()->Number() > kMaxInt) {
+ if (dataview->byte_offset() > kMaxInt) {
return NoChange();
}
// Check that the {offset} is within range of the {byte_length}.
- Node* byte_length = jsgraph()->Constant(
- dataview->byte_length()->Number() - (element_size - 1));
+ Node* byte_length =
+ jsgraph()->Constant(dataview->byte_length() - (element_size - 1));
offset = effect =
graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
byte_length, effect, control);
// Add the [[ByteOffset]] to compute the effective offset.
- Node* byte_offset =
- jsgraph()->Constant(dataview->byte_offset()->Number());
+ Node* byte_offset = jsgraph()->Constant(dataview->byte_offset());
offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
} else {
// We only deal with DataViews here that have Smi [[ByteLength]]s.
@@ -6992,10 +6871,12 @@ Reduction JSCallReducer::ReduceDataViewPrototypeSet(
graph()->NewNode(simplified()->ToBoolean(), is_little_endian);
// Coerce {value} to Number.
- value = effect = graph()->NewNode(
- simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
- p.feedback()),
- value, effect, control);
+ if (access == DataViewAccess::kSet) {
+ value = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ value, effect, control);
+ }
// Get the underlying buffer and check that it has not been neutered.
Node* buffer = effect = graph()->NewNode(
@@ -7008,15 +6889,20 @@ Reduction JSCallReducer::ReduceDataViewPrototypeSet(
dependencies()->DependOnProtector(PropertyCellRef(
js_heap_broker(), factory()->array_buffer_neutering_protector()));
} else {
- // If the buffer was neutered, deopt and let the unoptimized code throw.
- Node* check_neutered = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- check_neutered =
- graph()->NewNode(simplified()->BooleanNot(), check_neutered);
+ // Bail out if the {buffer} was neutered.
+ Node* buffer_bit_field = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+ buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), buffer_bit_field,
+ jsgraph()->Constant(JSArrayBuffer::WasNeuteredBit::kMask)),
+ jsgraph()->ZeroConstant());
effect = graph()->NewNode(
simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered,
p.feedback()),
- check_neutered, effect, control);
+ check, effect, control);
}
// Get the buffer's backing store.
@@ -7024,12 +6910,21 @@ Reduction JSCallReducer::ReduceDataViewPrototypeSet(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferBackingStore()),
buffer, effect, control);
- // Perform the store.
- effect = graph()->NewNode(simplified()->StoreDataViewElement(element_type),
- buffer, backing_store, offset, value,
- is_little_endian, effect, control);
-
- Node* value = jsgraph()->UndefinedConstant();
+ switch (access) {
+ case DataViewAccess::kGet:
+ // Perform the load.
+ value = effect = graph()->NewNode(
+ simplified()->LoadDataViewElement(element_type), buffer,
+ backing_store, offset, is_little_endian, effect, control);
+ break;
+ case DataViewAccess::kSet:
+ // Perform the store.
+ effect = graph()->NewNode(
+ simplified()->StoreDataViewElement(element_type), buffer,
+ backing_store, offset, value, is_little_endian, effect, control);
+ value = jsgraph()->UndefinedConstant();
+ break;
+ }
// Continue on the regular path.
ReplaceWithValue(node, value, effect, control);
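
Merging the getter and setter reducers into ReduceDataViewAccess means the value inputs shift with the access kind: DataView.prototype.getX takes (offset, littleEndian) while setX takes (offset, value, littleEndian), and missing arguments default to the zero and false constants. A toy model of that selection, with argument names assumed:

#include <cstddef>

enum class DataViewAccess { kGet, kSet };

struct DataViewArgs {
  double offset = 0;           // ZeroConstant() when absent
  double value = 0;            // kSet only; ZeroConstant() when absent
  bool little_endian = false;  // FalseConstant() when absent
};

// args points at the JS arguments after the receiver; argc is their count.
DataViewArgs SelectDataViewArgs(DataViewAccess access, const double* args,
                                int argc) {
  DataViewArgs out;
  if (argc > 0) out.offset = args[0];
  if (access == DataViewAccess::kSet) {
    if (argc > 1) out.value = args[1];
    if (argc > 2) out.little_endian = args[2] != 0;
  } else if (argc > 1) {
    out.little_endian = args[1] != 0;
  }
  return out;
}
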
@@ -7242,39 +7137,30 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* value = p.arity() < 3 ? jsgraph()->ZeroConstant()
+ : NodeProperties::GetValueInput(node, 2);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
- if (p.arity() <= 2) {
- ReplaceWithValue(node, jsgraph()->ZeroConstant());
- }
-
- // We don't have a new.target argument, so we can convert to number,
- // but must also convert BigInts.
- if (p.arity() == 3) {
- Node* target = NodeProperties::GetValueInput(node, 0);
- Node* context = NodeProperties::GetContextInput(node);
- Node* value = NodeProperties::GetValueInput(node, 2);
- Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
- Handle<SharedFunctionInfo> number_constructor(
- handle(native_context()->number_function()->shared(), isolate()));
-
- const std::vector<Node*> checkpoint_parameters({
- jsgraph()->UndefinedConstant(), /* receiver */
- });
- int checkpoint_parameters_size =
- static_cast<int>(checkpoint_parameters.size());
-
- Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), number_constructor,
- Builtins::kGenericConstructorLazyDeoptContinuation, target, context,
- checkpoint_parameters.data(), checkpoint_parameters_size,
- outer_frame_state, ContinuationFrameStateMode::LAZY);
-
- NodeProperties::ReplaceValueInputs(node, value);
- NodeProperties::ChangeOp(node, javascript()->ToNumberConvertBigInt());
- NodeProperties::ReplaceFrameStateInput(node, frame_state);
- return Changed(node);
- }
- return NoChange();
+ // Create the artificial frame state in the middle of the Number constructor.
+ Handle<SharedFunctionInfo> shared_info(
+ handle(native_context()->number_function()->shared(), isolate()));
+ Node* stack_parameters[] = {receiver};
+ int stack_parameter_count = arraysize(stack_parameters);
+ Node* continuation_frame_state =
+ CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared_info,
+ Builtins::kGenericConstructorLazyDeoptContinuation, target, context,
+ stack_parameters, stack_parameter_count, frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ // Convert the {value} to a Number.
+ NodeProperties::ReplaceValueInputs(node, value);
+ NodeProperties::ChangeOp(node, javascript()->ToNumberConvertBigInt());
+ NodeProperties::ReplaceFrameStateInput(node, continuation_frame_state);
+ return Changed(node);
}
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
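
The rewritten ReduceNumberConstructor no longer branches on arity: a missing argument becomes the zero constant and the call is unconditionally rewritten to JSToNumberConvertBigInt under a GenericConstructorLazyDeoptContinuation frame state, so a lazy deopt resumes with the proper stack trace. The conversion differs from plain ToNumber only in accepting BigInts, roughly:

#include <variant>

// Toy JS value: either a Number or a (toy) BigInt.
using JSValue = std::variant<double, long long>;

// ToNumberConvertBigInt converts BigInts instead of throwing, matching
// Number(10n) === 10 (a semantic sketch, not V8 code).
double ToNumberConvertBigInt(const JSValue& v) {
  if (auto* d = std::get_if<double>(&v)) return *d;
  return static_cast<double>(std::get<long long>(&v));
}
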
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index e04870ed2f..6683a0b18e 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -15,6 +15,7 @@ namespace internal {
// Forward declarations.
class Factory;
+class JSGlobalProxy;
class VectorSlotPair;
namespace compiler {
@@ -182,10 +183,9 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
InstanceType instance_type,
FieldAccess const& access);
- Reduction ReduceDataViewPrototypeGet(Node* node,
- ExternalArrayType element_type);
- Reduction ReduceDataViewPrototypeSet(Node* node,
- ExternalArrayType element_type);
+ enum class DataViewAccess { kGet, kSet };
+ Reduction ReduceDataViewAccess(Node* node, DataViewAccess access,
+ ExternalArrayType element_type);
Reduction ReduceDatePrototypeGetTime(Node* node);
Reduction ReduceDateNow(Node* node);
@@ -230,7 +230,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count, BailoutId bailout_id,
FrameStateType frame_state_type,
- Handle<SharedFunctionInfo> shared);
+ Handle<SharedFunctionInfo> shared,
+ Node* context = nullptr);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index ef2297c9d6..1b2f3c3a7c 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -144,8 +144,9 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// Now walk up the concrete context chain for the remaining depth.
ContextRef concrete = maybe_concrete.value();
+ concrete.Serialize(); // TODO(neis): Remove later.
for (; depth > 0; --depth) {
- concrete = concrete.previous().value();
+ concrete = concrete.previous();
}
if (!access.immutable()) {
@@ -164,7 +165,7 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// We must be conservative and check if the value in the slot is currently
// the hole or undefined. Only if it is neither of these, can we be sure
// that it won't change anymore.
- OddballType oddball_type = maybe_value->oddball_type();
+ OddballType oddball_type = maybe_value->AsHeapObject().map().oddball_type();
if (oddball_type == OddballType::kUndefined ||
oddball_type == OddballType::kHole) {
maybe_value.reset();
@@ -205,8 +206,9 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
// Now walk up the concrete context chain for the remaining depth.
ContextRef concrete = maybe_concrete.value();
+ concrete.Serialize(); // TODO(neis): Remove later.
for (; depth > 0; --depth) {
- concrete = concrete.previous().value();
+ concrete = concrete.previous();
}
return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 6484e05061..3848e1f814 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -129,10 +129,10 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
JSFunctionRef constructor =
target_type.AsHeapConstant()->Ref().AsJSFunction();
- if (!constructor.IsConstructor()) return NoChange();
+ if (!constructor.map().is_constructor()) return NoChange();
JSFunctionRef original_constructor =
new_target_type.AsHeapConstant()->Ref().AsJSFunction();
- if (!original_constructor.IsConstructor()) return NoChange();
+ if (!original_constructor.map().is_constructor()) return NoChange();
// Check if we can inline the allocation.
if (!IsAllocationInlineable(constructor, original_constructor)) {
@@ -198,8 +198,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Load the arguments object map.
Node* const arguments_map = jsgraph()->Constant(
has_aliased_arguments
- ? native_context_ref().fast_aliased_arguments_map()
- : native_context_ref().sloppy_arguments_map());
+ ? native_context().fast_aliased_arguments_map()
+ : native_context().sloppy_arguments_map());
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -228,7 +228,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
arguments_frame, arguments_length, effect);
// Load the arguments object map.
Node* const arguments_map =
- jsgraph()->Constant(native_context_ref().strict_arguments_map());
+ jsgraph()->Constant(native_context().strict_arguments_map());
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -258,7 +258,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
arguments_frame, rest_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
- native_context_ref().js_array_packed_elements_map());
+ native_context().js_array_packed_elements_map());
// Actually allocate and initialize the jsarray.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -302,9 +302,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the arguments object map.
Node* const arguments_map = jsgraph()->Constant(
- has_aliased_arguments
- ? native_context_ref().fast_aliased_arguments_map()
- : native_context_ref().sloppy_arguments_map());
+ has_aliased_arguments ? native_context().fast_aliased_arguments_map()
+ : native_context().sloppy_arguments_map());
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -340,7 +339,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the arguments object map.
Node* const arguments_map =
- jsgraph()->Constant(native_context_ref().strict_arguments_map());
+ jsgraph()->Constant(native_context().strict_arguments_map());
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -376,8 +375,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
AllocateRestArguments(effect, control, args_state, start_index);
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the JSArray object map.
- Node* const jsarray_map = jsgraph()->Constant(
- native_context_ref().js_array_packed_elements_map());
+ Node* const jsarray_map =
+ jsgraph()->Constant(native_context().js_array_packed_elements_map());
// Actually allocate and initialize the jsarray.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -476,7 +475,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
// Constructs an array with a variable {length} when no upper bound
// is known for the capacity.
Reduction JSCreateLowering::ReduceNewArray(
- Node* node, Node* length, MapRef initial_map, PretenureFlag pretenure,
+ Node* node, Node* length, MapRef initial_map, ElementsKind elements_kind,
+ PretenureFlag pretenure,
const SlackTrackingPrediction& slack_tracking_prediction) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
@@ -485,8 +485,8 @@ Reduction JSCreateLowering::ReduceNewArray(
// Constructing an Array via new Array(N) where N is an unsigned
// integer, always creates a holey backing store.
ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(
- GetHoleyElementsKind(initial_map.elements_kind())));
+ initial_map,
+ initial_map.AsElementsKind(GetHoleyElementsKind(elements_kind)));
// Check that the {limit} is an unsigned integer in the valid range.
// This has to be kept in sync with src/runtime/runtime-array.cc,
@@ -525,7 +525,7 @@ Reduction JSCreateLowering::ReduceNewArray(
// upper bound is known for the {capacity}.
Reduction JSCreateLowering::ReduceNewArray(
Node* node, Node* length, int capacity, MapRef initial_map,
- PretenureFlag pretenure,
+ ElementsKind elements_kind, PretenureFlag pretenure,
const SlackTrackingPrediction& slack_tracking_prediction) {
DCHECK(node->opcode() == IrOpcode::kJSCreateArray ||
node->opcode() == IrOpcode::kJSCreateEmptyLiteralArray);
@@ -533,12 +533,11 @@ Reduction JSCreateLowering::ReduceNewArray(
Node* control = NodeProperties::GetControlInput(node);
// Determine the appropriate elements kind.
- ElementsKind elements_kind = initial_map.elements_kind();
if (NodeProperties::GetType(length).Max() > 0.0) {
elements_kind = GetHoleyElementsKind(elements_kind);
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
}
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(elements_kind));
DCHECK(IsFastElementsKind(elements_kind));
// Setup elements and properties.
@@ -570,15 +569,16 @@ Reduction JSCreateLowering::ReduceNewArray(
Reduction JSCreateLowering::ReduceNewArray(
Node* node, std::vector<Node*> values, MapRef initial_map,
- PretenureFlag pretenure,
+ ElementsKind elements_kind, PretenureFlag pretenure,
const SlackTrackingPrediction& slack_tracking_prediction) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Determine the appropriate elements kind.
- ElementsKind elements_kind = initial_map.elements_kind();
DCHECK(IsFastElementsKind(elements_kind));
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(elements_kind));
// Check {values} based on the {elements_kind}. These checks are guarded
// by the {elements_kind} feedback on the {site}, so it's safe to just
@@ -625,68 +625,6 @@ Reduction JSCreateLowering::ReduceNewArray(
return Changed(node);
}
-Reduction JSCreateLowering::ReduceNewArrayToStubCall(
- Node* node, base::Optional<AllocationSiteRef> site) {
- CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
- int const arity = static_cast<int>(p.arity());
- Node* target = NodeProperties::GetValueInput(node, 0);
- Node* new_target = NodeProperties::GetValueInput(node, 1);
- Type new_target_type = NodeProperties::GetType(new_target);
- Node* type_info =
- site ? jsgraph()->Constant(*site) : jsgraph()->UndefinedConstant();
-
- ElementsKind elements_kind =
- site ? site->GetElementsKind() : GetInitialFastElementsKind();
- AllocationSiteOverrideMode override_mode =
- (!site || AllocationSite::ShouldTrack(elements_kind))
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- // The Array constructor can only trigger an observable side-effect
- // if the new.target may be a proxy.
- Operator::Properties const properties =
- (new_target != target || new_target_type.Maybe(Type::Proxy()))
- ? Operator::kNoDeopt
- : Operator::kNoDeopt | Operator::kNoWrite;
-
- if (arity == 0) {
- Callable callable = CodeFactory::ArrayNoArgumentConstructor(
- isolate(), elements_kind, override_mode);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), arity + 1,
- CallDescriptor::kNeedsFrameState, properties);
- node->ReplaceInput(0, jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, type_info);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- } else if (arity == 1) {
- // Require elements kind to "go holey".
- Callable callable = CodeFactory::ArraySingleArgumentConstructor(
- isolate(), GetHoleyElementsKind(elements_kind), override_mode);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), arity + 1,
- CallDescriptor::kNeedsFrameState, properties);
- node->ReplaceInput(0, jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, type_info);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- } else {
- DCHECK_GT(arity, 1);
- Handle<Code> code = BUILTIN_CODE(isolate(), ArrayNArgumentsConstructor);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), ArrayNArgumentsConstructorDescriptor{}, arity + 1,
- CallDescriptor::kNeedsFrameState);
- node->ReplaceInput(0, jsgraph()->HeapConstant(code));
- node->InsertInput(graph()->zone(), 2, type_info);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- }
- return Changed(node);
-}
-
Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
@@ -699,7 +637,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
}
}
PretenureFlag pretenure = NOT_TENURED;
- JSFunctionRef constructor = native_context_ref().array_function();
+ JSFunctionRef constructor = native_context().array_function();
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
Type new_target_type = (target == new_target)
@@ -711,8 +649,8 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
new_target_type.AsHeapConstant()->Ref().IsJSFunction()) {
JSFunctionRef original_constructor =
new_target_type.AsHeapConstant()->Ref().AsJSFunction();
- DCHECK(constructor.IsConstructor());
- DCHECK(original_constructor.IsConstructor());
+ DCHECK(constructor.map().is_constructor());
+ DCHECK(original_constructor.map().is_constructor());
// Check if we can inline the allocation.
if (IsAllocationInlineable(constructor, original_constructor)) {
@@ -726,10 +664,9 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
bool can_inline_call = false;
// Check if we have a feedback {site} on the {node}.
+ ElementsKind elements_kind = initial_map.elements_kind();
if (site_ref) {
- ElementsKind elements_kind = site_ref->GetElementsKind();
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
+ elements_kind = site_ref->GetElementsKind();
can_inline_call = site_ref->CanInlineCall();
pretenure = dependencies()->DependOnPretenureMode(*site_ref);
dependencies()->DependOnElementsKind(*site_ref);
@@ -740,7 +677,8 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
if (arity == 0) {
Node* length = jsgraph()->ZeroConstant();
int capacity = JSArray::kPreallocatedArrayElements;
- return ReduceNewArray(node, length, capacity, initial_map, pretenure,
+ return ReduceNewArray(node, length, capacity, initial_map,
+ elements_kind, pretenure,
slack_tracking_prediction);
} else if (arity == 1) {
Node* length = NodeProperties::GetValueInput(node, 2);
@@ -748,26 +686,25 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
if (!length_type.Maybe(Type::Number())) {
// Handle the single argument case, where we know that the value
// cannot be a valid Array length.
- ElementsKind elements_kind = initial_map.elements_kind();
elements_kind = GetMoreGeneralElementsKind(
elements_kind, IsHoleyElementsKind(elements_kind)
? HOLEY_ELEMENTS
: PACKED_ELEMENTS);
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
return ReduceNewArray(node, std::vector<Node*>{length}, initial_map,
- pretenure, slack_tracking_prediction);
+ elements_kind, pretenure,
+ slack_tracking_prediction);
}
if (length_type.Is(Type::SignedSmall()) && length_type.Min() >= 0 &&
length_type.Max() <= kElementLoopUnrollLimit &&
length_type.Min() == length_type.Max()) {
int capacity = static_cast<int>(length_type.Max());
- return ReduceNewArray(node, length, capacity, initial_map, pretenure,
+ return ReduceNewArray(node, length, capacity, initial_map,
+ elements_kind, pretenure,
slack_tracking_prediction);
}
if (length_type.Maybe(Type::UnsignedSmall()) && can_inline_call) {
- return ReduceNewArray(node, length, initial_map, pretenure,
- slack_tracking_prediction);
+ return ReduceNewArray(node, length, initial_map, elements_kind,
+ pretenure, slack_tracking_prediction);
}
} else if (arity <= JSArray::kInitialMaxFastElementArray) {
// Gather the values to store into the newly created array.
@@ -791,7 +728,6 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
}
// Try to figure out the ideal elements kind statically.
- ElementsKind elements_kind = initial_map.elements_kind();
if (values_all_smis) {
// Smis can be stored with any elements kind.
} else if (values_all_numbers) {
@@ -812,18 +748,12 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
// we cannot inline this invocation of the Array constructor here.
return NoChange();
}
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
- return ReduceNewArray(node, values, initial_map, pretenure,
- slack_tracking_prediction);
+ return ReduceNewArray(node, values, initial_map, elements_kind,
+ pretenure, slack_tracking_prediction);
}
}
}
-
- // TODO(bmeurer): Optimize the subclassing case.
- if (target != new_target) return NoChange();
-
- return ReduceNewArrayToStubCall(node, site_ref);
+ return NoChange();
}
Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
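
With ReduceNewArrayToStubCall gone, ReduceJSCreateArray either inlines the allocation or returns NoChange(), and the elements kind is computed once (from the initial map, refined by allocation-site feedback) and threaded through every ReduceNewArray overload, each of which re-derives the initial map via AsElementsKind. Several of those paths widen the kind to its holey variant; a toy version of that widening:

enum class ElementsKind { kPackedSmi, kHoleySmi, kPackedDouble, kHoleyDouble };

// Toy GetHoleyElementsKind: an array whose length may exceed the number of
// stored elements needs a holey backing store (sketch of the real helper).
ElementsKind GetHoleyElementsKind(ElementsKind kind) {
  switch (kind) {
    case ElementsKind::kPackedSmi:    return ElementsKind::kHoleySmi;
    case ElementsKind::kPackedDouble: return ElementsKind::kHoleyDouble;
    default:                          return kind;  // already holey
  }
}
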
@@ -838,7 +768,7 @@ Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(JSArrayIterator::kSize, NOT_TENURED, Type::OtherObject());
a.Store(AccessBuilder::ForMap(),
- native_context_ref().initial_array_iterator_map());
+ native_context().initial_array_iterator_map());
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSObjectElements(),
@@ -902,8 +832,8 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(JSCollectionIterator::kSize, NOT_TENURED, Type::OtherObject());
a.Store(AccessBuilder::ForMap(),
- MapForCollectionIterationKind(
- native_context_ref(), p.collection_kind(), p.iteration_kind()));
+ MapForCollectionIterationKind(native_context(), p.collection_kind(),
+ p.iteration_kind()));
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSObjectElements(),
@@ -975,7 +905,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
}
MapRef function_map =
- native_context_ref().GetFunctionMapFromIndex(shared.function_map_index());
+ native_context().GetFunctionMapFromIndex(shared.function_map_index());
DCHECK(!function_map.IsInobjectSlackTrackingInProgress());
DCHECK(!function_map.is_dictionary_map());
@@ -1025,7 +955,7 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* iterator_result_map =
- jsgraph()->Constant(native_context_ref().iterator_result_map());
+ jsgraph()->Constant(native_context().iterator_result_map());
// Emit code to allocate the JSIteratorResult instance.
AllocationBuilder a(jsgraph(), effect, graph()->start());
@@ -1047,7 +977,8 @@ Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
Node* string = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* map = jsgraph()->Constant(native_context_ref().string_iterator_map());
+ Node* map =
+ jsgraph()->Constant(native_context().initial_string_iterator_map());
// Allocate new iterator and attach the iterator to this string.
AllocationBuilder a(jsgraph(), effect, graph()->start());
a.Allocate(JSStringIterator::kSize, NOT_TENURED, Type::OtherObject());
@@ -1070,7 +1001,7 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* array_map =
- jsgraph()->Constant(native_context_ref().js_array_packed_elements_map());
+ jsgraph()->Constant(native_context().js_array_packed_elements_map());
Node* properties = jsgraph()->EmptyFixedArrayConstant();
Node* length = jsgraph()->Constant(2);
@@ -1097,7 +1028,7 @@ Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreatePromise, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
- MapRef promise_map = native_context_ref().promise_function().initial_map();
+ MapRef promise_map = native_context().promise_function().initial_map();
AllocationBuilder a(jsgraph(), effect, graph()->start());
a.Allocate(promise_map.instance_size());
@@ -1157,14 +1088,15 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
AllocationSiteRef site = feedback.AsAllocationSite();
DCHECK(!site.PointsToLiteral());
MapRef initial_map =
- native_context_ref().GetInitialJSArrayMap(site.GetElementsKind());
+ native_context().GetInitialJSArrayMap(site.GetElementsKind());
PretenureFlag const pretenure = dependencies()->DependOnPretenureMode(site);
dependencies()->DependOnElementsKind(site);
Node* length = jsgraph()->ZeroConstant();
DCHECK(!initial_map.IsInobjectSlackTrackingInProgress());
SlackTrackingPrediction slack_tracking_prediction(
initial_map, initial_map.instance_size());
- return ReduceNewArray(node, length, 0, initial_map, pretenure,
+ return ReduceNewArray(node, length, 0, initial_map,
+ initial_map.elements_kind(), pretenure,
slack_tracking_prediction);
}
return NoChange();
@@ -1176,7 +1108,7 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Retrieve the initial map for the object.
- MapRef map = native_context_ref().object_function().initial_map();
+ MapRef map = native_context().object_function().initial_map();
DCHECK(!map.is_dictionary_map());
DCHECK(!map.IsInobjectSlackTrackingInProgress());
Node* js_object_map = jsgraph()->Constant(map);
@@ -1253,7 +1185,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- jsgraph()->HeapConstant(native_context()));
+ jsgraph()->Constant(native_context()));
for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
}
@@ -1280,7 +1212,7 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- jsgraph()->HeapConstant(native_context()));
+ jsgraph()->Constant(native_context()));
RelaxControls(node);
a.FinishAndChange(node);
return Changed(node);
@@ -1303,7 +1235,7 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- jsgraph()->HeapConstant(native_context()));
+ jsgraph()->Constant(native_context()));
a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
exception);
RelaxControls(node);
@@ -1332,7 +1264,7 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- jsgraph()->HeapConstant(native_context()));
+ jsgraph()->Constant(native_context()));
for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
}
@@ -1344,6 +1276,24 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
return NoChange();
}
+namespace {
+base::Optional<MapRef> GetObjectCreateMap(JSHeapBroker* broker,
+ HeapObjectRef prototype) {
+ MapRef standard_map =
+ broker->native_context().object_function().initial_map();
+ if (prototype.equals(standard_map.prototype())) {
+ return standard_map;
+ }
+ if (prototype.map().oddball_type() == OddballType::kNull) {
+ return broker->native_context().slow_object_with_null_prototype_map();
+ }
+ if (prototype.IsJSObject()) {
+ return prototype.AsJSObject().GetObjectCreateMap();
+ }
+ return base::Optional<MapRef>();
+}
+} // namespace
+
Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateObject, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
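
The new GetObjectCreateMap helper, used just below, resolves the instance map for Object.create(prototype) by case analysis on the prototype alone. A toy restatement of that analysis, with the bail-out case modeled as nullopt:

#include <optional>

enum class CreateMapKind { kStandard, kSlowNullProto, kFromPrototype };

// Sketch: Object.prototype gets the standard map, null gets the slow map
// with a null prototype, other JS objects supply their own create map, and
// anything else makes the reducer return NoChange().
std::optional<CreateMapKind> ChooseCreateMap(bool is_object_prototype,
                                             bool is_null, bool is_js_object) {
  if (is_object_prototype) return CreateMapKind::kStandard;
  if (is_null) return CreateMapKind::kSlowNullProto;
  if (is_js_object) return CreateMapKind::kFromPrototype;
  return std::nullopt;
}
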
@@ -1353,13 +1303,14 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
if (!prototype_type.IsHeapConstant()) return NoChange();
HeapObjectRef prototype_const = prototype_type.AsHeapConstant()->Ref();
- auto maybe_instance_map = prototype_const.TryGetObjectCreateMap();
+ auto maybe_instance_map =
+ GetObjectCreateMap(js_heap_broker(), prototype_const);
if (!maybe_instance_map) return NoChange();
MapRef instance_map = maybe_instance_map.value();
Node* properties = jsgraph()->EmptyFixedArrayConstant();
if (instance_map.is_dictionary_map()) {
- DCHECK_EQ(prototype_const.type().oddball_type(), OddballType::kNull);
+ DCHECK_EQ(prototype_const.map().oddball_type(), OddballType::kNull);
// Allocate an empty NameDictionary as backing store for the properties.
Handle<Map> map = isolate()->factory()->name_dictionary_map();
int capacity =
@@ -1441,7 +1392,8 @@ Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
a.AllocateArray(argument_count, factory()->fixed_array_map());
for (int i = 0; i < argument_count; ++i, ++parameters_it) {
DCHECK_NOT_NULL((*parameters_it).node);
- a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ (*parameters_it).node);
}
return a.Finish();
}
@@ -1471,7 +1423,8 @@ Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
a.AllocateArray(num_elements, factory()->fixed_array_map());
for (int i = 0; i < num_elements; ++i, ++parameters_it) {
DCHECK_NOT_NULL((*parameters_it).node);
- a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ (*parameters_it).node);
}
return a.Finish();
}
@@ -1508,22 +1461,27 @@ Node* JSCreateLowering::AllocateAliasedArguments(
AllocationBuilder aa(jsgraph(), effect, control);
aa.AllocateArray(argument_count, factory()->fixed_array_map());
for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
- aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
+ aa.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ jsgraph()->TheHoleConstant());
}
for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
DCHECK_NOT_NULL((*parameters_it).node);
- aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ aa.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ (*parameters_it).node);
}
Node* arguments = aa.Finish();
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), arguments, control);
a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
- a.Store(AccessBuilder::ForFixedArraySlot(0), context);
- a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
+ a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(0),
+ context);
+ a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(1),
+ arguments);
for (int i = 0; i < mapped_count; ++i) {
int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
- a.Store(AccessBuilder::ForFixedArraySlot(i + 2), jsgraph()->Constant(idx));
+ a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i + 2),
+ jsgraph()->Constant(idx));
}
return a.Finish();
}
@@ -1561,8 +1519,10 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), arguments, control);
a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
- a.Store(AccessBuilder::ForFixedArraySlot(0), context);
- a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
+ a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(0),
+ context);
+ a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(1),
+ arguments);
for (int i = 0; i < mapped_count; ++i) {
int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
Node* value = graph()->NewNode(
@@ -1570,7 +1530,8 @@ Node* JSCreateLowering::AllocateAliasedArguments(
graph()->NewNode(simplified()->NumberLessThan(), jsgraph()->Constant(i),
arguments_length),
jsgraph()->Constant(idx), jsgraph()->TheHoleConstant());
- a.Store(AccessBuilder::ForFixedArraySlot(i + 2), value);
+ a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i + 2),
+ value);
}
return a.Finish();
}
@@ -1648,7 +1609,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
MaybeHandle<Map>(), Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier};
Node* value;
- if (boilerplate.IsUnboxedDoubleField(index)) {
+ if (boilerplate_map.IsUnboxedDoubleField(i)) {
access.machine_type = MachineType::Float64();
access.type = Type::Number();
value = jsgraph()->Constant(boilerplate.RawFastDoublePropertyAt(index));
@@ -1670,7 +1631,11 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
value = effect = builder.Finish();
} else if (property_details.representation().IsSmi()) {
// Ensure that value is stored as smi.
- value = boilerplate_value.oddball_type() == OddballType::kUninitialized
+ bool is_uninitialized =
+ boilerplate_value.IsHeapObject() &&
+ boilerplate_value.AsHeapObject().map().oddball_type() ==
+ OddballType::kUninitialized;
+ value = is_uninitialized
? jsgraph()->ZeroConstant()
: jsgraph()->Constant(boilerplate_value.AsSmi());
} else {
@@ -1698,11 +1663,11 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
// Actually allocate and initialize the object.
AllocationBuilder builder(jsgraph(), effect, control);
builder.Allocate(boilerplate_map.instance_size(), pretenure,
- Type::For(js_heap_broker(), boilerplate_map.object<Map>()));
+ Type::For(boilerplate_map));
builder.Store(AccessBuilder::ForMap(), boilerplate_map);
builder.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
builder.Store(AccessBuilder::ForJSObjectElements(), elements);
- if (boilerplate_map.IsJSArrayMap()) {
+ if (boilerplate.IsJSArray()) {
JSArrayRef boilerplate_array = boilerplate.AsJSArray();
builder.Store(
AccessBuilder::ForJSArrayLength(boilerplate_array.GetElementsKind()),
@@ -1744,16 +1709,12 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
} else {
FixedArrayRef elements = boilerplate_elements.AsFixedArray();
for (int i = 0; i < elements_length; ++i) {
- if (elements.is_the_hole(i)) {
- elements_values[i] = jsgraph()->TheHoleConstant();
+ ObjectRef element_value = elements.get(i);
+ if (element_value.IsJSObject()) {
+ elements_values[i] = effect = AllocateFastLiteral(
+ effect, control, element_value.AsJSObject(), pretenure);
} else {
- ObjectRef element_value = elements.get(i);
- if (element_value.IsJSObject()) {
- elements_values[i] = effect = AllocateFastLiteral(
- effect, control, element_value.AsJSObject(), pretenure);
- } else {
- elements_values[i] = jsgraph()->Constant(element_value);
- }
+ elements_values[i] = jsgraph()->Constant(element_value);
}
}
}
@@ -1790,8 +1751,7 @@ Node* JSCreateLowering::AllocateLiteralRegExp(Node* effect, Node* control,
JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
AllocationBuilder builder(jsgraph(), effect, control);
- builder.Allocate(size, pretenure,
- Type::For(js_heap_broker(), boilerplate_map.object<Map>()));
+ builder.Allocate(size, pretenure, Type::For(boilerplate_map));
builder.Store(AccessBuilder::ForMap(), boilerplate_map);
builder.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
boilerplate.raw_properties_or_hash());
@@ -1820,8 +1780,8 @@ SimplifiedOperatorBuilder* JSCreateLowering::simplified() const {
return jsgraph()->simplified();
}
-NativeContextRef JSCreateLowering::native_context_ref() const {
- return NativeContextRef(js_heap_broker(), native_context());
+NativeContextRef JSCreateLowering::native_context() const {
+ return js_heap_broker()->native_context();
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 151be1b35c..4099edb7b6 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -33,15 +33,13 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
- Handle<Context> native_context, Zone* zone)
+ JSGraph* jsgraph, JSHeapBroker* js_heap_broker, Zone* zone)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker),
- native_context_(native_context),
zone_(zone) {}
- ~JSCreateLowering() final {}
+ ~JSCreateLowering() final = default;
const char* reducer_name() const override { return "JSCreateLowering"; }
@@ -69,15 +67,16 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateBlockContext(Node* node);
Reduction ReduceJSCreateGeneratorObject(Node* node);
Reduction ReduceNewArray(
- Node* node, Node* length, MapRef initial_map, PretenureFlag pretenure,
+ Node* node, Node* length, MapRef initial_map, ElementsKind elements_kind,
+ PretenureFlag pretenure,
const SlackTrackingPrediction& slack_tracking_prediction);
Reduction ReduceNewArray(
Node* node, Node* length, int capacity, MapRef initial_map,
- PretenureFlag pretenure,
+ ElementsKind elements_kind, PretenureFlag pretenure,
const SlackTrackingPrediction& slack_tracking_prediction);
Reduction ReduceNewArray(
Node* node, std::vector<Node*> values, MapRef initial_map,
- PretenureFlag pretenure,
+ ElementsKind elements_kind, PretenureFlag pretenure,
const SlackTrackingPrediction& slack_tracking_prediction);
Reduction ReduceJSCreateObject(Node* node);
@@ -109,15 +108,11 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Node* AllocateLiteralRegExp(Node* effect, Node* control,
JSRegExpRef boilerplate);
- Reduction ReduceNewArrayToStubCall(Node* node,
- base::Optional<AllocationSiteRef> site);
-
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
- Handle<Context> native_context() const { return native_context_; }
- NativeContextRef native_context_ref() const;
+ NativeContextRef native_context() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
CompilationDependencies* dependencies() const { return dependencies_; }
@@ -127,7 +122,6 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
JSHeapBroker* const js_heap_broker_;
- Handle<Context> const native_context_;
Zone* const zone_;
};
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 0903f181b9..731159f3d1 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -33,7 +33,7 @@ CallDescriptor::Flags FrameStateFlagForCall(Node* node) {
JSGenericLowering::JSGenericLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
-JSGenericLowering::~JSGenericLowering() {}
+JSGenericLowering::~JSGenericLowering() = default;
Reduction JSGenericLowering::Reduce(Node* node) {
@@ -79,7 +79,6 @@ REPLACE_STUB_CALL(Increment)
REPLACE_STUB_CALL(Negate)
REPLACE_STUB_CALL(HasProperty)
REPLACE_STUB_CALL(Equal)
-REPLACE_STUB_CALL(ToInteger)
REPLACE_STUB_CALL(ToLength)
REPLACE_STUB_CALL(ToNumber)
REPLACE_STUB_CALL(ToNumberConvertBigInt)
@@ -95,12 +94,14 @@ REPLACE_STUB_CALL(RejectPromise)
REPLACE_STUB_CALL(ResolvePromise)
#undef REPLACE_STUB_CALL
-void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
+void JSGenericLowering::ReplaceWithStubCall(Node* node,
+ Callable callable,
CallDescriptor::Flags flags) {
ReplaceWithStubCall(node, callable, flags, node->op()->properties());
}
-void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
+void JSGenericLowering::ReplaceWithStubCall(Node* node,
+ Callable callable,
CallDescriptor::Flags flags,
Operator::Properties properties) {
const CallInterfaceDescriptor& descriptor = callable.descriptor();
@@ -146,12 +147,16 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kKeyedLoadICTrampoline);
+ Callable callable = Builtins::CallableFor(
+ isolate(), p.feedback().ic_state() == MEGAMORPHIC
+ ? Builtins::kKeyedLoadICTrampoline_Megamorphic
+ : Builtins::kKeyedLoadICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC);
+ Callable callable = Builtins::CallableFor(
+ isolate(), p.feedback().ic_state() == MEGAMORPHIC
+ ? Builtins::kKeyedLoadIC_Megamorphic
+ : Builtins::kKeyedLoadIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
@@ -164,20 +169,30 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ if (!p.feedback().IsValid()) {
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kGetProperty);
+ ReplaceWithStubCall(node, callable, flags);
+ return;
+ }
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kLoadICTrampoline);
+ Callable callable = Builtins::CallableFor(
+ isolate(), p.feedback().ic_state() == MEGAMORPHIC
+ ? Builtins::kLoadICTrampoline_Megamorphic
+ : Builtins::kLoadICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kLoadIC);
+ Callable callable =
+ Builtins::CallableFor(isolate(), p.feedback().ic_state() == MEGAMORPHIC
+ ? Builtins::kLoadIC_Megamorphic
+ : Builtins::kLoadIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
}
}
-
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
@@ -222,6 +237,12 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ if (!p.feedback().IsValid()) {
+ node->InsertInput(
+ zone(), 3, jsgraph()->SmiConstant(static_cast<int>(p.language_mode())));
+ ReplaceWithRuntimeCall(node, Runtime::kSetNamedProperty);
+ return;
+ }
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable =
@@ -513,6 +534,13 @@ void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) {
ReplaceWithStubCall(node, callable, flags);
}
+void JSGenericLowering::LowerJSCreateArrayFromIterable(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kIterableToListWithSymbolLookup);
+ ReplaceWithStubCall(node, callable, flags);
+}
+
void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
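// [Editorial sketch, not part of this commit. The hunks above all make the
// same change: when the recorded feedback is already MEGAMORPHIC, lower
// directly to the *_Megamorphic builtin instead of the generic IC, which
// would only rediscover that state at runtime. A standalone model of the
// selection, using simplified stand-in types rather than real V8 API:]
#include <cassert>

enum class InlineCacheState { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };
enum class Builtin { kLoadIC, kLoadIC_Megamorphic };

// Pick the megamorphic stub up front when feedback says the site is already
// megamorphic; otherwise go through the normal IC.
Builtin ChooseLoadBuiltin(InlineCacheState state) {
  return state == InlineCacheState::MEGAMORPHIC ? Builtin::kLoadIC_Megamorphic
                                                : Builtin::kLoadIC;
}

int main() {
  assert(ChooseLoadBuiltin(InlineCacheState::MONOMORPHIC) == Builtin::kLoadIC);
  assert(ChooseLoadBuiltin(InlineCacheState::MEGAMORPHIC) ==
         Builtin::kLoadIC_Megamorphic);
}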
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index b3ef85fb07..7b74c2a32c 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -68,7 +68,8 @@ Node* JSGraph::Constant(Handle<Object> value) {
Node* JSGraph::Constant(const ObjectRef& ref) {
if (ref.IsSmi()) return Constant(ref.AsSmi());
- OddballType oddball_type = ref.oddball_type();
+ OddballType oddball_type =
+ ref.AsHeapObject().GetHeapObjectType().oddball_type();
if (ref.IsHeapNumber()) {
return Constant(ref.AsHeapNumber().value());
} else if (oddball_type == OddballType::kUndefined) {
@@ -99,19 +100,6 @@ Node* JSGraph::Constant(double value) {
return NumberConstant(value);
}
-
-Node* JSGraph::Constant(int32_t value) {
- if (value == 0) return ZeroConstant();
- if (value == 1) return OneConstant();
- return NumberConstant(value);
-}
-
-Node* JSGraph::Constant(uint32_t value) {
- if (value == 0) return ZeroConstant();
- if (value == 1) return OneConstant();
- return NumberConstant(value);
-}
-
Node* JSGraph::NumberConstant(double value) {
Node** loc = cache_.FindNumberConstant(value);
if (*loc == nullptr) {
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 517b799a24..774b8e7433 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -61,12 +61,6 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
// Creates a NumberConstant node, usually canonicalized.
Node* Constant(double value);
- // Creates a NumberConstant node, usually canonicalized.
- Node* Constant(int32_t value);
-
- // Creates a NumberConstant node, usually canonicalized.
- Node* Constant(uint32_t value);
-
// Creates a HeapConstant node for either true or false.
Node* BooleanConstant(bool is_true) {
return is_true ? TrueConstant() : FalseConstant();
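// [Editorial sketch, not part of this commit. The deleted Constant(int32_t)
// and Constant(uint32_t) overloads above canonicalized 0 and 1 to shared
// singleton nodes and cached everything else as a NumberConstant; callers now
// get the same behavior through Constant(double). A generic, standalone model
// of that canonicalization idiom:]
#include <map>

struct Node { double value; };

class ConstantCache {
 public:
  // 0 and 1 map to dedicated singleton nodes; every other value shares one
  // cached node per distinct constant.
  Node* Constant(int value) {
    if (value == 0) return &zero_;
    if (value == 1) return &one_;
    auto it = cache_.find(value);
    if (it == cache_.end())
      it = cache_.emplace(value, Node{static_cast<double>(value)}).first;
    return &it->second;
  }

 private:
  Node zero_{0}, one_{1};
  std::map<int, Node> cache_;
};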
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 949dca377d..a95bfaad21 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -4,7 +4,12 @@
#include "src/compiler/js-heap-broker.h"
+#include "src/ast/modules.h"
+#include "src/bootstrapper.h"
+#include "src/boxed-float.h"
+#include "src/code-factory.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/per-isolate-compiler-cache.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-inl.h"
@@ -19,16 +24,39 @@ namespace compiler {
HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
-// TODO(neis): It would be nice to share the serialized data for read-only
-// objects.
+// There are three kinds of ObjectData values.
+//
+// kSmi: The underlying V8 object is a Smi and the data is an instance of the
+// base class (ObjectData), i.e. it's basically just the handle. Because the
+// object is a Smi, it's safe to access the handle in order to extract the
+// number value, and AsSmi() does exactly that.
+//
+// kSerializedHeapObject: The underlying V8 object is a HeapObject and the
+// data is an instance of the corresponding (most-specific) subclass, e.g.
+// JSFunctionData, which provides serialized information about the object.
+//
+// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
+// data is an instance of the base class (ObjectData), i.e. it basically
+// carries no information other than the handle.
+//
+enum ObjectDataKind { kSmi, kSerializedHeapObject, kUnserializedHeapObject };
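// [Editorial sketch, not part of this commit. A minimal model of the three
// kinds described above: a Smi's value can be read straight off the handle,
// a serialized heap object carries broker-side data, and an unserialized
// heap object is nothing but the handle. Stand-in types, not V8 API.]
#include <cassert>

enum ObjectDataKind { kSmi, kSerializedHeapObject, kUnserializedHeapObject };

struct ObjectData {
  ObjectDataKind kind;
  int handle_value;  // Stand-in for the handle; only a Smi encodes its value.

  bool is_smi() const { return kind == kSmi; }
  int AsSmi() const {
    assert(is_smi());  // Only safe because a Smi's value lives in the handle.
    return handle_value;
  }
};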
class ObjectData : public ZoneObject {
public:
- static ObjectData* Serialize(JSHeapBroker* broker, Handle<Object> object);
-
- ObjectData(JSHeapBroker* broker_, Handle<Object> object_, bool is_smi_)
- : broker(broker_), object(object_), is_smi(is_smi_) {
- broker->AddData(object, this);
+ ObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<Object> object,
+ ObjectDataKind kind)
+ : object_(object), kind_(kind) {
+ // This assignment ensures we don't end up inserting the same object
+ // in an endless recursion.
+ *storage = this;
+
+ broker->Trace("Creating data %p for handle %" V8PRIuPTR " (", this,
+ object.address());
+ if (FLAG_trace_heap_broker) {
+ object->ShortPrint();
+ PrintF(")\n");
+ }
+ CHECK_NOT_NULL(broker->isolate()->handle_scope_data()->canonical_scope);
}
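// [Editorial sketch, not part of this commit. The constructor above writes
// itself into the broker's table slot (*storage = this) before any further
// serialization runs. A standalone model of why publishing the entry first
// terminates cycles, e.g. the meta map, whose map is itself:]
#include <cassert>
#include <map>

struct Object { const Object* map; };  // An object whose "map" may be itself.

struct Data;
std::map<const Object*, Data*> table;

struct Data {
  Data* map_data;
  explicit Data(const Object* o);
};

Data* GetOrCreate(const Object* o) {
  auto it = table.find(o);
  if (it != table.end()) return it->second;  // Cycle hits the published entry.
  return new Data(o);  // Leaks in this sketch; the real code zone-allocates.
}

Data::Data(const Object* o) {
  table[o] = this;                  // Publish before recursing...
  map_data = GetOrCreate(o->map);   // ...so self-references terminate.
}

int main() {
  Object meta{&meta};  // map == itself, like the meta map.
  Data* d = GetOrCreate(&meta);
  assert(d->map_data == d);
}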
#define DECLARE_IS_AND_AS(Name) \
@@ -37,137 +65,356 @@ class ObjectData : public ZoneObject {
HEAP_BROKER_OBJECT_LIST(DECLARE_IS_AND_AS)
#undef DECLARE_IS_AND_AS
- JSHeapBroker* const broker;
- Handle<Object> const object;
- bool const is_smi;
-};
+ Handle<Object> object() const { return object_; }
+ ObjectDataKind kind() const { return kind_; }
+ bool is_smi() const { return kind_ == kSmi; }
-// TODO(neis): Perhaps add a boolean that indicates whether serialization of an
-// object has completed. That could be used to add safety checks.
-
-#define GET_OR_CREATE(name) \
- broker->GetOrCreateData(handle(object_->name(), broker->isolate()))
+ private:
+ Handle<Object> const object_;
+ ObjectDataKind const kind_;
+};
class HeapObjectData : public ObjectData {
public:
+ HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<HeapObject> object);
+
+ bool boolean_value() const { return boolean_value_; }
+ MapData* map() const { return map_; }
+
static HeapObjectData* Serialize(JSHeapBroker* broker,
Handle<HeapObject> object);
- HeapObjectType const type;
- MapData* const map;
+ private:
+ bool const boolean_value_;
+ MapData* const map_;
+};
+
+class PropertyCellData : public HeapObjectData {
+ public:
+ PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<PropertyCell> object);
+
+ PropertyDetails property_details() const { return property_details_; }
- HeapObjectData(JSHeapBroker* broker_, Handle<HeapObject> object_,
- HeapObjectType type_)
- : ObjectData(broker_, object_, false),
- type(type_),
- map(GET_OR_CREATE(map)->AsMap()) {
- CHECK(broker_->SerializingAllowed());
+ void Serialize(JSHeapBroker* broker);
+ ObjectData* value() { return value_; }
+
+ private:
+ PropertyDetails const property_details_;
+
+ bool serialized_ = false;
+ ObjectData* value_ = nullptr;
+};
+
+void JSHeapBroker::IncrementTracingIndentation() { ++tracing_indentation_; }
+
+void JSHeapBroker::DecrementTracingIndentation() { --tracing_indentation_; }
+
+class TraceScope {
+ public:
+ TraceScope(JSHeapBroker* broker, const char* label)
+ : TraceScope(broker, static_cast<void*>(broker), label) {}
+
+ TraceScope(JSHeapBroker* broker, ObjectData* data, const char* label)
+ : TraceScope(broker, static_cast<void*>(data), label) {}
+
+ ~TraceScope() { broker_->DecrementTracingIndentation(); }
+
+ private:
+ JSHeapBroker* const broker_;
+
+ TraceScope(JSHeapBroker* broker, void* self, const char* label)
+ : broker_(broker) {
+ broker_->Trace("Running %s on %p.\n", label, self);
+ broker_->IncrementTracingIndentation();
}
};
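// [Editorial sketch, not part of this commit. TraceScope above is a classic
// RAII indent guard: construction logs a label and indents, destruction
// unindents, so nested serialization steps produce an indented trace
// automatically. A standalone equivalent:]
#include <cstdio>

struct Tracer {
  unsigned indentation = 0;
  void Trace(const char* msg) {
    for (unsigned i = 0; i < indentation; ++i) std::printf("  ");
    std::printf("%s\n", msg);
  }
};

class TraceScope {
 public:
  TraceScope(Tracer* tracer, const char* label) : tracer_(tracer) {
    tracer_->Trace(label);
    ++tracer_->indentation;  // Everything traced inside is indented...
  }
  ~TraceScope() { --tracer_->indentation; }  // ...until the scope exits.

 private:
  Tracer* const tracer_;
};

int main() {
  Tracer t;
  TraceScope outer(&t, "Serialize A");
  { TraceScope inner(&t, "Serialize A.map"); t.Trace("done"); }
}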
-class PropertyCellData : public HeapObjectData {
+PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<PropertyCell> object)
+ : HeapObjectData(broker, storage, object),
+ property_details_(object->property_details()) {}
+
+void PropertyCellData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "PropertyCellData::Serialize");
+ auto cell = Handle<PropertyCell>::cast(object());
+ DCHECK_NULL(value_);
+ value_ = broker->GetOrCreateData(cell->value());
+}
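// [Editorial sketch, not part of this commit. Every Serialize method in this
// file opens with the same two lines: an early return on the flag, then
// setting the flag *before* doing any work. Flipping the flag first makes
// the method idempotent and lets mutually recursive serializers terminate.]
struct ExampleData {
  bool serialized_ = false;
  void Serialize() {
    if (serialized_) return;  // Second and later calls are no-ops.
    serialized_ = true;       // Flip first, so re-entrant calls see "done".
    // ... copy fields, possibly calling Serialize() on other *Data ...
  }
};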
+
+class JSObjectField {
public:
- PropertyCellData(JSHeapBroker* broker_, Handle<PropertyCell> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_) {}
+ bool IsDouble() const { return object_ == nullptr; }
+ double AsDouble() const {
+ CHECK(IsDouble());
+ return number_;
+ }
+
+ bool IsObject() const { return object_ != nullptr; }
+ ObjectData* AsObject() const {
+ CHECK(IsObject());
+ return object_;
+ }
+
+ explicit JSObjectField(double value) : number_(value) {}
+ explicit JSObjectField(ObjectData* value) : object_(value) {}
+
+ private:
+ ObjectData* object_ = nullptr;
+ double number_ = 0;
};
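// [Editorial sketch, not part of this commit. JSObjectField above is a tiny
// hand-rolled variant: a null object_ pointer means the field holds a raw
// (unboxed) double, a non-null one means it holds serialized object data.
// A usage model with a stand-in ObjectData:]
#include <cassert>

struct ObjectData {};

class JSObjectField {
 public:
  bool IsDouble() const { return object_ == nullptr; }
  double AsDouble() const { assert(IsDouble()); return number_; }
  bool IsObject() const { return object_ != nullptr; }
  ObjectData* AsObject() const { assert(IsObject()); return object_; }

  explicit JSObjectField(double value) : number_(value) {}
  explicit JSObjectField(ObjectData* value) : object_(value) {}

 private:
  ObjectData* object_ = nullptr;
  double number_ = 0;
};

int main() {
  ObjectData data;
  JSObjectField unboxed(1.5), boxed(&data);
  assert(unboxed.IsDouble() && unboxed.AsDouble() == 1.5);
  assert(boxed.IsObject() && boxed.AsObject() == &data);
}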
class JSObjectData : public HeapObjectData {
public:
- JSObjectData(JSHeapBroker* broker_, Handle<JSObject> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_) {}
+ JSObjectData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSObject> object);
+
+ // Recursively serializes all reachable JSObjects.
+ void SerializeAsBoilerplate(JSHeapBroker* broker);
+ // Shallow serialization of {elements}.
+ void SerializeElements(JSHeapBroker* broker);
+
+ const JSObjectField& GetInobjectField(int property_index) const;
+ FixedArrayBaseData* elements() const;
+
+ // This method is only used to assert our invariants.
+ bool cow_or_empty_elements_tenured() const;
+
+ void SerializeObjectCreateMap(JSHeapBroker* broker);
+ MapData* object_create_map() const { // Can be nullptr.
+ CHECK(serialized_object_create_map_);
+ return object_create_map_;
+ }
+
+ private:
+  void SerializeRecursive(JSHeapBroker* broker, int depth);
+
+ FixedArrayBaseData* elements_ = nullptr;
+ bool cow_or_empty_elements_tenured_ = false;
+ // The {serialized_as_boilerplate} flag is set when all recursively
+ // reachable JSObjects are serialized.
+ bool serialized_as_boilerplate_ = false;
+ bool serialized_elements_ = false;
+
+ ZoneVector<JSObjectField> inobject_fields_;
+
+ bool serialized_object_create_map_ = false;
+ MapData* object_create_map_ = nullptr;
};
+void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
+ if (serialized_object_create_map_) return;
+ serialized_object_create_map_ = true;
+
+ TraceScope tracer(broker, this, "JSObjectData::SerializeObjectCreateMap");
+ Handle<JSObject> jsobject = Handle<JSObject>::cast(object());
+
+ if (jsobject->map()->is_prototype_map()) {
+ Handle<Object> maybe_proto_info(jsobject->map()->prototype_info(),
+ broker->isolate());
+ if (maybe_proto_info->IsPrototypeInfo()) {
+ auto proto_info = Handle<PrototypeInfo>::cast(maybe_proto_info);
+ if (proto_info->HasObjectCreateMap()) {
+ DCHECK_NULL(object_create_map_);
+ object_create_map_ =
+ broker->GetOrCreateData(proto_info->ObjectCreateMap())->AsMap();
+ }
+ }
+ }
+}
+
class JSFunctionData : public JSObjectData {
public:
- JSGlobalProxyData* const global_proxy;
- MapData* const initial_map; // Can be nullptr.
- bool const has_prototype;
- ObjectData* const prototype; // Can be nullptr.
- bool const PrototypeRequiresRuntimeLookup;
- SharedFunctionInfoData* const shared;
-
- JSFunctionData(JSHeapBroker* broker_, Handle<JSFunction> object_,
- HeapObjectType type_);
+ JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSFunction> object);
+
+ bool has_initial_map() const { return has_initial_map_; }
+ bool has_prototype() const { return has_prototype_; }
+ bool PrototypeRequiresRuntimeLookup() const {
+ return PrototypeRequiresRuntimeLookup_;
+ }
+
+ void Serialize(JSHeapBroker* broker);
+
+ JSGlobalProxyData* global_proxy() const { return global_proxy_; }
+ MapData* initial_map() const { return initial_map_; }
+ ObjectData* prototype() const { return prototype_; }
+ SharedFunctionInfoData* shared() const { return shared_; }
+ int initial_map_instance_size_with_min_slack() const {
+ CHECK(serialized_);
+ return initial_map_instance_size_with_min_slack_;
+ }
+
+ private:
+ bool has_initial_map_;
+ bool has_prototype_;
+ bool PrototypeRequiresRuntimeLookup_;
+
+ bool serialized_ = false;
+
+ JSGlobalProxyData* global_proxy_ = nullptr;
+ MapData* initial_map_ = nullptr;
+ ObjectData* prototype_ = nullptr;
+ SharedFunctionInfoData* shared_ = nullptr;
+ int initial_map_instance_size_with_min_slack_;
};
class JSRegExpData : public JSObjectData {
public:
- JSRegExpData(JSHeapBroker* broker_, Handle<JSRegExp> object_,
- HeapObjectType type_)
- : JSObjectData(broker_, object_, type_) {}
+ JSRegExpData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSRegExp> object)
+ : JSObjectData(broker, storage, object) {}
+
+ void SerializeAsRegExpBoilerplate(JSHeapBroker* broker);
+
+ ObjectData* raw_properties_or_hash() const { return raw_properties_or_hash_; }
+ ObjectData* data() const { return data_; }
+ ObjectData* source() const { return source_; }
+ ObjectData* flags() const { return flags_; }
+ ObjectData* last_index() const { return last_index_; }
+
+ private:
+ bool serialized_as_reg_exp_boilerplate_ = false;
+
+ ObjectData* raw_properties_or_hash_ = nullptr;
+ ObjectData* data_ = nullptr;
+ ObjectData* source_ = nullptr;
+ ObjectData* flags_ = nullptr;
+ ObjectData* last_index_ = nullptr;
};
class HeapNumberData : public HeapObjectData {
public:
- HeapNumberData(JSHeapBroker* broker_, Handle<HeapNumber> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_) {}
+ HeapNumberData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<HeapNumber> object)
+ : HeapObjectData(broker, storage, object), value_(object->value()) {}
+
+ double value() const { return value_; }
+
+ private:
+ double const value_;
};
class MutableHeapNumberData : public HeapObjectData {
public:
- MutableHeapNumberData(JSHeapBroker* broker_,
- Handle<MutableHeapNumber> object_, HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_) {}
+ MutableHeapNumberData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<MutableHeapNumber> object)
+ : HeapObjectData(broker, storage, object), value_(object->value()) {}
+
+ double value() const { return value_; }
+
+ private:
+ double const value_;
};
class ContextData : public HeapObjectData {
public:
- ContextData(JSHeapBroker* broker_, Handle<Context> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_) {}
+ ContextData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<Context> object);
+ void Serialize(JSHeapBroker* broker);
+
+ ContextData* previous() const {
+ CHECK(serialized_);
+ return previous_;
+ }
+
+ private:
+ bool serialized_ = false;
+ ContextData* previous_ = nullptr;
};
+ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<Context> object)
+ : HeapObjectData(broker, storage, object) {}
+
+void ContextData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "ContextData::Serialize");
+ Handle<Context> context = Handle<Context>::cast(object());
+
+ DCHECK_NULL(previous_);
+ // Context::previous DCHECK-fails when called on the native context.
+ if (!context->IsNativeContext()) {
+ previous_ = broker->GetOrCreateData(context->previous())->AsContext();
+ previous_->Serialize(broker);
+ }
+}
+
class NativeContextData : public ContextData {
public:
-#define DECL_MEMBER(type, name) type##Data* const name;
+#define DECL_ACCESSOR(type, name) \
+ type##Data* name() const { return name##_; }
+ BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
+
+ const ZoneVector<MapData*>& function_maps() const {
+ CHECK(serialized_);
+ return function_maps_;
+ }
+
+ NativeContextData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<NativeContext> object);
+ void Serialize(JSHeapBroker* broker);
+
+ private:
+ bool serialized_ = false;
+#define DECL_MEMBER(type, name) type##Data* name##_ = nullptr;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
-
- NativeContextData(JSHeapBroker* broker_, Handle<NativeContext> object_,
- HeapObjectType type_)
- : ContextData(broker_, object_, type_)
-#define INIT_MEMBER(type, name) , name(GET_OR_CREATE(name)->As##type())
- BROKER_NATIVE_CONTEXT_FIELDS(INIT_MEMBER)
-#undef INIT_MEMBER
- {
- }
+ ZoneVector<MapData*> function_maps_;
};
class NameData : public HeapObjectData {
public:
- NameData(JSHeapBroker* broker, Handle<Name> object, HeapObjectType type)
- : HeapObjectData(broker, object, type) {}
+ NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object)
+ : HeapObjectData(broker, storage, object) {}
};
class StringData : public NameData {
public:
- StringData(JSHeapBroker* broker, Handle<String> object, HeapObjectType type)
- : NameData(broker, object, type),
- length(object->length()),
- first_char(length > 0 ? object->Get(0) : 0) {
- int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- if (length <= kMaxLengthForDoubleConversion) {
- to_number = StringToDouble(
- broker->isolate(), broker->isolate()->unicode_cache(), object, flags);
- }
- }
+ StringData(JSHeapBroker* broker, ObjectData** storage, Handle<String> object);
- int const length;
- uint16_t const first_char;
- base::Optional<double> to_number;
+ int length() const { return length_; }
+ uint16_t first_char() const { return first_char_; }
+ base::Optional<double> to_number() const { return to_number_; }
+ bool is_external_string() const { return is_external_string_; }
+ bool is_seq_string() const { return is_seq_string_; }
private:
+ int const length_;
+ uint16_t const first_char_;
+ base::Optional<double> to_number_;
+ bool const is_external_string_;
+ bool const is_seq_string_;
+
static constexpr int kMaxLengthForDoubleConversion = 23;
};
+StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<String> object)
+ : NameData(broker, storage, object),
+ length_(object->length()),
+ first_char_(length_ > 0 ? object->Get(0) : 0),
+ is_external_string_(object->IsExternalString()),
+ is_seq_string_(object->IsSeqString()) {
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ if (length_ <= kMaxLengthForDoubleConversion) {
+ to_number_ = StringToDouble(
+ broker->isolate(), broker->isolate()->unicode_cache(), object, flags);
+ }
+}
+
class InternalizedStringData : public StringData {
public:
- InternalizedStringData(JSHeapBroker* broker,
- Handle<InternalizedString> object, HeapObjectType type)
- : StringData(broker, object, type) {}
+ InternalizedStringData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<InternalizedString> object)
+ : StringData(broker, storage, object) {}
};
namespace {
@@ -258,93 +505,239 @@ bool IsInlinableFastLiteral(Handle<JSObject> boilerplate) {
class AllocationSiteData : public HeapObjectData {
public:
- AllocationSiteData(JSHeapBroker* broker, Handle<AllocationSite> object_,
- HeapObjectType type_)
- : HeapObjectData(broker, object_, type_),
- PointsToLiteral(object_->PointsToLiteral()),
- GetPretenureMode(object_->GetPretenureMode()),
- nested_site(GET_OR_CREATE(nested_site)) {
- if (PointsToLiteral) {
- if (IsInlinableFastLiteral(
- handle(object_->boilerplate(), broker->isolate()))) {
- boilerplate = GET_OR_CREATE(boilerplate)->AsJSObject();
- }
- } else {
- GetElementsKind = object_->GetElementsKind();
- CanInlineCall = object_->CanInlineCall();
- }
- }
+ AllocationSiteData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<AllocationSite> object);
+ void SerializeBoilerplate(JSHeapBroker* broker);
- bool const PointsToLiteral;
- PretenureFlag const GetPretenureMode;
- ObjectData* const nested_site;
- JSObjectData* boilerplate = nullptr;
+ bool PointsToLiteral() const { return PointsToLiteral_; }
+ PretenureFlag GetPretenureMode() const { return GetPretenureMode_; }
+ ObjectData* nested_site() const { return nested_site_; }
+ bool IsFastLiteral() const { return IsFastLiteral_; }
+ JSObjectData* boilerplate() const { return boilerplate_; }
// These are only valid if PointsToLiteral is false.
- ElementsKind GetElementsKind = NO_ELEMENTS;
- bool CanInlineCall = false;
+ ElementsKind GetElementsKind() const { return GetElementsKind_; }
+ bool CanInlineCall() const { return CanInlineCall_; }
+
+ private:
+ bool const PointsToLiteral_;
+ PretenureFlag const GetPretenureMode_;
+ ObjectData* nested_site_ = nullptr;
+ bool IsFastLiteral_ = false;
+ JSObjectData* boilerplate_ = nullptr;
+ ElementsKind GetElementsKind_ = NO_ELEMENTS;
+ bool CanInlineCall_ = false;
+ bool serialized_boilerplate_ = false;
};
// Only used in JSNativeContextSpecialization.
class ScriptContextTableData : public HeapObjectData {
public:
- ScriptContextTableData(JSHeapBroker* broker_,
- Handle<ScriptContextTable> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_) {}
+ ScriptContextTableData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<ScriptContextTable> object)
+ : HeapObjectData(broker, storage, object) {}
+};
+
+struct PropertyDescriptor {
+ NameData* key = nullptr;
+ PropertyDetails details = PropertyDetails::Empty();
+ FieldIndex field_index;
+ MapData* field_owner = nullptr;
+ ObjectData* field_type = nullptr;
+ bool is_unboxed_double_field = false;
};
class MapData : public HeapObjectData {
public:
- InstanceType const instance_type;
- int const instance_size;
- byte const bit_field;
- byte const bit_field2;
- uint32_t const bit_field3;
-
- MapData(JSHeapBroker* broker_, Handle<Map> object_, HeapObjectType type_);
+ MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object);
+
+ InstanceType instance_type() const { return instance_type_; }
+ int instance_size() const { return instance_size_; }
+ byte bit_field() const { return bit_field_; }
+ byte bit_field2() const { return bit_field2_; }
+ uint32_t bit_field3() const { return bit_field3_; }
+ bool can_be_deprecated() const { return can_be_deprecated_; }
+ bool can_transition() const { return can_transition_; }
+ int in_object_properties_start_in_words() const {
+ CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
+ return in_object_properties_start_in_words_;
+ }
+ int in_object_properties() const {
+ CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
+ return in_object_properties_;
+ }
// Extra information.
- void SerializeElementsKindGeneralizations();
- const ZoneVector<MapData*>& elements_kind_generalizations() {
+
+ void SerializeElementsKindGeneralizations(JSHeapBroker* broker);
+ const ZoneVector<MapData*>& elements_kind_generalizations() const {
+ CHECK(serialized_elements_kind_generalizations_);
return elements_kind_generalizations_;
}
+ // Serialize the own part of the descriptor array and, recursively, that of
+ // any field owner.
+ void SerializeOwnDescriptors(JSHeapBroker* broker);
+ DescriptorArrayData* instance_descriptors() const {
+ CHECK(serialized_own_descriptors_);
+ return instance_descriptors_;
+ }
+
+ void SerializeConstructorOrBackpointer(JSHeapBroker* broker);
+ ObjectData* constructor_or_backpointer() const {
+ CHECK(serialized_constructor_or_backpointer_);
+ return constructor_or_backpointer_;
+ }
+
+ void SerializePrototype(JSHeapBroker* broker);
+ ObjectData* prototype() const {
+ CHECK(serialized_prototype_);
+ return prototype_;
+ }
+
private:
+ InstanceType const instance_type_;
+ int const instance_size_;
+ byte const bit_field_;
+ byte const bit_field2_;
+ uint32_t const bit_field3_;
+ bool const can_be_deprecated_;
+ bool const can_transition_;
+ int const in_object_properties_start_in_words_;
+ int const in_object_properties_;
+
+ bool serialized_elements_kind_generalizations_ = false;
ZoneVector<MapData*> elements_kind_generalizations_;
+
+ bool serialized_own_descriptors_ = false;
+ DescriptorArrayData* instance_descriptors_ = nullptr;
+
+ bool serialized_constructor_or_backpointer_ = false;
+ ObjectData* constructor_or_backpointer_ = nullptr;
+
+ bool serialized_prototype_ = false;
+ ObjectData* prototype_ = nullptr;
};
-MapData::MapData(JSHeapBroker* broker_, Handle<Map> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_),
- instance_type(object_->instance_type()),
- instance_size(object_->instance_size()),
- bit_field(object_->bit_field()),
- bit_field2(object_->bit_field2()),
- bit_field3(object_->bit_field3()),
+AllocationSiteData::AllocationSiteData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<AllocationSite> object)
+ : HeapObjectData(broker, storage, object),
+ PointsToLiteral_(object->PointsToLiteral()),
+ GetPretenureMode_(object->GetPretenureMode()) {
+ if (PointsToLiteral_) {
+ IsFastLiteral_ = IsInlinableFastLiteral(
+ handle(object->boilerplate(), broker->isolate()));
+ } else {
+ GetElementsKind_ = object->GetElementsKind();
+ CanInlineCall_ = object->CanInlineCall();
+ }
+}
+
+void AllocationSiteData::SerializeBoilerplate(JSHeapBroker* broker) {
+ if (serialized_boilerplate_) return;
+ serialized_boilerplate_ = true;
+
+ TraceScope tracer(broker, this, "AllocationSiteData::SerializeBoilerplate");
+ Handle<AllocationSite> site = Handle<AllocationSite>::cast(object());
+
+ CHECK(IsFastLiteral_);
+ DCHECK_NULL(boilerplate_);
+ boilerplate_ = broker->GetOrCreateData(site->boilerplate())->AsJSObject();
+ boilerplate_->SerializeAsBoilerplate(broker);
+
+ DCHECK_NULL(nested_site_);
+ nested_site_ = broker->GetOrCreateData(site->nested_site());
+ if (nested_site_->IsAllocationSite()) {
+ nested_site_->AsAllocationSite()->SerializeBoilerplate(broker);
+ }
+}
+
+HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<HeapObject> object)
+ : ObjectData(broker, storage, object, kSerializedHeapObject),
+ boolean_value_(object->BooleanValue(broker->isolate())),
+ // We have to use a raw cast below instead of AsMap() because of
+ // recursion. AsMap() would call IsMap(), which accesses the
+ // instance_type_ member. In the case of constructing the MapData for the
+ // meta map (whose map is itself), this member has not yet been
+ // initialized.
+ map_(static_cast<MapData*>(broker->GetOrCreateData(object->map()))) {
+ CHECK(broker->SerializingAllowed());
+}
+
+MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object)
+ : HeapObjectData(broker, storage, object),
+ instance_type_(object->instance_type()),
+ instance_size_(object->instance_size()),
+ bit_field_(object->bit_field()),
+ bit_field2_(object->bit_field2()),
+ bit_field3_(object->bit_field3()),
+ can_be_deprecated_(object->NumberOfOwnDescriptors() > 0
+ ? object->CanBeDeprecated()
+ : false),
+ can_transition_(object->CanTransition()),
+ in_object_properties_start_in_words_(
+ object->IsJSObjectMap() ? object->GetInObjectPropertiesStartInWords()
+ : 0),
+ in_object_properties_(
+ object->IsJSObjectMap() ? object->GetInObjectProperties() : 0),
elements_kind_generalizations_(broker->zone()) {}
-JSFunctionData::JSFunctionData(JSHeapBroker* broker_,
- Handle<JSFunction> object_, HeapObjectType type_)
- : JSObjectData(broker_, object_, type_),
- global_proxy(GET_OR_CREATE(global_proxy)->AsJSGlobalProxy()),
- initial_map(object_->has_prototype_slot() && object_->has_initial_map()
- ? GET_OR_CREATE(initial_map)->AsMap()
- : nullptr),
- has_prototype(object_->has_prototype_slot() && object_->has_prototype()),
- prototype(has_prototype ? GET_OR_CREATE(prototype) : nullptr),
- PrototypeRequiresRuntimeLookup(object_->PrototypeRequiresRuntimeLookup()),
- shared(GET_OR_CREATE(shared)->AsSharedFunctionInfo()) {
- if (initial_map != nullptr && initial_map->instance_type == JS_ARRAY_TYPE) {
- initial_map->SerializeElementsKindGeneralizations();
- }
-}
-
-void MapData::SerializeElementsKindGeneralizations() {
- broker->Trace("Computing ElementsKind generalizations of %p.\n", *object);
- DCHECK_EQ(instance_type, JS_ARRAY_TYPE);
- MapRef self(this);
+JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSFunction> object)
+ : JSObjectData(broker, storage, object),
+ has_initial_map_(object->has_prototype_slot() &&
+ object->has_initial_map()),
+ has_prototype_(object->has_prototype_slot() && object->has_prototype()),
+ PrototypeRequiresRuntimeLookup_(
+ object->PrototypeRequiresRuntimeLookup()) {}
+
+void JSFunctionData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "JSFunctionData::Serialize");
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object());
+
+ DCHECK_NULL(global_proxy_);
+ DCHECK_NULL(initial_map_);
+ DCHECK_NULL(prototype_);
+ DCHECK_NULL(shared_);
+
+ global_proxy_ =
+ broker->GetOrCreateData(function->global_proxy())->AsJSGlobalProxy();
+ shared_ = broker->GetOrCreateData(function->shared())->AsSharedFunctionInfo();
+ initial_map_ = has_initial_map()
+ ? broker->GetOrCreateData(function->initial_map())->AsMap()
+ : nullptr;
+ prototype_ = has_prototype() ? broker->GetOrCreateData(function->prototype())
+ : nullptr;
+
+ if (initial_map_ != nullptr) {
+ initial_map_instance_size_with_min_slack_ =
+ function->ComputeInstanceSizeWithMinSlack(broker->isolate());
+ if (initial_map_->instance_type() == JS_ARRAY_TYPE) {
+ initial_map_->SerializeElementsKindGeneralizations(broker);
+ }
+ initial_map_->SerializeConstructorOrBackpointer(broker);
+ // TODO(neis): This is currently only needed for native_context's
+ // object_function, as used by GetObjectCreateMap. If no further use sites
+ // show up, we should move this into NativeContextData::Serialize.
+ initial_map_->SerializePrototype(broker);
+ }
+}
+
+void MapData::SerializeElementsKindGeneralizations(JSHeapBroker* broker) {
+ if (serialized_elements_kind_generalizations_) return;
+ serialized_elements_kind_generalizations_ = true;
+
+ TraceScope tracer(broker, this,
+ "MapData::SerializeElementsKindGeneralizations");
+ DCHECK_EQ(instance_type(), JS_ARRAY_TYPE);
+ MapRef self(broker, this);
ElementsKind from_kind = self.elements_kind();
+ DCHECK(elements_kind_generalizations_.empty());
for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
ElementsKind to_kind = static_cast<ElementsKind>(i);
if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
@@ -356,195 +749,583 @@ void MapData::SerializeElementsKindGeneralizations() {
}
}
+class DescriptorArrayData : public HeapObjectData {
+ public:
+ DescriptorArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<DescriptorArray> object)
+ : HeapObjectData(broker, storage, object), contents_(broker->zone()) {}
+
+ ZoneVector<PropertyDescriptor>& contents() { return contents_; }
+
+ private:
+ ZoneVector<PropertyDescriptor> contents_;
+};
+
class FeedbackVectorData : public HeapObjectData {
public:
const ZoneVector<ObjectData*>& feedback() { return feedback_; }
- FeedbackVectorData(JSHeapBroker* broker_, Handle<FeedbackVector> object_,
- HeapObjectType type_);
+ FeedbackVectorData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FeedbackVector> object);
+
+ void SerializeSlots(JSHeapBroker* broker);
private:
+ bool serialized_ = false;
ZoneVector<ObjectData*> feedback_;
};
-FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker_,
- Handle<FeedbackVector> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_), feedback_(broker_->zone()) {
- feedback_.reserve(object_->length());
- for (int i = 0; i < object_->length(); ++i) {
- MaybeObject* value = object_->get(i);
- feedback_.push_back(value->IsObject()
- ? broker->GetOrCreateData(
- handle(value->ToObject(), broker->isolate()))
- : nullptr);
+FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<FeedbackVector> object)
+ : HeapObjectData(broker, storage, object), feedback_(broker->zone()) {}
+
+void FeedbackVectorData::SerializeSlots(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "FeedbackVectorData::SerializeSlots");
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object());
+ DCHECK(feedback_.empty());
+ feedback_.reserve(vector->length());
+ for (int i = 0; i < vector->length(); ++i) {
+ MaybeObject* value = vector->get(i);
+ ObjectData* slot_value =
+ value->IsObject() ? broker->GetOrCreateData(value->cast<Object>())
+ : nullptr;
+ feedback_.push_back(slot_value);
+ if (slot_value == nullptr) continue;
+
+ if (slot_value->IsAllocationSite() &&
+ slot_value->AsAllocationSite()->IsFastLiteral()) {
+ slot_value->AsAllocationSite()->SerializeBoilerplate(broker);
+ } else if (slot_value->IsJSRegExp()) {
+ slot_value->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker);
+ }
}
- DCHECK_EQ(object_->length(), feedback_.size());
+ DCHECK_EQ(vector->length(), feedback_.size());
+ broker->Trace("Copied %zu slots.\n", feedback_.size());
}
class FixedArrayBaseData : public HeapObjectData {
public:
- int const length;
+ FixedArrayBaseData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FixedArrayBase> object)
+ : HeapObjectData(broker, storage, object), length_(object->length()) {}
+
+ int length() const { return length_; }
- FixedArrayBaseData(JSHeapBroker* broker_, Handle<FixedArrayBase> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_), length(object_->length()) {}
+ private:
+ int const length_;
};
+JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSObject> object)
+ : HeapObjectData(broker, storage, object),
+ inobject_fields_(broker->zone()) {}
+
class FixedArrayData : public FixedArrayBaseData {
public:
- FixedArrayData(JSHeapBroker* broker_, Handle<FixedArray> object_,
- HeapObjectType type_)
- : FixedArrayBaseData(broker_, object_, type_) {}
+ FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FixedArray> object);
+
+  // Serializes all elements of the fixed array.
+ void SerializeContents(JSHeapBroker* broker);
+
+ ObjectData* Get(int i) const;
+
+ private:
+ bool serialized_contents_ = false;
+ ZoneVector<ObjectData*> contents_;
};
+void FixedArrayData::SerializeContents(JSHeapBroker* broker) {
+ if (serialized_contents_) return;
+ serialized_contents_ = true;
+
+ TraceScope tracer(broker, this, "FixedArrayData::SerializeContents");
+ Handle<FixedArray> array = Handle<FixedArray>::cast(object());
+ CHECK_EQ(array->length(), length());
+ CHECK(contents_.empty());
+ contents_.reserve(static_cast<size_t>(length()));
+
+ for (int i = 0; i < length(); i++) {
+ Handle<Object> value(array->get(i), broker->isolate());
+ contents_.push_back(broker->GetOrCreateData(value));
+ }
+ broker->Trace("Copied %zu elements.\n", contents_.size());
+}
+
+FixedArrayData::FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FixedArray> object)
+ : FixedArrayBaseData(broker, storage, object), contents_(broker->zone()) {}
+
class FixedDoubleArrayData : public FixedArrayBaseData {
public:
- FixedDoubleArrayData(JSHeapBroker* broker_, Handle<FixedDoubleArray> object_,
- HeapObjectType type_)
- : FixedArrayBaseData(broker_, object_, type_) {}
+ FixedDoubleArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FixedDoubleArray> object);
+
+ // Serializes all elements of the fixed array.
+ void SerializeContents(JSHeapBroker* broker);
+
+ Float64 Get(int i) const;
+
+ private:
+ bool serialized_contents_ = false;
+ ZoneVector<Float64> contents_;
};
+FixedDoubleArrayData::FixedDoubleArrayData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<FixedDoubleArray> object)
+ : FixedArrayBaseData(broker, storage, object), contents_(broker->zone()) {}
+
+void FixedDoubleArrayData::SerializeContents(JSHeapBroker* broker) {
+ if (serialized_contents_) return;
+ serialized_contents_ = true;
+
+ TraceScope tracer(broker, this, "FixedDoubleArrayData::SerializeContents");
+ Handle<FixedDoubleArray> self = Handle<FixedDoubleArray>::cast(object());
+ CHECK_EQ(self->length(), length());
+ CHECK(contents_.empty());
+ contents_.reserve(static_cast<size_t>(length()));
+
+ for (int i = 0; i < length(); i++) {
+ contents_.push_back(Float64::FromBits(self->get_representation(i)));
+ }
+ broker->Trace("Copied %zu elements.\n", contents_.size());
+}
+
class BytecodeArrayData : public FixedArrayBaseData {
public:
- int const register_count;
+ int register_count() const { return register_count_; }
- BytecodeArrayData(JSHeapBroker* broker_, Handle<BytecodeArray> object_,
- HeapObjectType type_)
- : FixedArrayBaseData(broker_, object_, type_),
- register_count(object_->register_count()) {}
+ BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<BytecodeArray> object)
+ : FixedArrayBaseData(broker, storage, object),
+ register_count_(object->register_count()) {}
+
+ private:
+ int const register_count_;
};
class JSArrayData : public JSObjectData {
public:
- JSArrayData(JSHeapBroker* broker_, Handle<JSArray> object_,
- HeapObjectType type_)
- : JSObjectData(broker_, object_, type_) {}
+ JSArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSArray> object);
+ void Serialize(JSHeapBroker* broker);
+
+ ObjectData* length() const { return length_; }
+
+ private:
+ bool serialized_ = false;
+ ObjectData* length_ = nullptr;
};
+JSArrayData::JSArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSArray> object)
+ : JSObjectData(broker, storage, object) {}
+
+void JSArrayData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "JSArrayData::Serialize");
+ Handle<JSArray> jsarray = Handle<JSArray>::cast(object());
+ DCHECK_NULL(length_);
+ length_ = broker->GetOrCreateData(jsarray->length());
+}
+
class ScopeInfoData : public HeapObjectData {
public:
- ScopeInfoData(JSHeapBroker* broker_, Handle<ScopeInfo> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_) {}
+ ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<ScopeInfo> object);
+
+ int context_length() const { return context_length_; }
+
+ private:
+ int const context_length_;
};
+ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<ScopeInfo> object)
+ : HeapObjectData(broker, storage, object),
+ context_length_(object->ContextLength()) {}
+
class SharedFunctionInfoData : public HeapObjectData {
public:
- int const builtin_id;
- BytecodeArrayData* const GetBytecodeArray; // Can be nullptr.
-#define DECL_MEMBER(type, name) type const name;
- BROKER_SFI_FIELDS(DECL_MEMBER)
-#undef DECL_MEMBER
-
- SharedFunctionInfoData(JSHeapBroker* broker_,
- Handle<SharedFunctionInfo> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_),
- builtin_id(object_->HasBuiltinId() ? object_->builtin_id()
+ int builtin_id() const { return builtin_id_; }
+ BytecodeArrayData* GetBytecodeArray() const { return GetBytecodeArray_; }
+#define DECL_ACCESSOR(type, name) \
+ type name() const { return name##_; }
+ BROKER_SFI_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
+
+ SharedFunctionInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<SharedFunctionInfo> object)
+ : HeapObjectData(broker, storage, object),
+ builtin_id_(object->HasBuiltinId() ? object->builtin_id()
: Builtins::kNoBuiltinId),
- GetBytecodeArray(
- object_->HasBytecodeArray()
- ? GET_OR_CREATE(GetBytecodeArray)->AsBytecodeArray()
+ GetBytecodeArray_(
+ object->HasBytecodeArray()
+ ? broker->GetOrCreateData(object->GetBytecodeArray())
+ ->AsBytecodeArray()
: nullptr)
-#define INIT_MEMBER(type, name) , name(object_->name())
+#define INIT_MEMBER(type, name) , name##_(object->name())
BROKER_SFI_FIELDS(INIT_MEMBER)
#undef INIT_MEMBER
{
- DCHECK_EQ(HasBuiltinId, builtin_id != Builtins::kNoBuiltinId);
- DCHECK_EQ(HasBytecodeArray, GetBytecodeArray != nullptr);
+ DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId);
+ DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr);
}
+
+ private:
+ int const builtin_id_;
+ BytecodeArrayData* const GetBytecodeArray_;
+#define DECL_MEMBER(type, name) type const name##_;
+ BROKER_SFI_FIELDS(DECL_MEMBER)
+#undef DECL_MEMBER
};
class ModuleData : public HeapObjectData {
public:
- ModuleData(JSHeapBroker* broker_, Handle<Module> object_,
- HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_) {}
+ ModuleData(JSHeapBroker* broker, ObjectData** storage, Handle<Module> object);
+ void Serialize(JSHeapBroker* broker);
+
+ CellData* GetCell(int cell_index) const;
+
+ private:
+ bool serialized_ = false;
+ ZoneVector<CellData*> imports_;
+ ZoneVector<CellData*> exports_;
};
+ModuleData::ModuleData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<Module> object)
+ : HeapObjectData(broker, storage, object),
+ imports_(broker->zone()),
+ exports_(broker->zone()) {}
+
+CellData* ModuleData::GetCell(int cell_index) const {
+ CHECK(serialized_);
+ CellData* cell;
+ switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
+ case ModuleDescriptor::kImport:
+ cell = imports_.at(Module::ImportIndex(cell_index));
+ break;
+ case ModuleDescriptor::kExport:
+ cell = exports_.at(Module::ExportIndex(cell_index));
+ break;
+ case ModuleDescriptor::kInvalid:
+ UNREACHABLE();
+ break;
+ }
+ CHECK_NOT_NULL(cell);
+ return cell;
+}
+
+void ModuleData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "ModuleData::Serialize");
+ Handle<Module> module = Handle<Module>::cast(object());
+
+ // TODO(neis): We could be smarter and only serialize the cells we care about.
+ // TODO(neis): Define a helper for serializing a FixedArray into a ZoneVector.
+
+ DCHECK(imports_.empty());
+ Handle<FixedArray> imports(module->regular_imports(), broker->isolate());
+ int const imports_length = imports->length();
+ imports_.reserve(imports_length);
+ for (int i = 0; i < imports_length; ++i) {
+ imports_.push_back(broker->GetOrCreateData(imports->get(i))->AsCell());
+ }
+ broker->Trace("Copied %zu imports.\n", imports_.size());
+
+ DCHECK(exports_.empty());
+ Handle<FixedArray> exports(module->regular_exports(), broker->isolate());
+ int const exports_length = exports->length();
+ exports_.reserve(exports_length);
+ for (int i = 0; i < exports_length; ++i) {
+ exports_.push_back(broker->GetOrCreateData(exports->get(i))->AsCell());
+ }
+ broker->Trace("Copied %zu exports.\n", exports_.size());
+}
+
class CellData : public HeapObjectData {
public:
- CellData(JSHeapBroker* broker_, Handle<Cell> object_, HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_) {}
+ CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object)
+ : HeapObjectData(broker, storage, object) {}
};
class JSGlobalProxyData : public JSObjectData {
public:
- JSGlobalProxyData(JSHeapBroker* broker_, Handle<JSGlobalProxy> object_,
- HeapObjectType type_)
- : JSObjectData(broker_, object_, type_) {}
+ JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSGlobalProxy> object)
+ : JSObjectData(broker, storage, object) {}
};
class CodeData : public HeapObjectData {
public:
- CodeData(JSHeapBroker* broker_, Handle<Code> object_, HeapObjectType type_)
- : HeapObjectData(broker_, object_, type_) {}
+ CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
+ : HeapObjectData(broker, storage, object) {}
};
-#define DEFINE_IS_AND_AS(Name) \
- bool ObjectData::Is##Name() const { \
- if (broker->mode() == JSHeapBroker::kDisabled) { \
- AllowHandleDereference allow_handle_dereference; \
- return object->Is##Name(); \
- } \
- if (is_smi) return false; \
- InstanceType instance_type = \
- static_cast<const HeapObjectData*>(this)->type.instance_type(); \
- return InstanceTypeChecker::Is##Name(instance_type); \
- } \
- Name##Data* ObjectData::As##Name() { \
- CHECK_NE(broker->mode(), JSHeapBroker::kDisabled); \
- CHECK(Is##Name()); \
- return static_cast<Name##Data*>(this); \
+#define DEFINE_IS_AND_AS(Name) \
+ bool ObjectData::Is##Name() const { \
+ if (kind() == kUnserializedHeapObject) { \
+ AllowHandleDereference allow_handle_dereference; \
+ return object()->Is##Name(); \
+ } \
+ if (is_smi()) return false; \
+ InstanceType instance_type = \
+ static_cast<const HeapObjectData*>(this)->map()->instance_type(); \
+ return InstanceTypeChecker::Is##Name(instance_type); \
+ } \
+ Name##Data* ObjectData::As##Name() { \
+ CHECK_EQ(kind(), kSerializedHeapObject); \
+ CHECK(Is##Name()); \
+ return static_cast<Name##Data*>(this); \
}
HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS
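// [Editorial sketch, not part of this commit. DEFINE_IS_AND_AS above is the
// X-macro pattern: one list macro (HEAP_BROKER_OBJECT_LIST) is expanded with
// different per-entry macros to stamp out declarations and definitions in
// lockstep. A minimal standalone version:]
#include <cassert>

#define SHAPE_LIST(V) V(Circle) V(Square)

enum ShapeKind {
#define DECL_ENUM(Name) k##Name,
  SHAPE_LIST(DECL_ENUM)
#undef DECL_ENUM
};

struct Shape {
  ShapeKind kind;
// One Is<Name>() predicate per list entry, generated from the same list.
#define DEFINE_IS(Name) \
  bool Is##Name() const { return kind == k##Name; }
  SHAPE_LIST(DEFINE_IS)
#undef DEFINE_IS
};

int main() {
  Shape s{kCircle};
  assert(s.IsCircle() && !s.IsSquare());
}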
-ObjectData* ObjectData::Serialize(JSHeapBroker* broker, Handle<Object> object) {
- CHECK(broker->SerializingAllowed());
- return object->IsSmi() ? new (broker->zone()) ObjectData(broker, object, true)
- : HeapObjectData::Serialize(
- broker, Handle<HeapObject>::cast(object));
+const JSObjectField& JSObjectData::GetInobjectField(int property_index) const {
+ CHECK_LT(static_cast<size_t>(property_index), inobject_fields_.size());
+ return inobject_fields_[property_index];
}
-HeapObjectData* HeapObjectData::Serialize(JSHeapBroker* broker,
- Handle<HeapObject> object) {
- CHECK(broker->SerializingAllowed());
- Handle<Map> map(object->map(), broker->isolate());
- HeapObjectType type = broker->HeapObjectTypeFromMap(map);
+bool JSObjectData::cow_or_empty_elements_tenured() const {
+ return cow_or_empty_elements_tenured_;
+}
+
+FixedArrayBaseData* JSObjectData::elements() const { return elements_; }
-#define RETURN_CREATE_DATA_IF_MATCH(name) \
- if (object->Is##name()) { \
- return new (broker->zone()) \
- name##Data(broker, Handle<name>::cast(object), type); \
+void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) {
+ SerializeRecursive(broker, kMaxFastLiteralDepth);
+}
+
+void JSObjectData::SerializeElements(JSHeapBroker* broker) {
+ if (serialized_elements_) return;
+ serialized_elements_ = true;
+
+ TraceScope tracer(broker, this, "JSObjectData::SerializeElements");
+ Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
+ Handle<FixedArrayBase> elements_object(boilerplate->elements(),
+ broker->isolate());
+ DCHECK_NULL(elements_);
+ elements_ = broker->GetOrCreateData(elements_object)->AsFixedArrayBase();
+}
+
+void MapData::SerializeConstructorOrBackpointer(JSHeapBroker* broker) {
+ if (serialized_constructor_or_backpointer_) return;
+ serialized_constructor_or_backpointer_ = true;
+
+ TraceScope tracer(broker, this, "MapData::SerializeConstructorOrBackpointer");
+ Handle<Map> map = Handle<Map>::cast(object());
+ DCHECK_NULL(constructor_or_backpointer_);
+ constructor_or_backpointer_ =
+ broker->GetOrCreateData(map->constructor_or_backpointer());
+}
+
+void MapData::SerializePrototype(JSHeapBroker* broker) {
+ if (serialized_prototype_) return;
+ serialized_prototype_ = true;
+
+ TraceScope tracer(broker, this, "MapData::SerializePrototype");
+ Handle<Map> map = Handle<Map>::cast(object());
+ DCHECK_NULL(prototype_);
+ prototype_ = broker->GetOrCreateData(map->prototype());
+}
+
+void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
+ if (serialized_own_descriptors_) return;
+ serialized_own_descriptors_ = true;
+
+ TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptors");
+ Handle<Map> map = Handle<Map>::cast(object());
+
+ DCHECK_NULL(instance_descriptors_);
+ instance_descriptors_ =
+ broker->GetOrCreateData(map->instance_descriptors())->AsDescriptorArray();
+
+ int const number_of_own = map->NumberOfOwnDescriptors();
+ ZoneVector<PropertyDescriptor>& contents = instance_descriptors_->contents();
+ int const current_size = static_cast<int>(contents.size());
+ if (number_of_own <= current_size) return;
+
+ Isolate* const isolate = broker->isolate();
+ auto descriptors =
+ Handle<DescriptorArray>::cast(instance_descriptors_->object());
+ CHECK_EQ(*descriptors, map->instance_descriptors());
+ contents.reserve(number_of_own);
+
+ // Copy the new descriptors.
+ for (int i = current_size; i < number_of_own; ++i) {
+ PropertyDescriptor d;
+ d.key = broker->GetOrCreateData(descriptors->GetKey(i))->AsName();
+ d.details = descriptors->GetDetails(i);
+ if (d.details.location() == kField) {
+ d.field_index = FieldIndex::ForDescriptor(*map, i);
+ d.field_owner =
+ broker->GetOrCreateData(map->FindFieldOwner(isolate, i))->AsMap();
+ d.field_type = broker->GetOrCreateData(descriptors->GetFieldType(i));
+ d.is_unboxed_double_field = map->IsUnboxedDoubleField(d.field_index);
+ // Recurse.
+ }
+ contents.push_back(d);
+ }
+ CHECK_EQ(number_of_own, contents.size());
+
+ // Recurse on the new owner maps.
+ for (int i = current_size; i < number_of_own; ++i) {
+ const PropertyDescriptor& d = contents[i];
+ if (d.details.location() == kField) {
+ CHECK_LE(
+ Handle<Map>::cast(d.field_owner->object())->NumberOfOwnDescriptors(),
+ number_of_own);
+ d.field_owner->SerializeOwnDescriptors(broker);
+ }
}
- HEAP_BROKER_OBJECT_LIST(RETURN_CREATE_DATA_IF_MATCH)
-#undef RETURN_CREATE_DATA_IF_MATCH
- UNREACHABLE();
+
+ broker->Trace("Copied %zu descriptors into %p (%zu total).\n",
+ number_of_own - current_size, instance_descriptors_,
+ number_of_own);
}
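// [Editorial sketch, not part of this commit. SerializeOwnDescriptors above
// appends to a DescriptorArrayData that several maps may share: each map
// copies only the descriptors beyond the current size, so repeated calls
// from maps with overlapping descriptor prefixes do no duplicate work.]
#include <vector>

struct Descriptor { int key; };

// Extend the shared contents up to `number_of_own`, copying only the suffix
// that no previous caller has serialized yet.
void SerializeOwn(std::vector<Descriptor>& shared_contents,
                  const std::vector<Descriptor>& source, int number_of_own) {
  int current_size = static_cast<int>(shared_contents.size());
  if (number_of_own <= current_size) return;  // Prefix already copied.
  for (int i = current_size; i < number_of_own; ++i)
    shared_contents.push_back(source[i]);
}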
-bool ObjectRef::equals(const ObjectRef& other) const {
- return data_ == other.data_;
+void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
+ if (serialized_as_boilerplate_) return;
+ serialized_as_boilerplate_ = true;
+
+ TraceScope tracer(broker, this, "JSObjectData::SerializeRecursive");
+ Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
+
+ // We only serialize boilerplates that pass the IsInlinableFastLiteral
+ // check, so we only do a sanity check on the depth here.
+ CHECK_GT(depth, 0);
+ CHECK(!boilerplate->map()->is_deprecated());
+
+ // Serialize the elements.
+ Isolate* const isolate = broker->isolate();
+ Handle<FixedArrayBase> elements_object(boilerplate->elements(), isolate);
+
+ // Boilerplates need special serialization - we need to make sure COW arrays
+ // are tenured. Boilerplate objects should only be reachable from their
+ // allocation site, so it is safe to assume that the elements have not been
+ // serialized yet.
+
+ bool const empty_or_cow =
+ elements_object->length() == 0 ||
+ elements_object->map() == ReadOnlyRoots(isolate).fixed_cow_array_map();
+ if (empty_or_cow) {
+ // We need to make sure copy-on-write elements are tenured.
+ if (Heap::InNewSpace(*elements_object)) {
+ elements_object = isolate->factory()->CopyAndTenureFixedCOWArray(
+ Handle<FixedArray>::cast(elements_object));
+ boilerplate->set_elements(*elements_object);
+ }
+ cow_or_empty_elements_tenured_ = true;
+ }
+
+ DCHECK_NULL(elements_);
+ elements_ = broker->GetOrCreateData(elements_object)->AsFixedArrayBase();
+
+ if (empty_or_cow) {
+    // No need to do anything here. Empty or copy-on-write elements
+    // do not need to be serialized, because all we need to store is the
+    // reference to the elements object itself.
+ } else if (boilerplate->HasSmiOrObjectElements()) {
+ elements_->AsFixedArray()->SerializeContents(broker);
+ Handle<FixedArray> fast_elements =
+ Handle<FixedArray>::cast(elements_object);
+ int length = elements_object->length();
+ for (int i = 0; i < length; i++) {
+ Handle<Object> value(fast_elements->get(i), isolate);
+ if (value->IsJSObject()) {
+ ObjectData* value_data = broker->GetOrCreateData(value);
+ value_data->AsJSObject()->SerializeRecursive(broker, depth - 1);
+ }
+ }
+ } else {
+ CHECK(boilerplate->HasDoubleElements());
+ CHECK_LE(elements_object->Size(), kMaxRegularHeapObjectSize);
+ elements_->AsFixedDoubleArray()->SerializeContents(broker);
+ }
+
+ // TODO(turbofan): Do we want to support out-of-object properties?
+ CHECK(boilerplate->HasFastProperties() &&
+ boilerplate->property_array()->length() == 0);
+ CHECK_EQ(inobject_fields_.size(), 0u);
+
+ // Check the in-object properties.
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map()->instance_descriptors(), isolate);
+ int const limit = boilerplate->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
+
+ FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+ // Make sure {field_index} agrees with {inobject_properties} on the index of
+ // this field.
+ DCHECK_EQ(field_index.property_index(),
+ static_cast<int>(inobject_fields_.size()));
+ if (boilerplate->IsUnboxedDoubleField(field_index)) {
+ double value = boilerplate->RawFastDoublePropertyAt(field_index);
+ inobject_fields_.push_back(JSObjectField{value});
+ } else {
+ Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
+ isolate);
+ ObjectData* value_data = broker->GetOrCreateData(value);
+ if (value->IsJSObject()) {
+ value_data->AsJSObject()->SerializeRecursive(broker, depth - 1);
+ }
+ inobject_fields_.push_back(JSObjectField{value_data});
+ }
+ }
+ broker->Trace("Copied %zu in-object fields.\n", inobject_fields_.size());
+
+ map()->SerializeOwnDescriptors(broker);
+
+ if (IsJSArray()) AsJSArray()->Serialize(broker);
}
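// [Editorial sketch, not part of this commit. SerializeRecursive above walks
// a boilerplate object graph with an explicit depth budget; eligibility was
// already established by IsInlinableFastLiteral, so the depth check is only
// a CHECK-style sanity limit. A generic model of depth-bounded traversal:]
#include <cassert>
#include <vector>

struct Obj {
  std::vector<Obj*> fields;
  bool serialized = false;
};

void SerializeRecursive(Obj* o, int depth) {
  if (o->serialized) return;
  o->serialized = true;
  assert(depth > 0);  // Mirrors CHECK_GT(depth, 0) above.
  for (Obj* field : o->fields) SerializeRecursive(field, depth - 1);
}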
-StringRef ObjectRef::TypeOf() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- return StringRef(broker(),
- Object::TypeOf(broker()->isolate(), object<Object>()));
+void JSRegExpData::SerializeAsRegExpBoilerplate(JSHeapBroker* broker) {
+ if (serialized_as_reg_exp_boilerplate_) return;
+ serialized_as_reg_exp_boilerplate_ = true;
+
+ TraceScope tracer(broker, this, "JSRegExpData::SerializeAsRegExpBoilerplate");
+ Handle<JSRegExp> boilerplate = Handle<JSRegExp>::cast(object());
+
+ SerializeElements(broker);
+
+ raw_properties_or_hash_ =
+ broker->GetOrCreateData(boilerplate->raw_properties_or_hash());
+ data_ = broker->GetOrCreateData(boilerplate->data());
+ source_ = broker->GetOrCreateData(boilerplate->source());
+ flags_ = broker->GetOrCreateData(boilerplate->flags());
+ last_index_ = broker->GetOrCreateData(boilerplate->last_index());
+}
+
+bool ObjectRef::equals(const ObjectRef& other) const {
+ return data_ == other.data_;
}
Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
-base::Optional<ContextRef> ContextRef::previous() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- Context* previous = object<Context>()->previous();
- if (previous == nullptr) return base::Optional<ContextRef>();
- return ContextRef(broker(), handle(previous, broker()->isolate()));
+ContextRef ContextRef::previous() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ return ContextRef(
+ broker(), handle(object<Context>()->previous(), broker()->isolate()));
+ }
+ return ContextRef(broker(), data()->AsContext()->previous());
}
+// Not needed for TypedLowering.
ObjectRef ContextRef::get(int index) const {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
@@ -552,17 +1333,24 @@ ObjectRef ContextRef::get(int index) const {
return ObjectRef(broker(), value);
}
-JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* zone)
+JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone)
: isolate_(isolate),
- zone_(zone),
- refs_(zone),
- mode_(FLAG_concurrent_compiler_frontend ? kSerializing : kDisabled) {
- Trace("%s", "Constructing heap broker.\n");
+ broker_zone_(broker_zone),
+ current_zone_(broker_zone),
+ refs_(new (zone())
+ RefsMap(kMinimalRefsBucketCount, AddressMatcher(), zone())) {
+ // Note that this initialization of the refs_ pointer with the minimal
+ // initial capacity is redundant in the normal use case (concurrent
+ // compilation enabled, standard objects to be serialized), as the map
+  // is going to be replaced immediately with a larger-capacity one.
+  // It doesn't seem to affect performance in a noticeable way, though.
+ Trace("Constructing heap broker.\n");
}
void JSHeapBroker::Trace(const char* format, ...) const {
if (FLAG_trace_heap_broker) {
PrintF("[%p] ", this);
+ for (unsigned i = 0; i < tracing_indentation_; ++i) PrintF(" ");
va_list arguments;
va_start(arguments, format);
base::OS::VPrint(format, arguments);
@@ -570,135 +1358,241 @@ void JSHeapBroker::Trace(const char* format, ...) const {
}
}
+void JSHeapBroker::StartSerializing() {
+ CHECK_EQ(mode_, kDisabled);
+ Trace("Starting serialization.\n");
+ mode_ = kSerializing;
+ refs_->Clear();
+}
+
+void JSHeapBroker::StopSerializing() {
+ CHECK_EQ(mode_, kSerializing);
+ Trace("Stopping serialization.\n");
+ mode_ = kSerialized;
+}
+
+void JSHeapBroker::Retire() {
+ CHECK_EQ(mode_, kSerialized);
+ Trace("Retiring.\n");
+ mode_ = kRetired;
+}
+
bool JSHeapBroker::SerializingAllowed() const {
return mode() == kSerializing ||
(!FLAG_strict_heap_broker && mode() == kSerialized);
}
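For illustration, the transition methods above, together with the CHECK_EQs
they contain, define a one-way state machine over BrokerMode. A minimal
sketch of the intended call sequence; the pipeline code that actually drives
the broker is assumed here, not part of this diff:

    JSHeapBroker* broker = new (zone) JSHeapBroker(isolate, zone);
    // constructed in kDisabled mode
    broker->StartSerializing();           // kDisabled    -> kSerializing
    broker->SerializeStandardObjects();   // requires kSerializing
    broker->StopSerializing();            // kSerializing -> kSerialized
    // ... compilation phases read only the serialized data ...
    broker->Retire();                     // kSerialized  -> kRetired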
-void JSHeapBroker::SerializeStandardObjects() {
- Trace("Serializing standard objects.\n");
+void JSHeapBroker::SetNativeContextRef() {
+ native_context_ = NativeContextRef(this, isolate()->native_context());
+}
+
+bool IsShareable(Handle<Object> object, Isolate* isolate) {
+ Builtins* const b = isolate->builtins();
+
+ int index;
+ RootIndex root_index;
+ return (object->IsHeapObject() &&
+ b->IsBuiltinHandle(Handle<HeapObject>::cast(object), &index)) ||
+ isolate->heap()->IsRootHandle(object, &root_index);
+}
+
+void JSHeapBroker::SerializeShareableObjects() {
+ PerIsolateCompilerCache::Setup(isolate());
+ compiler_cache_ = isolate()->compiler_cache();
+
+ if (compiler_cache_->HasSnapshot()) {
+ RefsMap* snapshot = compiler_cache_->GetSnapshot();
+
+ refs_ = new (zone()) RefsMap(snapshot, zone());
+ return;
+ }
+
+ TraceScope tracer(
+ this, "JSHeapBroker::SerializeShareableObjects (building snapshot)");
+
+ refs_ =
+ new (zone()) RefsMap(kInitialRefsBucketCount, AddressMatcher(), zone());
+
+ current_zone_ = compiler_cache_->zone();
Builtins* const b = isolate()->builtins();
- Factory* const f = isolate()->factory();
+ {
+ Builtins::Name builtins[] = {
+ Builtins::kAllocateInNewSpace,
+ Builtins::kAllocateInOldSpace,
+ Builtins::kArgumentsAdaptorTrampoline,
+ Builtins::kArrayConstructorImpl,
+ Builtins::kCallFunctionForwardVarargs,
+ Builtins::kCallFunction_ReceiverIsAny,
+ Builtins::kCallFunction_ReceiverIsNotNullOrUndefined,
+ Builtins::kCallFunction_ReceiverIsNullOrUndefined,
+ Builtins::kConstructFunctionForwardVarargs,
+ Builtins::kForInFilter,
+ Builtins::kJSBuiltinsConstructStub,
+ Builtins::kJSConstructStubGeneric,
+ Builtins::kStringAdd_CheckNone,
+ Builtins::kStringAdd_ConvertLeft,
+ Builtins::kStringAdd_ConvertRight,
+ Builtins::kToNumber,
+ Builtins::kToObject,
+ };
+ for (auto id : builtins) {
+ GetOrCreateData(b->builtin_handle(id));
+ }
+ }
+ for (int32_t id = 0; id < Builtins::builtin_count; ++id) {
+ if (Builtins::KindOf(id) == Builtins::TFJ) {
+ GetOrCreateData(b->builtin_handle(id));
+ }
+ }
- // Stuff used by JSGraph:
- GetOrCreateData(f->empty_fixed_array());
+ for (RefsMap::Entry* p = refs_->Start(); p != nullptr; p = refs_->Next(p)) {
+ CHECK(IsShareable(p->value->object(), isolate()));
+ }
- // Stuff used by JSCreateLowering:
+  // TODO(mslekova): Serialize root objects (from factory).
+ compiler_cache()->SetSnapshot(refs_);
+ current_zone_ = broker_zone_;
+}
+
+void JSHeapBroker::SerializeStandardObjects() {
+ if (mode() == kDisabled) return;
+ CHECK_EQ(mode(), kSerializing);
+
+ SerializeShareableObjects();
+
+ TraceScope tracer(this, "JSHeapBroker::SerializeStandardObjects");
+
+ SetNativeContextRef();
+ native_context().Serialize();
+
+ Factory* const f = isolate()->factory();
+
+ // Maps, strings, oddballs
+ GetOrCreateData(f->arguments_marker_map());
+ GetOrCreateData(f->bigint_string());
GetOrCreateData(f->block_context_map());
+ GetOrCreateData(f->boolean_map());
+ GetOrCreateData(f->boolean_string());
GetOrCreateData(f->catch_context_map());
+ GetOrCreateData(f->empty_fixed_array());
+ GetOrCreateData(f->empty_string());
GetOrCreateData(f->eval_context_map());
+ GetOrCreateData(f->false_string());
+ GetOrCreateData(f->false_value());
GetOrCreateData(f->fixed_array_map());
+ GetOrCreateData(f->fixed_cow_array_map());
GetOrCreateData(f->fixed_double_array_map());
GetOrCreateData(f->function_context_map());
+ GetOrCreateData(f->function_string());
+ GetOrCreateData(f->heap_number_map());
+ GetOrCreateData(f->length_string());
GetOrCreateData(f->many_closures_cell_map());
+ GetOrCreateData(f->minus_zero_value());
GetOrCreateData(f->mutable_heap_number_map());
GetOrCreateData(f->name_dictionary_map());
+ GetOrCreateData(f->NaN_string());
+ GetOrCreateData(f->null_map());
+ GetOrCreateData(f->null_string());
+ GetOrCreateData(f->null_value());
+ GetOrCreateData(f->number_string());
+ GetOrCreateData(f->object_string());
GetOrCreateData(f->one_pointer_filler_map());
+ GetOrCreateData(f->optimized_out());
+ GetOrCreateData(f->optimized_out_map());
+ GetOrCreateData(f->property_array_map());
GetOrCreateData(f->sloppy_arguments_elements_map());
- GetOrCreateData(f->with_context_map());
-
- // Stuff used by TypedOptimization:
- // Strings produced by typeof:
- GetOrCreateData(f->boolean_string());
- GetOrCreateData(f->number_string());
+ GetOrCreateData(f->stale_register());
+ GetOrCreateData(f->stale_register_map());
GetOrCreateData(f->string_string());
- GetOrCreateData(f->bigint_string());
GetOrCreateData(f->symbol_string());
+ GetOrCreateData(f->termination_exception_map());
+ GetOrCreateData(f->the_hole_map());
+ GetOrCreateData(f->the_hole_value());
+ GetOrCreateData(f->true_string());
+ GetOrCreateData(f->true_value());
+ GetOrCreateData(f->undefined_map());
GetOrCreateData(f->undefined_string());
- GetOrCreateData(f->object_string());
- GetOrCreateData(f->function_string());
-
- // Stuff used by JSTypedLowering:
- GetOrCreateData(f->length_string());
- Builtins::Name builtins[] = {
- Builtins::kArgumentsAdaptorTrampoline,
- Builtins::kCallFunctionForwardVarargs,
- Builtins::kStringAdd_CheckNone_NotTenured,
- Builtins::kStringAdd_CheckNone_Tenured,
- Builtins::kStringAdd_ConvertLeft_NotTenured,
- Builtins::kStringAdd_ConvertRight_NotTenured,
- };
- for (auto id : builtins) {
- GetOrCreateData(b->builtin_handle(id));
- }
- for (int32_t id = 0; id < Builtins::builtin_count; ++id) {
- if (Builtins::KindOf(id) == Builtins::TFJ) {
- GetOrCreateData(b->builtin_handle(id));
- }
- }
+ GetOrCreateData(f->undefined_value());
+ GetOrCreateData(f->uninitialized_map());
+ GetOrCreateData(f->with_context_map());
+ GetOrCreateData(f->zero_string());
+
+ // Property cells
+ GetOrCreateData(f->array_buffer_neutering_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
+ GetOrCreateData(f->array_iterator_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
+ GetOrCreateData(f->array_species_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
+ GetOrCreateData(f->no_elements_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
+ GetOrCreateData(f->promise_hook_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
+ GetOrCreateData(f->promise_species_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
+ GetOrCreateData(f->promise_then_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
+
+ // CEntry stub
+ GetOrCreateData(
+ CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, true));
Trace("Finished serializing standard objects.\n");
}
-HeapObjectType JSHeapBroker::HeapObjectTypeFromMap(Map* map) const {
- AllowHandleDereference allow_handle_dereference;
- OddballType oddball_type = OddballType::kNone;
- if (map->instance_type() == ODDBALL_TYPE) {
- ReadOnlyRoots roots(isolate_);
- if (map == roots.undefined_map()) {
- oddball_type = OddballType::kUndefined;
- } else if (map == roots.null_map()) {
- oddball_type = OddballType::kNull;
- } else if (map == roots.boolean_map()) {
- oddball_type = OddballType::kBoolean;
- } else if (map == roots.the_hole_map()) {
- oddball_type = OddballType::kHole;
- } else if (map == roots.uninitialized_map()) {
- oddball_type = OddballType::kUninitialized;
- } else {
- oddball_type = OddballType::kOther;
- DCHECK(map == roots.termination_exception_map() ||
- map == roots.arguments_marker_map() ||
- map == roots.optimized_out_map() ||
- map == roots.stale_register_map());
- }
- }
- HeapObjectType::Flags flags(0);
- if (map->is_undetectable()) flags |= HeapObjectType::kUndetectable;
- if (map->is_callable()) flags |= HeapObjectType::kCallable;
-
- return HeapObjectType(map->instance_type(), flags, oddball_type);
-}
-
ObjectData* JSHeapBroker::GetData(Handle<Object> object) const {
- auto it = refs_.find(object.address());
- return it != refs_.end() ? it->second : nullptr;
+ RefsMap::Entry* entry = refs_->Lookup(object.address());
+ return entry ? entry->value : nullptr;
}
+// clang-format off
ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object) {
CHECK(SerializingAllowed());
- ObjectData* data = GetData(object);
- if (data == nullptr) {
+ RefsMap::Entry* entry = refs_->LookupOrInsert(object.address(), zone());
+ ObjectData** data_storage = &(entry->value);
+ if (*data_storage == nullptr) {
// TODO(neis): Remove these Allow* once we serialize everything upfront.
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
- data = ObjectData::Serialize(this, object);
+ if (object->IsSmi()) {
+ new (zone()) ObjectData(this, data_storage, object, kSmi);
+#define CREATE_DATA_IF_MATCH(name) \
+ } else if (object->Is##name()) { \
+ new (zone()) name##Data(this, data_storage, Handle<name>::cast(object));
+ HEAP_BROKER_OBJECT_LIST(CREATE_DATA_IF_MATCH)
+#undef CREATE_DATA_IF_MATCH
+ } else {
+ UNREACHABLE();
+ }
}
- CHECK_NOT_NULL(data);
- return data;
+ CHECK_NOT_NULL(*data_storage);
+ return (*data_storage);
}
+// clang-format on
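To make the dispatch above concrete: for the HEAP_BROKER_OBJECT_LIST entry
V(JSArray), CREATE_DATA_IF_MATCH expands to a branch of this shape (a sketch
of the preprocessor output):

    } else if (object->IsJSArray()) {
      new (zone()) JSArrayData(this, data_storage, Handle<JSArray>::cast(object));

Note that the result of the placement-new is discarded, as in the kSmi branch:
each XYZData constructor is evidently expected to register itself in
*data_storage, which the trailing CHECK_NOT_NULL(*data_storage) then verifies.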
-void JSHeapBroker::AddData(Handle<Object> object, ObjectData* data) {
- Trace("Creating data %p for handle %" V8PRIuPTR " (", data, object.address());
- if (FLAG_trace_heap_broker) {
- object->ShortPrint();
- PrintF(")\n");
- }
- CHECK_NOT_NULL(isolate()->handle_scope_data()->canonical_scope);
- CHECK(refs_.insert({object.address(), data}).second);
+ObjectData* JSHeapBroker::GetOrCreateData(Object* object) {
+ return GetOrCreateData(handle(object, isolate()));
}
#define DEFINE_IS_AND_AS(Name) \
bool ObjectRef::Is##Name() const { return data()->Is##Name(); } \
Name##Ref ObjectRef::As##Name() const { \
DCHECK(Is##Name()); \
- return Name##Ref(data()); \
+ return Name##Ref(broker(), data()); \
}
HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS
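Expanded for a single list entry, say V(Map), the macro above produces:

    bool ObjectRef::IsMap() const { return data()->IsMap(); }
    MapRef ObjectRef::AsMap() const {
      DCHECK(IsMap());
      return MapRef(broker(), data());
    }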
-bool ObjectRef::IsSmi() const { return data()->is_smi; }
+bool ObjectRef::IsSmi() const { return data()->is_smi(); }
int ObjectRef::AsSmi() const {
DCHECK(IsSmi());
@@ -706,25 +1600,22 @@ int ObjectRef::AsSmi() const {
return object<Smi>()->value();
}
-HeapObjectType HeapObjectRef::type() const {
+base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
- return broker()->HeapObjectTypeFromMap(object<HeapObject>()->map());
- } else {
- return data()->AsHeapObject()->type;
- }
-}
-
-base::Optional<MapRef> HeapObjectRef::TryGetObjectCreateMap() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- Handle<Map> instance_map;
- if (Map::TryGetObjectCreateMap(broker()->isolate(), object<HeapObject>())
- .ToHandle(&instance_map)) {
- return MapRef(broker(), instance_map);
- } else {
- return base::Optional<MapRef>();
+ AllowHeapAllocation heap_allocation;
+ Handle<Map> instance_map;
+ if (Map::TryGetObjectCreateMap(broker()->isolate(), object<HeapObject>())
+ .ToHandle(&instance_map)) {
+ return MapRef(broker(), instance_map);
+ } else {
+ return base::Optional<MapRef>();
+ }
}
+ MapData* map_data = data()->AsJSObject()->object_create_map();
+ return map_data != nullptr ? MapRef(broker(), map_data)
+ : base::Optional<MapRef>();
}
base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
@@ -734,26 +1625,28 @@ base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
AllowHandleDereference allow_handle_dereference;
return MapRef(broker(), Map::AsElementsKind(broker()->isolate(),
object<Map>(), kind));
- } else {
- if (kind == elements_kind()) return *this;
- const ZoneVector<MapData*>& elements_kind_generalizations =
- data()->AsMap()->elements_kind_generalizations();
- for (auto data : elements_kind_generalizations) {
- MapRef map(data);
- if (map.elements_kind() == kind) return map;
- }
- return base::Optional<MapRef>();
}
+ if (kind == elements_kind()) return *this;
+ const ZoneVector<MapData*>& elements_kind_generalizations =
+ data()->AsMap()->elements_kind_generalizations();
+ for (auto data : elements_kind_generalizations) {
+ MapRef map(broker(), data);
+ if (map.elements_kind() == kind) return map;
+ }
+ return base::Optional<MapRef>();
}
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
- AllowHandleDereference allow_handle_dereference;
- AllowHandleAllocation handle_allocation;
-
- return object<JSFunction>()->ComputeInstanceSizeWithMinSlack(
- broker()->isolate());
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation handle_allocation;
+ return object<JSFunction>()->ComputeInstanceSizeWithMinSlack(
+ broker()->isolate());
+ }
+ return data()->AsJSFunction()->initial_map_instance_size_with_min_slack();
}
+// Not needed for TypedLowering.
base::Optional<ScriptContextTableRef::LookupResult>
ScriptContextTableRef::lookup(const NameRef& name) const {
AllowHandleAllocation handle_allocation;
@@ -773,127 +1666,192 @@ ScriptContextTableRef::lookup(const NameRef& name) const {
return result;
}
-OddballType ObjectRef::oddball_type() const {
- return IsSmi() ? OddballType::kNone : AsHeapObject().type().oddball_type();
+OddballType MapRef::oddball_type() const {
+ if (instance_type() != ODDBALL_TYPE) {
+ return OddballType::kNone;
+ }
+ Factory* f = broker()->isolate()->factory();
+ if (equals(MapRef(broker(), f->undefined_map()))) {
+ return OddballType::kUndefined;
+ }
+ if (equals(MapRef(broker(), f->null_map()))) {
+ return OddballType::kNull;
+ }
+ if (equals(MapRef(broker(), f->boolean_map()))) {
+ return OddballType::kBoolean;
+ }
+ if (equals(MapRef(broker(), f->the_hole_map()))) {
+ return OddballType::kHole;
+ }
+ if (equals(MapRef(broker(), f->uninitialized_map()))) {
+ return OddballType::kUninitialized;
+ }
+ DCHECK(equals(MapRef(broker(), f->termination_exception_map())) ||
+ equals(MapRef(broker(), f->arguments_marker_map())) ||
+ equals(MapRef(broker(), f->optimized_out_map())) ||
+ equals(MapRef(broker(), f->stale_register_map())));
+ return OddballType::kOther;
}
ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
- Handle<Object> value(object<FeedbackVector>()->Get(slot)->ToObject(),
+ Handle<Object> value(object<FeedbackVector>()->Get(slot)->cast<Object>(),
broker()->isolate());
return ObjectRef(broker(), value);
}
int i = FeedbackVector::GetIndex(slot);
- return ObjectRef(data()->AsFeedbackVector()->feedback().at(i));
-}
-
-bool JSObjectRef::IsUnboxedDoubleField(FieldIndex index) const {
- AllowHandleDereference handle_dereference;
- return object<JSObject>()->IsUnboxedDoubleField(index);
+ return ObjectRef(broker(), data()->AsFeedbackVector()->feedback().at(i));
}
double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const {
- AllowHandleDereference handle_dereference;
- return object<JSObject>()->RawFastDoublePropertyAt(index);
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference handle_dereference;
+ return object<JSObject>()->RawFastDoublePropertyAt(index);
+ }
+ JSObjectData* object_data = data()->AsJSObject();
+ CHECK(index.is_inobject());
+ return object_data->GetInobjectField(index.property_index()).AsDouble();
}
ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSObject>()->RawFastPropertyAt(index),
- broker()->isolate()));
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ return ObjectRef(broker(),
+ handle(object<JSObject>()->RawFastPropertyAt(index),
+ broker()->isolate()));
+ }
+ JSObjectData* object_data = data()->AsJSObject();
+ CHECK(index.is_inobject());
+ return ObjectRef(
+ broker(),
+ object_data->GetInobjectField(index.property_index()).AsObject());
}
-
bool AllocationSiteRef::IsFastLiteral() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
- AllowHeapAllocation
- allow_heap_allocation; // This is needed for TryMigrateInstance.
+ AllowHeapAllocation allow_heap_allocation; // For TryMigrateInstance.
AllowHandleAllocation allow_handle_allocation;
AllowHandleDereference allow_handle_dereference;
return IsInlinableFastLiteral(
handle(object<AllocationSite>()->boilerplate(), broker()->isolate()));
- } else {
- return data()->AsAllocationSite()->boilerplate != nullptr;
}
+ return data()->AsAllocationSite()->IsFastLiteral();
}
void JSObjectRef::EnsureElementsTenured() {
- // TODO(jarin) Eventually, we will pretenure the boilerplates before
- // the compilation job starts.
- AllowHandleAllocation allow_handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- AllowHeapAllocation allow_heap_allocation;
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ AllowHeapAllocation allow_heap_allocation;
- Handle<FixedArrayBase> object_elements = elements().object<FixedArrayBase>();
- if (Heap::InNewSpace(*object_elements)) {
- // If we would like to pretenure a fixed cow array, we must ensure that
- // the array is already in old space, otherwise we'll create too many
- // old-to-new-space pointers (overflowing the store buffer).
- object_elements =
- broker()->isolate()->factory()->CopyAndTenureFixedCOWArray(
- Handle<FixedArray>::cast(object_elements));
- object<JSObject>()->set_elements(*object_elements);
+ Handle<FixedArrayBase> object_elements =
+ elements().object<FixedArrayBase>();
+ if (Heap::InNewSpace(*object_elements)) {
+ // If we would like to pretenure a fixed cow array, we must ensure that
+ // the array is already in old space, otherwise we'll create too many
+ // old-to-new-space pointers (overflowing the store buffer).
+ object_elements =
+ broker()->isolate()->factory()->CopyAndTenureFixedCOWArray(
+ Handle<FixedArray>::cast(object_elements));
+ object<JSObject>()->set_elements(*object_elements);
+ }
+ return;
}
+ CHECK(data()->AsJSObject()->cow_or_empty_elements_tenured());
}
-FieldIndex MapRef::GetFieldIndexFor(int i) const {
- AllowHandleDereference allow_handle_dereference;
- return FieldIndex::ForDescriptor(*object<Map>(), i);
+FieldIndex MapRef::GetFieldIndexFor(int descriptor_index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return FieldIndex::ForDescriptor(*object<Map>(), descriptor_index);
+ }
+ DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
+ return descriptors->contents().at(descriptor_index).field_index;
}
int MapRef::GetInObjectPropertyOffset(int i) const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->GetInObjectPropertyOffset(i);
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->GetInObjectPropertyOffset(i);
+ }
+ return (GetInObjectPropertiesStartInWords() + i) * kPointerSize;
}
-PropertyDetails MapRef::GetPropertyDetails(int i) const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->instance_descriptors()->GetDetails(i);
+PropertyDetails MapRef::GetPropertyDetails(int descriptor_index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->instance_descriptors()->GetDetails(descriptor_index);
+ }
+ DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
+ return descriptors->contents().at(descriptor_index).details;
}
-NameRef MapRef::GetPropertyKey(int i) const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return NameRef(broker(),
- handle(object<Map>()->instance_descriptors()->GetKey(i),
- broker()->isolate()));
+NameRef MapRef::GetPropertyKey(int descriptor_index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return NameRef(
+ broker(),
+ handle(object<Map>()->instance_descriptors()->GetKey(descriptor_index),
+ broker()->isolate()));
+ }
+ DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
+ return NameRef(broker(), descriptors->contents().at(descriptor_index).key);
}
bool MapRef::IsFixedCowArrayMap() const {
- AllowHandleDereference allow_handle_dereference;
- return *object<Map>() ==
- ReadOnlyRoots(broker()->isolate()).fixed_cow_array_map();
+ Handle<Map> fixed_cow_array_map =
+ ReadOnlyRoots(broker()->isolate()).fixed_cow_array_map_handle();
+ return equals(MapRef(broker(), fixed_cow_array_map));
}
-MapRef MapRef::FindFieldOwner(int descriptor) const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- Handle<Map> owner(
- object<Map>()->FindFieldOwner(broker()->isolate(), descriptor),
- broker()->isolate());
- return MapRef(broker(), owner);
+MapRef MapRef::FindFieldOwner(int descriptor_index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ Handle<Map> owner(
+ object<Map>()->FindFieldOwner(broker()->isolate(), descriptor_index),
+ broker()->isolate());
+ return MapRef(broker(), owner);
+ }
+ DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
+ return MapRef(broker(),
+ descriptors->contents().at(descriptor_index).field_owner);
}
-ObjectRef MapRef::GetFieldType(int descriptor) const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- Handle<FieldType> field_type(
- object<Map>()->instance_descriptors()->GetFieldType(descriptor),
- broker()->isolate());
- return ObjectRef(broker(), field_type);
+ObjectRef MapRef::GetFieldType(int descriptor_index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ Handle<FieldType> field_type(
+ object<Map>()->instance_descriptors()->GetFieldType(descriptor_index),
+ broker()->isolate());
+ return ObjectRef(broker(), field_type);
+ }
+ DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
+ return ObjectRef(broker(),
+ descriptors->contents().at(descriptor_index).field_type);
+}
+
+bool MapRef::IsUnboxedDoubleField(int descriptor_index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Map>()->IsUnboxedDoubleField(
+ FieldIndex::ForDescriptor(*object<Map>(), descriptor_index));
+ }
+ DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
+ return descriptors->contents().at(descriptor_index).is_unboxed_double_field;
}
uint16_t StringRef::GetFirstChar() {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
return object<String>()->Get(0);
- } else {
- return data()->AsString()->first_char;
}
+ return data()->AsString()->first_char();
}
base::Optional<double> StringRef::ToNumber() {
@@ -905,31 +1863,35 @@ base::Optional<double> StringRef::ToNumber() {
return StringToDouble(broker()->isolate(),
broker()->isolate()->unicode_cache(),
object<String>(), flags);
- } else {
- return data()->AsString()->to_number;
}
-}
-
-bool FixedArrayRef::is_the_hole(int i) const {
- AllowHandleDereference allow_handle_dereference;
- return object<FixedArray>()->is_the_hole(broker()->isolate(), i);
+ return data()->AsString()->to_number();
}
ObjectRef FixedArrayRef::get(int i) const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<FixedArray>()->get(i), broker()->isolate()));
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return ObjectRef(broker(),
+ handle(object<FixedArray>()->get(i), broker()->isolate()));
+ }
+ return ObjectRef(broker(), data()->AsFixedArray()->Get(i));
}
bool FixedDoubleArrayRef::is_the_hole(int i) const {
- AllowHandleDereference allow_handle_dereference;
- return object<FixedDoubleArray>()->is_the_hole(i);
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object<FixedDoubleArray>()->is_the_hole(i);
+ }
+ return data()->AsFixedDoubleArray()->Get(i).is_hole_nan();
}
double FixedDoubleArrayRef::get_scalar(int i) const {
- AllowHandleDereference allow_handle_dereference;
- return object<FixedDoubleArray>()->get_scalar(i);
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object<FixedDoubleArray>()->get_scalar(i);
+ }
+ CHECK(!data()->AsFixedDoubleArray()->Get(i).is_hole_nan());
+ return data()->AsFixedDoubleArray()->Get(i).get_scalar();
}
#define IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name) \
@@ -939,54 +1901,34 @@ double FixedDoubleArrayRef::get_scalar(int i) const {
return object<holder>()->name(); \
}
-// Macros for definining a const getter that, depending on the broker mode,
-// either looks into the handle or into the serialized data. The first one is
-// used for the rare case of a XYZRef class that does not have a corresponding
-// XYZ class in objects.h. The second one is used otherwise.
-#define BIMODAL_ACCESSOR(holder, result, name) \
- result##Ref holder##Ref::name() const { \
- if (broker()->mode() == JSHeapBroker::kDisabled) { \
- AllowHandleAllocation handle_allocation; \
- AllowHandleDereference allow_handle_dereference; \
- return result##Ref( \
- broker(), handle(object<holder>()->name(), broker()->isolate())); \
- } else { \
- return result##Ref(data()->As##holder()->name); \
- } \
- }
-
-// Like HANDLE_ACCESSOR except that the result type is not an XYZRef.
-#define BIMODAL_ACCESSOR_C(holder, result, name) \
- result holder##Ref::name() const { \
- IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name); \
- return data()->As##holder()->name; \
- }
-
-// Like HANDLE_ACCESSOR_C but for BitFields.
-#define BIMODAL_ACCESSOR_B(holder, field, name, BitField) \
- typename BitField::FieldType holder##Ref::name() const { \
- IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name); \
- return BitField::decode(data()->As##holder()->field); \
- }
-
-// Macros for definining a const getter that always looks into the handle.
-// (These will go away once we serialize everything.) The first one is used for
-// the rare case of a XYZRef class that does not have a corresponding XYZ class
-// in objects.h. The second one is used otherwise.
-#define HANDLE_ACCESSOR(holder, result, name) \
- result##Ref holder##Ref::name() const { \
+#define IF_BROKER_DISABLED_ACCESS_HANDLE(holder, result, name) \
+ if (broker()->mode() == JSHeapBroker::kDisabled) { \
AllowHandleAllocation handle_allocation; \
AllowHandleDereference allow_handle_dereference; \
return result##Ref(broker(), \
handle(object<holder>()->name(), broker()->isolate())); \
}
-// Like HANDLE_ACCESSOR except that the result type is not an XYZRef.
-#define HANDLE_ACCESSOR_C(holder, result, name) \
- result holder##Ref::name() const { \
- AllowHandleAllocation handle_allocation; \
- AllowHandleDereference allow_handle_dereference; \
- return object<holder>()->name(); \
+// Macros for defining a const getter that, depending on the broker mode,
+// either looks into the handle or into the serialized data.
+#define BIMODAL_ACCESSOR(holder, result, name) \
+ result##Ref holder##Ref::name() const { \
+ IF_BROKER_DISABLED_ACCESS_HANDLE(holder, result, name); \
+ return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
+ }
+
+// Like above except that the result type is not an XYZRef.
+#define BIMODAL_ACCESSOR_C(holder, result, name) \
+ result holder##Ref::name() const { \
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name); \
+ return ObjectRef::data()->As##holder()->name(); \
+ }
+
+// Like above but for BitFields.
+#define BIMODAL_ACCESSOR_B(holder, field, name, BitField) \
+ typename BitField::FieldType holder##Ref::name() const { \
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name); \
+ return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
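As an illustration, BIMODAL_ACCESSOR_C(String, int, length) from further below
expands (roughly, going through IF_BROKER_DISABLED_ACCESS_HANDLE_C) to:

    int StringRef::length() const {
      if (broker()->mode() == JSHeapBroker::kDisabled) {
        AllowHandleAllocation handle_allocation;
        AllowHandleDereference allow_handle_dereference;
        return object<String>()->length();
      }
      return ObjectRef::data()->AsString()->length();
    }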
BIMODAL_ACCESSOR(AllocationSite, Object, nested_site)
@@ -997,59 +1939,39 @@ BIMODAL_ACCESSOR_C(AllocationSite, PretenureFlag, GetPretenureMode)
BIMODAL_ACCESSOR_C(BytecodeArray, int, register_count)
-BIMODAL_ACCESSOR_C(FixedArrayBase, int, length)
-
BIMODAL_ACCESSOR(HeapObject, Map, map)
-HANDLE_ACCESSOR_C(HeapObject, bool, IsExternalString)
-HANDLE_ACCESSOR_C(HeapObject, bool, IsSeqString)
-
-HANDLE_ACCESSOR_C(HeapNumber, double, value)
-HANDLE_ACCESSOR(JSArray, Object, length)
+BIMODAL_ACCESSOR(JSArray, Object, length)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
+BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
+BIMODAL_ACCESSOR(JSFunction, JSGlobalProxy, global_proxy)
BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
BIMODAL_ACCESSOR(JSFunction, Object, prototype)
-HANDLE_ACCESSOR_C(JSFunction, bool, IsConstructor)
-HANDLE_ACCESSOR(JSFunction, JSGlobalProxy, global_proxy)
-HANDLE_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
-
-HANDLE_ACCESSOR(JSObject, FixedArrayBase, elements)
-
-HANDLE_ACCESSOR(JSRegExp, Object, data)
-HANDLE_ACCESSOR(JSRegExp, Object, flags)
-HANDLE_ACCESSOR(JSRegExp, Object, last_index)
-HANDLE_ACCESSOR(JSRegExp, Object, raw_properties_or_hash)
-HANDLE_ACCESSOR(JSRegExp, Object, source)
+BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::ElementsKindBits)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map, Map::IsDictionaryMapBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
+ Map::NumberOfOwnDescriptorsBits)
BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot, Map::HasPrototypeSlotBit)
+BIMODAL_ACCESSOR_B(Map, bit_field, is_callable, Map::IsCallableBit)
+BIMODAL_ACCESSOR_B(Map, bit_field, is_constructor, Map::IsConstructorBit)
+BIMODAL_ACCESSOR_B(Map, bit_field, is_undetectable, Map::IsUndetectableBit)
BIMODAL_ACCESSOR_C(Map, int, instance_size)
-HANDLE_ACCESSOR_C(Map, bool, CanBeDeprecated)
-HANDLE_ACCESSOR_C(Map, bool, CanTransition)
-HANDLE_ACCESSOR_C(Map, bool, IsInobjectSlackTrackingInProgress)
-HANDLE_ACCESSOR_C(Map, bool, IsJSArrayMap)
-HANDLE_ACCESSOR_C(Map, bool, is_stable)
-HANDLE_ACCESSOR_C(Map, InstanceType, instance_type)
-HANDLE_ACCESSOR_C(Map, int, GetInObjectProperties)
-HANDLE_ACCESSOR_C(Map, int, GetInObjectPropertiesStartInWords)
-HANDLE_ACCESSOR_C(Map, int, NumberOfOwnDescriptors)
-HANDLE_ACCESSOR(Map, Object, constructor_or_backpointer)
-
-HANDLE_ACCESSOR_C(MutableHeapNumber, double, value)
+BIMODAL_ACCESSOR(Map, Object, prototype)
+BIMODAL_ACCESSOR_C(Map, InstanceType, instance_type)
+BIMODAL_ACCESSOR(Map, Object, constructor_or_backpointer)
#define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
BIMODAL_ACCESSOR(NativeContext, type, name)
BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
#undef DEF_NATIVE_CONTEXT_ACCESSOR
-HANDLE_ACCESSOR(PropertyCell, Object, value)
-HANDLE_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
-
-HANDLE_ACCESSOR_C(ScopeInfo, int, ContextLength)
+BIMODAL_ACCESSOR(PropertyCell, Object, value)
+BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
BIMODAL_ACCESSOR(SharedFunctionInfo, BytecodeArray, GetBytecodeArray)
@@ -1060,17 +1982,61 @@ BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
BIMODAL_ACCESSOR_C(String, int, length)
-// TODO(neis): Provide StringShape() on StringRef.
+bool MapRef::IsInobjectSlackTrackingInProgress() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(Map, IsInobjectSlackTrackingInProgress);
+ return Map::ConstructionCounterBits::decode(data()->AsMap()->bit_field3()) !=
+ Map::kNoSlackTracking;
+}
-bool JSFunctionRef::has_initial_map() const {
- IF_BROKER_DISABLED_ACCESS_HANDLE_C(JSFunction, has_initial_map);
- return data()->AsJSFunction()->initial_map != nullptr;
+bool MapRef::is_stable() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(Map, is_stable);
+ return !Map::IsUnstableBit::decode(data()->AsMap()->bit_field3());
+}
+
+bool MapRef::CanBeDeprecated() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(Map, CanBeDeprecated);
+ CHECK_GT(NumberOfOwnDescriptors(), 0);
+ return data()->AsMap()->can_be_deprecated();
+}
+
+bool MapRef::CanTransition() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(Map, CanTransition);
+ return data()->AsMap()->can_transition();
+}
+
+int MapRef::GetInObjectPropertiesStartInWords() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(Map, GetInObjectPropertiesStartInWords);
+ return data()->AsMap()->in_object_properties_start_in_words();
+}
+
+int MapRef::GetInObjectProperties() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(Map, GetInObjectProperties);
+ return data()->AsMap()->in_object_properties();
+}
+
+int ScopeInfoRef::ContextLength() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(ScopeInfo, ContextLength);
+ return data()->AsScopeInfo()->context_length();
+}
+
+bool StringRef::IsExternalString() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(String, IsExternalString);
+ return data()->AsString()->is_external_string();
+}
+
+bool StringRef::IsSeqString() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(String, IsSeqString);
+ return data()->AsString()->is_seq_string();
}
MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
- DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX);
- return get(index).AsMap();
+ DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return get(index).AsMap();
+ }
+ return MapRef(broker(), data()->AsNativeContext()->function_maps().at(
+ index - Context::FIRST_FUNCTION_MAP_INDEX));
}
MapRef NativeContextRef::GetInitialJSArrayMap(ElementsKind kind) const {
@@ -1092,13 +2058,16 @@ MapRef NativeContextRef::GetInitialJSArrayMap(ElementsKind kind) const {
}
}
-bool ObjectRef::BooleanValue() {
- AllowHandleDereference allow_handle_dereference;
- return object<Object>()->BooleanValue(broker()->isolate());
+bool ObjectRef::BooleanValue() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object<Object>()->BooleanValue(broker()->isolate());
+ }
+ return IsSmi() ? (AsSmi() != 0) : data()->AsHeapObject()->boolean_value();
}
double ObjectRef::OddballToNumber() const {
- OddballType type = oddball_type();
+ OddballType type = AsHeapObject().map().oddball_type();
switch (type) {
case OddballType::kBoolean: {
@@ -1122,14 +2091,28 @@ double ObjectRef::OddballToNumber() const {
}
}
-CellRef ModuleRef::GetCell(int cell_index) {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return CellRef(broker(), handle(object<Module>()->GetCell(cell_index),
- broker()->isolate()));
+double HeapNumberRef::value() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(HeapNumber, value);
+ return data()->AsHeapNumber()->value();
+}
+
+double MutableHeapNumberRef::value() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(MutableHeapNumber, value);
+ return data()->AsMutableHeapNumber()->value();
}
-ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object) {
+CellRef ModuleRef::GetCell(int cell_index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return CellRef(broker(), handle(object<Module>()->GetCell(cell_index),
+ broker()->isolate()));
+ }
+ return CellRef(broker(), data()->AsModule()->GetCell(cell_index));
+}
+
+ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
+ : broker_(broker) {
switch (broker->mode()) {
case JSHeapBroker::kSerialized:
data_ = FLAG_strict_heap_broker ? broker->GetData(object)
@@ -1138,31 +2121,80 @@ ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object) {
case JSHeapBroker::kSerializing:
data_ = broker->GetOrCreateData(object);
break;
- case JSHeapBroker::kDisabled:
- data_ = broker->GetData(object);
- if (data_ == nullptr) {
+ case JSHeapBroker::kDisabled: {
+ RefsMap::Entry* entry =
+ broker->refs_->LookupOrInsert(object.address(), broker->zone());
+ ObjectData** storage = &(entry->value);
+ if (*storage == nullptr) {
AllowHandleDereference handle_dereference;
- data_ =
- new (broker->zone()) ObjectData(broker, object, object->IsSmi());
+ entry->value = new (broker->zone())
+ ObjectData(broker, storage, object,
+ object->IsSmi() ? kSmi : kUnserializedHeapObject);
}
+ data_ = *storage;
break;
+ }
+ case JSHeapBroker::kRetired:
+ UNREACHABLE();
}
CHECK_NOT_NULL(data_);
}
+namespace {
+OddballType GetOddballType(Isolate* isolate, Map* map) {
+ if (map->instance_type() != ODDBALL_TYPE) {
+ return OddballType::kNone;
+ }
+ ReadOnlyRoots roots(isolate);
+ if (map == roots.undefined_map()) {
+ return OddballType::kUndefined;
+ }
+ if (map == roots.null_map()) {
+ return OddballType::kNull;
+ }
+ if (map == roots.boolean_map()) {
+ return OddballType::kBoolean;
+ }
+ if (map == roots.the_hole_map()) {
+ return OddballType::kHole;
+ }
+ if (map == roots.uninitialized_map()) {
+ return OddballType::kUninitialized;
+ }
+ DCHECK(map == roots.termination_exception_map() ||
+ map == roots.arguments_marker_map() ||
+ map == roots.optimized_out_map() || map == roots.stale_register_map());
+ return OddballType::kOther;
+}
+} // namespace
+
+HeapObjectType HeapObjectRef::GetHeapObjectType() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference handle_dereference;
+ Map* map = Handle<HeapObject>::cast(object())->map();
+ HeapObjectType::Flags flags(0);
+ if (map->is_undetectable()) flags |= HeapObjectType::kUndetectable;
+ if (map->is_callable()) flags |= HeapObjectType::kCallable;
+ return HeapObjectType(map->instance_type(), flags,
+ GetOddballType(broker()->isolate(), map));
+ }
+ HeapObjectType::Flags flags(0);
+ if (map().is_undetectable()) flags |= HeapObjectType::kUndetectable;
+ if (map().is_callable()) flags |= HeapObjectType::kCallable;
+ return HeapObjectType(map().instance_type(), flags, map().oddball_type());
+}
base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
return JSObjectRef(broker(), handle(object<AllocationSite>()->boilerplate(),
broker()->isolate()));
+ }
+ JSObjectData* boilerplate = data()->AsAllocationSite()->boilerplate();
+ if (boilerplate) {
+ return JSObjectRef(broker(), boilerplate);
} else {
- JSObjectData* boilerplate = data()->AsAllocationSite()->boilerplate;
- if (boilerplate) {
- return JSObjectRef(boilerplate);
- } else {
- return base::nullopt;
- }
+ return base::nullopt;
}
}
@@ -1170,11 +2202,79 @@ ElementsKind JSObjectRef::GetElementsKind() const {
return map().elements_kind();
}
-Handle<Object> ObjectRef::object() const { return data_->object; }
+FixedArrayBaseRef JSObjectRef::elements() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return FixedArrayBaseRef(
+ broker(), handle(object<JSObject>()->elements(), broker()->isolate()));
+ }
+ return FixedArrayBaseRef(broker(), data()->AsJSObject()->elements());
+}
-JSHeapBroker* ObjectRef::broker() const { return data_->broker; }
+int FixedArrayBaseRef::length() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(FixedArrayBase, length);
+ return data()->AsFixedArrayBase()->length();
+}
-ObjectData* ObjectRef::data() const { return data_; }
+ObjectData* FixedArrayData::Get(int i) const {
+ CHECK_LT(i, static_cast<int>(contents_.size()));
+ CHECK_NOT_NULL(contents_[i]);
+ return contents_[i];
+}
+
+Float64 FixedDoubleArrayData::Get(int i) const {
+ CHECK_LT(i, static_cast<int>(contents_.size()));
+ return contents_[i];
+}
+
+void FeedbackVectorRef::SerializeSlots() {
+ data()->AsFeedbackVector()->SerializeSlots(broker());
+}
+
+ObjectRef JSRegExpRef::data() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE(JSRegExp, Object, data);
+ return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->data());
+}
+
+ObjectRef JSRegExpRef::flags() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE(JSRegExp, Object, flags);
+ return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->flags());
+}
+
+ObjectRef JSRegExpRef::last_index() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE(JSRegExp, Object, last_index);
+ return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->last_index());
+}
+
+ObjectRef JSRegExpRef::raw_properties_or_hash() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE(JSRegExp, Object, raw_properties_or_hash);
+ return ObjectRef(broker(),
+ ObjectRef::data()->AsJSRegExp()->raw_properties_or_hash());
+}
+
+ObjectRef JSRegExpRef::source() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE(JSRegExp, Object, source);
+ return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->source());
+}
+
+Handle<Object> ObjectRef::object() const { return data_->object(); }
+
+JSHeapBroker* ObjectRef::broker() const { return broker_; }
+
+ObjectData* ObjectRef::data() const {
+ switch (broker()->mode()) {
+ case JSHeapBroker::kDisabled:
+ CHECK_NE(data_->kind(), kSerializedHeapObject);
+ return data_;
+ case JSHeapBroker::kSerializing:
+ case JSHeapBroker::kSerialized:
+ CHECK_NE(data_->kind(), kUnserializedHeapObject);
+ return data_;
+ case JSHeapBroker::kRetired:
+ UNREACHABLE();
+ }
+}
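A minimal summary of the invariant the switch above enforces, with the kind
names taken from the CHECKs (kSmi passes both CHECK_NEs):

    mode                        | kinds data() accepts
    ----------------------------+------------------------------
    kDisabled                   | kSmi, kUnserializedHeapObject
    kSerializing, kSerialized   | kSmi, kSerializedHeapObject
    kRetired                    | none (UNREACHABLE)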
Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
const char* function, int line) {
@@ -1185,12 +2285,76 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
return AdvancedReducer::NoChange();
}
+NativeContextData::NativeContextData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<NativeContext> object)
+ : ContextData(broker, storage, object), function_maps_(broker->zone()) {}
+
+void NativeContextData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "NativeContextData::Serialize");
+ Handle<NativeContext> context = Handle<NativeContext>::cast(object());
+
+#define SERIALIZE_MEMBER(type, name) \
+ DCHECK_NULL(name##_); \
+ name##_ = broker->GetOrCreateData(context->name())->As##type(); \
+ if (name##_->IsJSFunction()) name##_->AsJSFunction()->Serialize(broker);
+ BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
+ if (!broker->isolate()->bootstrapper()->IsActive()) {
+ BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
+ }
+#undef SERIALIZE_MEMBER
+
+ DCHECK(function_maps_.empty());
+ int const first = Context::FIRST_FUNCTION_MAP_INDEX;
+ int const last = Context::LAST_FUNCTION_MAP_INDEX;
+ function_maps_.reserve(last + 1 - first);
+ for (int i = first; i <= last; ++i) {
+ function_maps_.push_back(broker->GetOrCreateData(context->get(i))->AsMap());
+ }
+}
+
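For one field, say V(JSFunction, array_function), SERIALIZE_MEMBER expands
roughly to:

    DCHECK_NULL(array_function_);
    array_function_ =
        broker->GetOrCreateData(context->array_function())->AsJSFunction();
    if (array_function_->IsJSFunction())
      array_function_->AsJSFunction()->Serialize(broker);

For the Map-typed fields the IsJSFunction() guard is simply false, so only
function-valued slots get recursively serialized.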
+void JSFunctionRef::Serialize() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSFunction()->Serialize(broker());
+}
+
+void JSObjectRef::SerializeObjectCreateMap() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSObject()->SerializeObjectCreateMap(broker());
+}
+
+void MapRef::SerializeOwnDescriptors() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsMap()->SerializeOwnDescriptors(broker());
+}
+
+void ModuleRef::Serialize() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsModule()->Serialize(broker());
+}
+
+void ContextRef::Serialize() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsContext()->Serialize(broker());
+}
+
+void NativeContextRef::Serialize() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsNativeContext()->Serialize(broker());
+}
+
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
-#undef GET_OR_CREATE
-#undef HANDLE_ACCESSOR
-#undef HANDLE_ACCESSOR_C
+#undef IF_BROKER_DISABLED_ACCESS_HANDLE
#undef IF_BROKER_DISABLED_ACCESS_HANDLE_C
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 7ea12ee733..89f3ee871e 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -7,8 +7,10 @@
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
+#include "src/compiler/refs-map.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/builtin-function-id.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -25,35 +27,6 @@ enum class OddballType : uint8_t {
kOther // Oddball, but none of the above.
};
-// TODO(neis): Get rid of the HeapObjectType class.
-class HeapObjectType {
- public:
- enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
-
- typedef base::Flags<Flag> Flags;
-
- HeapObjectType(InstanceType instance_type, Flags flags,
- OddballType oddball_type)
- : instance_type_(instance_type),
- oddball_type_(oddball_type),
- flags_(flags) {
- DCHECK_EQ(instance_type == ODDBALL_TYPE,
- oddball_type != OddballType::kNone);
- }
-
- OddballType oddball_type() const { return oddball_type_; }
- InstanceType instance_type() const { return instance_type_; }
- Flags flags() const { return flags_; }
-
- bool is_callable() const { return flags_ & kCallable; }
- bool is_undetectable() const { return flags_ & kUndetectable; }
-
- private:
- InstanceType const instance_type_;
- OddballType const oddball_type_;
- Flags const flags_;
-};
-
// This list is sorted such that subtypes appear before their supertypes.
// DO NOT VIOLATE THIS PROPERTY!
#define HEAP_BROKER_OBJECT_LIST(V) \
@@ -64,6 +37,10 @@ class HeapObjectType {
V(JSRegExp) \
/* Subtypes of Context */ \
V(NativeContext) \
+ /* Subtypes of FixedArray */ \
+ V(Context) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
/* Subtypes of FixedArrayBase */ \
V(BytecodeArray) \
V(FixedArray) \
@@ -75,19 +52,17 @@ class HeapObjectType {
V(AllocationSite) \
V(Cell) \
V(Code) \
+ V(DescriptorArray) \
V(FeedbackVector) \
- V(Map) \
- V(Module) \
- V(ScopeInfo) \
- V(ScriptContextTable) \
- V(SharedFunctionInfo) \
- V(Context) \
V(FixedArrayBase) \
V(HeapNumber) \
V(JSObject) \
+ V(Map) \
+ V(Module) \
V(MutableHeapNumber) \
V(Name) \
V(PropertyCell) \
+ V(SharedFunctionInfo) \
/* Subtypes of Object */ \
V(HeapObject)
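The ordering requirement matters because JSHeapBroker::GetOrCreateData in
js-heap-broker.cc dispatches through an if/else-if chain generated from this
list in order. A sketch of the failure mode if a supertype came first:

    // Hypothetical wrong order: V(JSObject) before V(JSArray).
    } else if (object->IsJSObject()) {
      // Every JSArray is also a JSObject, so a JSArray handle would be
      // wrapped in the less specific JSObjectData here, and ...
    } else if (object->IsJSArray()) {
      // ... this more specific branch would never be taken.
    }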
@@ -101,7 +76,10 @@ HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
class ObjectRef {
public:
ObjectRef(JSHeapBroker* broker, Handle<Object> object);
- explicit ObjectRef(ObjectData* data) : data_(data) { CHECK_NOT_NULL(data_); }
+ ObjectRef(JSHeapBroker* broker, ObjectData* data)
+ : broker_(broker), data_(data) {
+ CHECK_NOT_NULL(data_);
+ }
bool equals(const ObjectRef& other) const;
@@ -113,8 +91,6 @@ class ObjectRef {
return Handle<T>::cast(object());
}
- OddballType oddball_type() const;
-
bool IsSmi() const;
int AsSmi() const;
@@ -126,8 +102,7 @@ class ObjectRef {
HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
#undef HEAP_AS_METHOD_DECL
- StringRef TypeOf() const;
- bool BooleanValue();
+ bool BooleanValue() const;
double OddballToNumber() const;
Isolate* isolate() const;
@@ -137,54 +112,94 @@ class ObjectRef {
ObjectData* data() const;
private:
+ JSHeapBroker* broker_;
ObjectData* data_;
};
+// Temporary class that carries information from a Map. We'd like to remove
+// this class and use MapRef instead, but we can't as long as we support the
+// kDisabled broker mode. That's because obtaining the MapRef via
+// HeapObjectRef::map() requires a HandleScope when the broker is disabled.
+// During OptimizeGraph we generally don't have a HandleScope, however. There
+// are two places where we therefore use GetHeapObjectType() instead. Both that
+// function and this class should eventually be removed.
+class HeapObjectType {
+ public:
+ enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
+
+ typedef base::Flags<Flag> Flags;
+
+ HeapObjectType(InstanceType instance_type, Flags flags,
+ OddballType oddball_type)
+ : instance_type_(instance_type),
+ oddball_type_(oddball_type),
+ flags_(flags) {
+ DCHECK_EQ(instance_type == ODDBALL_TYPE,
+ oddball_type != OddballType::kNone);
+ }
+
+ OddballType oddball_type() const { return oddball_type_; }
+ InstanceType instance_type() const { return instance_type_; }
+ Flags flags() const { return flags_; }
+
+ bool is_callable() const { return flags_ & kCallable; }
+ bool is_undetectable() const { return flags_ & kUndetectable; }
+
+ private:
+ InstanceType const instance_type_;
+ OddballType const oddball_type_;
+ Flags const flags_;
+};
+
class HeapObjectRef : public ObjectRef {
public:
using ObjectRef::ObjectRef;
- HeapObjectType type() const;
MapRef map() const;
- base::Optional<MapRef> TryGetObjectCreateMap() const;
- bool IsSeqString() const;
- bool IsExternalString() const;
+
+ // See the comment on the HeapObjectType class.
+ HeapObjectType GetHeapObjectType() const;
};
class PropertyCellRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
- ObjectRef value() const;
PropertyDetails property_details() const;
+ ObjectRef value() const;
};
class JSObjectRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
- bool IsUnboxedDoubleField(FieldIndex index) const;
double RawFastDoublePropertyAt(FieldIndex index) const;
ObjectRef RawFastPropertyAt(FieldIndex index) const;
FixedArrayBaseRef elements() const;
void EnsureElementsTenured();
ElementsKind GetElementsKind() const;
+
+ void SerializeObjectCreateMap();
+ base::Optional<MapRef> GetObjectCreateMap() const;
};
class JSFunctionRef : public JSObjectRef {
public:
using JSObjectRef::JSObjectRef;
- bool IsConstructor() const;
bool has_initial_map() const;
- MapRef initial_map() const;
bool has_prototype() const;
- ObjectRef prototype() const;
bool PrototypeRequiresRuntimeLookup() const;
+
+ void Serialize();
+
+ // The following are available only after calling Serialize().
+ ObjectRef prototype() const;
+ MapRef initial_map() const;
JSGlobalProxyRef global_proxy() const;
- int InitialMapInstanceSizeWithMinSlack() const;
SharedFunctionInfoRef shared() const;
+ int InitialMapInstanceSizeWithMinSlack() const;
};
class JSRegExpRef : public JSObjectRef {
@@ -215,37 +230,48 @@ class MutableHeapNumberRef : public HeapObjectRef {
class ContextRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ void Serialize();
- base::Optional<ContextRef> previous() const;
+ ContextRef previous() const;
ObjectRef get(int index) const;
};
-#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
- V(JSFunction, array_function) \
- V(JSFunction, object_function) \
- V(JSFunction, promise_function) \
- V(Map, fast_aliased_arguments_map) \
- V(Map, initial_array_iterator_map) \
- V(Map, iterator_result_map) \
- V(Map, js_array_holey_double_elements_map) \
- V(Map, js_array_holey_elements_map) \
- V(Map, js_array_holey_smi_elements_map) \
- V(Map, js_array_packed_double_elements_map) \
- V(Map, js_array_packed_elements_map) \
- V(Map, js_array_packed_smi_elements_map) \
- V(Map, map_key_iterator_map) \
- V(Map, map_key_value_iterator_map) \
- V(Map, map_value_iterator_map) \
- V(Map, set_key_value_iterator_map) \
- V(Map, set_value_iterator_map) \
- V(Map, sloppy_arguments_map) \
- V(Map, strict_arguments_map) \
- V(Map, string_iterator_map) \
+#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ V(JSFunction, array_function) \
+ V(JSFunction, object_function) \
+ V(JSFunction, promise_function) \
+ V(Map, fast_aliased_arguments_map) \
+ V(Map, initial_array_iterator_map) \
+ V(Map, initial_string_iterator_map) \
+ V(Map, iterator_result_map) \
+ V(Map, js_array_holey_double_elements_map) \
+ V(Map, js_array_holey_elements_map) \
+ V(Map, js_array_holey_smi_elements_map) \
+ V(Map, js_array_packed_double_elements_map) \
+ V(Map, js_array_packed_elements_map) \
+ V(Map, js_array_packed_smi_elements_map) \
+ V(Map, sloppy_arguments_map) \
+ V(Map, slow_object_with_null_prototype_map) \
+ V(Map, strict_arguments_map) \
V(ScriptContextTable, script_context_table)
+// These are set by Bootstrapper::ExportFromRuntime, which may not yet have
+// happened when Turbofan is invoked via --always-opt.
+#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
+ V(Map, map_key_iterator_map) \
+ V(Map, map_key_value_iterator_map) \
+ V(Map, map_value_iterator_map) \
+ V(Map, set_key_value_iterator_map) \
+ V(Map, set_value_iterator_map)
+
+#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V)
+
class NativeContextRef : public ContextRef {
public:
using ContextRef::ContextRef;
+ void Serialize();
#define DECL_ACCESSOR(type, name) type##Ref name() const;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
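Each V(type, name) entry thus becomes a getter declaration on
NativeContextRef, e.g.:

    JSFunctionRef array_function() const;
    MapRef sloppy_arguments_map() const;
    ScriptContextTableRef script_context_table() const;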
@@ -273,11 +299,18 @@ class ScriptContextTableRef : public HeapObjectRef {
base::Optional<LookupResult> lookup(const NameRef& name) const;
};
+class DescriptorArrayRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+};
+
class FeedbackVectorRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
ObjectRef get(FeedbackSlot slot) const;
+
+ void SerializeSlots();
};
class AllocationSiteRef : public HeapObjectRef {
@@ -313,25 +346,32 @@ class MapRef : public HeapObjectRef {
int GetInObjectPropertyOffset(int index) const;
ElementsKind elements_kind() const;
bool is_stable() const;
+ bool is_constructor() const;
bool has_prototype_slot() const;
bool is_deprecated() const;
bool CanBeDeprecated() const;
bool CanTransition() const;
bool IsInobjectSlackTrackingInProgress() const;
bool is_dictionary_map() const;
- bool IsJSArrayMap() const;
bool IsFixedCowArrayMap() const;
+ bool is_undetectable() const;
+ bool is_callable() const;
ObjectRef constructor_or_backpointer() const;
+ ObjectRef prototype() const;
+
+ OddballType oddball_type() const;
base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
// Concerning the underlying instance_descriptors:
- MapRef FindFieldOwner(int descriptor) const;
- PropertyDetails GetPropertyDetails(int i) const;
- NameRef GetPropertyKey(int i) const;
- FieldIndex GetFieldIndexFor(int i) const;
- ObjectRef GetFieldType(int descriptor) const;
+ void SerializeOwnDescriptors();
+ MapRef FindFieldOwner(int descriptor_index) const;
+ PropertyDetails GetPropertyDetails(int descriptor_index) const;
+ NameRef GetPropertyKey(int descriptor_index) const;
+ FieldIndex GetFieldIndexFor(int descriptor_index) const;
+ ObjectRef GetFieldType(int descriptor_index) const;
+ bool IsUnboxedDoubleField(int descriptor_index) const;
};
class FixedArrayBaseRef : public HeapObjectRef {
@@ -346,7 +386,6 @@ class FixedArrayRef : public FixedArrayBaseRef {
using FixedArrayBaseRef::FixedArrayBaseRef;
ObjectRef get(int i) const;
- bool is_the_hole(int i) const;
};
class FixedDoubleArrayRef : public FixedArrayBaseRef {
@@ -400,7 +439,7 @@ class SharedFunctionInfoRef : public HeapObjectRef {
BytecodeArrayRef GetBytecodeArray() const;
#define DECL_ACCESSOR(type, name) type name() const;
BROKER_SFI_FIELDS(DECL_ACCESSOR)
-#undef DECL_ACCSESOR
+#undef DECL_ACCESSOR
};
class StringRef : public NameRef {
@@ -410,13 +449,17 @@ class StringRef : public NameRef {
int length() const;
uint16_t GetFirstChar();
base::Optional<double> ToNumber();
+ bool IsSeqString() const;
+ bool IsExternalString() const;
};
class ModuleRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
- CellRef GetCell(int cell_index);
+ void Serialize();
+
+ CellRef GetCell(int cell_index) const;
};
class CellRef : public HeapObjectRef {
@@ -439,48 +482,56 @@ class InternalizedStringRef : public StringRef {
using StringRef::StringRef;
};
+class PerIsolateCompilerCache;
+
class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
public:
- JSHeapBroker(Isolate* isolate, Zone* zone);
+ JSHeapBroker(Isolate* isolate, Zone* broker_zone);
+ void SetNativeContextRef();
void SerializeStandardObjects();
- HeapObjectType HeapObjectTypeFromMap(Handle<Map> map) const {
- AllowHandleDereference handle_dereference;
- return HeapObjectTypeFromMap(*map);
- }
-
Isolate* isolate() const { return isolate_; }
- Zone* zone() const { return zone_; }
+ Zone* zone() const { return current_zone_; }
+ NativeContextRef native_context() const { return native_context_.value(); }
+ PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
- enum BrokerMode { kDisabled, kSerializing, kSerialized };
+ enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired };
BrokerMode mode() const { return mode_; }
- void StopSerializing() {
- CHECK_EQ(mode_, kSerializing);
- mode_ = kSerialized;
- }
+ void StartSerializing();
+ void StopSerializing();
+ void Retire();
bool SerializingAllowed() const;
// Returns nullptr iff handle unknown.
ObjectData* GetData(Handle<Object>) const;
// Never returns nullptr.
ObjectData* GetOrCreateData(Handle<Object>);
+  // Like the previous method, but wraps the argument in a handle first
+  // (for convenience).
+ ObjectData* GetOrCreateData(Object*);
void Trace(const char* format, ...) const;
+ void IncrementTracingIndentation();
+ void DecrementTracingIndentation();
private:
friend class HeapObjectRef;
friend class ObjectRef;
friend class ObjectData;
- // TODO(neis): Remove eventually.
- HeapObjectType HeapObjectTypeFromMap(Map* map) const;
-
- void AddData(Handle<Object> object, ObjectData* data);
+ void SerializeShareableObjects();
Isolate* const isolate_;
- Zone* const zone_;
- ZoneUnorderedMap<Address, ObjectData*> refs_;
- BrokerMode mode_;
+ Zone* const broker_zone_;
+ Zone* current_zone_;
+ base::Optional<NativeContextRef> native_context_;
+ RefsMap* refs_;
+
+ BrokerMode mode_ = kDisabled;
+ unsigned tracing_indentation_ = 0;
+ PerIsolateCompilerCache* compiler_cache_;
+
+ static const size_t kMinimalRefsBucketCount = 8; // must be power of 2
+ static const size_t kInitialRefsBucketCount = 1024; // must be power of 2
};
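The new kRetired mode extends the broker's lifecycle beyond serialization.
A minimal sketch of the transition discipline implied by the declarations
above (the real StartSerializing/StopSerializing/Retire bodies live in the
.cc file; the CHECKs here mirror the removed inline StopSerializing and are
otherwise an assumption):

  void JSHeapBroker::StartSerializing() {
    CHECK_EQ(mode_, kDisabled);
    mode_ = kSerializing;
  }
  void JSHeapBroker::StopSerializing() {
    CHECK_EQ(mode_, kSerializing);
    mode_ = kSerialized;
  }
  void JSHeapBroker::Retire() {
    CHECK_EQ(mode_, kSerialized);
    mode_ = kRetired;  // terminal: no further refs may be created
  }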
#define ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(something_var, \
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 0bcc662771..ca510d5054 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -7,6 +7,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
+#include "src/compiler/node-properties.h"
#include "src/heap/factory-inl.h"
#include "src/objects/map.h"
#include "src/objects/scope-info.h"
@@ -25,7 +26,11 @@ JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
Reduction JSHeapCopyReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- ObjectRef(broker(), HeapConstantOf(node->op()));
+ ObjectRef object(broker(), HeapConstantOf(node->op()));
+ if (object.IsJSFunction()) object.AsJSFunction().Serialize();
+ if (object.IsJSObject()) object.AsJSObject().SerializeObjectCreateMap();
+ if (object.IsModule()) object.AsModule().Serialize();
+ if (object.IsContext()) object.AsContext().Serialize();
break;
}
case IrOpcode::kJSCreateArray: {
@@ -34,6 +39,23 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site);
break;
}
+ case IrOpcode::kJSCreateArguments: {
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node);
+ FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+ SharedFunctionInfoRef shared(broker(),
+ state_info.shared_info().ToHandleChecked());
+ break;
+ }
+ case IrOpcode::kJSCreateBlockContext: {
+ ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+ break;
+ }
+ case IrOpcode::kJSCreateBoundFunction: {
+ CreateBoundFunctionParameters const& p =
+ CreateBoundFunctionParametersOf(node->op());
+ MapRef(broker(), p.map());
+ break;
+ }
case IrOpcode::kJSCreateCatchContext: {
ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
break;
@@ -46,14 +68,8 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
break;
}
case IrOpcode::kJSCreateEmptyLiteralArray: {
- // TODO(neis, jarin) Force serialization of the entire feedback vector
- // rather than just the one element.
FeedbackParameter const& p = FeedbackParameterOf(node->op());
- FeedbackVectorRef(broker(), p.feedback().vector());
- Handle<Object> feedback(
- p.feedback().vector()->Get(p.feedback().slot())->ToObject(),
- broker()->isolate());
- ObjectRef(broker(), feedback);
+ FeedbackVectorRef(broker(), p.feedback().vector()).SerializeSlots();
break;
}
case IrOpcode::kJSCreateFunctionContext: {
@@ -65,7 +81,16 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject: {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- ObjectRef(broker(), p.feedback().vector());
+ FeedbackVectorRef(broker(), p.feedback().vector()).SerializeSlots();
+ break;
+ }
+ case IrOpcode::kJSCreateLiteralRegExp: {
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ FeedbackVectorRef(broker(), p.feedback().vector()).SerializeSlots();
+ break;
+ }
+ case IrOpcode::kJSCreateWithContext: {
+ ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
break;
}
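      // [Editorial sketch, not part of this patch] The idiom throughout
      // this switch: constructing a Ref has the side effect of registering
      // the underlying handle with the broker, which copies the object's
      // data off the JS heap while it is still safe to read. No result is
      // kept, hence the bare constructor expressions such as
      //
      //   ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
      //
      // and the chained form FeedbackVectorRef(...).SerializeSlots() used
      // for feedback vectors above.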
case IrOpcode::kJSLoadNamed:
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.h b/deps/v8/src/compiler/js-heap-copy-reducer.h
index b94b930d78..1041a00fab 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.h
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.h
@@ -17,7 +17,7 @@ class JSHeapBroker;
// by handles embedded in the graph is copied to the heap broker.
// TODO(jarin) This is just a temporary solution until the graph uses only
 // ObjectRef-derived references to refer to the heap data.
-class JSHeapCopyReducer : public Reducer {
+class V8_EXPORT_PRIVATE JSHeapCopyReducer : public Reducer {
public:
explicit JSHeapCopyReducer(JSHeapBroker* broker);
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 2f31772883..3c5b1b8046 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -234,7 +234,8 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count,
BailoutId bailout_id,
FrameStateType frame_state_type,
- Handle<SharedFunctionInfo> shared) {
+ Handle<SharedFunctionInfo> shared,
+ Node* context) {
const FrameStateFunctionInfo* state_info =
common()->CreateFrameStateFunctionInfo(frame_state_type,
parameter_count + 1, 0, shared);
@@ -251,9 +252,11 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
static_cast<int>(params.size()), SparseInputMask::Dense());
Node* params_node = graph()->NewNode(
op_param, static_cast<int>(params.size()), &params.front());
- return graph()->NewNode(op, params_node, node0, node0,
- jsgraph()->UndefinedConstant(), node->InputAt(0),
- outer_frame_state);
+ if (!context) {
+ context = jsgraph()->UndefinedConstant();
+ }
+ return graph()->NewNode(op, params_node, node0, node0, context,
+ node->InputAt(0), outer_frame_state);
}
namespace {
@@ -536,14 +539,14 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// instantiation but before the invocation (i.e. inside {JSConstructStub}
// where execution continues at {construct_stub_create_deopt_pc_offset}).
Node* receiver = jsgraph()->TheHoleConstant(); // Implicit receiver.
+ Node* context = NodeProperties::GetContextInput(node);
if (NeedsImplicitReceiver(shared_info)) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
Node* frame_state_inside = CreateArtificialFrameState(
node, frame_state, call.formal_arguments(),
BailoutId::ConstructStubCreate(), FrameStateType::kConstructStub,
- shared_info);
+ shared_info, context);
Node* create =
graph()->NewNode(javascript()->Create(), call.target(), new_target,
context, frame_state_inside, effect, control);
@@ -595,10 +598,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
node->ReplaceInput(1, receiver);
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
- frame_state =
- CreateArtificialFrameState(node, frame_state, call.formal_arguments(),
- BailoutId::ConstructStubInvoke(),
- FrameStateType::kConstructStub, shared_info);
+ frame_state = CreateArtificialFrameState(
+ node, frame_state, call.formal_arguments(),
+ BailoutId::ConstructStubInvoke(), FrameStateType::kConstructStub,
+ shared_info, context);
}
// Insert a JSConvertReceiver node for sloppy callees. Note that the context
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 1c7ee6c0b8..baca345f27 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -63,7 +63,8 @@ class JSInliner final : public AdvancedReducer {
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count, BailoutId bailout_id,
FrameStateType frame_state_type,
- Handle<SharedFunctionInfo> shared);
+ Handle<SharedFunctionInfo> shared,
+ Node* context = nullptr);
Reduction InlineCall(Node* call, Node* new_target, Node* context,
Node* frame_state, Node* start, Node* end,
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 194e876849..b132cfa6e9 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -32,16 +32,12 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
switch (f->function_id) {
case Runtime::kInlineCreateIterResultObject:
return ReduceCreateIterResultObject(node);
- case Runtime::kInlineDebugIsActive:
- return ReduceDebugIsActive(node);
case Runtime::kInlineDeoptimizeNow:
return ReduceDeoptimizeNow(node);
case Runtime::kInlineGeneratorClose:
return ReduceGeneratorClose(node);
case Runtime::kInlineCreateJSGeneratorObject:
return ReduceCreateJSGeneratorObject(node);
- case Runtime::kInlineGeneratorGetInputOrDebugPos:
- return ReduceGeneratorGetInputOrDebugPos(node);
case Runtime::kInlineAsyncGeneratorReject:
return ReduceAsyncGeneratorReject(node);
case Runtime::kInlineAsyncGeneratorResolve:
@@ -54,8 +50,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsTypedArray:
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
- case Runtime::kInlineIsJSProxy:
- return ReduceIsInstanceType(node, JS_PROXY_TYPE);
case Runtime::kInlineIsJSReceiver:
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
@@ -64,12 +58,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceRejectPromise(node);
case Runtime::kInlineResolvePromise:
return ReduceResolvePromise(node);
- case Runtime::kInlineToInteger:
- return ReduceToInteger(node);
case Runtime::kInlineToLength:
return ReduceToLength(node);
- case Runtime::kInlineToNumber:
- return ReduceToNumber(node);
case Runtime::kInlineToObject:
return ReduceToObject(node);
case Runtime::kInlineToString:
@@ -92,16 +82,6 @@ Reduction JSIntrinsicLowering::ReduceCreateIterResultObject(Node* node) {
context, effect);
}
-Reduction JSIntrinsicLowering::ReduceDebugIsActive(Node* node) {
- Node* const value = jsgraph()->ExternalConstant(
- ExternalReference::debug_is_active_address(isolate()));
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Operator const* const op =
- simplified()->LoadField(AccessBuilder::ForExternalUint8Value());
- return Change(node, op, value, effect, control);
-}
-
Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
Node* const frame_state = NodeProperties::GetFrameStateInput(node);
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -147,16 +127,6 @@ Reduction JSIntrinsicLowering::ReduceGeneratorClose(Node* node) {
return Change(node, op, generator, closed, effect, control);
}
-Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
- Node* const generator = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Operator const* const op = simplified()->LoadField(
- AccessBuilder::ForJSGeneratorObjectInputOrDebugPos());
-
- return Change(node, op, generator, effect, control);
-}
-
Reduction JSIntrinsicLowering::ReduceAsyncGeneratorReject(Node* node) {
return Change(
node, Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorReject),
@@ -258,17 +228,6 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
return Changed(node);
}
-Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
- NodeProperties::ChangeOp(node, javascript()->ToInteger());
- return Changed(node);
-}
-
-
-Reduction JSIntrinsicLowering::ReduceToNumber(Node* node) {
- NodeProperties::ChangeOp(node, javascript()->ToNumber());
- return Changed(node);
-}
-
Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
NodeProperties::ChangeOp(node, javascript()->ToLength());
@@ -300,11 +259,6 @@ Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
return Changed(node);
}
-Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
- NodeProperties::ChangeOp(node, javascript()->GetSuperConstructor());
- return Changed(node);
-}
-
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
RelaxControls(node);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index e0a55d7b06..f71af1156c 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -32,7 +32,7 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph);
- ~JSIntrinsicLowering() final {}
+ ~JSIntrinsicLowering() final = default;
const char* reducer_name() const override { return "JSIntrinsicLowering"; }
@@ -40,11 +40,9 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
private:
Reduction ReduceCreateIterResultObject(Node* node);
- Reduction ReduceDebugIsActive(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceCreateJSGeneratorObject(Node* node);
Reduction ReduceGeneratorClose(Node* node);
- Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
Reduction ReduceAsyncGeneratorReject(Node* node);
Reduction ReduceAsyncGeneratorResolve(Node* node);
Reduction ReduceAsyncGeneratorYield(Node* node);
@@ -55,13 +53,10 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceIsSmi(Node* node);
Reduction ReduceRejectPromise(Node* node);
Reduction ReduceResolvePromise(Node* node);
- Reduction ReduceToInteger(Node* node);
Reduction ReduceToLength(Node* node);
- Reduction ReduceToNumber(Node* node);
Reduction ReduceToObject(Node* node);
Reduction ReduceToString(Node* node);
Reduction ReduceCall(Node* node);
- Reduction ReduceGetSuperConstructor(Node* node);
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index e35a860be0..d449e72367 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -17,12 +17,14 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/property-access-builder.h"
#include "src/compiler/type-cache.h"
+#include "src/dtoa.h"
#include "src/feedback-vector.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/templates.h"
+#include "src/string-constants.h"
#include "src/vector-slot-pair.h"
namespace v8 {
@@ -62,7 +64,7 @@ struct JSNativeContextSpecialization::ScriptContextTableLookupResult {
JSNativeContextSpecialization::JSNativeContextSpecialization(
Editor* editor, JSGraph* jsgraph, JSHeapBroker* js_heap_broker, Flags flags,
Handle<Context> native_context, CompilationDependencies* dependencies,
- Zone* zone)
+ Zone* zone, Zone* shared_zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker),
@@ -73,6 +75,7 @@ JSNativeContextSpecialization::JSNativeContextSpecialization(
native_context_(js_heap_broker, native_context),
dependencies_(dependencies),
zone_(zone),
+ shared_zone_(shared_zone),
type_cache_(TypeCache::Get()) {}
Reduction JSNativeContextSpecialization::Reduce(Node* node) {
@@ -113,12 +116,98 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSStoreInArrayLiteral(node);
case IrOpcode::kJSToObject:
return ReduceJSToObject(node);
+ case IrOpcode::kJSToString:
+ return ReduceJSToString(node);
default:
break;
}
return NoChange();
}
+// static
+base::Optional<size_t> JSNativeContextSpecialization::GetMaxStringLength(
+ JSHeapBroker* broker, Node* node) {
+ if (node->opcode() == IrOpcode::kDelayedStringConstant) {
+ return StringConstantBaseOf(node->op())->GetMaxStringConstantLength();
+ }
+
+ HeapObjectMatcher matcher(node);
+ if (matcher.HasValue() && matcher.Ref(broker).IsString()) {
+ StringRef input = matcher.Ref(broker).AsString();
+ return input.length();
+ }
+
+ NumberMatcher number_matcher(node);
+ if (number_matcher.HasValue()) {
+ return kBase10MaximalLength + 1;
+ }
+
+  // We don't support objects with a possibly monkey-patched
+  // prototype.toString, as it might have side effects, so we shouldn't
+  // attempt to lower them.
+ return base::nullopt;
+}
+
+Reduction JSNativeContextSpecialization::ReduceJSToString(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSToString, node->opcode());
+ Node* const input = node->InputAt(0);
+ Reduction reduction;
+
+ HeapObjectMatcher matcher(input);
+ if (matcher.HasValue() && matcher.Ref(js_heap_broker()).IsString()) {
+ reduction = Changed(input); // JSToString(x:string) => x
+ ReplaceWithValue(node, reduction.replacement());
+ return reduction;
+ }
+
+  // TODO(turbofan): This optimization is weaker than what we used to have
+  // in js-typed-lowering for OrderedNumbers. We don't have types here,
+  // though, so an alternative approach should be designed if this causes
+  // performance regressions, and the stronger optimization should then be
+  // re-implemented.
+ NumberMatcher number_matcher(input);
+ if (number_matcher.HasValue()) {
+ const StringConstantBase* base =
+ new (shared_zone()) NumberToStringConstant(number_matcher.Value());
+ reduction =
+ Replace(graph()->NewNode(common()->DelayedStringConstant(base)));
+ ReplaceWithValue(node, reduction.replacement());
+ return reduction;
+ }
+
+ return NoChange();
+}
+
+const StringConstantBase*
+JSNativeContextSpecialization::CreateDelayedStringConstant(Node* node) {
+ if (node->opcode() == IrOpcode::kDelayedStringConstant) {
+ return StringConstantBaseOf(node->op());
+ } else {
+ NumberMatcher number_matcher(node);
+ if (number_matcher.HasValue()) {
+ return new (shared_zone()) NumberToStringConstant(number_matcher.Value());
+ } else {
+ HeapObjectMatcher matcher(node);
+ if (matcher.HasValue() && matcher.Ref(js_heap_broker()).IsString()) {
+ StringRef s = matcher.Ref(js_heap_broker()).AsString();
+ return new (shared_zone())
+ StringLiteral(s.object<String>(), static_cast<size_t>(s.length()));
+ } else {
+ UNREACHABLE();
+ }
+ }
+ }
+}
+
+namespace {
+bool IsStringConstant(JSHeapBroker* broker, Node* node) {
+ if (node->opcode() == IrOpcode::kDelayedStringConstant) {
+ return true;
+ }
+
+ HeapObjectMatcher matcher(node);
+ return matcher.HasValue() && matcher.Ref(broker).IsString();
+}
+}  // namespace
+
Reduction JSNativeContextSpecialization::ReduceJSAdd(Node* node) {
// TODO(turbofan): This has to run together with the inlining and
// native context specialization to be able to leverage the string
@@ -126,20 +215,30 @@ Reduction JSNativeContextSpecialization::ReduceJSAdd(Node* node) {
// nevertheless find a better home for this at some point.
DCHECK_EQ(IrOpcode::kJSAdd, node->opcode());
- // Constant-fold string concatenation.
- HeapObjectBinopMatcher m(node);
- if (m.left().HasValue() && m.left().Value()->IsString() &&
- m.right().HasValue() && m.right().Value()->IsString()) {
- Handle<String> left = Handle<String>::cast(m.left().Value());
- Handle<String> right = Handle<String>::cast(m.right().Value());
- if (left->length() + right->length() <= String::kMaxLength) {
- Handle<String> result =
- factory()->NewConsString(left, right).ToHandleChecked();
- Node* value = jsgraph()->HeapConstant(result);
- ReplaceWithValue(node, value);
- return Replace(value);
- }
+ Node* const lhs = node->InputAt(0);
+ Node* const rhs = node->InputAt(1);
+
+ base::Optional<size_t> lhs_len = GetMaxStringLength(js_heap_broker(), lhs);
+ base::Optional<size_t> rhs_len = GetMaxStringLength(js_heap_broker(), rhs);
+ if (!lhs_len || !rhs_len) {
+ return NoChange();
}
+
+  // Fold into DelayedStringConstant if at least one of the operands is a
+  // string constant and the addition cannot throw due to an overly long
+  // result.
+ if (*lhs_len + *rhs_len <= String::kMaxLength &&
+ (IsStringConstant(js_heap_broker(), lhs) ||
+ IsStringConstant(js_heap_broker(), rhs))) {
+ const StringConstantBase* left = CreateDelayedStringConstant(lhs);
+ const StringConstantBase* right = CreateDelayedStringConstant(rhs);
+ const StringConstantBase* cons =
+ new (shared_zone()) StringCons(left, right);
+
+ Node* reduced = graph()->NewNode(common()->DelayedStringConstant(cons));
+ ReplaceWithValue(node, reduced);
+ return Replace(reduced);
+ }
+
return NoChange();
}
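To make the fold above concrete: GetMaxStringLength yields an upper bound
per operand, and the StringCons is only built when those bounds prove the
concatenation cannot throw. A standalone sketch of the bound logic; the two
constants are assumptions (kBase10MaximalLength is 17 in src/dtoa.h, and
String::kMaxLength is taken as 2^30 - 25 here):

  #include <cstddef>
  #include <optional>
  #include <string>
  #include <variant>

  constexpr size_t kBase10MaximalLength = 17;            // assumed, from dtoa
  constexpr size_t kMaxLength = (size_t{1} << 30) - 25;  // assumed kMaxLength

  using Operand = std::variant<std::string, double, std::nullptr_t>;

  // Mirrors GetMaxStringLength: strings are exact, numbers get the
  // worst-case decimal length plus a sign, everything else blocks folding
  // (e.g. objects, whose toString may run arbitrary code).
  std::optional<size_t> MaxStringLength(const Operand& op) {
    if (auto* s = std::get_if<std::string>(&op)) return s->size();
    if (std::holds_alternative<double>(op)) return kBase10MaximalLength + 1;
    return std::nullopt;
  }

  // The patch additionally requires at least one operand to be a string
  // constant (IsStringConstant) before building the StringCons.
  bool CanFold(const Operand& lhs, const Operand& rhs) {
    auto l = MaxStringLength(lhs), r = MaxStringLength(rhs);
    return l && r && *l + *r <= kMaxLength;
  }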
@@ -151,15 +250,16 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
// Check if the input is a known JSFunction.
HeapObjectMatcher m(constructor);
if (!m.HasValue()) return NoChange();
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- Handle<Map> function_map(function->map(), isolate());
- Handle<Object> function_prototype(function_map->prototype(), isolate());
+ JSFunctionRef function = m.Ref(js_heap_broker()).AsJSFunction();
+ MapRef function_map = function.map();
+ ObjectRef function_prototype = function_map.prototype();
// We can constant-fold the super constructor access if the
// {function}s map is stable, i.e. we can use a code dependency
// to guard against [[Prototype]] changes of {function}.
- if (function_map->is_stable() && function_prototype->IsConstructor()) {
- dependencies()->DependOnStableMap(MapRef(js_heap_broker(), function_map));
+ if (function_map.is_stable() && function_prototype.IsHeapObject() &&
+ function_prototype.AsHeapObject().map().is_constructor()) {
+ dependencies()->DependOnStableMap(function_map);
Node* value = jsgraph()->Constant(function_prototype);
ReplaceWithValue(node, value);
return Replace(value);
@@ -405,28 +505,27 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
return reduction.Changed() ? reduction : Changed(node);
}
- // Check if the {constructor} is a JSFunction.
+ // Optimize if we currently know the "prototype" property.
if (m.Value()->IsJSFunction()) {
- // Check if the {function} is a constructor and has an instance "prototype".
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- if (function->IsConstructor() && function->has_prototype_slot() &&
- function->has_instance_prototype() &&
- function->prototype()->IsJSReceiver()) {
- // We need {function}'s initial map so that we can depend on it for the
- // prototype constant-folding below.
- if (!function->has_initial_map()) return NoChange();
- MapRef initial_map = dependencies()->DependOnInitialMap(
- JSFunctionRef(js_heap_broker(), function));
- Node* prototype = jsgraph()->Constant(
- handle(initial_map.object<Map>()->prototype(), isolate()));
-
- // Lower the {node} to JSHasInPrototypeChain.
- NodeProperties::ReplaceValueInput(node, object, 0);
- NodeProperties::ReplaceValueInput(node, prototype, 1);
- NodeProperties::ChangeOp(node, javascript()->HasInPrototypeChain());
- Reduction const reduction = ReduceJSHasInPrototypeChain(node);
- return reduction.Changed() ? reduction : Changed(node);
+ JSFunctionRef function = m.Ref(js_heap_broker()).AsJSFunction();
+ // TODO(neis): This is a temporary hack needed because the copy reducer
+ // runs only after this pass.
+ function.Serialize();
+ // TODO(neis): Remove the has_prototype_slot condition once the broker is
+ // always enabled.
+ if (!function.map().has_prototype_slot() || !function.has_prototype() ||
+ function.PrototypeRequiresRuntimeLookup()) {
+ return NoChange();
}
+ ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function);
+ Node* prototype_constant = jsgraph()->Constant(prototype);
+
+ // Lower the {node} to JSHasInPrototypeChain.
+ NodeProperties::ReplaceValueInput(node, object, 0);
+ NodeProperties::ReplaceValueInput(node, prototype_constant, 1);
+ NodeProperties::ChangeOp(node, javascript()->HasInPrototypeChain());
+ Reduction const reduction = ReduceJSHasInPrototypeChain(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
return NoChange();
@@ -444,9 +543,11 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
// Check if the {constructor} is the %Promise% function.
HeapObjectMatcher m(constructor);
- if (!m.Is(handle(native_context().object<Context>()->promise_function(),
- isolate())))
+ if (!m.HasValue() ||
+ !m.Ref(js_heap_broker())
+ .equals(js_heap_broker()->native_context().promise_function())) {
return NoChange();
+ }
// Check if we know something about the {value}.
ZoneHandleSet<Map> value_maps;
@@ -636,20 +737,19 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
property_cell_value_type = Type::Number();
representation = MachineRepresentation::kTaggedPointer;
} else {
- Handle<Map> property_cell_value_map(
- Handle<HeapObject>::cast(property_cell_value)->map(),
- isolate());
- property_cell_value_type =
- Type::For(js_heap_broker(), property_cell_value_map);
+ MapRef property_cell_value_map(
+ js_heap_broker(),
+ handle(HeapObject::cast(*property_cell_value)->map(),
+ isolate()));
+ property_cell_value_type = Type::For(property_cell_value_map);
representation = MachineRepresentation::kTaggedPointer;
// We can only use the property cell value map for map check
// elimination if it's stable, i.e. the HeapObject wasn't
// mutated without the cell state being updated.
- if (property_cell_value_map->is_stable()) {
- dependencies()->DependOnStableMap(
- MapRef(js_heap_broker(), property_cell_value_map));
- map = property_cell_value_map;
+ if (property_cell_value_map.is_stable()) {
+ dependencies()->DependOnStableMap(property_cell_value_map);
+ map = property_cell_value_map.object<Map>();
}
}
}
@@ -752,8 +852,8 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
native_context().script_context_table().lookup(name);
if (result) {
ObjectRef contents = result->context.get(result->index);
- OddballType oddball_type = contents.oddball_type();
- if (oddball_type == OddballType::kHole) {
+ if (contents.IsHeapObject() &&
+ contents.AsHeapObject().map().oddball_type() == OddballType::kHole) {
return NoChange();
}
Node* context = jsgraph()->Constant(result->context);
@@ -781,8 +881,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
native_context().script_context_table().lookup(name);
if (result) {
ObjectRef contents = result->context.get(result->index);
- OddballType oddball_type = contents.oddball_type();
- if (oddball_type == OddballType::kHole || result->immutable) {
+ if ((contents.IsHeapObject() &&
+ contents.AsHeapObject().map().oddball_type() == OddballType::kHole) ||
+ result->immutable) {
return NoChange();
}
Node* context = jsgraph()->Constant(result->context);
@@ -1002,6 +1103,16 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
this_effect = graph()->NewNode(simplified()->MapGuard(maps), receiver,
this_effect, this_control);
}
+
+ // If all {receiver_maps} are Strings we also need to rename the
+ // {receiver} here to make sure that TurboFan knows that along this
+ // path the {this_receiver} is a String. This is because we want
+ // strict checking of types, for example for StringLength operators.
+ if (HasOnlyStringMaps(receiver_maps)) {
+ this_receiver = this_effect =
+ graph()->NewNode(common()->TypeGuard(Type::String()), receiver,
+ this_effect, this_control);
+ }
}
// Generate the actual property access.
@@ -1104,6 +1215,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
p.name().is_identical_to(factory()->prototype_string())) {
// Optimize "prototype" property of functions.
JSFunctionRef function = m.Ref(js_heap_broker()).AsJSFunction();
+ // TODO(neis): This is a temporary hack needed because the copy reducer
+ // runs only after this pass.
+ function.Serialize();
// TODO(neis): Remove the has_prototype_slot condition once the broker is
// always enabled.
if (!function.map().has_prototype_slot() || !function.has_prototype() ||
@@ -1835,6 +1949,8 @@ JSNativeContextSpecialization::BuildPropertyLoad(
value = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
+ } else if (access_info.IsStringLength()) {
+ value = graph()->NewNode(simplified()->StringLength(), receiver);
} else {
DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
value = access_builder.BuildLoadDataField(name, access_info, receiver,
@@ -2095,8 +2211,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
if (!Map::TryUpdate(isolate(), receiver_map).ToHandle(&receiver_map))
return NoChange();
- Handle<Name> cached_name = handle(
- Name::cast(nexus.GetFeedbackExtra()->ToStrongHeapObject()), isolate());
+ Handle<Name> cached_name =
+ handle(Name::cast(nexus.GetFeedbackExtra()->GetHeapObjectAssumeStrong()),
+ isolate());
PropertyAccessInfo access_info;
AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
@@ -2215,6 +2332,15 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
UNREACHABLE();
}
+MaybeHandle<JSTypedArray> GetTypedArrayConstant(Node* receiver) {
+ HeapObjectMatcher m(receiver);
+ if (!m.HasValue()) return MaybeHandle<JSTypedArray>();
+ if (!m.Value()->IsJSTypedArray()) return MaybeHandle<JSTypedArray>();
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(m.Value());
+ if (typed_array->is_on_heap()) return MaybeHandle<JSTypedArray>();
+ return typed_array;
+}
+
} // namespace
JSNativeContextSpecialization::ValueEffectControl
@@ -2236,17 +2362,12 @@ JSNativeContextSpecialization::BuildElementAccess(
// Check if we can constant-fold information about the {receiver} (i.e.
// for asm.js-like code patterns).
- HeapObjectMatcher m(receiver);
- if (m.HasValue() && m.Value()->IsJSTypedArray()) {
- Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(m.Value());
-
- // Determine the {receiver}s (known) length.
+ Handle<JSTypedArray> typed_array;
+ if (GetTypedArrayConstant(receiver).ToHandle(&typed_array)) {
+ buffer = jsgraph()->HeapConstant(typed_array->GetBuffer());
length =
jsgraph()->Constant(static_cast<double>(typed_array->length_value()));
- // Check if the {receiver}s buffer was neutered.
- buffer = jsgraph()->HeapConstant(typed_array->GetBuffer());
-
// Load the (known) base and external pointer for the {receiver}. The
// {external_pointer} might be invalid if the {buffer} was neutered, so
// we need to make sure that any access is properly guarded.
@@ -2298,12 +2419,20 @@ JSNativeContextSpecialization::BuildElementAccess(
dependencies()->DependOnProtector(PropertyCellRef(
js_heap_broker(), factory()->array_buffer_neutering_protector()));
} else {
- // Default to zero if the {receiver}s buffer was neutered.
- Node* check = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- length = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, jsgraph()->ZeroConstant(), length);
+ // Deopt if the {buffer} was neutered.
+ // Note: A neutered buffer leads to megamorphic feedback.
+ Node* buffer_bit_field = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+ buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), buffer_bit_field,
+ jsgraph()->Constant(JSArrayBuffer::WasNeuteredBit::kMask)),
+ jsgraph()->ZeroConstant());
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered),
+ check, effect, control);
}
if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS ||
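The replacement above turns the old select-to-zero fallback into an explicit
deopt: load the buffer's bit field, mask the was-neutered bit, and bail out
of optimized code if it is set. A standalone sketch of the predicate the
graph encodes, with the bit position as a placeholder (the real mask comes
from JSArrayBuffer::WasNeuteredBit::kMask):

  #include <cstdint>

  constexpr uint32_t kWasNeuteredMask = uint32_t{1} << 3;  // placeholder bit

  // NumberEqual(NumberBitwiseAnd(bit_field, kMask), 0) in the graph above:
  // true means the buffer is still attached and the access may proceed;
  // false makes CheckIf deopt with kArrayBufferWasNeutered.
  bool BufferStillAttached(uint32_t buffer_bit_field) {
    return (buffer_bit_field & kWasNeuteredMask) == 0;
  }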
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 413e3c191f..0bd62e07c9 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -16,6 +16,9 @@ namespace internal {
// Forward declarations.
class Factory;
class FeedbackNexus;
+class JSGlobalObject;
+class JSGlobalProxy;
+class StringConstantBase;
namespace compiler {
@@ -36,7 +39,8 @@ class TypeCache;
// folding some {LoadGlobal} nodes or strength reducing some {StoreGlobal}
// nodes. And also specializes {LoadNamed} and {StoreNamed} nodes according
// to type feedback (if available).
-class JSNativeContextSpecialization final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
+ : public AdvancedReducer {
public:
// Flags that control the mode of operation.
enum Flag {
@@ -50,7 +54,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
JSHeapBroker* js_heap_broker, Flags flags,
Handle<Context> native_context,
CompilationDependencies* dependencies,
- Zone* zone);
+ Zone* zone, Zone* shared_zone);
const char* reducer_name() const override {
return "JSNativeContextSpecialization";
@@ -58,6 +62,12 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
+ // Utility for folding string constant concatenation.
+ // Supports JSAdd nodes and nodes typed as string or number.
+ // Public for the sake of unit testing.
+ static base::Optional<size_t> GetMaxStringLength(JSHeapBroker* broker,
+ Node* node);
+
private:
Reduction ReduceJSAdd(Node* node);
Reduction ReduceJSGetSuperConstructor(Node* node);
@@ -101,6 +111,9 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Node* index = nullptr);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
+ Reduction ReduceJSToString(Node* node);
+
+ const StringConstantBase* CreateDelayedStringConstant(Node* node);
// A triple of nodes that represents a continuation.
class ValueEffectControl final {
@@ -230,6 +243,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
const NativeContextRef& native_context() const { return native_context_; }
CompilationDependencies* dependencies() const { return dependencies_; }
Zone* zone() const { return zone_; }
+ Zone* shared_zone() const { return shared_zone_; }
JSGraph* const jsgraph_;
JSHeapBroker* const js_heap_broker_;
@@ -239,6 +253,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
NativeContextRef native_context_;
CompilationDependencies* const dependencies_;
Zone* const zone_;
+ Zone* const shared_zone_;
TypeCache const& type_cache_;
DISALLOW_COPY_AND_ASSIGN(JSNativeContextSpecialization);
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index acfc0d3a19..a30b4ddcdd 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -266,7 +266,7 @@ NamedAccess const& NamedAccessOf(const Operator* op) {
std::ostream& operator<<(std::ostream& os, PropertyAccess const& p) {
- return os << p.language_mode();
+ return os << p.language_mode() << ", " << p.feedback();
}
@@ -609,7 +609,6 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(Decrement, Operator::kNoProperties, 1, 1) \
V(Increment, Operator::kNoProperties, 1, 1) \
V(Negate, Operator::kNoProperties, 1, 1) \
- V(ToInteger, Operator::kNoProperties, 1, 1) \
V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
@@ -1191,6 +1190,14 @@ const Operator* JSOperatorBuilder::CreateEmptyLiteralArray(
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CreateArrayFromIterable() {
+ return new (zone()) Operator( // --
+ IrOpcode::kJSCreateArrayFromIterable, // opcode
+ Operator::kNoProperties, // properties
+ "JSCreateArrayFromIterable", // name
+ 1, 1, 1, 1, 1, 2); // counts
+}
+
const Operator* JSOperatorBuilder::CreateLiteralObject(
Handle<ObjectBoilerplateDescription> constant_properties,
VectorSlotPair const& feedback, int literal_flags,
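For readers decoding the count list in CreateArrayFromIterable above: the
six numbers follow the Operator constructor order used throughout this file
(assumed here from compiler/operator.h of this V8 version):

  // Operator(opcode, properties, mnemonic,
  //          value_in, effect_in, control_in,
  //          value_out, effect_out, control_out)
  //
  // "1, 1, 1, 1, 1, 2": one value input (the iterable), one value output
  // (the new array), and two control outputs because a JS operator can
  // throw, yielding both IfSuccess and IfException projections.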
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index a81b187c7b..db38941219 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -52,6 +52,8 @@ class CallFrequency final {
return bit_cast<uint32_t>(f.value_);
}
+ static constexpr float kNoFeedbackCallFrequency = -1;
+
private:
float value_;
};
@@ -703,7 +705,6 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Increment();
const Operator* Negate();
- const Operator* ToInteger();
const Operator* ToLength();
const Operator* ToName();
const Operator* ToNumber();
@@ -733,6 +734,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
VectorSlotPair const& feedback, int literal_flags,
int number_of_elements);
const Operator* CreateEmptyLiteralArray(VectorSlotPair const& feedback);
+ const Operator* CreateArrayFromIterable();
const Operator* CreateEmptyLiteralObject();
const Operator* CreateLiteralObject(
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 56b1f224c7..7b3728428b 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -137,15 +137,20 @@ class JSBinopReduction final {
}
}
+ // Inserts a CheckSymbol for the left input.
+ void CheckLeftInputToSymbol() {
+ Node* left_input = graph()->NewNode(simplified()->CheckSymbol(), left(),
+ effect(), control());
+ node_->ReplaceInput(0, left_input);
+ update_effect(left_input);
+ }
+
// Checks that both inputs are Symbol, and if we don't know
// statically that one side is already a Symbol, insert a
// CheckSymbol node.
void CheckInputsToSymbol() {
if (!left_type().Is(Type::Symbol())) {
- Node* left_input = graph()->NewNode(simplified()->CheckSymbol(), left(),
- effect(), control());
- node_->ReplaceInput(0, left_input);
- update_effect(left_input);
+ CheckLeftInputToSymbol();
}
if (!right_type().Is(Type::Symbol())) {
Node* right_input = graph()->NewNode(simplified()->CheckSymbol(), right(),
@@ -374,7 +379,7 @@ class JSBinopReduction final {
Node* ConvertPlainPrimitiveToNumber(Node* node) {
DCHECK(NodeProperties::GetType(node).Is(Type::PlainPrimitive()));
// Avoid inserting too many eager ToNumber() operations.
- Reduction const reduction = lowering_->ReduceJSToNumberOrNumericInput(node);
+ Reduction const reduction = lowering_->ReduceJSToNumberInput(node);
if (reduction.Changed()) return reduction.replacement();
if (NodeProperties::GetType(node).Is(Type::Number())) {
return node;
@@ -509,76 +514,123 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
+
+ // Strength-reduce if one input is already known to be a string.
+ if (r.LeftInputIs(Type::String())) {
+ // JSAdd(x:string, y) => JSAdd(x, JSToString(y))
+ Reduction const reduction = ReduceJSToStringInput(r.right());
+ if (reduction.Changed()) {
+ NodeProperties::ReplaceValueInput(node, reduction.replacement(), 1);
+ }
+ } else if (r.RightInputIs(Type::String())) {
+ // JSAdd(x, y:string) => JSAdd(JSToString(x), y)
+ Reduction const reduction = ReduceJSToStringInput(r.left());
+ if (reduction.Changed()) {
+ NodeProperties::ReplaceValueInput(node, reduction.replacement(), 0);
+ }
+ }
+
+ // Always bake in String feedback into the graph.
if (BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
- // Always bake in String feedback into the graph.
- // TODO(bmeurer): Consider adding a SpeculativeStringAdd operator,
- // and use that in JSTypeHintLowering instead of looking at the
- // binary operation feedback here.
r.CheckInputsToString();
}
- if (r.OneInputIs(Type::String())) {
- // We know that (at least) one input is already a String,
- // so try to strength-reduce the non-String input.
- if (r.LeftInputIs(Type::String())) {
- Reduction const reduction = ReduceJSToStringInput(r.right());
- if (reduction.Changed()) {
- NodeProperties::ReplaceValueInput(node, reduction.replacement(), 1);
- }
- } else if (r.RightInputIs(Type::String())) {
- Reduction const reduction = ReduceJSToStringInput(r.left());
- if (reduction.Changed()) {
- NodeProperties::ReplaceValueInput(node, reduction.replacement(), 0);
- }
+
+ // Strength-reduce concatenation of empty strings if both sides are
+ // primitives, as in that case the ToPrimitive on the other side is
+ // definitely going to be a no-op.
+ if (r.BothInputsAre(Type::Primitive())) {
+ if (r.LeftInputIs(empty_string_type_)) {
+ // JSAdd("", x:primitive) => JSToString(x)
+ NodeProperties::ReplaceValueInputs(node, r.right());
+ NodeProperties::ChangeOp(node, javascript()->ToString());
+ Reduction const reduction = ReduceJSToString(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ } else if (r.RightInputIs(empty_string_type_)) {
+ // JSAdd(x:primitive, "") => JSToString(x)
+ NodeProperties::ReplaceValueInputs(node, r.left());
+ NodeProperties::ChangeOp(node, javascript()->ToString());
+ Reduction const reduction = ReduceJSToString(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
- // We might be able to constant-fold the String concatenation now.
- if (r.BothInputsAre(Type::String())) {
- HeapObjectBinopMatcher m(node);
- if (m.IsFoldable()) {
- StringRef left = m.left().Ref(js_heap_broker()).AsString();
- StringRef right = m.right().Ref(js_heap_broker()).AsString();
- if (left.length() + right.length() > String::kMaxLength) {
- // No point in trying to optimize this, as it will just throw.
- return NoChange();
+ }
+
+ // Lower to string addition if both inputs are known to be strings.
+ if (r.BothInputsAre(Type::String())) {
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Compute the resulting length.
+ Node* left_length =
+ graph()->NewNode(simplified()->StringLength(), r.left());
+ Node* right_length =
+ graph()->NewNode(simplified()->StringLength(), r.right());
+ Node* length =
+ graph()->NewNode(simplified()->NumberAdd(), left_length, right_length);
+
+ if (isolate()->IsStringLengthOverflowIntact()) {
+ // We can just deoptimize if the {length} is out-of-bounds. Besides
+ // generating a shorter code sequence than the version below, this
+ // has the additional benefit of not holding on to the lazy {frame_state}
+ // and thus potentially reduces the number of live ranges and allows for
+ // more truncations.
+ length = effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), length,
+ jsgraph()->Constant(String::kMaxLength + 1), effect, control);
+ } else {
+ // Check if we would overflow the allowed maximum string length.
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
+ jsgraph()->Constant(String::kMaxLength));
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Throw a RangeError in case of overflow.
+ Node* vfalse = efalse = if_false = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowInvalidStringLength),
+ context, frame_state, efalse, if_false);
+
+ // Update potential {IfException} uses of {node} to point to the
+ // %ThrowInvalidStringLength runtime call node instead.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, vfalse);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse);
+ if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
+ Revisit(on_exception);
}
- // TODO(mslekova): get rid of these allows by doing either one of:
- // 1. remove the optimization and check if it ruins the performance
- // 2. leave a placeholder and do the actual allocations once back on the
- // MT
- AllowHandleDereference allow_handle_dereference;
- AllowHandleAllocation allow_handle_allocation;
- AllowHeapAllocation allow_heap_allocation;
- ObjectRef cons(
- js_heap_broker(),
- factory()
- ->NewConsString(left.object<String>(), right.object<String>())
- .ToHandleChecked());
- Node* value = jsgraph()->Constant(cons);
- ReplaceWithValue(node, value);
- return Replace(value);
- }
- }
- // We might know for sure that we're creating a ConsString here.
- if (r.ShouldCreateConsString()) {
- return ReduceCreateConsString(node);
- }
- // Eliminate useless concatenation of empty string.
- if (r.BothInputsAre(Type::String())) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- if (r.LeftInputIs(empty_string_type_)) {
- Node* value = effect =
- graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
- r.right(), effect, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- } else if (r.RightInputIs(empty_string_type_)) {
- Node* value = effect =
- graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
- r.left(), effect, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+
+ // The above %ThrowInvalidStringLength runtime call is an unconditional
+ // throw, making it impossible to return a successful completion in this
+ // case. We simply connect the successful completion to the graph end.
+ if_false = graph()->NewNode(common()->Throw(), efalse, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), if_false);
+ Revisit(graph()->end());
}
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ length = effect =
+ graph()->NewNode(common()->TypeGuard(type_cache_.kStringLengthType),
+ length, effect, control);
}
+
+ // TODO(bmeurer): Ideally this should always use StringConcat and decide to
+ // optimize to NewConsString later during SimplifiedLowering, but for that
+ // to work we need to know that it's safe to create a ConsString.
+ Operator const* const op = r.ShouldCreateConsString()
+ ? simplified()->NewConsString()
+ : simplified()->StringConcat();
+ Node* value = graph()->NewNode(op, length, r.left(), r.right());
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+ // We never get here when we had String feedback.
+ DCHECK_NE(BinaryOperationHint::kString, BinaryOperationHintOf(node->op()));
+ if (r.OneInputIs(Type::String())) {
StringAddFlags flags = STRING_ADD_CHECK_NONE;
if (!r.LeftInputIs(Type::String())) {
flags = STRING_ADD_CONVERT_LEFT;
@@ -592,12 +644,13 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
// effects; it can still throw obviously.
properties = Operator::kNoWrite | Operator::kNoDeopt;
}
+
// JSAdd(x:string, y) => CallStub[StringAdd](x, y)
// JSAdd(x, y:string) => CallStub[StringAdd](x, y)
- Callable const callable =
- CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
+ Callable const callable = CodeFactory::StringAdd(isolate(), flags);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0,
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(),
CallDescriptor::kNeedsFrameState, properties);
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
node->InsertInput(graph()->zone(), 0,
@@ -605,8 +658,6 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
- // We never get here when we had String feedback.
- DCHECK_NE(BinaryOperationHint::kString, BinaryOperationHintOf(node->op()));
return NoChange();
}
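The inlined length handling above deserves a worked form. With the
string-length-overflow protector intact, a single CheckBounds against
String::kMaxLength + 1 both guards and renames the computed length, deopting
exactly when the concatenation would throw; otherwise an explicit branch
calls %ThrowInvalidStringLength. The result is then built with NewConsString
when a cons string is known to be safe, else StringConcat. A minimal sketch
of the protector-intact path, with kMaxLength as an assumption:

  #include <cstdint>
  #include <stdexcept>

  constexpr uint64_t kMaxLength = (uint64_t{1} << 30) - 25;  // assumed

  // CheckBounds(length, kMaxLength + 1) passes iff length < kMaxLength + 1,
  // i.e. length <= kMaxLength; in compiled code failure is a deopt, not a
  // C++ exception.
  uint64_t CheckedConcatLength(uint32_t left_len, uint32_t right_len) {
    uint64_t length = uint64_t{left_len} + right_len;
    if (length >= kMaxLength + 1) {
      throw std::length_error("deopt: String::kMaxLength exceeded");
    }
    return length;
  }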
@@ -657,103 +708,6 @@ Reduction JSTypedLowering::ReduceUI32Shift(Node* node, Signedness signedness) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
- Node* first = NodeProperties::GetValueInput(node, 0);
- Node* second = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Make sure {first} is actually a String.
- Type first_type = NodeProperties::GetType(first);
- if (!first_type.Is(Type::String())) {
- first = effect = graph()->NewNode(
- simplified()->CheckString(VectorSlotPair()), first, effect, control);
- first_type = NodeProperties::GetType(first);
- }
-
- // Make sure {second} is actually a String.
- Type second_type = NodeProperties::GetType(second);
- if (!second_type.Is(Type::String())) {
- second = effect = graph()->NewNode(
- simplified()->CheckString(VectorSlotPair()), second, effect, control);
- second_type = NodeProperties::GetType(second);
- }
-
- // Determine the {first} length.
- Node* first_length = BuildGetStringLength(first);
- Node* second_length = BuildGetStringLength(second);
-
- // Compute the resulting length.
- Node* length =
- graph()->NewNode(simplified()->NumberAdd(), first_length, second_length);
-
- if (isolate()->IsStringLengthOverflowIntact()) {
- // We can just deoptimize if the {length} is out-of-bounds. Besides
- // generating a shorter code sequence than the version below, this
- // has the additional benefit of not holding on to the lazy {frame_state}
- // and thus potentially reduces the number of live ranges and allows for
- // more truncations.
- length = effect = graph()->NewNode(
- simplified()->CheckBounds(VectorSlotPair()), length,
- jsgraph()->Constant(String::kMaxLength), effect, control);
- } else {
- // Check if we would overflow the allowed maximum string length.
- Node* check =
- graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
- jsgraph()->Constant(String::kMaxLength));
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- {
- // Throw a RangeError in case of overflow.
- Node* vfalse = efalse = if_false = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowInvalidStringLength),
- context, frame_state, efalse, if_false);
-
- // Update potential {IfException} uses of {node} to point to the
- // %ThrowInvalidStringLength runtime call node instead.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- NodeProperties::ReplaceControlInput(on_exception, vfalse);
- NodeProperties::ReplaceEffectInput(on_exception, efalse);
- if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
- Revisit(on_exception);
- }
-
- // The above %ThrowInvalidStringLength runtime call is an unconditional
- // throw, making it impossible to return a successful completion in this
- // case. We simply connect the successful completion to the graph end.
- if_false = graph()->NewNode(common()->Throw(), efalse, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), if_false);
- Revisit(graph()->end());
- }
- control = graph()->NewNode(common()->IfTrue(), branch);
- length = effect =
- graph()->NewNode(common()->TypeGuard(type_cache_.kStringLengthType),
- length, effect, control);
- }
-
- Node* value =
- graph()->NewNode(simplified()->NewConsString(), length, first, second);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-Node* JSTypedLowering::BuildGetStringLength(Node* value) {
- // TODO(bmeurer): Get rid of this hack and instead have a way to
- // express the string length in the types.
- HeapObjectMatcher m(value);
- if (!m.HasValue() || !m.Ref(js_heap_broker()).IsString()) {
- return graph()->NewNode(simplified()->StringLength(), value);
- }
-
- return jsgraph()->Constant(m.Ref(js_heap_broker()).AsString().length());
-}
-
Reduction JSTypedLowering::ReduceSpeculativeNumberComparison(Node* node) {
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Signed32()) ||
@@ -922,30 +876,22 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
} else if (r.IsReceiverCompareOperation()) {
// For strict equality, it's enough to know that one input is a Receiver,
// as a strict equality comparison with a Receiver can only yield true if
- // both sides refer to the same Receiver than.
+ // both sides refer to the same Receiver.
r.CheckLeftInputToReceiver();
return r.ChangeToPureOperator(simplified()->ReferenceEqual());
} else if (r.IsStringCompareOperation()) {
r.CheckInputsToString();
return r.ChangeToPureOperator(simplified()->StringEqual());
} else if (r.IsSymbolCompareOperation()) {
- r.CheckInputsToSymbol();
+ // For strict equality, it's enough to know that one input is a Symbol,
+ // as a strict equality comparison with a Symbol can only yield true if
+ // both sides refer to the same Symbol.
+ r.CheckLeftInputToSymbol();
return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSToInteger(Node* node) {
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Type const input_type = NodeProperties::GetType(input);
- if (input_type.Is(type_cache_.kIntegerOrMinusZero)) {
- // JSToInteger(x:integer) => x
- ReplaceWithValue(node, input);
- return Replace(input);
- }
- return NoChange();
-}
-
Reduction JSTypedLowering::ReduceJSToName(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type const input_type = NodeProperties::GetType(input);
@@ -981,9 +927,8 @@ Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSToNumberOrNumericInput(Node* input) {
- // Try constant-folding of JSToNumber/JSToNumeric with constant inputs. Here
- // we only cover cases where ToNumber and ToNumeric coincide.
+Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
+ // Try constant-folding of JSToNumber with constant inputs.
Type input_type = NodeProperties::GetType(input);
if (input_type.Is(Type::String())) {
@@ -996,8 +941,8 @@ Reduction JSTypedLowering::ReduceJSToNumberOrNumericInput(Node* input) {
}
}
if (input_type.IsHeapConstant()) {
- ObjectRef input_value = input_type.AsHeapConstant()->Ref();
- if (input_value.oddball_type() != OddballType::kNone) {
+ HeapObjectRef input_value = input_type.AsHeapConstant()->Ref();
+ if (input_value.map().oddball_type() != OddballType::kNone) {
return Replace(jsgraph()->Constant(input_value.OddballToNumber()));
}
}
@@ -1016,10 +961,10 @@ Reduction JSTypedLowering::ReduceJSToNumberOrNumericInput(Node* input) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSToNumberOrNumeric(Node* node) {
+Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
// Try to reduce the input first.
Node* const input = node->InputAt(0);
- Reduction reduction = ReduceJSToNumberOrNumericInput(input);
+ Reduction reduction = ReduceJSToNumberInput(input);
if (reduction.Changed()) {
ReplaceWithValue(node, reduction.replacement());
return reduction;
@@ -1035,7 +980,18 @@ Reduction JSTypedLowering::ReduceJSToNumberOrNumeric(Node* node) {
NodeProperties::ChangeOp(node, simplified()->PlainPrimitiveToNumber());
return Changed(node);
}
- // TODO(neis): Reduce ToNumeric to ToNumber if input can't be BigInt?
+ return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceJSToNumeric(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type const input_type = NodeProperties::GetType(input);
+ if (input_type.Is(Type::NonBigIntPrimitive())) {
+ // ToNumeric(x:primitive\bigint) => ToNumber(x)
+ NodeProperties::ChangeOp(node, javascript()->ToNumber());
+ Reduction const reduction = ReduceJSToNumber(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
return NoChange();
}
@@ -1065,20 +1021,6 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
if (input_type.Is(Type::NaN())) {
return Replace(jsgraph()->HeapConstant(factory()->NaN_string()));
}
- if (input_type.Is(Type::OrderedNumber()) &&
- input_type.Min() == input_type.Max()) {
- // TODO(mslekova): get rid of these allows by doing either one of:
- // 1. remove the optimization and check if it ruins the performance
- // 2. allocate all the ToString's from numbers before the compilation
- // 3. leave a placeholder and do the actual allocations once back on the MT
- AllowHandleDereference allow_handle_dereference;
- AllowHandleAllocation allow_handle_allocation;
- AllowHeapAllocation allow_heap_allocation;
- // Note that we can use Type::OrderedNumber(), since
- // both 0 and -0 map to the String "0" in JavaScript.
- return Replace(jsgraph()->HeapConstant(
- factory()->NumberToString(factory()->NewNumber(input_type.Min()))));
- }
if (input_type.Is(Type::Number())) {
return Replace(graph()->NewNode(simplified()->NumberToString(), input));
}
@@ -1126,7 +1068,8 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
// Convert {receiver} using the ToObjectStub.
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0,
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(),
CallDescriptor::kNeedsFrameState, node->op()->properties());
rfalse = efalse = if_false =
graph()->NewNode(common()->Call(call_descriptor),
@@ -1555,7 +1498,7 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
target_type.AsHeapConstant()->Ref().IsJSFunction()) {
// Only optimize [[Construct]] here if {function} is a Constructor.
JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction();
- if (!function.IsConstructor()) return NoChange();
+ if (!function.map().is_constructor()) return NoChange();
// Patch {node} to an indirect call via ConstructFunctionForwardVarargs.
Callable callable = CodeFactory::ConstructFunctionForwardVarargs(isolate());
node->RemoveInput(arity + 1);
@@ -1591,7 +1534,7 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
SharedFunctionInfoRef shared = function.shared();
// Only optimize [[Construct]] here if {function} is a Constructor.
- if (!function.IsConstructor()) return NoChange();
+ if (!function.map().is_constructor()) return NoChange();
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
@@ -1744,7 +1687,8 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
node->InsertInput(graph()->zone(), arity + 3, argument_count);
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
- graph()->zone(), false, 1 + arity, flags)));
+ graph()->zone(), false, 1 + arity,
+ flags | CallDescriptor::kCanUseRoots)));
}
return Changed(node);
}
@@ -1849,7 +1793,8 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Callable const callable =
Builtins::CallableFor(isolate(), Builtins::kForInFilter);
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 0,
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(),
CallDescriptor::kNeedsFrameState);
vfalse = efalse = if_false =
graph()->NewNode(common()->Call(call_descriptor),
@@ -2314,16 +2259,15 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSHasInPrototypeChain(node);
case IrOpcode::kJSOrdinaryHasInstance:
return ReduceJSOrdinaryHasInstance(node);
- case IrOpcode::kJSToInteger:
- return ReduceJSToInteger(node);
case IrOpcode::kJSToLength:
return ReduceJSToLength(node);
case IrOpcode::kJSToName:
return ReduceJSToName(node);
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToNumberConvertBigInt:
+ return ReduceJSToNumber(node);
case IrOpcode::kJSToNumeric:
- return ReduceJSToNumberOrNumeric(node);
+ return ReduceJSToNumeric(node);
case IrOpcode::kJSToString:
return ReduceJSToString(node);
case IrOpcode::kJSToObject:
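
Note on the js-typed-lowering.cc changes above: the new ReduceJSToNumeric strengthens ToNumeric to ToNumber whenever the input type provably excludes BigInt, since the two conversions only disagree on BigInt values. A minimal standalone sketch of that dispatch, using hypothetical stand-in enums rather than V8's real Type and Operator classes:

    #include <cassert>

    // Stand-ins for V8's type lattice and operator set (illustrative only).
    enum class Ty { kNonBigIntPrimitive, kBigInt, kAny };
    enum class Op { kJSToNumeric, kJSToNumber };

    struct Node {
      Op op;
      Ty input_type;
    };

    // Mirrors the diff's rule: ToNumeric(x:primitive\bigint) => ToNumber(x),
    // after which the ToNumber-specific reductions get a chance to fire.
    bool ReduceJSToNumeric(Node* node) {
      if (node->input_type != Ty::kNonBigIntPrimitive) return false;  // NoChange
      node->op = Op::kJSToNumber;
      return true;  // Changed
    }

    int main() {
      Node n{Op::kJSToNumeric, Ty::kNonBigIntPrimitive};
      assert(ReduceJSToNumeric(&n) && n.op == Op::kJSToNumber);
      Node big{Op::kJSToNumeric, Ty::kBigInt};
      assert(!ReduceJSToNumeric(&big));  // BigInt inputs must stay ToNumeric
      return 0;
    }

The real reducer then re-runs the ToNumber rules on the rewritten node, so constant and plain-primitive inputs still fold exactly as before.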
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index c3bef9aeed..e25e092453 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -33,7 +33,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
public:
JSTypedLowering(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* js_heap_broker, Zone* zone);
- ~JSTypedLowering() final {}
+ ~JSTypedLowering() final = default;
const char* reducer_name() const override { return "JSTypedLowering"; }
@@ -57,11 +57,11 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSStoreModule(Node* node);
Reduction ReduceJSEqual(Node* node);
Reduction ReduceJSStrictEqual(Node* node);
- Reduction ReduceJSToInteger(Node* node);
Reduction ReduceJSToLength(Node* node);
Reduction ReduceJSToName(Node* node);
- Reduction ReduceJSToNumberOrNumericInput(Node* input);
- Reduction ReduceJSToNumberOrNumeric(Node* node);
+ Reduction ReduceJSToNumberInput(Node* input);
+ Reduction ReduceJSToNumber(Node* node);
+ Reduction ReduceJSToNumeric(Node* node);
Reduction ReduceJSToStringInput(Node* input);
Reduction ReduceJSToString(Node* node);
Reduction ReduceJSToObject(Node* node);
@@ -81,7 +81,6 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceNumberBinop(Node* node);
Reduction ReduceInt32Binop(Node* node);
Reduction ReduceUI32Shift(Node* node, Signedness signedness);
- Reduction ReduceCreateConsString(Node* node);
Reduction ReduceSpeculativeNumberAdd(Node* node);
Reduction ReduceSpeculativeNumberMultiply(Node* node);
Reduction ReduceSpeculativeNumberBinop(Node* node);
@@ -92,9 +91,6 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
// Helper for ReduceJSLoadModule and ReduceJSStoreModule.
Node* BuildGetModuleCell(Node* node);
- // Helpers for ReduceJSCreateConsString.
- Node* BuildGetStringLength(Node* value);
-
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 99c52b1ade..9bba09329d 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -143,7 +143,7 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
SharedFunctionInfo* shared = info->closure()->shared();
return GetJSCallDescriptor(zone, info->is_osr(),
1 + shared->internal_formal_parameter_count(),
- CallDescriptor::kNoFlags);
+ CallDescriptor::kCanUseRoots);
}
return nullptr; // TODO(titzer): ?
}
@@ -167,7 +167,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kPushCatchContext:
case Runtime::kReThrow:
case Runtime::kStringEqual:
- case Runtime::kStringNotEqual:
case Runtime::kStringLessThan:
case Runtime::kStringLessThanOrEqual:
case Runtime::kStringGreaterThan:
@@ -180,7 +179,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
// Some inline intrinsics are also safe to call without a FrameState.
case Runtime::kInlineCreateIterResultObject:
case Runtime::kInlineGeneratorClose:
- case Runtime::kInlineGeneratorGetInputOrDebugPos:
case Runtime::kInlineGeneratorGetResumeMode:
case Runtime::kInlineCreateJSGeneratorObject:
case Runtime::kInlineIsArray:
@@ -333,12 +331,16 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved
kNoCalleeSaved, // callee-saved fp
- CallDescriptor::kCanUseRoots | // flags
- flags, // flags
+ flags, // flags
"js-call");
}
// TODO(turbofan): cache call descriptors for code stub calls.
+// TODO(jgruber): Clean up stack parameter count handling. The descriptor
+// already knows the formal stack parameter count and ideally only additional
+// stack parameters should be passed into this method. All call-sites should
+// be audited for correctness (e.g. many used to assume a stack parameter count
+// of 0).
CallDescriptor* Linkage::GetStubCallDescriptor(
Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
@@ -350,6 +352,8 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
const size_t parameter_count =
static_cast<size_t>(js_parameter_count + context_count);
+ DCHECK_GE(stack_parameter_count, descriptor.GetStackParameterCount());
+
size_t return_count = descriptor.GetReturnCount();
LocationSignature::Builder locations(zone, return_count, parameter_count);
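
The DCHECK added to GetStubCallDescriptor encodes the contract the TODO(jgruber) comment describes: callers may pass additional stack parameters, but never fewer than the descriptor's own formal count, which is why the call sites touched in this patch now forward GetStackParameterCount() instead of a literal 0. A toy restatement of that contract (Descriptor here is a hypothetical stand-in, not the real CallInterfaceDescriptor):

    #include <cassert>

    struct Descriptor {
      int formal_stack_params;
      int GetStackParameterCount() const { return formal_stack_params; }
    };

    // The tightened contract from the new DCHECK_GE.
    void CheckStubCallContract(const Descriptor& d, int stack_parameter_count) {
      assert(stack_parameter_count >= d.GetStackParameterCount());
    }

    int main() {
      Descriptor stub{2};  // illustrative count, not a real descriptor's value
      // Updated call sites forward the descriptor's own count instead of 0:
      CheckStubCallContract(stub, stub.GetStackParameterCount());
      return 0;
    }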
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 6d6cfafdbf..46966b552f 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -99,8 +99,6 @@ Reduction LoadElimination::Reduce(Node* node) {
}
}
switch (node->opcode()) {
- case IrOpcode::kArrayBufferWasNeutered:
- return ReduceArrayBufferWasNeutered(node);
case IrOpcode::kMapGuard:
return ReduceMapGuard(node);
case IrOpcode::kCheckMaps:
@@ -139,74 +137,6 @@ Reduction LoadElimination::Reduce(Node* node) {
namespace {
-bool LoadEliminationIsCompatibleCheck(Node const* a, Node const* b) {
- if (a->op() != b->op()) return false;
- for (int i = a->op()->ValueInputCount(); --i >= 0;) {
- if (!MustAlias(a->InputAt(i), b->InputAt(i))) return false;
- }
- return true;
-}
-
-} // namespace
-
-Node* LoadElimination::AbstractChecks::Lookup(Node* node) const {
- for (Node* const check : nodes_) {
- if (check && !check->IsDead() &&
- LoadEliminationIsCompatibleCheck(check, node)) {
- return check;
- }
- }
- return nullptr;
-}
-
-bool LoadElimination::AbstractChecks::Equals(AbstractChecks const* that) const {
- if (this == that) return true;
- for (size_t i = 0; i < arraysize(nodes_); ++i) {
- if (Node* this_node = this->nodes_[i]) {
- for (size_t j = 0;; ++j) {
- if (j == arraysize(nodes_)) return false;
- if (that->nodes_[j] == this_node) break;
- }
- }
- }
- for (size_t i = 0; i < arraysize(nodes_); ++i) {
- if (Node* that_node = that->nodes_[i]) {
- for (size_t j = 0;; ++j) {
- if (j == arraysize(nodes_)) return false;
- if (this->nodes_[j] == that_node) break;
- }
- }
- }
- return true;
-}
-
-LoadElimination::AbstractChecks const* LoadElimination::AbstractChecks::Merge(
- AbstractChecks const* that, Zone* zone) const {
- if (this->Equals(that)) return this;
- AbstractChecks* copy = new (zone) AbstractChecks(zone);
- for (Node* const this_node : this->nodes_) {
- if (this_node == nullptr) continue;
- for (Node* const that_node : that->nodes_) {
- if (this_node == that_node) {
- copy->nodes_[copy->next_index_++] = this_node;
- break;
- }
- }
- }
- copy->next_index_ %= arraysize(nodes_);
- return copy;
-}
-
-void LoadElimination::AbstractChecks::Print() const {
- for (Node* const node : nodes_) {
- if (node != nullptr) {
- PrintF(" #%d:%s\n", node->id(), node->op()->mnemonic());
- }
- }
-}
-
-namespace {
-
bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
if (r1 == r2) return true;
return IsAnyTagged(r1) && IsAnyTagged(r2);
@@ -446,13 +376,6 @@ void LoadElimination::AbstractMaps::Print() const {
}
bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
- if (this->checks_) {
- if (!that->checks_ || !that->checks_->Equals(this->checks_)) {
- return false;
- }
- } else if (that->checks_) {
- return false;
- }
if (this->elements_) {
if (!that->elements_ || !that->elements_->Equals(this->elements_)) {
return false;
@@ -481,12 +404,6 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
void LoadElimination::AbstractState::Merge(AbstractState const* that,
Zone* zone) {
- // Merge the information we have about the checks.
- if (this->checks_) {
- this->checks_ =
- that->checks_ ? that->checks_->Merge(this->checks_, zone) : nullptr;
- }
-
// Merge the information we have about the elements.
if (this->elements_) {
this->elements_ = that->elements_
@@ -511,21 +428,6 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that,
}
}
-Node* LoadElimination::AbstractState::LookupCheck(Node* node) const {
- return this->checks_ ? this->checks_->Lookup(node) : nullptr;
-}
-
-LoadElimination::AbstractState const* LoadElimination::AbstractState::AddCheck(
- Node* node, Zone* zone) const {
- AbstractState* that = new (zone) AbstractState(*this);
- if (that->checks_) {
- that->checks_ = that->checks_->Extend(node, zone);
- } else {
- that->checks_ = new (zone) AbstractChecks(node, zone);
- }
- return that;
-}
-
bool LoadElimination::AbstractState::LookupMaps(
Node* object, ZoneHandleSet<Map>* object_map) const {
return this->maps_ && this->maps_->Lookup(object, object_map);
@@ -689,10 +591,6 @@ bool LoadElimination::AliasStateInfo::MayAlias(Node* other) const {
}
void LoadElimination::AbstractState::Print() const {
- if (checks_) {
- PrintF(" checks:\n");
- checks_->Print();
- }
if (maps_) {
PrintF(" maps:\n");
maps_->Print();
@@ -723,18 +621,6 @@ void LoadElimination::AbstractStateForEffectNodes::Set(
info_for_node_[id] = state;
}
-Reduction LoadElimination::ReduceArrayBufferWasNeutered(Node* node) {
- Node* const effect = NodeProperties::GetEffectInput(node);
- AbstractState const* state = node_states_.Get(effect);
- if (state == nullptr) return NoChange();
- if (Node* const check = state->LookupCheck(node)) {
- ReplaceWithValue(node, check, effect);
- return Replace(check);
- }
- state = state->AddCheck(node, zone());
- return UpdateState(node, state);
-}
-
Reduction LoadElimination::ReduceMapGuard(Node* node) {
ZoneHandleSet<Map> const maps = MapGuardMapsOf(node->op()).maps();
Node* const object = NodeProperties::GetValueInput(node, 0);
@@ -962,8 +848,9 @@ Reduction LoadElimination::ReduceStoreField(Node* node) {
Type const new_value_type = NodeProperties::GetType(new_value);
if (new_value_type.IsHeapConstant()) {
// Record the new {object} map information.
+ AllowHandleDereference handle_dereference;
ZoneHandleSet<Map> object_maps(
- bit_cast<Handle<Map>>(new_value_type.AsHeapConstant()->Value()));
+ Handle<Map>::cast(new_value_type.AsHeapConstant()->Value()));
state = state->SetMaps(object, object_maps, zone());
}
} else {
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 2ce5a04397..8fa31be074 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -31,46 +31,13 @@ class V8_EXPORT_PRIVATE LoadElimination final
public:
LoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
: AdvancedReducer(editor), node_states_(zone), jsgraph_(jsgraph) {}
- ~LoadElimination() final {}
+ ~LoadElimination() final = default;
const char* reducer_name() const override { return "LoadElimination"; }
Reduction Reduce(Node* node) final;
private:
- static const size_t kMaxTrackedChecks = 8;
-
- // Abstract state to approximate the current state of checks that are
- // only invalidated by calls, i.e. array buffer neutering checks, along
- // the effect paths through the graph.
- class AbstractChecks final : public ZoneObject {
- public:
- explicit AbstractChecks(Zone* zone) {
- for (size_t i = 0; i < arraysize(nodes_); ++i) {
- nodes_[i] = nullptr;
- }
- }
- AbstractChecks(Node* node, Zone* zone) : AbstractChecks(zone) {
- nodes_[next_index_++] = node;
- }
-
- AbstractChecks const* Extend(Node* node, Zone* zone) const {
- AbstractChecks* that = new (zone) AbstractChecks(*this);
- that->nodes_[that->next_index_] = node;
- that->next_index_ = (that->next_index_ + 1) % arraysize(nodes_);
- return that;
- }
- Node* Lookup(Node* node) const;
- bool Equals(AbstractChecks const* that) const;
- AbstractChecks const* Merge(AbstractChecks const* that, Zone* zone) const;
-
- void Print() const;
-
- private:
- Node* nodes_[kMaxTrackedChecks];
- size_t next_index_ = 0;
- };
-
static const size_t kMaxTrackedElements = 8;
// Abstract state to approximate the current state of an element along the
@@ -108,7 +75,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
private:
struct Element {
- Element() {}
+ Element() = default;
Element(Node* object, Node* index, Node* value,
MachineRepresentation representation)
: object(object),
@@ -174,7 +141,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
private:
struct Field {
- Field() {}
+ Field() = default;
Field(Node* value, MaybeHandle<Name> name) : value(value), name(name) {}
bool operator==(const Field& other) const {
@@ -250,13 +217,9 @@ class V8_EXPORT_PRIVATE LoadElimination final
Node* LookupElement(Node* object, Node* index,
MachineRepresentation representation) const;
- AbstractState const* AddCheck(Node* node, Zone* zone) const;
- Node* LookupCheck(Node* node) const;
-
void Print() const;
private:
- AbstractChecks const* checks_ = nullptr;
AbstractElements const* elements_ = nullptr;
AbstractField const* fields_[kMaxTrackedFields];
AbstractMaps const* maps_ = nullptr;
@@ -274,7 +237,6 @@ class V8_EXPORT_PRIVATE LoadElimination final
ZoneVector<AbstractState const*> info_for_node_;
};
- Reduction ReduceArrayBufferWasNeutered(Node* node);
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceCompareMaps(Node* node);
Reduction ReduceMapGuard(Node* node);
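
For context on the deletion above: AbstractChecks cached checks that only calls could invalidate (per its own comment, array buffer neutering checks), so a repeated identical check along the effect chain could be replaced by the first one. A toy model of that idea, with plain ints standing in for check nodes; the whole mechanism goes away here together with the kArrayBufferWasNeutered handling:

    #include <cassert>
    #include <vector>

    struct Check { int buffer_id; };

    struct CheckCache {
      std::vector<Check> checks;
      const Check* Lookup(int buffer_id) const {
        for (const Check& c : checks)
          if (c.buffer_id == buffer_id) return &c;
        return nullptr;
      }
      void Add(int buffer_id) { checks.push_back({buffer_id}); }
      void InvalidateOnCall() { checks.clear(); }  // calls may neuter buffers
    };

    int main() {
      CheckCache cache;
      cache.Add(1);
      assert(cache.Lookup(1) != nullptr);   // duplicate check gets eliminated
      cache.InvalidateOnCall();
      assert(cache.Lookup(1) == nullptr);   // must re-check after any call
      return 0;
    }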
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
index 150505a5e5..a3408ec81d 100644
--- a/deps/v8/src/compiler/loop-peeling.h
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -26,7 +26,7 @@ class V8_EXPORT_PRIVATE PeeledIteration : public NON_EXPORTED_BASE(ZoneObject) {
Node* map(Node* node);
protected:
- PeeledIteration() {}
+ PeeledIteration() = default;
};
class CommonOperatorBuilder;
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 5a0fc9dbfb..9fe7fdc4f9 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -158,6 +158,7 @@ void LoopVariableOptimizer::VisitIf(Node* node, bool polarity) {
// Normalize to less than comparison.
switch (cond->opcode()) {
case IrOpcode::kJSLessThan:
+ case IrOpcode::kNumberLessThan:
case IrOpcode::kSpeculativeNumberLessThan:
AddCmpToLimits(&limits, cond, InductionVariable::kStrict, polarity);
break;
@@ -165,6 +166,7 @@ void LoopVariableOptimizer::VisitIf(Node* node, bool polarity) {
AddCmpToLimits(&limits, cond, InductionVariable::kNonStrict, !polarity);
break;
case IrOpcode::kJSLessThanOrEqual:
+ case IrOpcode::kNumberLessThanOrEqual:
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
AddCmpToLimits(&limits, cond, InductionVariable::kNonStrict, polarity);
break;
@@ -226,10 +228,12 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
Node* arith = phi->InputAt(1);
InductionVariable::ArithmeticType arithmeticType;
if (arith->opcode() == IrOpcode::kJSAdd ||
+ arith->opcode() == IrOpcode::kNumberAdd ||
arith->opcode() == IrOpcode::kSpeculativeNumberAdd ||
arith->opcode() == IrOpcode::kSpeculativeSafeIntegerAdd) {
arithmeticType = InductionVariable::ArithmeticType::kAddition;
} else if (arith->opcode() == IrOpcode::kJSSubtract ||
+ arith->opcode() == IrOpcode::kNumberSubtract ||
arith->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
arith->opcode() == IrOpcode::kSpeculativeSafeIntegerSubtract) {
arithmeticType = InductionVariable::ArithmeticType::kSubtraction;
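
Adding the pure Number opcodes here matters because, by the time this analysis runs, typed lowering may already have replaced JS comparisons and arithmetic with their Number equivalents. A toy classifier in the spirit of VisitIf's normalization (string opcode names stand in for real IrOpcodes):

    #include <cassert>
    #include <string>

    enum class Bound { kStrict, kNonStrict, kNone };

    Bound ClassifyUpperBound(const std::string& opcode) {
      if (opcode == "JSLessThan" || opcode == "NumberLessThan" ||
          opcode == "SpeculativeNumberLessThan") {
        return Bound::kStrict;
      }
      if (opcode == "JSLessThanOrEqual" || opcode == "NumberLessThanOrEqual" ||
          opcode == "SpeculativeNumberLessThanOrEqual") {
        return Bound::kNonStrict;
      }
      return Bound::kNone;
    }

    int main() {
      // Before this patch, the two Number* cases fell through to kNone and
      // the induction variable went unrecognized.
      assert(ClassifyUpperBound("NumberLessThan") == Bound::kStrict);
      assert(ClassifyUpperBound("NumberLessThanOrEqual") == Bound::kNonStrict);
      assert(ClassifyUpperBound("NumberEqual") == Bound::kNone);
      return 0;
    }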
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index f3a5fb9023..c5769c2542 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -65,6 +65,16 @@ class MachineRepresentationInferrer {
auto call_descriptor = CallDescriptorOf(input->op());
return call_descriptor->GetReturnType(index).representation();
}
+ case IrOpcode::kWord32AtomicPairLoad:
+ case IrOpcode::kWord32AtomicPairAdd:
+ case IrOpcode::kWord32AtomicPairSub:
+ case IrOpcode::kWord32AtomicPairAnd:
+ case IrOpcode::kWord32AtomicPairOr:
+ case IrOpcode::kWord32AtomicPairXor:
+ case IrOpcode::kWord32AtomicPairExchange:
+ case IrOpcode::kWord32AtomicPairCompareExchange:
+ CHECK_LE(index, static_cast<size_t>(1));
+ return MachineRepresentation::kWord32;
default:
return MachineRepresentation::kNone;
}
@@ -111,6 +121,7 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] = MachineRepresentation::kNone;
break;
case IrOpcode::kWord32AtomicLoad:
+ case IrOpcode::kWord64AtomicLoad:
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kPoisonedLoad:
@@ -144,9 +155,21 @@ class MachineRepresentationInferrer {
break;
}
case IrOpcode::kWord32AtomicStore:
+ case IrOpcode::kWord64AtomicStore:
representation_vector_[node->id()] =
PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
break;
+ case IrOpcode::kWord32AtomicPairLoad:
+ case IrOpcode::kWord32AtomicPairStore:
+ case IrOpcode::kWord32AtomicPairAdd:
+ case IrOpcode::kWord32AtomicPairSub:
+ case IrOpcode::kWord32AtomicPairAnd:
+ case IrOpcode::kWord32AtomicPairOr:
+ case IrOpcode::kWord32AtomicPairXor:
+ case IrOpcode::kWord32AtomicPairExchange:
+ case IrOpcode::kWord32AtomicPairCompareExchange:
+ representation_vector_[node->id()] = MachineRepresentation::kWord32;
+ break;
case IrOpcode::kWord32AtomicExchange:
case IrOpcode::kWord32AtomicCompareExchange:
case IrOpcode::kWord32AtomicAdd:
@@ -154,6 +177,13 @@ class MachineRepresentationInferrer {
case IrOpcode::kWord32AtomicAnd:
case IrOpcode::kWord32AtomicOr:
case IrOpcode::kWord32AtomicXor:
+ case IrOpcode::kWord64AtomicExchange:
+ case IrOpcode::kWord64AtomicCompareExchange:
+ case IrOpcode::kWord64AtomicAdd:
+ case IrOpcode::kWord64AtomicSub:
+ case IrOpcode::kWord64AtomicAnd:
+ case IrOpcode::kWord64AtomicOr:
+ case IrOpcode::kWord64AtomicXor:
representation_vector_[node->id()] = PromoteRepresentation(
AtomicOpType(node->op()).representation());
break;
@@ -168,6 +198,7 @@ class MachineRepresentationInferrer {
break;
case IrOpcode::kHeapConstant:
case IrOpcode::kNumberConstant:
+ case IrOpcode::kDelayedStringConstant:
case IrOpcode::kChangeBitToTagged:
case IrOpcode::kIfException:
case IrOpcode::kOsrValue:
@@ -486,11 +517,23 @@ class MachineRepresentationChecker {
break;
case IrOpcode::kLoad:
case IrOpcode::kWord32AtomicLoad:
+ case IrOpcode::kWord32AtomicPairLoad:
+ case IrOpcode::kWord64AtomicLoad:
case IrOpcode::kPoisonedLoad:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
break;
+ case IrOpcode::kWord32AtomicPairAdd:
+ case IrOpcode::kWord32AtomicPairSub:
+ case IrOpcode::kWord32AtomicPairAnd:
+ case IrOpcode::kWord32AtomicPairOr:
+ case IrOpcode::kWord32AtomicPairXor:
+ case IrOpcode::kWord32AtomicPairStore:
+ case IrOpcode::kWord32AtomicPairExchange:
+ CheckValueInputRepresentationIs(node, 3,
+ MachineRepresentation::kWord32);
+ V8_FALLTHROUGH;
case IrOpcode::kStore:
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord32AtomicExchange:
@@ -499,6 +542,13 @@ class MachineRepresentationChecker {
case IrOpcode::kWord32AtomicAnd:
case IrOpcode::kWord32AtomicOr:
case IrOpcode::kWord32AtomicXor:
+ case IrOpcode::kWord64AtomicStore:
+ case IrOpcode::kWord64AtomicExchange:
+ case IrOpcode::kWord64AtomicAdd:
+ case IrOpcode::kWord64AtomicSub:
+ case IrOpcode::kWord64AtomicAnd:
+ case IrOpcode::kWord64AtomicOr:
+ case IrOpcode::kWord64AtomicXor:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -513,7 +563,14 @@ class MachineRepresentationChecker {
node, 2, inferrer_->GetRepresentation(node));
}
break;
+ case IrOpcode::kWord32AtomicPairCompareExchange:
+ CheckValueInputRepresentationIs(node, 4,
+ MachineRepresentation::kWord32);
+ CheckValueInputRepresentationIs(node, 5,
+ MachineRepresentation::kWord32);
+ V8_FALLTHROUGH;
case IrOpcode::kWord32AtomicCompareExchange:
+ case IrOpcode::kWord64AtomicCompareExchange:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
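
The verifier additions pin every Word32AtomicPair* result to kWord32 because these operators model a single 64-bit atomic access as a low/high pair of 32-bit words on 32-bit targets, with projections 0 and 1 selecting the halves. A small standalone check of that packing:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t value = 0x1122334455667788ULL;
      uint32_t lo = static_cast<uint32_t>(value);        // projection 0
      uint32_t hi = static_cast<uint32_t>(value >> 32);  // projection 1
      assert(lo == 0x55667788u);
      assert(hi == 0x11223344u);
      assert(((static_cast<uint64_t>(hi) << 32) | lo) == value);
      return 0;
    }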
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 7fcba20e2e..8ef7e7ce08 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -22,7 +22,7 @@ MachineOperatorReducer::MachineOperatorReducer(MachineGraph* mcgraph,
bool allow_signalling_nan)
: mcgraph_(mcgraph), allow_signalling_nan_(allow_signalling_nan) {}
-MachineOperatorReducer::~MachineOperatorReducer() {}
+MachineOperatorReducer::~MachineOperatorReducer() = default;
Node* MachineOperatorReducer::Float32Constant(volatile float value) {
@@ -618,6 +618,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
+ case IrOpcode::kChangeFloat64ToInt64: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt64(static_cast<int64_t>(m.Value()));
+ if (m.IsChangeInt64ToFloat64()) return Replace(m.node()->InputAt(0));
+ break;
+ }
case IrOpcode::kChangeFloat64ToUint32: {
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(FastD2UI(m.Value()));
@@ -634,6 +640,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.HasValue()) return ReplaceInt64(m.Value());
break;
}
+ case IrOpcode::kChangeInt64ToFloat64: {
+ Int64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(static_cast<double>(m.Value()));
+ if (m.IsChangeFloat64ToInt64()) return Replace(m.node()->InputAt(0));
+ break;
+ }
case IrOpcode::kChangeUint32ToFloat64: {
Uint32Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceFloat64(FastUI2D(m.Value()));
@@ -1374,21 +1386,32 @@ bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
- DCHECK((IrOpcode::kFloat64Equal == node->opcode()) ||
- (IrOpcode::kFloat64LessThan == node->opcode()) ||
- (IrOpcode::kFloat64LessThanOrEqual == node->opcode()));
- // As all Float32 values have an exact representation in Float64, comparing
- // two Float64 values both converted from Float32 is equivalent to comparing
- // the original Float32s, so we can ignore the conversions. We can also reduce
- // comparisons of converted Float64 values against constants that can be
- // represented exactly as Float32.
+ DCHECK(IrOpcode::kFloat64Equal == node->opcode() ||
+ IrOpcode::kFloat64LessThan == node->opcode() ||
+ IrOpcode::kFloat64LessThanOrEqual == node->opcode());
Float64BinopMatcher m(node);
- if ((m.left().IsChangeFloat32ToFloat64() &&
- m.right().IsChangeFloat32ToFloat64()) ||
- (m.left().IsChangeFloat32ToFloat64() &&
- IsFloat64RepresentableAsFloat32(m.right())) ||
- (IsFloat64RepresentableAsFloat32(m.left()) &&
- m.right().IsChangeFloat32ToFloat64())) {
+ if (m.IsFoldable()) {
+ switch (node->opcode()) {
+ case IrOpcode::kFloat64Equal:
+ return ReplaceBool(m.left().Value() == m.right().Value());
+ case IrOpcode::kFloat64LessThan:
+ return ReplaceBool(m.left().Value() < m.right().Value());
+ case IrOpcode::kFloat64LessThanOrEqual:
+ return ReplaceBool(m.left().Value() <= m.right().Value());
+ default:
+ UNREACHABLE();
+ }
+ } else if ((m.left().IsChangeFloat32ToFloat64() &&
+ m.right().IsChangeFloat32ToFloat64()) ||
+ (m.left().IsChangeFloat32ToFloat64() &&
+ IsFloat64RepresentableAsFloat32(m.right())) ||
+ (IsFloat64RepresentableAsFloat32(m.left()) &&
+ m.right().IsChangeFloat32ToFloat64())) {
+ // As all Float32 values have an exact representation in Float64, comparing
+ // two Float64 values both converted from Float32 is equivalent to comparing
+ // the original Float32s, so we can ignore the conversions. We can also
+ // reduce comparisons of converted Float64 values against constants that
+ // can be represented exactly as Float32.
switch (node->opcode()) {
case IrOpcode::kFloat64Equal:
NodeProperties::ChangeOp(node, machine()->Float32Equal());
@@ -1400,7 +1423,7 @@ Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
NodeProperties::ChangeOp(node, machine()->Float32LessThanOrEqual());
break;
default:
- return NoChange();
+ UNREACHABLE();
}
node->ReplaceInput(
0, m.left().HasValue()
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 1dc2a0a106..c44ec5f551 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -25,7 +25,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
public:
explicit MachineOperatorReducer(MachineGraph* mcgraph,
bool allow_signalling_nan = true);
- ~MachineOperatorReducer();
+ ~MachineOperatorReducer() override;
const char* reducer_name() const override { return "MachineOperatorReducer"; }
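
The rewritten ReduceFloat64Compare relies on two facts: comparisons of two constant doubles fold outright (so hitting the narrowing branch with foldable inputs can now be UNREACHABLE instead of NoChange), and every Float32 value widens exactly to Float64, so comparing the widened doubles is equivalent to comparing the original floats. A quick standalone check of the second fact:

    #include <cassert>

    int main() {
      float a = 1.1f, b = 1.2f;  // inexact in binary, but widened exactly
      assert((a < b) == (static_cast<double>(a) < static_cast<double>(b)));
      assert((a == b) == (static_cast<double>(a) == static_cast<double>(b)));
      assert((a <= b) == (static_cast<double>(a) <= static_cast<double>(b)));
      return 0;
    }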
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 241651254b..f3fcd7758c 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -81,8 +81,7 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
- IrOpcode::kWord64AtomicStore == op->opcode() ||
- IrOpcode::kWord32AtomicPairStore == op->opcode());
+ IrOpcode::kWord64AtomicStore == op->opcode());
return OpParameter<MachineRepresentation>(op);
}
@@ -145,6 +144,7 @@ MachineType AtomicOpType(Operator const* op) {
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
@@ -155,6 +155,7 @@ MachineType AtomicOpType(Operator const* op) {
V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1) \
V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
@@ -342,8 +343,8 @@ MachineType AtomicOpType(Operator const* op) {
V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \
V(Word32ReverseBits, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1) \
- V(Int32AbsWithOverflow, Operator::kNoProperties, 1, 0, 1) \
- V(Int64AbsWithOverflow, Operator::kNoProperties, 1, 0, 1) \
+ V(Int32AbsWithOverflow, Operator::kNoProperties, 1, 0, 2) \
+ V(Int64AbsWithOverflow, Operator::kNoProperties, 1, 0, 2) \
V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(Word64Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(Float32RoundDown, Operator::kNoProperties, 1, 0, 1) \
@@ -718,25 +719,6 @@ struct MachineOperatorGlobalCache {
#undef ATOMIC_PAIR_OP
#undef ATOMIC_PAIR_BINOP_LIST
-#define ATOMIC64_NARROW_OP(op, type) \
- struct op##type##Operator : public Operator1<MachineType> { \
- op##type##Operator() \
- : Operator1<MachineType>( \
- IrOpcode::k##op, Operator::kNoDeopt | Operator::kNoThrow, "#op", \
- 3, 1, 1, 2, 1, 0, MachineType::type()) {} \
- }; \
- op##type##Operator k##op##type;
-#define ATOMIC_OP_LIST(type) \
- ATOMIC64_NARROW_OP(Word64AtomicNarrowAdd, type) \
- ATOMIC64_NARROW_OP(Word64AtomicNarrowSub, type) \
- ATOMIC64_NARROW_OP(Word64AtomicNarrowAnd, type) \
- ATOMIC64_NARROW_OP(Word64AtomicNarrowOr, type) \
- ATOMIC64_NARROW_OP(Word64AtomicNarrowXor, type) \
- ATOMIC64_NARROW_OP(Word64AtomicNarrowExchange, type)
- ATOMIC_U32_TYPE_LIST(ATOMIC_OP_LIST)
-#undef ATOMIC_OP_LIST
-#undef ATOMIC64_NARROW_OP
-
struct Word32AtomicPairCompareExchangeOperator : public Operator {
Word32AtomicPairCompareExchangeOperator()
: Operator(IrOpcode::kWord32AtomicPairCompareExchange,
@@ -745,20 +727,6 @@ struct MachineOperatorGlobalCache {
};
Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
-#define ATOMIC_COMPARE_EXCHANGE(Type) \
- struct Word64AtomicNarrowCompareExchange##Type##Operator \
- : public Operator1<MachineType> { \
- Word64AtomicNarrowCompareExchange##Type##Operator() \
- : Operator1<MachineType>(IrOpcode::kWord64AtomicNarrowCompareExchange, \
- Operator::kNoDeopt | Operator::kNoThrow, \
- "Word64AtomicNarrowCompareExchange", 4, 1, 1, \
- 2, 1, 0, MachineType::Type()) {} \
- }; \
- Word64AtomicNarrowCompareExchange##Type##Operator \
- kWord64AtomicNarrowCompareExchange##Type;
- ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
-#undef ATOMIC_COMPARE_EXCHANGE
-
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
// might decide to split these operators, thus potentially creating live
@@ -1245,82 +1213,6 @@ const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
return &cache_.kWord32AtomicPairCompareExchange;
}
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowAdd(
- MachineType type) {
-#define ADD(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicNarrowAdd##kType; \
- }
- ATOMIC_U32_TYPE_LIST(ADD)
-#undef ADD
- UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowSub(
- MachineType type) {
-#define SUB(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicNarrowSub##kType; \
- }
- ATOMIC_U32_TYPE_LIST(SUB)
-#undef SUB
- UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowAnd(
- MachineType type) {
-#define AND(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicNarrowAnd##kType; \
- }
- ATOMIC_U32_TYPE_LIST(AND)
-#undef AND
- UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowOr(MachineType type) {
-#define OR(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicNarrowOr##kType; \
- }
- ATOMIC_U32_TYPE_LIST(OR)
-#undef OR
- UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowXor(
- MachineType type) {
-#define XOR(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicNarrowXor##kType; \
- }
- ATOMIC_U32_TYPE_LIST(XOR)
-#undef XOR
- UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowExchange(
- MachineType type) {
-#define EXCHANGE(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicNarrowExchange##kType; \
- }
- ATOMIC_U32_TYPE_LIST(EXCHANGE)
-#undef EXCHANGE
- UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowCompareExchange(
- MachineType type) {
-#define CMP_EXCHANGE(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicNarrowCompareExchange##kType; \
- }
- ATOMIC_U32_TYPE_LIST(CMP_EXCHANGE)
-#undef CMP_EXCHANGE
- UNREACHABLE();
-}
-
const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
return &cache_.kTaggedPoisonOnSpeculation;
}
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 261891dcdc..a34360a375 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -319,6 +319,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// the input value is representable in the target value.
const Operator* ChangeFloat32ToFloat64();
const Operator* ChangeFloat64ToInt32(); // narrowing
+ const Operator* ChangeFloat64ToInt64();
const Operator* ChangeFloat64ToUint32(); // narrowing
const Operator* ChangeFloat64ToUint64();
const Operator* TruncateFloat64ToUint32();
@@ -330,6 +331,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* TryTruncateFloat64ToUint64();
const Operator* ChangeInt32ToFloat64();
const Operator* ChangeInt32ToInt64();
+ const Operator* ChangeInt64ToFloat64();
const Operator* ChangeUint32ToFloat64();
const Operator* ChangeUint32ToUint64();
@@ -648,20 +650,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Word64AtomicOr(MachineType type);
// atomic-xor [base + index], value
const Operator* Word64AtomicXor(MachineType rep);
- // atomic-narrow-add [base + index], value
- const Operator* Word64AtomicNarrowAdd(MachineType type);
- // atomic-narow-sub [base + index], value
- const Operator* Word64AtomicNarrowSub(MachineType type);
- // atomic-narrow-and [base + index], value
- const Operator* Word64AtomicNarrowAnd(MachineType type);
- // atomic-narrow-or [base + index], value
- const Operator* Word64AtomicNarrowOr(MachineType type);
- // atomic-narrow-xor [base + index], value
- const Operator* Word64AtomicNarrowXor(MachineType type);
- // atomic-narrow-exchange [base + index], value
- const Operator* Word64AtomicNarrowExchange(MachineType type);
- // atomic-narrow-compare-exchange [base + index], old_value, new_value
- const Operator* Word64AtomicNarrowCompareExchange(MachineType type);
// atomic-pair-load [base + index]
const Operator* Word32AtomicPairLoad();
// atomic-pair-sub [base + index], value_high, value-low
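
The newly exposed ChangeFloat64ToInt64 and ChangeInt64ToFloat64 operators cancel when composed, which is what the reducer's IsChangeInt64ToFloat64()/IsChangeFloat64ToInt64() matches exploit; a sketch of why the folding is sound whenever the value round-trips:

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t i = 1234567;  // exactly representable as a double
      assert(static_cast<int64_t>(static_cast<double>(i)) == i);
      double d = 42.0;      // integral double, survives the int64 round trip
      assert(static_cast<double>(static_cast<int64_t>(d)) == d);
      return 0;
    }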
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 3ba3dcc6b8..298a503771 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -67,7 +67,7 @@ MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
: group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
- int size, Node* top)
+ intptr_t size, Node* top)
: group_(group), size_(size), top_(top) {}
bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
@@ -175,27 +175,35 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
- Int32Matcher m(size);
- if (m.HasValue() && m.Value() < kMaxRegularHeapObjectSize) {
- int32_t const object_size = m.Value();
+ IntPtrMatcher m(size);
+ if (m.IsInRange(0, kMaxRegularHeapObjectSize)) {
+ intptr_t const object_size = m.Value();
if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->pretenure() == pretenure) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
- int32_t const state_size = state->size() + object_size;
+ intptr_t const state_size = state->size() + object_size;
// Update the reservation check to the actual maximum upper bound.
AllocationGroup* const group = state->group();
- if (OpParameter<int32_t>(group->size()->op()) < state_size) {
- NodeProperties::ChangeOp(group->size(),
- common()->Int32Constant(state_size));
+ if (machine()->Is64()) {
+ if (OpParameter<int64_t>(group->size()->op()) < state_size) {
+ NodeProperties::ChangeOp(group->size(),
+ common()->Int64Constant(state_size));
+ }
+ } else {
+ if (OpParameter<int32_t>(group->size()->op()) < state_size) {
+ NodeProperties::ChangeOp(
+ group->size(),
+ common()->Int32Constant(static_cast<int32_t>(state_size)));
+ }
}
// Update the allocation top with the new object allocation.
// TODO(bmeurer): Defer writing back top as much as possible.
- Node* top = __ IntAdd(state->top(), __ IntPtrConstant(object_size));
+ Node* top = __ IntAdd(state->top(), size);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
@@ -213,7 +221,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Setup a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
- Node* size = __ UniqueInt32Constant(object_size);
+ Node* size = __ UniqueIntPtrConstant(object_size);
// Load allocation top and limit.
Node* top =
@@ -223,10 +231,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
- Node* check = __ UintLessThan(
- __ IntAdd(top,
- machine()->Is64() ? __ ChangeInt32ToInt64(size) : size),
- limit);
+ Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
__ GotoIfNot(check, &call_runtime);
__ Goto(&done, top);
@@ -238,8 +243,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
: __
AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
+ auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), AllocateDescriptor{}, 0,
+ graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
@@ -276,8 +282,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Compute the new top.
- Node* new_top =
- __ IntAdd(top, machine()->Is64() ? __ ChangeInt32ToInt64(size) : size);
+ Node* new_top = __ IntAdd(top, size);
// Check if we can do bump pointer allocation here.
Node* check = __ UintLessThan(new_top, limit);
@@ -294,8 +299,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
: __
AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
+ auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), AllocateDescriptor{}, 0,
+ graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
@@ -421,19 +427,7 @@ void MemoryOptimizer::VisitOtherEffect(Node* node,
EnqueueUses(node, state);
}
-Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* key) {
- Node* index;
- if (machine()->Is64()) {
- // On 64-bit platforms, we need to feed a Word64 index to the Load and
- // Store operators. Since LoadElement or StoreElement don't do any bounds
- // checking themselves, we can be sure that the {key} was already checked
- // and is in valid range, so we can do the further address computation on
- // Word64 below, which ideally allows us to fuse the address computation
- // with the actual memory access operation on Intel platforms.
- index = graph()->NewNode(machine()->ChangeUint32ToUint64(), key);
- } else {
- index = key;
- }
+Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
int const element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
if (element_size_shift) {
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index 5ea79556f4..31c04e5f2f 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -36,7 +36,7 @@ class MemoryOptimizer final {
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding);
- ~MemoryOptimizer() {}
+ ~MemoryOptimizer() = default;
void Optimize();
@@ -48,7 +48,7 @@ class MemoryOptimizer final {
AllocationGroup(Node* node, PretenureFlag pretenure, Zone* zone);
AllocationGroup(Node* node, PretenureFlag pretenure, Node* size,
Zone* zone);
- ~AllocationGroup() {}
+ ~AllocationGroup() = default;
void Add(Node* object);
bool Contains(Node* object) const;
@@ -74,7 +74,7 @@ class MemoryOptimizer final {
static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
return new (zone) AllocationState(group);
}
- static AllocationState const* Open(AllocationGroup* group, int size,
+ static AllocationState const* Open(AllocationGroup* group, intptr_t size,
Node* top, Zone* zone) {
return new (zone) AllocationState(group, size, top);
}
@@ -83,17 +83,17 @@ class MemoryOptimizer final {
AllocationGroup* group() const { return group_; }
Node* top() const { return top_; }
- int size() const { return size_; }
+ intptr_t size() const { return size_; }
private:
AllocationState();
explicit AllocationState(AllocationGroup* group);
- AllocationState(AllocationGroup* group, int size, Node* top);
+ AllocationState(AllocationGroup* group, intptr_t size, Node* top);
AllocationGroup* const group_;
// The upper bound of the combined allocated object size on the current path
// (max int if allocation folding is impossible on this path).
- int const size_;
+ intptr_t const size_;
Node* const top_;
DISALLOW_COPY_AND_ASSIGN(AllocationState);
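
The memory-optimizer folding path above now tracks reservation sizes as intptr_t and patches an Int64 size constant on 64-bit targets, so a folded group's single reservation check covers the combined bound without 32-bit bookkeeping. A toy model of the fold (the limit value is illustrative, not V8's actual kMaxRegularHeapObjectSize):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kMaxRegularHeapObjectSize = 1 << 19;  // illustrative limit
      intptr_t reservation = 16;   // first allocation opened the group
      intptr_t object_size = 24;   // next allocation we try to fold
      if (object_size <= kMaxRegularHeapObjectSize - reservation) {
        reservation += object_size;  // patch the group's size constant
      }
      assert(reservation == 40);  // one runtime check now covers both objects
      return 0;
    }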
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 00575fe117..e44ffee34b 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -84,6 +84,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
// maybe not done on arm due to const pool ??
break;
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
break;
@@ -353,6 +356,41 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ sync(); \
} while (0)
+#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr) \
+ do { \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ Label binop; \
+ __ sync(); \
+ __ bind(&binop); \
+ __ llwp(i.TempRegister(0), i.TempRegister(1), i.InputRegister(2)); \
+ __ bin_instr(i.TempRegister(0), i.TempRegister(1), i.TempRegister(0), \
+ i.TempRegister(1), i.InputRegister(0), i.InputRegister(1)); \
+ __ scwp(i.TempRegister(0), i.TempRegister(1), i.InputRegister(2)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr) \
+ do { \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ Label binop; \
+ __ sync(); \
+ __ bind(&binop); \
+ __ llwp(i.TempRegister(0), i.TempRegister(1), i.InputRegister(2)); \
+ __ bin_instr(i.TempRegister(0), i.TempRegister(1), i.TempRegister(0), \
+ i.TempRegister(1), i.InputRegister(0), i.InputRegister(1), \
+ i.TempRegister(2), i.TempRegister(3)); \
+ __ scwp(i.TempRegister(0), i.TempRegister(1), i.InputRegister(2)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ } while (0)
+
#define ASSEMBLE_ATOMIC_BINOP_EXT(sign_extend, size, bin_instr) \
do { \
Label binop; \
@@ -1701,6 +1739,61 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
+ case kMipsWord32AtomicPairLoad: {
+ if (IsMipsArchVariant(kMips32r6)) {
+ Register second_output =
+ instr->OutputCount() == 2 ? i.OutputRegister(1) : i.TempRegister(0);
+ __ llwp(i.OutputRegister(0), second_output, i.InputRegister(0));
+ __ sync();
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case kMipsWord32AtomicPairStore: {
+ if (IsMipsArchVariant(kMips32r6)) {
+ Label store;
+ __ sync();
+ __ bind(&store);
+ __ llwp(i.TempRegister(0), i.TempRegister(1), i.InputRegister(0));
+ __ Move(i.TempRegister(0), i.InputRegister(2));
+ __ scwp(i.InputRegister(1), i.TempRegister(0), i.InputRegister(0));
+ __ BranchShort(&store, eq, i.TempRegister(0), Operand(zero_reg));
+ __ sync();
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+#define ATOMIC64_BINOP_ARITH_CASE(op, instr) \
+ case kMipsWord32AtomicPair##op: \
+ ASSEMBLE_ATOMIC64_ARITH_BINOP(instr); \
+ break;
+ ATOMIC64_BINOP_ARITH_CASE(Add, AddPair)
+ ATOMIC64_BINOP_ARITH_CASE(Sub, SubPair)
+#undef ATOMIC64_BINOP_ARITH_CASE
+#define ATOMIC64_BINOP_LOGIC_CASE(op, instr) \
+ case kMipsWord32AtomicPair##op: \
+ ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr); \
+ break;
+ ATOMIC64_BINOP_LOGIC_CASE(And, AndPair)
+ ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair)
+ ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair)
+#undef ATOMIC64_BINOP_LOGIC_CASE
+ case kMipsWord32AtomicPairExchange:
+ UNREACHABLE();
+ break;
+ case kMipsWord32AtomicPairCompareExchange: {
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
+ __ PrepareCallCFunction(5, 0, kScratchReg);
+ __ addu(a0, i.InputRegister(0), i.InputRegister(1));
+ __ sw(i.InputRegister(5), MemOperand(sp, 16));
+ __ CallCFunction(
+ ExternalReference::atomic_pair_compare_exchange_function(), 5, 0);
+ __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
+ break;
+ }
case kMipsS128Zero: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
@@ -3371,9 +3464,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kExternalReference:
__ li(dst, src.ToExternalReference());
break;
+ case Constant::kDelayedStringConstant:
+ __ li(dst, src.ToDelayedStringConstant());
+ break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
+ RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
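
The ASSEMBLE_ATOMIC64_* macros above emit the classic load-linked/store-conditional retry loop using the r6 llwp/scwp pair instructions. A portable sketch of the same retry shape via compare-and-swap:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    int main() {
      std::atomic<uint64_t> cell{5};
      uint64_t old = cell.load();
      // Load, apply the operation, attempt the conditional store, retry.
      while (!cell.compare_exchange_weak(old, old + 3)) {
        // 'old' is refreshed on failure, mirroring the BranchShort retry.
      }
      assert(cell.load() == 8);
      return 0;
    }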
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index dd789d0196..4b49de36b4 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -276,7 +276,16 @@ namespace compiler {
V(MipsI16x8UConvertI8x16Low) \
V(MipsI16x8UConvertI8x16High) \
V(MipsI8x16SConvertI16x8) \
- V(MipsI8x16UConvertI16x8)
+ V(MipsI8x16UConvertI16x8) \
+ V(MipsWord32AtomicPairLoad) \
+ V(MipsWord32AtomicPairStore) \
+ V(MipsWord32AtomicPairAdd) \
+ V(MipsWord32AtomicPairSub) \
+ V(MipsWord32AtomicPairAnd) \
+ V(MipsWord32AtomicPairOr) \
+ V(MipsWord32AtomicPairXor) \
+ V(MipsWord32AtomicPairExchange) \
+ V(MipsWord32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc
index a0fe188430..26f543d838 100644
--- a/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc
@@ -266,6 +266,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsUlhu:
case kMipsUlw:
case kMipsUlwc1:
+ case kMipsWord32AtomicPairLoad:
return kIsLoadOperation;
case kMipsModD:
@@ -283,6 +284,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsUsh:
case kMipsUsw:
case kMipsUswc1:
+ case kMipsWord32AtomicPairStore:
+ case kMipsWord32AtomicPairAdd:
+ case kMipsWord32AtomicPairSub:
+ case kMipsWord32AtomicPairAnd:
+ case kMipsWord32AtomicPairOr:
+ case kMipsWord32AtomicPairXor:
+ case kMipsWord32AtomicPairExchange:
+ case kMipsWord32AtomicPairCompareExchange:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 66f38dc283..954942c9af 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -229,6 +229,42 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
VisitBinop(selector, node, opcode, false, kArchNop);
}
+static void VisitPairAtomicBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ MipsOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ Node* value_high = node->InputAt(3);
+
+ InstructionOperand addr_reg = g.TempRegister();
+
+ selector->Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+
+ InstructionOperand inputs[] = {g.UseRegister(value),
+ g.UseRegister(value_high), addr_reg};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister(), g.TempRegister()};
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
+ g.DefineAsRegister(projection1)};
+ selector->Emit(opcode | AddressingModeField::encode(kMode_None),
+ arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else if (projection0) {
+ InstructionOperand outputs[] = {g.DefineAsRegister(projection0)};
+ selector->Emit(opcode | AddressingModeField::encode(kMode_None),
+ arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_None), 0, nullptr,
+ arraysize(inputs), inputs, arraysize(temps), temps);
+ }
+}
+
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
int alignment = rep.alignment();
@@ -651,6 +687,106 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
VisitRR(this, kMipsClz, node);
}
+void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kMipsWord32AtomicPairLoad;
+
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ InstructionOperand inputs[] = {addr_reg};
+
+ InstructionOperand temps[] = {g.TempRegister()};
+ if (projection1) {
+ InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
+ g.DefineAsRegister(projection1)};
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), arraysize(outputs),
+ outputs, arraysize(inputs), inputs, 1, temps);
+ } else if (projection0) {
+ InstructionOperand outputs[] = {g.DefineAsRegister(projection0)};
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), arraysize(outputs),
+ outputs, arraysize(inputs), inputs, 1, temps);
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), 0, nullptr,
+ arraysize(inputs), inputs, 1, temps);
+ }
+}
+
+void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value_low = node->InputAt(2);
+ Node* value_high = node->InputAt(3);
+
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+
+ InstructionOperand inputs[] = {addr_reg, g.UseRegister(value_low),
+ g.UseRegister(value_high)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ Emit(kMipsWord32AtomicPairStore | AddressingModeField::encode(kMode_MRI), 0,
+ nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
+ VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAdd);
+}
+
+void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
+ VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairSub);
+}
+
+void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
+ VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAnd);
+}
+
+void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
+ VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairOr);
+}
+
+void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
+ VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairXor);
+}
+
+void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
+ VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairExchange);
+}
+
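+// The pair compare-exchange pins its value operands and results to fixed
+// registers (a1-a3 in, v0/v1 out). When a result projection is unused, the
+// corresponding register is still reserved as a temp, since the generated
+// sequence clobbers it anyway.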
+void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
+ MipsOperandGenerator g(this);
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseFixed(node->InputAt(2), a1), g.UseFixed(node->InputAt(3), a2),
+ g.UseFixed(node->InputAt(4), a3), g.UseUniqueRegister(node->InputAt(5))};
+
+ InstructionCode code = kMipsWord32AtomicPairCompareExchange |
+ AddressingModeField::encode(kMode_MRI);
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0),
+ g.DefineAsFixed(projection1, v1)};
+ InstructionOperand temps[] = {g.TempRegister(a0)};
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else if (projection0) {
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0)};
+ InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v1)};
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else {
+ InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v0),
+ g.TempRegister(v1)};
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+ }
+}
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 7beb887b53..0e2b508a29 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -87,6 +87,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
      // Maybe it is not done on ARM because of the constant pool?
break;
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
break;
@@ -349,116 +352,120 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(bin_instr) \
- do { \
- Label binop; \
- __ Daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ sync(); \
- __ bind(&binop); \
- __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
- __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
- Operand(i.InputRegister(2))); \
- __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
+ do { \
+ Label binop; \
+ __ Daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&binop); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
} while (0)
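+// ASSEMBLE_ATOMIC_BINOP expands to a load-linked/store-conditional retry
+// loop, roughly:
+//   sync
+// retry:
+//   <load_linked>        out, 0(addr)
+//   <bin_instr>          tmp, out, value
+//   <store_conditional>  tmp, 0(addr)   ; tmp == 0 if the SC failed
+//   beq  tmp, zero, retry
+//   sync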
-#define ASSEMBLE_ATOMIC_BINOP_EXT(sign_extend, size, bin_instr) \
- do { \
- Label binop; \
- __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
- __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
- Operand(i.TempRegister(3))); \
- __ sll(i.TempRegister(3), i.TempRegister(3), 3); \
- __ sync(); \
- __ bind(&binop); \
- __ Ll(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
- __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
- size, sign_extend); \
- __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
- Operand(i.InputRegister(2))); \
- __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
- size); \
- __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
+ size, bin_instr) \
+ do { \
+ Label binop; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
+ __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(3))); \
+ __ sll(i.TempRegister(3), i.TempRegister(3), 3); \
+ __ sync(); \
+ __ bind(&binop); \
+ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
+ size, sign_extend); \
+ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
+ size); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
} while (0)
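+// The _EXT variant read-modify-writes a narrow lane inside an aligned word:
+// the address is rounded down to the containing word, the byte offset is
+// turned into a bit shift, and ExtractBits/InsertBits confine the update to
+// the {size}-bit lane between the load-linked and the store-conditional.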
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER() \
- do { \
- Label exchange; \
- __ sync(); \
- __ bind(&exchange); \
- __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
- __ mov(i.TempRegister(1), i.InputRegister(2)); \
- __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \
+ do { \
+ Label exchange; \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ mov(i.TempRegister(1), i.InputRegister(2)); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(sign_extend, size) \
- do { \
- Label exchange; \
- __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
- __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
- Operand(i.TempRegister(1))); \
- __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
- __ sync(); \
- __ bind(&exchange); \
- __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
- __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
- size, sign_extend); \
- __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
- size); \
- __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, \
+ sign_extend, size) \
+ do { \
+ Label exchange; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
+ __ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER() \
- do { \
- Label compareExchange; \
- Label exit; \
- __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ sync(); \
- __ bind(&compareExchange); \
- __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&exit, ne, i.InputRegister(2), \
- Operand(i.OutputRegister(0))); \
- __ mov(i.TempRegister(2), i.InputRegister(3)); \
- __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
- Operand(zero_reg)); \
- __ bind(&exit); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ mov(i.TempRegister(2), i.InputRegister(3)); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
} while (0)
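+// Compare-exchange: load-linked the current value, exit early if it differs
+// from the expected value, otherwise store-conditionally write the new value
+// and retry the loop whenever the SC fails.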
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(sign_extend, size) \
- do { \
- Label compareExchange; \
- Label exit; \
- __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
- __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
- Operand(i.TempRegister(1))); \
- __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
- __ sync(); \
- __ bind(&compareExchange); \
- __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
- __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
- size, sign_extend); \
- __ BranchShort(&exit, ne, i.InputRegister(2), \
- Operand(i.OutputRegister(0))); \
- __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
- size); \
- __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
- Operand(zero_reg)); \
- __ bind(&exit); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
} while (0)
#define ASSEMBLE_IEEE754_BINOP(name) \
@@ -1845,6 +1852,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
break;
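+    // The Word64 atomic loads below reuse ASSEMBLE_ATOMIC_LOAD_INTEGER;
+    // only the load instruction (Lbu/Lhu/Lwu/Ld) differs per width.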
+ case kMips64Word64AtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
+ break;
+ case kMips64Word64AtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
+ break;
+ case kMips64Word64AtomicLoadUint32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
+ break;
+ case kMips64Word64AtomicLoadUint64:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
+ break;
case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
@@ -1854,52 +1873,88 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
+ case kMips64Word64AtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
+ break;
+ case kMips64Word64AtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
+ break;
+ case kMips64Word64AtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
+ break;
+ case kMips64Word64AtomicStoreWord64:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
+ break;
case kWord32AtomicExchangeInt8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8);
break;
case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8);
break;
case kWord32AtomicExchangeInt16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16);
break;
case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16);
break;
case kWord32AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
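+    // The Word64 exchanges use doubleword LL/SC (Lld/Scd); the sub-word
+    // variants operate on a zero-extended lane of the containing doubleword.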
+ case kMips64Word64AtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8);
+ break;
+ case kMips64Word64AtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16);
+ break;
+ case kMips64Word64AtomicExchangeUint32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32);
+ break;
+ case kMips64Word64AtomicExchangeUint64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
case kWord32AtomicCompareExchangeInt8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8);
break;
case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8);
break;
case kWord32AtomicCompareExchangeInt16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16);
break;
case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16);
break;
case kWord32AtomicCompareExchangeWord32:
__ sll(i.InputRegister(2), i.InputRegister(2), 0);
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
- break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(inst); \
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case kMips64Word64AtomicCompareExchangeUint8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8);
+ break;
+ case kMips64Word64AtomicCompareExchangeUint16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16);
+ break;
+ case kMips64Word64AtomicCompareExchangeUint32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32);
+ break;
+ case kMips64Word64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
+ break;
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst); \
+ break; \
+ case kWord32Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst); \
+ break; \
+ case kWord32Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst); \
+ break; \
+ case kWord32Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst); \
+ break; \
+ case kWord32Atomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
ATOMIC_BINOP_CASE(Sub, Subu)
@@ -1907,6 +1962,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
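+// Same table shape as the Word32 binops above, but instantiated with
+// doubleword LL/SC (Lld/Scd) and 64-bit arithmetic (Daddu/Dsubu).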
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kMips64Word64Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst); \
+ break; \
+ case kMips64Word64Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst); \
+ break; \
+ case kMips64Word64Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst); \
+ break; \
+ case kMips64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+ break;
+ ATOMIC_BINOP_CASE(Add, Daddu)
+ ATOMIC_BINOP_CASE(Sub, Dsubu)
+ ATOMIC_BINOP_CASE(And, And)
+ ATOMIC_BINOP_CASE(Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor)
+#undef ATOMIC_BINOP_CASE
case kMips64AssertEqual:
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
i.InputRegister(0), Operand(i.InputRegister(1)));
@@ -2496,7 +2570,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label all_false;
__ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
- __ li(dst, 0); // branch delay slot
+      __ li(dst, 0L);  // branch delay slot
__ li(dst, -1);
__ bind(&all_false);
break;
@@ -2508,7 +2582,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
__ li(dst, -1); // branch delay slot
- __ li(dst, 0);
+      __ li(dst, 0L);
__ bind(&all_true);
break;
}
@@ -2519,7 +2593,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
__ li(dst, -1); // branch delay slot
- __ li(dst, 0);
+      __ li(dst, 0L);
__ bind(&all_true);
break;
}
@@ -2530,7 +2604,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
__ li(dst, -1); // branch delay slot
- __ li(dst, 0);
+      __ li(dst, 0L);
__ bind(&all_true);
break;
}
@@ -3174,6 +3248,8 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
}
}
+#undef UNSUPPORTED_COND
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3621,9 +3697,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kExternalReference:
__ li(dst, src.ToExternalReference());
break;
+ case Constant::kDelayedStringConstant:
+ __ li(dst, src.ToDelayedStringConstant());
+ break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
+ RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
@@ -3823,6 +3902,19 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
UNREACHABLE();
}
+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
+#undef ASSEMBLE_ATOMIC_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP_EXT
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+
+#undef TRACE_MSG
+#undef TRACE_UNIMPL
#undef __
} // namespace compiler
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index a50d294013..7ea707db53 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -11,302 +11,338 @@ namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Mips64Add) \
- V(Mips64Dadd) \
- V(Mips64DaddOvf) \
- V(Mips64Sub) \
- V(Mips64Dsub) \
- V(Mips64DsubOvf) \
- V(Mips64Mul) \
- V(Mips64MulOvf) \
- V(Mips64MulHigh) \
- V(Mips64DMulHigh) \
- V(Mips64MulHighU) \
- V(Mips64Dmul) \
- V(Mips64Div) \
- V(Mips64Ddiv) \
- V(Mips64DivU) \
- V(Mips64DdivU) \
- V(Mips64Mod) \
- V(Mips64Dmod) \
- V(Mips64ModU) \
- V(Mips64DmodU) \
- V(Mips64And) \
- V(Mips64And32) \
- V(Mips64Or) \
- V(Mips64Or32) \
- V(Mips64Nor) \
- V(Mips64Nor32) \
- V(Mips64Xor) \
- V(Mips64Xor32) \
- V(Mips64Clz) \
- V(Mips64Lsa) \
- V(Mips64Dlsa) \
- V(Mips64Shl) \
- V(Mips64Shr) \
- V(Mips64Sar) \
- V(Mips64Ext) \
- V(Mips64Ins) \
- V(Mips64Dext) \
- V(Mips64Dins) \
- V(Mips64Dclz) \
- V(Mips64Ctz) \
- V(Mips64Dctz) \
- V(Mips64Popcnt) \
- V(Mips64Dpopcnt) \
- V(Mips64Dshl) \
- V(Mips64Dshr) \
- V(Mips64Dsar) \
- V(Mips64Ror) \
- V(Mips64Dror) \
- V(Mips64Mov) \
- V(Mips64Tst) \
- V(Mips64Cmp) \
- V(Mips64CmpS) \
- V(Mips64AddS) \
- V(Mips64SubS) \
- V(Mips64MulS) \
- V(Mips64DivS) \
- V(Mips64ModS) \
- V(Mips64AbsS) \
- V(Mips64NegS) \
- V(Mips64SqrtS) \
- V(Mips64MaxS) \
- V(Mips64MinS) \
- V(Mips64CmpD) \
- V(Mips64AddD) \
- V(Mips64SubD) \
- V(Mips64MulD) \
- V(Mips64DivD) \
- V(Mips64ModD) \
- V(Mips64AbsD) \
- V(Mips64NegD) \
- V(Mips64SqrtD) \
- V(Mips64MaxD) \
- V(Mips64MinD) \
- V(Mips64Float64RoundDown) \
- V(Mips64Float64RoundTruncate) \
- V(Mips64Float64RoundUp) \
- V(Mips64Float64RoundTiesEven) \
- V(Mips64Float32RoundDown) \
- V(Mips64Float32RoundTruncate) \
- V(Mips64Float32RoundUp) \
- V(Mips64Float32RoundTiesEven) \
- V(Mips64CvtSD) \
- V(Mips64CvtDS) \
- V(Mips64TruncWD) \
- V(Mips64RoundWD) \
- V(Mips64FloorWD) \
- V(Mips64CeilWD) \
- V(Mips64TruncWS) \
- V(Mips64RoundWS) \
- V(Mips64FloorWS) \
- V(Mips64CeilWS) \
- V(Mips64TruncLS) \
- V(Mips64TruncLD) \
- V(Mips64TruncUwD) \
- V(Mips64TruncUwS) \
- V(Mips64TruncUlS) \
- V(Mips64TruncUlD) \
- V(Mips64CvtDW) \
- V(Mips64CvtSL) \
- V(Mips64CvtSW) \
- V(Mips64CvtSUw) \
- V(Mips64CvtSUl) \
- V(Mips64CvtDL) \
- V(Mips64CvtDUw) \
- V(Mips64CvtDUl) \
- V(Mips64Lb) \
- V(Mips64Lbu) \
- V(Mips64Sb) \
- V(Mips64Lh) \
- V(Mips64Ulh) \
- V(Mips64Lhu) \
- V(Mips64Ulhu) \
- V(Mips64Sh) \
- V(Mips64Ush) \
- V(Mips64Ld) \
- V(Mips64Uld) \
- V(Mips64Lw) \
- V(Mips64Ulw) \
- V(Mips64Lwu) \
- V(Mips64Ulwu) \
- V(Mips64Sw) \
- V(Mips64Usw) \
- V(Mips64Sd) \
- V(Mips64Usd) \
- V(Mips64Lwc1) \
- V(Mips64Ulwc1) \
- V(Mips64Swc1) \
- V(Mips64Uswc1) \
- V(Mips64Ldc1) \
- V(Mips64Uldc1) \
- V(Mips64Sdc1) \
- V(Mips64Usdc1) \
- V(Mips64BitcastDL) \
- V(Mips64BitcastLD) \
- V(Mips64Float64ExtractLowWord32) \
- V(Mips64Float64ExtractHighWord32) \
- V(Mips64Float64InsertLowWord32) \
- V(Mips64Float64InsertHighWord32) \
- V(Mips64Float32Max) \
- V(Mips64Float64Max) \
- V(Mips64Float32Min) \
- V(Mips64Float64Min) \
- V(Mips64Float64SilenceNaN) \
- V(Mips64Push) \
- V(Mips64Peek) \
- V(Mips64StoreToStackSlot) \
- V(Mips64ByteSwap64) \
- V(Mips64ByteSwap32) \
- V(Mips64StackClaim) \
- V(Mips64Seb) \
- V(Mips64Seh) \
- V(Mips64AssertEqual) \
- V(Mips64S128Zero) \
- V(Mips64I32x4Splat) \
- V(Mips64I32x4ExtractLane) \
- V(Mips64I32x4ReplaceLane) \
- V(Mips64I32x4Add) \
- V(Mips64I32x4AddHoriz) \
- V(Mips64I32x4Sub) \
- V(Mips64F32x4Splat) \
- V(Mips64F32x4ExtractLane) \
- V(Mips64F32x4ReplaceLane) \
- V(Mips64F32x4SConvertI32x4) \
- V(Mips64F32x4UConvertI32x4) \
- V(Mips64I32x4Mul) \
- V(Mips64I32x4MaxS) \
- V(Mips64I32x4MinS) \
- V(Mips64I32x4Eq) \
- V(Mips64I32x4Ne) \
- V(Mips64I32x4Shl) \
- V(Mips64I32x4ShrS) \
- V(Mips64I32x4ShrU) \
- V(Mips64I32x4MaxU) \
- V(Mips64I32x4MinU) \
- V(Mips64F32x4Abs) \
- V(Mips64F32x4Neg) \
- V(Mips64F32x4RecipApprox) \
- V(Mips64F32x4RecipSqrtApprox) \
- V(Mips64F32x4Add) \
- V(Mips64F32x4AddHoriz) \
- V(Mips64F32x4Sub) \
- V(Mips64F32x4Mul) \
- V(Mips64F32x4Max) \
- V(Mips64F32x4Min) \
- V(Mips64F32x4Eq) \
- V(Mips64F32x4Ne) \
- V(Mips64F32x4Lt) \
- V(Mips64F32x4Le) \
- V(Mips64I32x4SConvertF32x4) \
- V(Mips64I32x4UConvertF32x4) \
- V(Mips64I32x4Neg) \
- V(Mips64I32x4GtS) \
- V(Mips64I32x4GeS) \
- V(Mips64I32x4GtU) \
- V(Mips64I32x4GeU) \
- V(Mips64I16x8Splat) \
- V(Mips64I16x8ExtractLane) \
- V(Mips64I16x8ReplaceLane) \
- V(Mips64I16x8Neg) \
- V(Mips64I16x8Shl) \
- V(Mips64I16x8ShrS) \
- V(Mips64I16x8ShrU) \
- V(Mips64I16x8Add) \
- V(Mips64I16x8AddSaturateS) \
- V(Mips64I16x8AddHoriz) \
- V(Mips64I16x8Sub) \
- V(Mips64I16x8SubSaturateS) \
- V(Mips64I16x8Mul) \
- V(Mips64I16x8MaxS) \
- V(Mips64I16x8MinS) \
- V(Mips64I16x8Eq) \
- V(Mips64I16x8Ne) \
- V(Mips64I16x8GtS) \
- V(Mips64I16x8GeS) \
- V(Mips64I16x8AddSaturateU) \
- V(Mips64I16x8SubSaturateU) \
- V(Mips64I16x8MaxU) \
- V(Mips64I16x8MinU) \
- V(Mips64I16x8GtU) \
- V(Mips64I16x8GeU) \
- V(Mips64I8x16Splat) \
- V(Mips64I8x16ExtractLane) \
- V(Mips64I8x16ReplaceLane) \
- V(Mips64I8x16Neg) \
- V(Mips64I8x16Shl) \
- V(Mips64I8x16ShrS) \
- V(Mips64I8x16Add) \
- V(Mips64I8x16AddSaturateS) \
- V(Mips64I8x16Sub) \
- V(Mips64I8x16SubSaturateS) \
- V(Mips64I8x16Mul) \
- V(Mips64I8x16MaxS) \
- V(Mips64I8x16MinS) \
- V(Mips64I8x16Eq) \
- V(Mips64I8x16Ne) \
- V(Mips64I8x16GtS) \
- V(Mips64I8x16GeS) \
- V(Mips64I8x16ShrU) \
- V(Mips64I8x16AddSaturateU) \
- V(Mips64I8x16SubSaturateU) \
- V(Mips64I8x16MaxU) \
- V(Mips64I8x16MinU) \
- V(Mips64I8x16GtU) \
- V(Mips64I8x16GeU) \
- V(Mips64S128And) \
- V(Mips64S128Or) \
- V(Mips64S128Xor) \
- V(Mips64S128Not) \
- V(Mips64S128Select) \
- V(Mips64S1x4AnyTrue) \
- V(Mips64S1x4AllTrue) \
- V(Mips64S1x8AnyTrue) \
- V(Mips64S1x8AllTrue) \
- V(Mips64S1x16AnyTrue) \
- V(Mips64S1x16AllTrue) \
- V(Mips64S32x4InterleaveRight) \
- V(Mips64S32x4InterleaveLeft) \
- V(Mips64S32x4PackEven) \
- V(Mips64S32x4PackOdd) \
- V(Mips64S32x4InterleaveEven) \
- V(Mips64S32x4InterleaveOdd) \
- V(Mips64S32x4Shuffle) \
- V(Mips64S16x8InterleaveRight) \
- V(Mips64S16x8InterleaveLeft) \
- V(Mips64S16x8PackEven) \
- V(Mips64S16x8PackOdd) \
- V(Mips64S16x8InterleaveEven) \
- V(Mips64S16x8InterleaveOdd) \
- V(Mips64S16x4Reverse) \
- V(Mips64S16x2Reverse) \
- V(Mips64S8x16InterleaveRight) \
- V(Mips64S8x16InterleaveLeft) \
- V(Mips64S8x16PackEven) \
- V(Mips64S8x16PackOdd) \
- V(Mips64S8x16InterleaveEven) \
- V(Mips64S8x16InterleaveOdd) \
- V(Mips64S8x16Shuffle) \
- V(Mips64S8x16Concat) \
- V(Mips64S8x8Reverse) \
- V(Mips64S8x4Reverse) \
- V(Mips64S8x2Reverse) \
- V(Mips64MsaLd) \
- V(Mips64MsaSt) \
- V(Mips64I32x4SConvertI16x8Low) \
- V(Mips64I32x4SConvertI16x8High) \
- V(Mips64I32x4UConvertI16x8Low) \
- V(Mips64I32x4UConvertI16x8High) \
- V(Mips64I16x8SConvertI8x16Low) \
- V(Mips64I16x8SConvertI8x16High) \
- V(Mips64I16x8SConvertI32x4) \
- V(Mips64I16x8UConvertI32x4) \
- V(Mips64I16x8UConvertI8x16Low) \
- V(Mips64I16x8UConvertI8x16High) \
- V(Mips64I8x16SConvertI16x8) \
- V(Mips64I8x16UConvertI16x8)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
+ V(Mips64Mul) \
+ V(Mips64MulOvf) \
+ V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64And32) \
+ V(Mips64Or) \
+ V(Mips64Or32) \
+ V(Mips64Nor) \
+ V(Mips64Nor32) \
+ V(Mips64Xor) \
+ V(Mips64Xor32) \
+ V(Mips64Clz) \
+ V(Mips64Lsa) \
+ V(Mips64Dlsa) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Ins) \
+ V(Mips64Dext) \
+ V(Mips64Dins) \
+ V(Mips64Dclz) \
+ V(Mips64Ctz) \
+ V(Mips64Dctz) \
+ V(Mips64Popcnt) \
+ V(Mips64Dpopcnt) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Cmp) \
+ V(Mips64CmpS) \
+ V(Mips64AddS) \
+ V(Mips64SubS) \
+ V(Mips64MulS) \
+ V(Mips64DivS) \
+ V(Mips64ModS) \
+ V(Mips64AbsS) \
+ V(Mips64NegS) \
+ V(Mips64SqrtS) \
+ V(Mips64MaxS) \
+ V(Mips64MinS) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64AbsD) \
+ V(Mips64NegD) \
+ V(Mips64SqrtD) \
+ V(Mips64MaxD) \
+ V(Mips64MinD) \
+ V(Mips64Float64RoundDown) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
+ V(Mips64TruncUwD) \
+ V(Mips64TruncUwS) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUw) \
+ V(Mips64CvtSUl) \
+ V(Mips64CvtDL) \
+ V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Ulh) \
+ V(Mips64Lhu) \
+ V(Mips64Ulhu) \
+ V(Mips64Sh) \
+ V(Mips64Ush) \
+ V(Mips64Ld) \
+ V(Mips64Uld) \
+ V(Mips64Lw) \
+ V(Mips64Ulw) \
+ V(Mips64Lwu) \
+ V(Mips64Ulwu) \
+ V(Mips64Sw) \
+ V(Mips64Usw) \
+ V(Mips64Sd) \
+ V(Mips64Usd) \
+ V(Mips64Lwc1) \
+ V(Mips64Ulwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Uswc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Uldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64Usdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
+ V(Mips64Float64ExtractLowWord32) \
+ V(Mips64Float64ExtractHighWord32) \
+ V(Mips64Float64InsertLowWord32) \
+ V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float32Max) \
+ V(Mips64Float64Max) \
+ V(Mips64Float32Min) \
+ V(Mips64Float64Min) \
+ V(Mips64Float64SilenceNaN) \
+ V(Mips64Push) \
+ V(Mips64Peek) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64ByteSwap64) \
+ V(Mips64ByteSwap32) \
+ V(Mips64StackClaim) \
+ V(Mips64Seb) \
+ V(Mips64Seh) \
+ V(Mips64AssertEqual) \
+ V(Mips64S128Zero) \
+ V(Mips64I32x4Splat) \
+ V(Mips64I32x4ExtractLane) \
+ V(Mips64I32x4ReplaceLane) \
+ V(Mips64I32x4Add) \
+ V(Mips64I32x4AddHoriz) \
+ V(Mips64I32x4Sub) \
+ V(Mips64F32x4Splat) \
+ V(Mips64F32x4ExtractLane) \
+ V(Mips64F32x4ReplaceLane) \
+ V(Mips64F32x4SConvertI32x4) \
+ V(Mips64F32x4UConvertI32x4) \
+ V(Mips64I32x4Mul) \
+ V(Mips64I32x4MaxS) \
+ V(Mips64I32x4MinS) \
+ V(Mips64I32x4Eq) \
+ V(Mips64I32x4Ne) \
+ V(Mips64I32x4Shl) \
+ V(Mips64I32x4ShrS) \
+ V(Mips64I32x4ShrU) \
+ V(Mips64I32x4MaxU) \
+ V(Mips64I32x4MinU) \
+ V(Mips64F32x4Abs) \
+ V(Mips64F32x4Neg) \
+ V(Mips64F32x4RecipApprox) \
+ V(Mips64F32x4RecipSqrtApprox) \
+ V(Mips64F32x4Add) \
+ V(Mips64F32x4AddHoriz) \
+ V(Mips64F32x4Sub) \
+ V(Mips64F32x4Mul) \
+ V(Mips64F32x4Max) \
+ V(Mips64F32x4Min) \
+ V(Mips64F32x4Eq) \
+ V(Mips64F32x4Ne) \
+ V(Mips64F32x4Lt) \
+ V(Mips64F32x4Le) \
+ V(Mips64I32x4SConvertF32x4) \
+ V(Mips64I32x4UConvertF32x4) \
+ V(Mips64I32x4Neg) \
+ V(Mips64I32x4GtS) \
+ V(Mips64I32x4GeS) \
+ V(Mips64I32x4GtU) \
+ V(Mips64I32x4GeU) \
+ V(Mips64I16x8Splat) \
+ V(Mips64I16x8ExtractLane) \
+ V(Mips64I16x8ReplaceLane) \
+ V(Mips64I16x8Neg) \
+ V(Mips64I16x8Shl) \
+ V(Mips64I16x8ShrS) \
+ V(Mips64I16x8ShrU) \
+ V(Mips64I16x8Add) \
+ V(Mips64I16x8AddSaturateS) \
+ V(Mips64I16x8AddHoriz) \
+ V(Mips64I16x8Sub) \
+ V(Mips64I16x8SubSaturateS) \
+ V(Mips64I16x8Mul) \
+ V(Mips64I16x8MaxS) \
+ V(Mips64I16x8MinS) \
+ V(Mips64I16x8Eq) \
+ V(Mips64I16x8Ne) \
+ V(Mips64I16x8GtS) \
+ V(Mips64I16x8GeS) \
+ V(Mips64I16x8AddSaturateU) \
+ V(Mips64I16x8SubSaturateU) \
+ V(Mips64I16x8MaxU) \
+ V(Mips64I16x8MinU) \
+ V(Mips64I16x8GtU) \
+ V(Mips64I16x8GeU) \
+ V(Mips64I8x16Splat) \
+ V(Mips64I8x16ExtractLane) \
+ V(Mips64I8x16ReplaceLane) \
+ V(Mips64I8x16Neg) \
+ V(Mips64I8x16Shl) \
+ V(Mips64I8x16ShrS) \
+ V(Mips64I8x16Add) \
+ V(Mips64I8x16AddSaturateS) \
+ V(Mips64I8x16Sub) \
+ V(Mips64I8x16SubSaturateS) \
+ V(Mips64I8x16Mul) \
+ V(Mips64I8x16MaxS) \
+ V(Mips64I8x16MinS) \
+ V(Mips64I8x16Eq) \
+ V(Mips64I8x16Ne) \
+ V(Mips64I8x16GtS) \
+ V(Mips64I8x16GeS) \
+ V(Mips64I8x16ShrU) \
+ V(Mips64I8x16AddSaturateU) \
+ V(Mips64I8x16SubSaturateU) \
+ V(Mips64I8x16MaxU) \
+ V(Mips64I8x16MinU) \
+ V(Mips64I8x16GtU) \
+ V(Mips64I8x16GeU) \
+ V(Mips64S128And) \
+ V(Mips64S128Or) \
+ V(Mips64S128Xor) \
+ V(Mips64S128Not) \
+ V(Mips64S128Select) \
+ V(Mips64S1x4AnyTrue) \
+ V(Mips64S1x4AllTrue) \
+ V(Mips64S1x8AnyTrue) \
+ V(Mips64S1x8AllTrue) \
+ V(Mips64S1x16AnyTrue) \
+ V(Mips64S1x16AllTrue) \
+ V(Mips64S32x4InterleaveRight) \
+ V(Mips64S32x4InterleaveLeft) \
+ V(Mips64S32x4PackEven) \
+ V(Mips64S32x4PackOdd) \
+ V(Mips64S32x4InterleaveEven) \
+ V(Mips64S32x4InterleaveOdd) \
+ V(Mips64S32x4Shuffle) \
+ V(Mips64S16x8InterleaveRight) \
+ V(Mips64S16x8InterleaveLeft) \
+ V(Mips64S16x8PackEven) \
+ V(Mips64S16x8PackOdd) \
+ V(Mips64S16x8InterleaveEven) \
+ V(Mips64S16x8InterleaveOdd) \
+ V(Mips64S16x4Reverse) \
+ V(Mips64S16x2Reverse) \
+ V(Mips64S8x16InterleaveRight) \
+ V(Mips64S8x16InterleaveLeft) \
+ V(Mips64S8x16PackEven) \
+ V(Mips64S8x16PackOdd) \
+ V(Mips64S8x16InterleaveEven) \
+ V(Mips64S8x16InterleaveOdd) \
+ V(Mips64S8x16Shuffle) \
+ V(Mips64S8x16Concat) \
+ V(Mips64S8x8Reverse) \
+ V(Mips64S8x4Reverse) \
+ V(Mips64S8x2Reverse) \
+ V(Mips64MsaLd) \
+ V(Mips64MsaSt) \
+ V(Mips64I32x4SConvertI16x8Low) \
+ V(Mips64I32x4SConvertI16x8High) \
+ V(Mips64I32x4UConvertI16x8Low) \
+ V(Mips64I32x4UConvertI16x8High) \
+ V(Mips64I16x8SConvertI8x16Low) \
+ V(Mips64I16x8SConvertI8x16High) \
+ V(Mips64I16x8SConvertI32x4) \
+ V(Mips64I16x8UConvertI32x4) \
+ V(Mips64I16x8UConvertI8x16Low) \
+ V(Mips64I16x8UConvertI8x16High) \
+ V(Mips64I8x16SConvertI16x8) \
+ V(Mips64I8x16UConvertI16x8) \
+ V(Mips64Word64AtomicLoadUint8) \
+ V(Mips64Word64AtomicLoadUint16) \
+ V(Mips64Word64AtomicLoadUint32) \
+ V(Mips64Word64AtomicLoadUint64) \
+ V(Mips64Word64AtomicStoreWord8) \
+ V(Mips64Word64AtomicStoreWord16) \
+ V(Mips64Word64AtomicStoreWord32) \
+ V(Mips64Word64AtomicStoreWord64) \
+ V(Mips64Word64AtomicAddUint8) \
+ V(Mips64Word64AtomicAddUint16) \
+ V(Mips64Word64AtomicAddUint32) \
+ V(Mips64Word64AtomicAddUint64) \
+ V(Mips64Word64AtomicSubUint8) \
+ V(Mips64Word64AtomicSubUint16) \
+ V(Mips64Word64AtomicSubUint32) \
+ V(Mips64Word64AtomicSubUint64) \
+ V(Mips64Word64AtomicAndUint8) \
+ V(Mips64Word64AtomicAndUint16) \
+ V(Mips64Word64AtomicAndUint32) \
+ V(Mips64Word64AtomicAndUint64) \
+ V(Mips64Word64AtomicOrUint8) \
+ V(Mips64Word64AtomicOrUint16) \
+ V(Mips64Word64AtomicOrUint32) \
+ V(Mips64Word64AtomicOrUint64) \
+ V(Mips64Word64AtomicXorUint8) \
+ V(Mips64Word64AtomicXorUint16) \
+ V(Mips64Word64AtomicXorUint32) \
+ V(Mips64Word64AtomicXorUint64) \
+ V(Mips64Word64AtomicExchangeUint8) \
+ V(Mips64Word64AtomicExchangeUint16) \
+ V(Mips64Word64AtomicExchangeUint32) \
+ V(Mips64Word64AtomicExchangeUint64) \
+ V(Mips64Word64AtomicCompareExchangeUint8) \
+ V(Mips64Word64AtomicCompareExchangeUint16) \
+ V(Mips64Word64AtomicCompareExchangeUint32) \
+ V(Mips64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
index b0f6d65bfe..8fe669fe02 100644
--- a/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
@@ -293,6 +293,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ulw:
case kMips64Ulwu:
case kMips64Ulwc1:
+ case kMips64Word64AtomicLoadUint8:
+ case kMips64Word64AtomicLoadUint16:
+ case kMips64Word64AtomicLoadUint32:
+ case kMips64Word64AtomicLoadUint64:
return kIsLoadOperation;
case kMips64ModD:
@@ -312,6 +317,38 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ush:
case kMips64Usw:
case kMips64Uswc1:
+ case kMips64Word64AtomicStoreWord8:
+ case kMips64Word64AtomicStoreWord16:
+ case kMips64Word64AtomicStoreWord32:
+ case kMips64Word64AtomicStoreWord64:
+ case kMips64Word64AtomicAddUint8:
+ case kMips64Word64AtomicAddUint16:
+ case kMips64Word64AtomicAddUint32:
+ case kMips64Word64AtomicAddUint64:
+ case kMips64Word64AtomicSubUint8:
+ case kMips64Word64AtomicSubUint16:
+ case kMips64Word64AtomicSubUint32:
+ case kMips64Word64AtomicSubUint64:
+ case kMips64Word64AtomicAndUint8:
+ case kMips64Word64AtomicAndUint16:
+ case kMips64Word64AtomicAndUint32:
+ case kMips64Word64AtomicAndUint64:
+ case kMips64Word64AtomicOrUint8:
+ case kMips64Word64AtomicOrUint16:
+ case kMips64Word64AtomicOrUint32:
+ case kMips64Word64AtomicOrUint64:
+ case kMips64Word64AtomicXorUint8:
+ case kMips64Word64AtomicXorUint16:
+ case kMips64Word64AtomicXorUint32:
+ case kMips64Word64AtomicXorUint64:
+ case kMips64Word64AtomicExchangeUint8:
+ case kMips64Word64AtomicExchangeUint16:
+ case kMips64Word64AtomicExchangeUint32:
+ case kMips64Word64AtomicExchangeUint64:
+ case kMips64Word64AtomicCompareExchangeUint8:
+ case kMips64Word64AtomicCompareExchangeUint16:
+ case kMips64Word64AtomicCompareExchangeUint32:
+ case kMips64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 9f9aebc145..f27ad218fd 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -1160,6 +1160,9 @@ void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kMips64CvtDW, node);
}
+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
+ VisitRR(this, kMips64CvtDL, node);
+}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
VisitRR(this, kMips64CvtDUw, node);
@@ -1239,6 +1242,9 @@ void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
VisitRR(this, kMips64TruncWD, node);
}
+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
+ VisitRR(this, kMips64TruncLD, node);
+}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kMips64TruncUwD, node);
@@ -2022,6 +2028,119 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
g.TempImmediate(0), cont);
}
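+// Shared lowering helpers for the Word32 and Word64 atomic visitors below.
+// The load/store helpers fall back to materializing base + index into a
+// temp register with a Dadd whenever the index is not an encodable
+// immediate.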
+void VisitAtomicLoad(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void VisitAtomicStore(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
+}
+
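+// The exchange, compare-exchange, and binop helpers below take all operands
+// in unique registers so the allocator cannot alias them with the output or
+// with the temps that the generated LL/SC sequence clobbers.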
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[4];
+ temps[0] = g.TempRegister();
+ temps[1] = g.TempRegister();
+ temps[2] = g.TempRegister();
+ temps[3] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
+}
+
} // namespace
// Shared routine for word comparisons against zero.
@@ -2366,9 +2485,6 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
@@ -2386,25 +2502,11 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
- } else {
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired load opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
- }
+ VisitAtomicLoad(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
@@ -2421,25 +2523,57 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
return;
}
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index),
- g.UseRegisterOrImmediateZero(value));
- } else {
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
+ VisitAtomicStore(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = kMips64Word64AtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMips64Word64AtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kMips64Word64AtomicLoadUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kMips64Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kMips64Word64AtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMips64Word64AtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kMips64Word64AtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kMips64Word64AtomicStoreWord64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
}
+
+ VisitAtomicStore(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
@@ -2457,28 +2591,28 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
return;
}
- AddressingMode addressing_mode = kMode_MRI;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionOperand temp[3];
- temp[0] = g.TempRegister();
- temp[1] = g.TempRegister();
- temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 3, temp);
+ VisitAtomicExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kMips64Word64AtomicExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kMips64Word64AtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kMips64Word64AtomicExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kMips64Word64AtomicExchangeUint64;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* old_value = node->InputAt(2);
- Node* new_value = node->InputAt(3);
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
@@ -2496,30 +2630,29 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
return;
}
- AddressingMode addressing_mode = kMode_MRI;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(old_value);
- inputs[input_count++] = g.UseUniqueRegister(new_value);
- InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionOperand temp[3];
- temp[0] = g.TempRegister();
- temp[1] = g.TempRegister();
- temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 3, temp);
+ VisitAtomicCompareExchange(this, node, opcode);
}
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kMips64Word64AtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kMips64Word64AtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kMips64Word64AtomicCompareExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kMips64Word64AtomicCompareExchangeUint64;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicCompareExchange(this, node, opcode);
+}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
@@ -2537,21 +2670,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
return;
}
- AddressingMode addressing_mode = kMode_MRI;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionOperand temps[4];
- temps[0] = g.TempRegister();
- temps[1] = g.TempRegister();
- temps[2] = g.TempRegister();
- temps[3] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 4, temps);
+ VisitAtomicBinop(this, node, opcode);
}
#define VISIT_ATOMIC_BINOP(op) \
@@ -2568,6 +2687,39 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicBinop(this, node, opcode);
+}
+
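+// Instantiates VisitWord64AtomicAdd/Sub/And/Or/Xor by dispatching on the
+// operand MachineType through VisitWord64AtomicBinaryOperation above.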
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation( \
+ node, kMips64Word64Atomic##op##Uint8, kMips64Word64Atomic##op##Uint16, \
+ kMips64Word64Atomic##op##Uint32, kMips64Word64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index 82f4b63276..cf6edc2b67 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -37,7 +37,7 @@ class OperandSet {
set_->push_back(op);
if (!kSimpleFPAliasing && op.IsFPRegister())
- fp_reps_ |= RepBit(LocationOperand::cast(op).representation());
+ fp_reps_ |= RepresentationBit(LocationOperand::cast(op).representation());
}
bool Contains(const InstructionOperand& op) const {
@@ -55,7 +55,7 @@ class OperandSet {
const LocationOperand& loc = LocationOperand::cast(op);
MachineRepresentation rep = loc.representation();
    // If we haven't encountered mixed rep FP registers, skip the extra checks.
- if (!HasMixedFPReps(fp_reps_ | RepBit(rep))) return false;
+ if (!HasMixedFPReps(fp_reps_ | RepresentationBit(rep))) return false;
// Check register against aliasing registers of other FP representations.
MachineRepresentation other_rep1, other_rep2;
@@ -100,10 +100,6 @@ class OperandSet {
}
private:
- static int RepBit(MachineRepresentation rep) {
- return 1 << static_cast<int>(rep);
- }
-
static bool HasMixedFPReps(int reps) {
return reps && !base::bits::IsPowerOfTwo(reps);
}
diff --git a/deps/v8/src/compiler/node-cache.cc b/deps/v8/src/compiler/node-cache.cc
index fc9a44c629..6b9c8dc07d 100644
--- a/deps/v8/src/compiler/node-cache.cc
+++ b/deps/v8/src/compiler/node-cache.cc
@@ -38,7 +38,7 @@ bool NodeCache<Key, Hash, Pred>::Resize(Zone* zone) {
size_ *= 4;
size_t num_entries = size_ + kLinearProbe;
entries_ = zone->NewArray<Entry>(num_entries);
- memset(entries_, 0, sizeof(Entry) * num_entries);
+ memset(static_cast<void*>(entries_), 0, sizeof(Entry) * num_entries);
// Insert the old entries into the new block.
for (size_t i = 0; i < old_size; ++i) {
@@ -69,7 +69,7 @@ Node** NodeCache<Key, Hash, Pred>::Find(Zone* zone, Key key) {
size_t num_entries = kInitialSize + kLinearProbe;
entries_ = zone->NewArray<Entry>(num_entries);
size_ = kInitialSize;
- memset(entries_, 0, sizeof(Entry) * num_entries);
+ memset(static_cast<void*>(entries_), 0, sizeof(Entry) * num_entries);
Entry* entry = &entries_[hash & (kInitialSize - 1)];
entry->key_ = key;
return &entry->value_;
diff --git a/deps/v8/src/compiler/node-cache.h b/deps/v8/src/compiler/node-cache.h
index 7063a3b0b4..72b5fdf2f3 100644
--- a/deps/v8/src/compiler/node-cache.h
+++ b/deps/v8/src/compiler/node-cache.h
@@ -27,11 +27,11 @@ class Node;
// nodes such as constants, parameters, etc.
template <typename Key, typename Hash = base::hash<Key>,
typename Pred = std::equal_to<Key> >
-class NodeCache final {
+class V8_EXPORT_PRIVATE NodeCache final {
public:
explicit NodeCache(unsigned max = 256)
: entries_(nullptr), size_(0), max_(max) {}
- ~NodeCache() {}
+ ~NodeCache() = default;
// Search for node associated with {key} and return a pointer to a memory
// location in this cache that stores an entry for the key. If the location
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 22cdd0b091..d72980f9fd 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -599,7 +599,6 @@ bool NodeProperties::CanBeNullOrUndefined(Isolate* isolate, Node* receiver,
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
case IrOpcode::kCheckSymbol:
- case IrOpcode::kJSToInteger:
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index fc5a17c19d..01dca47cbe 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -429,8 +429,7 @@ class Node::InputEdges::iterator final {
typedef Edge& reference;
iterator() : use_(nullptr), input_ptr_(nullptr) {}
- iterator(const iterator& other)
- : use_(other.use_), input_ptr_(other.input_ptr_) {}
+ iterator(const iterator& other) = default;
Edge operator*() const { return Edge(use_, input_ptr_); }
bool operator==(const iterator& other) const {
@@ -488,7 +487,7 @@ class Node::Inputs::const_iterator final {
typedef const value_type* pointer;
typedef value_type& reference;
- const_iterator(const const_iterator& other) : input_ptr_(other.input_ptr_) {}
+ const_iterator(const const_iterator& other) = default;
Node* operator*() const { return *input_ptr_; }
bool operator==(const const_iterator& other) const {
@@ -536,8 +535,7 @@ Node* Node::Inputs::operator[](int index) const { return input_root_[index]; }
// A forward iterator to visit the uses edges of a node.
class Node::UseEdges::iterator final {
public:
- iterator(const iterator& other)
- : current_(other.current_), next_(other.next_) {}
+ iterator(const iterator& other) = default;
Edge operator*() const { return Edge(current_, current_->input_ptr()); }
bool operator==(const iterator& other) const {
@@ -584,15 +582,6 @@ class Node::Uses::const_iterator final {
typedef Node** pointer;
typedef Node*& reference;
- const_iterator(const const_iterator& other)
- : current_(other.current_)
-#ifdef DEBUG
- ,
- next_(other.next_)
-#endif
- {
- }
-
Node* operator*() const { return current_->from(); }
bool operator==(const const_iterator& other) const {
return other.current_ == current_;
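
The iterator changes above replace hand-written member-wise copy constructors with = default, or drop them so the compiler generates one. Beyond brevity, this plausibly matters for performance: a defaulted copy constructor keeps the type trivially copyable, while a user-provided one does not, even if it copies the same members. A sketch with illustrative types:

    #include <type_traits>

    struct HandWritten {
      HandWritten() = default;
      HandWritten(const HandWritten& other) : p_(other.p_) {}  // user-provided
      int* p_ = nullptr;
    };

    struct Defaulted {
      Defaulted() = default;
      Defaulted(const Defaulted& other) = default;  // stays trivial
      int* p_ = nullptr;
    };

    static_assert(!std::is_trivially_copyable<HandWritten>::value, "");
    static_assert(std::is_trivially_copyable<Defaulted>::value, "");
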
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index d6ea247fbc..b6777ac439 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -117,7 +117,6 @@
V(JSOrdinaryHasInstance)
#define JS_CONVERSION_UNOP_LIST(V) \
- V(JSToInteger) \
V(JSToLength) \
V(JSToName) \
V(JSToNumber) \
@@ -151,6 +150,7 @@
V(JSCreateTypedArray) \
V(JSCreateLiteralArray) \
V(JSCreateEmptyLiteralArray) \
+ V(JSCreateArrayFromIterable) \
V(JSCreateLiteralObject) \
V(JSCreateEmptyLiteralObject) \
V(JSCloneObject) \
@@ -179,6 +179,12 @@
V(JSCreateWithContext) \
V(JSCreateBlockContext)
+#define JS_CALL_OP_LIST(V) \
+ V(JSCall) \
+ V(JSCallForwardVarargs) \
+ V(JSCallWithArrayLike) \
+ V(JSCallWithSpread)
+
#define JS_CONSTRUCT_OP_LIST(V) \
V(JSConstructForwardVarargs) \
V(JSConstruct) \
@@ -186,11 +192,8 @@
V(JSConstructWithSpread)
#define JS_OTHER_OP_LIST(V) \
+ JS_CALL_OP_LIST(V) \
JS_CONSTRUCT_OP_LIST(V) \
- V(JSCallForwardVarargs) \
- V(JSCall) \
- V(JSCallWithArrayLike) \
- V(JSCallWithSpread) \
V(JSCallRuntime) \
V(JSForInEnumerate) \
V(JSForInNext) \
@@ -224,13 +227,17 @@
// Opcodes for VirtualMachine-level operators.
#define SIMPLIFIED_CHANGE_OP_LIST(V) \
V(ChangeTaggedSignedToInt32) \
+ V(ChangeTaggedSignedToInt64) \
V(ChangeTaggedToInt32) \
+ V(ChangeTaggedToInt64) \
V(ChangeTaggedToUint32) \
V(ChangeTaggedToFloat64) \
V(ChangeTaggedToTaggedSigned) \
V(ChangeInt31ToTaggedSigned) \
V(ChangeInt32ToTagged) \
+ V(ChangeInt64ToTagged) \
V(ChangeUint32ToTagged) \
+ V(ChangeUint64ToTagged) \
V(ChangeFloat64ToTagged) \
V(ChangeFloat64ToTaggedPointer) \
V(ChangeTaggedToBit) \
@@ -249,8 +256,12 @@
V(CheckedUint32Mod) \
V(CheckedInt32Mul) \
V(CheckedInt32ToTaggedSigned) \
+ V(CheckedInt64ToInt32) \
+ V(CheckedInt64ToTaggedSigned) \
V(CheckedUint32ToInt32) \
V(CheckedUint32ToTaggedSigned) \
+ V(CheckedUint64ToInt32) \
+ V(CheckedUint64ToTaggedSigned) \
V(CheckedFloat64ToInt32) \
V(CheckedTaggedSignedToInt32) \
V(CheckedTaggedToInt32) \
@@ -348,6 +359,7 @@
V(PlainPrimitiveToWord32) \
V(PlainPrimitiveToFloat64) \
V(BooleanNot) \
+ V(StringConcat) \
V(StringToNumber) \
V(StringCharCodeAt) \
V(StringCodePointAt) \
@@ -420,7 +432,7 @@
V(NewSmiOrObjectElements) \
V(NewArgumentsElements) \
V(NewConsString) \
- V(ArrayBufferWasNeutered) \
+ V(DelayedStringConstant) \
V(EnsureWritableFastElements) \
V(MaybeGrowFastElements) \
V(TransitionElementsKind) \
@@ -571,14 +583,7 @@
V(Word64AtomicOr) \
V(Word64AtomicXor) \
V(Word64AtomicExchange) \
- V(Word64AtomicCompareExchange) \
- V(Word64AtomicNarrowAdd) \
- V(Word64AtomicNarrowSub) \
- V(Word64AtomicNarrowAnd) \
- V(Word64AtomicNarrowOr) \
- V(Word64AtomicNarrowXor) \
- V(Word64AtomicNarrowExchange) \
- V(Word64AtomicNarrowCompareExchange)
+ V(Word64AtomicCompareExchange)
#define MACHINE_OP_LIST(V) \
MACHINE_UNOP_32_LIST(V) \
@@ -610,6 +615,7 @@
V(TruncateFloat64ToWord32) \
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToInt64) \
V(ChangeFloat64ToUint32) \
V(ChangeFloat64ToUint64) \
V(Float64SilenceNaN) \
@@ -622,6 +628,7 @@
V(TryTruncateFloat64ToUint64) \
V(ChangeInt32ToFloat64) \
V(ChangeInt32ToInt64) \
+ V(ChangeInt64ToFloat64) \
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(TruncateFloat64ToFloat32) \
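
opcodes.h is organized as X-macro lists: each *_OP_LIST macro takes a macro V and applies it once per opcode, so one list can generate the IrOpcode enum, mnemonic strings, and switch cases without drifting apart. The new JS_CALL_OP_LIST above factors the call operators out so other sites can expand just that subset. A toy version of the pattern, with invented names:

    #define DEMO_OP_LIST(V) \
      V(Add)                \
      V(Sub)                \
      V(Mul)

    enum class DemoOp {
    #define DECLARE(Name) k##Name,
      DEMO_OP_LIST(DECLARE)
    #undef DECLARE
    };

    inline const char* DemoOpName(DemoOp op) {
      switch (op) {
    #define CASE(Name)      \
      case DemoOp::k##Name: \
        return #Name;
        DEMO_OP_LIST(CASE)
    #undef CASE
      }
      return "unknown";
    }
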
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 67a7b138a5..313a263ebb 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -16,14 +16,11 @@ namespace v8 {
namespace internal {
namespace compiler {
-OperationTyper::OperationTyper(Isolate* isolate, JSHeapBroker* js_heap_broker,
- Zone* zone)
+OperationTyper::OperationTyper(JSHeapBroker* js_heap_broker, Zone* zone)
: zone_(zone), cache_(TypeCache::Get()) {
- Factory* factory = isolate->factory();
- infinity_ =
- Type::NewConstant(js_heap_broker, factory->infinity_value(), zone);
- minus_infinity_ =
- Type::NewConstant(js_heap_broker, factory->minus_infinity_value(), zone);
+ Factory* factory = js_heap_broker->isolate()->factory();
+ infinity_ = Type::NewConstant(V8_INFINITY, zone);
+ minus_infinity_ = Type::NewConstant(-V8_INFINITY, zone);
Type truncating_to_zero = Type::MinusZeroOrNaN();
DCHECK(!truncating_to_zero.Maybe(Type::Integral32()));
@@ -265,59 +262,61 @@ Type OperationTyper::ConvertReceiver(Type type) {
return type;
}
-// Returns the result type of converting {type} to number, if the
-// result does not depend on conversion options.
-base::Optional<Type> OperationTyper::ToNumberCommon(Type type) {
+Type OperationTyper::ToNumber(Type type) {
if (type.Is(Type::Number())) return type;
- if (type.Is(Type::NullOrUndefined())) {
- if (type.Is(Type::Null())) return cache_.kSingletonZero;
- if (type.Is(Type::Undefined())) return Type::NaN();
- return Type::Union(Type::NaN(), cache_.kSingletonZero, zone());
- }
- if (type.Is(Type::Boolean())) {
- if (type.Is(singleton_false_)) return cache_.kSingletonZero;
- if (type.Is(singleton_true_)) return cache_.kSingletonOne;
- return cache_.kZeroOrOne;
+
+ // If {type} includes any receivers, we cannot tell what kind of
+ // Number their callbacks might produce. Similarly in the case
+ // where {type} includes String, it's not possible at this point
+ // to tell which exact numbers are going to be produced.
+ if (type.Maybe(Type::StringOrReceiver())) return Type::Number();
+
+ // Both Symbol and BigInt primitives will cause exceptions
+ // to be thrown from ToNumber conversions, so they don't
+ // contribute to the resulting type anyways.
+ type = Type::Intersect(type, Type::PlainPrimitive(), zone());
+
+ // This leaves us with Number\/Oddball, so deal with the individual
+ // Oddball primitives below.
+ DCHECK(type.Is(Type::NumberOrOddball()));
+ if (type.Maybe(Type::Null())) {
+ // ToNumber(null) => +0
+ type = Type::Union(type, cache_.kSingletonZero, zone());
}
- if (type.Is(Type::NumberOrOddball())) {
- if (type.Is(Type::NumberOrUndefined())) {
- type = Type::Union(type, Type::NaN(), zone());
- } else if (type.Is(Type::NullOrNumber())) {
- type = Type::Union(type, cache_.kSingletonZero, zone());
- } else if (type.Is(Type::BooleanOrNullOrNumber())) {
- type = Type::Union(type, cache_.kZeroOrOne, zone());
- } else {
- type = Type::Union(type, cache_.kZeroOrOneOrNaN, zone());
- }
- return Type::Intersect(type, Type::Number(), zone());
+ if (type.Maybe(Type::Undefined())) {
+ // ToNumber(undefined) => NaN
+ type = Type::Union(type, Type::NaN(), zone());
}
- return base::Optional<Type>();
-}
-
-Type OperationTyper::ToNumberOrNumeric(Object::Conversion mode, Type type) {
- if (base::Optional<Type> maybe_result_type = ToNumberCommon(type)) {
- return *maybe_result_type;
+ if (type.Maybe(singleton_false_)) {
+ // ToNumber(false) => +0
+ type = Type::Union(type, cache_.kSingletonZero, zone());
}
- if (type.Is(Type::BigInt())) {
- return mode == Object::Conversion::kToNumber ? Type::None() : type;
+ if (type.Maybe(singleton_true_)) {
+ // ToNumber(true) => +1
+ type = Type::Union(type, cache_.kSingletonOne, zone());
}
- return mode == Object::Conversion::kToNumber ? Type::Number()
- : Type::Numeric();
-}
-
-Type OperationTyper::ToNumber(Type type) {
- return ToNumberOrNumeric(Object::Conversion::kToNumber, type);
+ return Type::Intersect(type, Type::Number(), zone());
}
Type OperationTyper::ToNumberConvertBigInt(Type type) {
- if (base::Optional<Type> maybe_result_type = ToNumberCommon(type)) {
- return *maybe_result_type;
- }
- return Type::Number();
+ // If the {type} includes any receivers, then the callbacks
+ // might actually produce BigInt primitive values here.
+ bool maybe_bigint =
+ type.Maybe(Type::BigInt()) || type.Maybe(Type::Receiver());
+ type = ToNumber(Type::Intersect(type, Type::NonBigInt(), zone()));
+
+ // Any BigInt is rounded to an integer Number in the range [-inf, inf].
+ return maybe_bigint ? Type::Union(type, cache_.kInteger, zone()) : type;
}
Type OperationTyper::ToNumeric(Type type) {
- return ToNumberOrNumeric(Object::Conversion::kToNumeric, type);
+ // If the {type} includes any receivers, then the callbacks
+ // might actually produce BigInt primitive values here.
+ if (type.Maybe(Type::Receiver())) {
+ type = Type::Union(type, Type::BigInt(), zone());
+ }
+ return Type::Union(ToNumber(Type::Intersect(type, Type::NonBigInt(), zone())),
+ Type::Intersect(type, Type::BigInt(), zone()), zone());
}
Type OperationTyper::NumberAbs(Type type) {
@@ -415,7 +414,7 @@ Type OperationTyper::NumberExp(Type type) {
Type OperationTyper::NumberExpm1(Type type) {
DCHECK(type.Is(Type::Number()));
- return Type::Union(Type::PlainNumber(), Type::NaN(), zone());
+ return Type::Number();
}
Type OperationTyper::NumberFloor(Type type) {
@@ -999,7 +998,6 @@ Type OperationTyper::NumberMax(Type lhs, Type rhs) {
if (lhs.Is(Type::NaN()) || rhs.Is(Type::NaN())) return Type::NaN();
Type type = Type::None();
- // TODO(turbofan): Improve minus zero handling here.
if (lhs.Maybe(Type::NaN()) || rhs.Maybe(Type::NaN())) {
type = Type::Union(type, Type::NaN(), zone());
}
@@ -1007,10 +1005,17 @@ Type OperationTyper::NumberMax(Type lhs, Type rhs) {
DCHECK(!lhs.IsNone());
rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
DCHECK(!rhs.IsNone());
- if (lhs.Is(cache_.kInteger) && rhs.Is(cache_.kInteger)) {
+ if (lhs.Is(cache_.kIntegerOrMinusZero) &&
+ rhs.Is(cache_.kIntegerOrMinusZero)) {
+ // TODO(turbofan): This could still be improved in ruling out -0 when
+ // one of the inputs' min is 0.
double max = std::max(lhs.Max(), rhs.Max());
double min = std::max(lhs.Min(), rhs.Min());
type = Type::Union(type, Type::Range(min, max, zone()), zone());
+ if (min <= 0.0 && 0.0 <= max &&
+ (lhs.Maybe(Type::MinusZero()) || rhs.Maybe(Type::MinusZero()))) {
+ type = Type::Union(type, Type::MinusZero(), zone());
+ }
} else {
type = Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
}
@@ -1025,7 +1030,6 @@ Type OperationTyper::NumberMin(Type lhs, Type rhs) {
if (lhs.Is(Type::NaN()) || rhs.Is(Type::NaN())) return Type::NaN();
Type type = Type::None();
- // TODO(turbofan): Improve minus zero handling here.
if (lhs.Maybe(Type::NaN()) || rhs.Maybe(Type::NaN())) {
type = Type::Union(type, Type::NaN(), zone());
}
@@ -1033,10 +1037,15 @@ Type OperationTyper::NumberMin(Type lhs, Type rhs) {
DCHECK(!lhs.IsNone());
rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
DCHECK(!rhs.IsNone());
- if (lhs.Is(cache_.kInteger) && rhs.Is(cache_.kInteger)) {
+ if (lhs.Is(cache_.kIntegerOrMinusZero) &&
+ rhs.Is(cache_.kIntegerOrMinusZero)) {
double max = std::min(lhs.Max(), rhs.Max());
double min = std::min(lhs.Min(), rhs.Min());
type = Type::Union(type, Type::Range(min, max, zone()), zone());
+ if (min <= 0.0 && 0.0 <= max &&
+ (lhs.Maybe(Type::MinusZero()) || rhs.Maybe(Type::MinusZero()))) {
+ type = Type::Union(type, Type::MinusZero(), zone());
+ }
} else {
type = Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
}
@@ -1163,6 +1172,19 @@ Type OperationTyper::StrictEqual(Type lhs, Type rhs) {
return Type::Boolean();
}
+Type OperationTyper::CheckBounds(Type index, Type length) {
+ DCHECK(length.Is(Type::Unsigned31()));
+ if (index.Maybe(Type::MinusZero())) {
+ index = Type::Union(index, cache_.kSingletonZero, zone());
+ }
+ index = Type::Intersect(index, Type::Integral32(), zone());
+ if (index.IsNone() || length.IsNone()) return Type::None();
+ double min = std::max(index.Min(), 0.0);
+ double max = std::min(index.Max(), length.Max() - 1);
+ if (max < min) return Type::None();
+ return Type::Range(min, max, zone());
+}
+
Type OperationTyper::CheckFloat64Hole(Type type) {
if (type.Maybe(Type::Hole())) {
// Turn "the hole" into undefined.
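
The new CheckBounds typing above is plain interval arithmetic on the typer's ranges: -0 is folded into 0, the index is restricted to Integral32, and the result is the index range clamped to [0, length.Max() - 1], with an empty intersection collapsing to Type::None(). The same computation over bare doubles, with illustrative names:

    #include <algorithm>

    struct Range {
      double min, max;
      bool none;  // models Type::None(): the check can never succeed
    };

    Range CheckBoundsSketch(Range index, Range length) {
      double min = std::max(index.min, 0.0);
      double max = std::min(index.max, length.max - 1);
      if (max < min) return {0.0, 0.0, true};
      return {min, max, false};
    }

    // Example: an index in [-2, 10] checked against a length in [0, 8]
    // yields [0, 7] -- exactly the indices that can pass the bounds check.
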
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index fb5997485c..e84e97d2a1 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -27,7 +27,7 @@ class TypeCache;
class V8_EXPORT_PRIVATE OperationTyper {
public:
- OperationTyper(Isolate* isolate, JSHeapBroker* js_heap_broker, Zone* zone);
+ OperationTyper(JSHeapBroker* js_heap_broker, Zone* zone);
// Typing Phi.
Type Merge(Type left, Type right);
@@ -58,6 +58,7 @@ class V8_EXPORT_PRIVATE OperationTyper {
Type StrictEqual(Type lhs, Type rhs);
// Check operators.
+ Type CheckBounds(Type index, Type length);
Type CheckFloat64Hole(Type type);
Type CheckNumber(Type type);
Type ConvertTaggedHoleToUndefined(Type type);
@@ -77,9 +78,6 @@ class V8_EXPORT_PRIVATE OperationTyper {
private:
typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
- Type ToNumberOrNumeric(Object::Conversion mode, Type type);
- base::Optional<Type> ToNumberCommon(Type type);
-
ComparisonOutcome Invert(ComparisonOutcome);
Type Invert(Type);
Type FalsifyUndefined(ComparisonOutcome);
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index a5d16053d2..8da3ccfd81 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -7,6 +7,7 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/opcodes.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -18,6 +19,109 @@ bool OperatorProperties::HasContextInput(const Operator* op) {
return IrOpcode::IsJsOpcode(opcode);
}
+// static
+bool OperatorProperties::NeedsExactContext(const Operator* op) {
+ DCHECK(HasContextInput(op));
+ IrOpcode::Value const opcode = static_cast<IrOpcode::Value>(op->opcode());
+ switch (opcode) {
+#define CASE(Name) case IrOpcode::k##Name:
+ // Binary/unary operators, calls and constructor calls only
+ // need the context to generate exceptions or lookup fields
+ // on the native context, so passing any context is fine.
+ JS_SIMPLE_BINOP_LIST(CASE)
+ JS_CALL_OP_LIST(CASE)
+ JS_CONSTRUCT_OP_LIST(CASE)
+ JS_SIMPLE_UNOP_LIST(CASE)
+#undef CASE
+ case IrOpcode::kJSCloneObject:
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateEmptyLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateEmptyLiteralObject:
+ case IrOpcode::kJSCreateArrayFromIterable:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSForInEnumerate:
+ case IrOpcode::kJSForInNext:
+ case IrOpcode::kJSForInPrepare:
+ case IrOpcode::kJSGeneratorRestoreContext:
+ case IrOpcode::kJSGeneratorRestoreContinuation:
+ case IrOpcode::kJSGeneratorRestoreInputOrDebugPos:
+ case IrOpcode::kJSGeneratorRestoreRegister:
+ case IrOpcode::kJSGetSuperConstructor:
+ case IrOpcode::kJSLoadGlobal:
+ case IrOpcode::kJSLoadMessage:
+ case IrOpcode::kJSStackCheck:
+ case IrOpcode::kJSStoreGlobal:
+ case IrOpcode::kJSStoreMessage:
+ return false;
+
+ case IrOpcode::kJSCallRuntime:
+ return Runtime::NeedsExactContext(CallRuntimeParametersOf(op).id());
+
+ case IrOpcode::kJSCreateArguments:
+ // For mapped arguments we need to access slots of context-allocated
+ // variables if there's aliasing with formal parameters.
+ return CreateArgumentsTypeOf(op) == CreateArgumentsType::kMappedArguments;
+
+ case IrOpcode::kJSCreateBlockContext:
+ case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateFunctionContext:
+ case IrOpcode::kJSCreateGeneratorObject:
+ case IrOpcode::kJSCreateCatchContext:
+ case IrOpcode::kJSCreateWithContext:
+ case IrOpcode::kJSDebugger:
+ case IrOpcode::kJSDeleteProperty:
+ case IrOpcode::kJSGeneratorStore:
+ case IrOpcode::kJSHasProperty:
+ case IrOpcode::kJSLoadContext:
+ case IrOpcode::kJSLoadModule:
+ case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSLoadProperty:
+ case IrOpcode::kJSStoreContext:
+ case IrOpcode::kJSStoreDataPropertyInLiteral:
+ case IrOpcode::kJSStoreInArrayLiteral:
+ case IrOpcode::kJSStoreModule:
+ case IrOpcode::kJSStoreNamed:
+ case IrOpcode::kJSStoreNamedOwn:
+ case IrOpcode::kJSStoreProperty:
+ return true;
+
+ case IrOpcode::kJSCreateArrayIterator:
+ case IrOpcode::kJSCreateBoundFunction:
+ case IrOpcode::kJSCreateCollectionIterator:
+ case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateStringIterator:
+ case IrOpcode::kJSCreateKeyValueArray:
+ case IrOpcode::kJSCreateObject:
+ case IrOpcode::kJSCreatePromise:
+ case IrOpcode::kJSCreateTypedArray:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSFulfillPromise:
+ case IrOpcode::kJSObjectIsArray:
+ case IrOpcode::kJSPerformPromiseThen:
+ case IrOpcode::kJSPromiseResolve:
+ case IrOpcode::kJSRegExpTest:
+ case IrOpcode::kJSRejectPromise:
+ case IrOpcode::kJSResolvePromise:
+ // These operators aren't introduced by BytecodeGraphBuilder and
+ // thus we don't bother checking them. If you ever introduce one
+ // of these early in the BytecodeGraphBuilder make sure to check
+ // whether they are context-sensitive.
+ break;
+
+#define CASE(Name) case IrOpcode::k##Name:
+ // Non-JavaScript operators don't have a notion of "context"
+ COMMON_OP_LIST(CASE)
+ CONTROL_OP_LIST(CASE)
+ MACHINE_OP_LIST(CASE)
+ MACHINE_SIMD_OP_LIST(CASE)
+ SIMPLIFIED_OP_LIST(CASE)
+ break;
+#undef CASE
+ }
+ UNREACHABLE();
+}
// static
bool OperatorProperties::HasFrameStateInput(const Operator* op) {
@@ -73,6 +177,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCreateArray:
case IrOpcode::kJSCreateTypedArray:
case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateArrayFromIterable:
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
case IrOpcode::kJSCreateObject:
@@ -90,7 +195,6 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSDeleteProperty:
// Conversions
- case IrOpcode::kJSToInteger:
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
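
The new NeedsExactContext predicate lets later phases substitute a looser context for operators that only throw or read fields off the native context. The underlying observation is that every context in a chain resolves to the same native context, so such operators cannot tell the difference. A generic sketch of that idea, not V8's real Context API:

    struct Context {
      Context* previous = nullptr;  // enclosing context; nullptr at the top

      // Every context created within one realm reaches the same native
      // context, so operators that only consult it accept any context
      // from that chain.
      Context* Native() {
        Context* c = this;
        while (c->previous != nullptr) c = c->previous;
        return c;
      }
    };
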
diff --git a/deps/v8/src/compiler/operator-properties.h b/deps/v8/src/compiler/operator-properties.h
index b4bb8b5e73..eb9e683f63 100644
--- a/deps/v8/src/compiler/operator-properties.h
+++ b/deps/v8/src/compiler/operator-properties.h
@@ -22,6 +22,8 @@ class V8_EXPORT_PRIVATE OperatorProperties final {
return HasContextInput(op) ? 1 : 0;
}
+ static bool NeedsExactContext(const Operator* op);
+
static bool HasFrameStateInput(const Operator* op);
static int GetFrameStateInputCount(const Operator* op) {
return HasFrameStateInput(op) ? 1 : 0;
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index e436ec09f4..dc94e91190 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -65,7 +65,7 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
size_t value_in, size_t effect_in, size_t control_in,
size_t value_out, size_t effect_out, size_t control_out);
- virtual ~Operator() {}
+ virtual ~Operator() = default;
// A small integer unique to all instances of a particular kind of operator,
// useful for quick matching for specific kinds of operators. For fast access
@@ -197,7 +197,7 @@ class Operator1 : public Operator {
os << "[" << parameter() << "]";
}
- virtual void PrintToImpl(std::ostream& os, PrintVerbosity verbose) const {
+ void PrintToImpl(std::ostream& os, PrintVerbosity verbose) const override {
os << mnemonic();
PrintParameter(os, verbose);
}
diff --git a/deps/v8/src/compiler/per-isolate-compiler-cache.h b/deps/v8/src/compiler/per-isolate-compiler-cache.h
new file mode 100644
index 0000000000..70f53c38e1
--- /dev/null
+++ b/deps/v8/src/compiler/per-isolate-compiler-cache.h
@@ -0,0 +1,64 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PER_ISOLATE_COMPILER_CACHE_H_
+#define V8_COMPILER_PER_ISOLATE_COMPILER_CACHE_H_
+
+#include "src/compiler/refs-map.h"
+#include "src/isolate.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Zone;
+
+namespace compiler {
+
+class ObjectData;
+
+// This class serves as a per-isolate container of data that should be
+// persisted between compiler runs. For now it stores the code builtins
+// so they are not serialized on each compiler run.
+class PerIsolateCompilerCache : public ZoneObject {
+ public:
+ explicit PerIsolateCompilerCache(Zone* zone)
+ : zone_(zone), refs_snapshot_(nullptr) {}
+
+ RefsMap* GetSnapshot() { return refs_snapshot_; }
+ void SetSnapshot(RefsMap* refs) {
+ DCHECK_NULL(refs_snapshot_);
+ DCHECK(!refs->IsEmpty());
+ refs_snapshot_ = new (zone_) RefsMap(refs, zone_);
+ }
+
+ bool HasSnapshot() const { return refs_snapshot_; }
+
+ Zone* zone() const { return zone_; }
+
+ static void Setup(Isolate* isolate) {
+ if (isolate->compiler_cache()) return;
+
+ // The following zone is supposed to contain compiler-related objects
+ // that should live through all compilations, as opposed to the
+ // broker_zone which holds per-compilation data. It's not meant for
+ // per-compilation or heap broker data.
+ Zone* compiler_zone = new Zone(isolate->allocator(), "Compiler zone");
+ PerIsolateCompilerCache* compiler_cache =
+ new (compiler_zone) PerIsolateCompilerCache(compiler_zone);
+ isolate->set_compiler_utils(compiler_cache, compiler_zone);
+ }
+
+ private:
+ Zone* const zone_;
+
+ RefsMap* refs_snapshot_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_PER_ISOLATE_COMPILER_CACHE_H_
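
Setup() in the new header above uses a self-owning allocation: the cache object is placement-allocated inside the very zone it keeps a pointer to, so tearing down the zone releases the cache and everything it snapshotted in one step. A simplified model of that pattern, where Zone is a stand-in bump allocator rather than V8's class (alignment ignored for brevity):

    #include <cstddef>
    #include <new>
    #include <vector>

    struct Zone {  // stand-in: a trivial region allocator
      void* Allocate(std::size_t size) {
        blocks_.push_back(new char[size]);
        return blocks_.back();
      }
      ~Zone() {
        for (char* block : blocks_) delete[] block;
      }
      std::vector<char*> blocks_;
    };

    struct Cache {
      explicit Cache(Zone* zone) : zone_(zone) {}
      Zone* zone_;  // the zone the cache itself lives in
    };

    Cache* SetupCache() {
      Zone* zone = new Zone();  // the owner (here, an isolate) deletes it later
      void* memory = zone->Allocate(sizeof(Cache));
      return new (memory) Cache(zone);  // freed when *zone is destroyed
    }
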
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 0cc3723594..c4169266e7 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -137,9 +137,9 @@ class PipelineData {
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
- js_heap_broker_ = new (codegen_zone_) JSHeapBroker(isolate_, codegen_zone_);
+ js_heap_broker_ = new (info_->zone()) JSHeapBroker(isolate_, info_->zone());
dependencies_ =
- new (codegen_zone_) CompilationDependencies(isolate_, codegen_zone_);
+ new (info_->zone()) CompilationDependencies(isolate_, info_->zone());
}
// For WebAssembly compile entry point.
@@ -147,8 +147,7 @@ class PipelineData {
OptimizedCompilationInfo* info, MachineGraph* mcgraph,
PipelineStatistics* pipeline_statistics,
SourcePositionTable* source_positions,
- NodeOriginTable* node_origins,
- int wasm_function_index,
+ NodeOriginTable* node_origins, int wasm_function_index,
const AssemblerOptions& assembler_options)
: isolate_(nullptr),
wasm_engine_(wasm_engine),
@@ -156,6 +155,7 @@ class PipelineData {
info_(info),
debug_name_(info_->GetDebugName()),
wasm_function_index_(wasm_function_index),
+ may_have_unverifiable_graph_(false),
zone_stats_(zone_stats),
pipeline_statistics_(pipeline_statistics),
graph_zone_scope_(zone_stats_, ZONE_NAME),
@@ -218,8 +218,11 @@ class PipelineData {
assembler_options_(AssemblerOptions::Default(isolate)) {}
~PipelineData() {
- delete code_generator_; // Must happen before zones are destroyed.
+ // Must happen before zones are destroyed.
+ delete code_generator_;
code_generator_ = nullptr;
+ DeleteTyper();
+
DeleteRegisterAllocationZone();
DeleteInstructionZone();
DeleteCodegenZone();
@@ -310,6 +313,22 @@ class PipelineData {
: wasm_engine_->GetCodeTracer();
}
+ Typer* CreateTyper() {
+ DCHECK_NULL(typer_);
+ typer_ = new Typer(js_heap_broker(), typer_flags_, graph());
+ return typer_;
+ }
+
+ void AddTyperFlag(Typer::Flag flag) {
+ DCHECK_NULL(typer_);
+ typer_flags_ |= flag;
+ }
+
+ void DeleteTyper() {
+ delete typer_;
+ typer_ = nullptr;
+ }
+
void DeleteGraphZone() {
if (graph_zone_ == nullptr) return;
graph_zone_scope_.Destroy();
@@ -433,6 +452,8 @@ class PipelineData {
base::Optional<OsrHelper> osr_helper_;
MaybeHandle<Code> code_;
CodeGenerator* code_generator_ = nullptr;
+ Typer* typer_ = nullptr;
+ Typer::Flags typer_flags_ = Typer::kNoFlags;
// All objects in the following group of fields are allocated in graph_zone_.
// They are all set to nullptr when the graph_zone_ is destroyed.
@@ -658,12 +679,6 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
#endif // ENABLE_DISASSEMBLER
}
-struct TurboCfgFile : public std::ofstream {
- explicit TurboCfgFile(Isolate* isolate)
- : std::ofstream(isolate->GetTurboCfgFileName().c_str(),
- std::ios_base::app) {}
-};
-
void TraceSchedule(OptimizedCompilationInfo* info, PipelineData* data,
Schedule* schedule, const char* phase_name) {
if (info->trace_turbo_json_enabled()) {
@@ -692,7 +707,7 @@ class SourcePositionWrapper final : public Reducer {
public:
SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
: reducer_(reducer), table_(table) {}
- ~SourcePositionWrapper() final {}
+ ~SourcePositionWrapper() final = default;
const char* reducer_name() const override { return reducer_->reducer_name(); }
@@ -715,7 +730,7 @@ class NodeOriginsWrapper final : public Reducer {
public:
NodeOriginsWrapper(Reducer* reducer, NodeOriginTable* table)
: reducer_(reducer), table_(table) {}
- ~NodeOriginsWrapper() final {}
+ ~NodeOriginsWrapper() final = default;
const char* reducer_name() const override { return reducer_->reducer_name(); }
@@ -896,12 +911,13 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
compilation_info()->MarkAsAccessorInliningEnabled();
}
- // Compute and set poisoning level.
+ // This is the bottleneck for computing and setting poisoning level in the
+ // optimizing compiler.
PoisoningMitigationLevel load_poisoning =
PoisoningMitigationLevel::kDontPoison;
- if (FLAG_branch_load_poisoning) {
- load_poisoning = PoisoningMitigationLevel::kPoisonAll;
- } else if (FLAG_untrusted_code_mitigations) {
+ if (FLAG_untrusted_code_mitigations) {
+ // For full mitigations, this can be changed to
+ // PoisoningMitigationLevel::kPoisonAll.
load_poisoning = PoisoningMitigationLevel::kPoisonCriticalOnly;
}
compilation_info()->SetPoisoningMitigationLevel(load_poisoning);
@@ -1030,7 +1046,6 @@ class PipelineWasmCompilationJob final : public OptimizedCompilationJob {
PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::PrepareJobImpl(
Isolate* isolate) {
UNREACHABLE(); // Prepare should always be skipped for WasmCompilationJob.
- return SUCCEEDED;
}
PipelineWasmCompilationJob::Status
@@ -1110,7 +1125,6 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
Isolate* isolate) {
UNREACHABLE(); // Finalize should always be skipped for WasmCompilationJob.
- return SUCCEEDED;
}
template <typename Phase>
@@ -1186,6 +1200,7 @@ struct InliningPhase {
void Run(PipelineData* data, Zone* temp_zone) {
Isolate* isolate = data->isolate();
+ OptimizedCompilationInfo* info = data->info();
GraphReducer graph_reducer(temp_zone, data->graph(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
@@ -1214,9 +1229,12 @@ struct InliningPhase {
if (data->info()->is_bailout_on_uninitialized()) {
flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
}
+ // Passing the OptimizedCompilationInfo's shared zone here as
+ // JSNativeContextSpecialization allocates out-of-heap objects
+ // that need to live until code generation.
JSNativeContextSpecialization native_context_specialization(
&graph_reducer, data->jsgraph(), data->js_heap_broker(), flags,
- data->native_context(), data->dependencies(), temp_zone);
+ data->native_context(), data->dependencies(), temp_zone, info->zone());
JSInliningHeuristic inlining(
&graph_reducer, data->info()->is_inlining_enabled()
? JSInliningHeuristic::kGeneralInlining
@@ -1242,6 +1260,11 @@ struct TyperPhase {
void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
+
+ // Make sure we always type True and False. Needed for escape analysis.
+ roots.push_back(data->jsgraph()->TrueConstant());
+ roots.push_back(data->jsgraph()->FalseConstant());
+
LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
data->common(), temp_zone);
if (FLAG_turbo_loop_variable) induction_vars.Run();
@@ -1279,10 +1302,16 @@ struct UntyperPhase {
}
};
-struct CopyMetadataForConcurrentCompilePhase {
- static const char* phase_name() {
- return "copy metadata for concurrent compile";
+struct SerializeStandardObjectsPhase {
+ static const char* phase_name() { return "serialize standard objects"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ data->js_heap_broker()->SerializeStandardObjects();
}
+};
+
+struct CopyMetadataForConcurrentCompilePhase {
+ static const char* phase_name() { return "serialize metadata"; }
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
@@ -1290,7 +1319,11 @@ struct CopyMetadataForConcurrentCompilePhase {
JSHeapCopyReducer heap_copy_reducer(data->js_heap_broker());
AddReducer(data, &graph_reducer, &heap_copy_reducer);
graph_reducer.ReduceGraph();
- data->js_heap_broker()->StopSerializing();
+
+ // Some nodes that are no longer in the graph might still be in the cache.
+ NodeVector cached_nodes(temp_zone);
+ data->jsgraph()->GetCachedNodes(&cached_nodes);
+ for (Node* const node : cached_nodes) graph_reducer.ReduceNode(node);
}
};
@@ -1304,7 +1337,7 @@ struct TypedLoweringPhase {
data->common(), temp_zone);
JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
data->jsgraph(), data->js_heap_broker(),
- data->native_context(), temp_zone);
+ temp_zone);
JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
data->js_heap_broker(), temp_zone);
ConstantFoldingReducer constant_folding_reducer(
@@ -1385,32 +1418,6 @@ struct LoopExitEliminationPhase {
}
};
-struct ConcurrentOptimizationPrepPhase {
- static const char* phase_name() { return "concurrency preparation"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- // Make sure we cache these code stubs.
- data->jsgraph()->CEntryStubConstant(1);
- data->jsgraph()->CEntryStubConstant(2);
-
- // TODO(turbofan): Remove this line once the Array constructor code
- // is a proper builtin and no longer a CodeStub.
- data->jsgraph()->ArrayConstructorStubConstant();
-
- // This is needed for escape analysis.
- NodeProperties::SetType(
- data->jsgraph()->FalseConstant(),
- Type::HeapConstant(data->js_heap_broker(),
- data->isolate()->factory()->false_value(),
- data->jsgraph()->zone()));
- NodeProperties::SetType(
- data->jsgraph()->TrueConstant(),
- Type::HeapConstant(data->js_heap_broker(),
- data->isolate()->factory()->true_value(),
- data->jsgraph()->zone()));
- }
-};
-
struct GenericLoweringPhase {
static const char* phase_name() { return "generic lowering"; }
@@ -2009,39 +2016,36 @@ bool PipelineImpl::CreateGraph() {
Run<EarlyGraphTrimmingPhase>();
RunPrintAndVerify(EarlyGraphTrimmingPhase::phase_name(), true);
- // Run the type-sensitive lowerings and optimizations on the graph.
+ // Determine the Typer operation flags.
{
- // Determine the Typer operation flags.
- Typer::Flags flags = Typer::kNoFlags;
if (is_sloppy(info()->shared_info()->language_mode()) &&
info()->shared_info()->IsUserJavaScript()) {
// Sloppy mode functions always have an Object for this.
- flags |= Typer::kThisIsReceiver;
+ data->AddTyperFlag(Typer::kThisIsReceiver);
}
if (IsClassConstructor(info()->shared_info()->kind())) {
// Class constructors cannot be [[Call]]ed.
- flags |= Typer::kNewTargetIsReceiver;
+ data->AddTyperFlag(Typer::kNewTargetIsReceiver);
}
+ }
- // Type the graph and keep the Typer running on newly created nodes within
- // this scope; the Typer is automatically unlinked from the Graph once we
- // leave this scope below.
- Typer typer(isolate(), data->js_heap_broker(), flags, data->graph());
- Run<TyperPhase>(&typer);
- RunPrintAndVerify(TyperPhase::phase_name());
-
- // Do some hacky things to prepare for the optimization phase.
- // (caching handles, etc.).
- Run<ConcurrentOptimizationPrepPhase>();
-
+ // Run the type-sensitive lowerings and optimizations on the graph.
+ {
if (FLAG_concurrent_compiler_frontend) {
- data->js_heap_broker()->SerializeStandardObjects();
+ data->js_heap_broker()->StartSerializing();
+ Run<SerializeStandardObjectsPhase>();
Run<CopyMetadataForConcurrentCompilePhase>();
+ data->js_heap_broker()->StopSerializing();
+ } else {
+ data->js_heap_broker()->SetNativeContextRef();
+ // Type the graph and keep the Typer running such that new nodes get
+ // automatically typed when they are created.
+ Run<TyperPhase>(data->CreateTyper());
+ RunPrintAndVerify(TyperPhase::phase_name());
+ Run<TypedLoweringPhase>();
+ RunPrintAndVerify(TypedLoweringPhase::phase_name());
+ data->DeleteTyper();
}
-
- // Lower JSOperators where we can determine types.
- Run<TypedLoweringPhase>();
- RunPrintAndVerify(TypedLoweringPhase::phase_name());
}
data->EndPhaseKind();
@@ -2054,6 +2058,16 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
data->BeginPhaseKind("lowering");
+ if (FLAG_concurrent_compiler_frontend) {
+ // Type the graph and keep the Typer running such that new nodes get
+ // automatically typed when they are created.
+ Run<TyperPhase>(data->CreateTyper());
+ RunPrintAndVerify(TyperPhase::phase_name());
+ Run<TypedLoweringPhase>();
+ RunPrintAndVerify(TypedLoweringPhase::phase_name());
+ data->DeleteTyper();
+ }
+
if (data->info()->is_loop_peeling_enabled()) {
Run<LoopPeelingPhase>();
RunPrintAndVerify(LoopPeelingPhase::phase_name(), true);
@@ -2155,10 +2169,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
- SourcePositionTable source_positions(graph);
NodeOriginTable node_origins(graph);
- PipelineData data(&zone_stats, &info, isolate, graph, schedule,
- &source_positions, &node_origins, jump_opt, options);
+ PipelineData data(&zone_stats, &info, isolate, graph, schedule, nullptr,
+ &node_origins, jump_opt, options);
data.set_verify_graph(FLAG_verify_csa);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
@@ -2193,6 +2206,49 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
}
// static
+MaybeHandle<Code> Pipeline::GenerateCodeForWasmStub(
+ Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
+ Code::Kind kind, const char* debug_name, const AssemblerOptions& options,
+ SourcePositionTable* source_positions) {
+ OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
+ // Construct a pipeline for scheduling and code generation.
+ ZoneStats zone_stats(isolate->allocator());
+ NodeOriginTable* node_positions = new (graph->zone()) NodeOriginTable(graph);
+ PipelineData data(&zone_stats, &info, isolate, graph, nullptr,
+ source_positions, node_positions, nullptr, options);
+ std::unique_ptr<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
+ pipeline_statistics.reset(new PipelineStatistics(
+ &info, isolate->GetTurboStatistics(), &zone_stats));
+ pipeline_statistics->BeginPhaseKind("wasm stub codegen");
+ }
+
+ PipelineImpl pipeline(&data);
+
+ if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
+ << std::endl
+ << AsRPO(*graph);
+ }
+
+ if (info.trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(&info, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ }
+ // TODO(rossberg): Should this really be untyped?
+ pipeline.RunPrintAndVerify("machine", true);
+ pipeline.ComputeScheduledGraph();
+
+ Handle<Code> code;
+ if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
+ pipeline.CommitDependencies(code)) {
+ return code;
+ }
+ return MaybeHandle<Code>();
+}
+
+// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate) {
ZoneStats zone_stats(isolate->allocator());
@@ -2220,17 +2276,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
CallDescriptor* call_descriptor, Graph* graph,
- const AssemblerOptions& options, Schedule* schedule,
- SourcePositionTable* source_positions) {
+ const AssemblerOptions& options, Schedule* schedule) {
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
- // TODO(wasm): Refactor code generation to check for non-existing source
- // table, then remove this conditional allocation.
- if (!source_positions)
- source_positions = new (info->zone()) SourcePositionTable(graph);
NodeOriginTable* node_positions = new (info->zone()) NodeOriginTable(graph);
- PipelineData data(&zone_stats, info, isolate, graph, schedule,
- source_positions, node_positions, nullptr, options);
+ PipelineData data(&zone_stats, info, isolate, graph, schedule, nullptr,
+ node_positions, nullptr, options);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(
@@ -2374,7 +2425,11 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
if (info()->trace_turbo_json_enabled()) {
std::ostringstream source_position_output;
// Output source position information before the graph is deleted.
- data_->source_positions()->PrintJson(source_position_output);
+ if (data_->source_positions() != nullptr) {
+ data_->source_positions()->PrintJson(source_position_output);
+ } else {
+ source_position_output << "{}";
+ }
source_position_output << ",\n\"NodeOrigins\" : ";
data_->node_origins()->PrintJson(source_position_output);
data_->set_source_position_output(source_position_output.str());
@@ -2398,17 +2453,16 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
run_verifier);
#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
- } else if (data_->assembler_options().isolate_independent_code) {
+ } else if (Builtins::IsBuiltinId(data->info()->builtin_index())) {
// TODO(v8:6666): Extend support to user code. Ensure that
// it is mutually exclusive with the Poisoning configuration above; and that
// it cooperates with restricted allocatable registers above.
static_assert(kRootRegister == kSpeculationPoisonRegister,
"The following checks assume root equals poison register");
- CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_branch_load_poisoning);
CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_untrusted_code_mitigations);
AllocateRegisters(RegisterConfiguration::PreserveRootIA32(),
call_descriptor, run_verifier);
-#endif // V8_TARGET_ARCH_IA32
+#endif // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
} else {
AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
run_verifier);
@@ -2512,6 +2566,9 @@ std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
MaybeHandle<Code> PipelineImpl::FinalizeCode() {
PipelineData* data = this->data_;
+ if (data->js_heap_broker() && FLAG_concurrent_compiler_frontend) {
+ data->js_heap_broker()->Retire();
+ }
Run<FinalizeCodePhase>();
MaybeHandle<Code> maybe_code = data->code();
@@ -2578,6 +2635,29 @@ bool PipelineImpl::CommitDependencies(Handle<Code> code) {
data_->dependencies()->Commit(code);
}
+namespace {
+
+void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
+ const RegisterConfiguration* config,
+ const char* phase_name) {
+ if (info->trace_turbo_json_enabled()) {
+ AllowHandleDereference allow_deref;
+ TurboJsonFile json_of(info, std::ios_base::app);
+ json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\",";
+ json_of << InstructionSequenceAsJSON{config, data->sequence()};
+ json_of << "},\n";
+ }
+ if (info->trace_turbo_graph_enabled()) {
+ AllowHandleDereference allow_deref;
+ CodeTracer::Scope tracing_scope(data->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "----- Instruction sequence " << phase_name << " -----\n"
+ << PrintableInstructionSequence({config, data->sequence()});
+ }
+}
+
+} // namespace
+
void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* call_descriptor,
bool run_verifier) {
@@ -2603,13 +2683,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<MeetRegisterConstraintsPhase>();
Run<ResolvePhisPhase>();
Run<BuildLiveRangesPhase>();
- if (info()->trace_turbo_graph_enabled()) {
- AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "----- Instruction sequence before register allocation -----\n"
- << PrintableInstructionSequence({config, data->sequence()});
- }
+ TraceSequence(info(), data, config, "before register allocation");
if (verifier != nullptr) {
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
CHECK(data->register_allocation_data()
@@ -2621,7 +2695,10 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
}
Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
- Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
+
+ if (data->sequence()->HasFPVirtualRegisters()) {
+ Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
+ }
if (FLAG_turbo_preprocess_ranges) {
Run<MergeSplintersPhase>();
@@ -2647,13 +2724,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<LocateSpillSlotsPhase>();
- if (info()->trace_turbo_graph_enabled()) {
- AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "----- Instruction sequence after register allocation -----\n"
- << PrintableInstructionSequence({config, data->sequence()});
- }
+ TraceSequence(info(), data, config, "after register allocation");
if (verifier != nullptr) {
verifier->VerifyAssignment("End of regalloc pipeline.");
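
PipelineData now owns the Typer through an explicit create/delete pair: flags are accumulated first (AddTyperFlag DCHECKs that nothing has been created yet), CreateTyper builds the object once, and DeleteTyper runs before the zones it points into are destroyed, with the destructor as a backstop. The shape of that protocol, reduced to a sketch with simplified types:

    #include <cassert>

    class Typer {
     public:
      explicit Typer(unsigned flags) : flags_(flags) {}
     private:
      unsigned flags_;
    };

    class PipelineDataSketch {
     public:
      void AddTyperFlag(unsigned flag) {
        assert(typer_ == nullptr);  // flags are fixed once the Typer exists
        typer_flags_ |= flag;
      }
      Typer* CreateTyper() {
        assert(typer_ == nullptr);  // built at most once per pipeline
        typer_ = new Typer(typer_flags_);
        return typer_;
      }
      void DeleteTyper() {  // called before dependent zones go away
        delete typer_;
        typer_ = nullptr;
      }
      ~PipelineDataSketch() { DeleteTyper(); }

     private:
      Typer* typer_ = nullptr;
      unsigned typer_flags_ = 0;
    };
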
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 5e4ae8671b..a86efe840b 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -54,6 +54,13 @@ class Pipeline : public AllStatic {
wasm::NativeModule* native_module, int function_index,
wasm::ModuleOrigin wasm_origin);
+ // Run the pipeline on a machine graph and generate code.
+ static MaybeHandle<Code> GenerateCodeForWasmStub(
+ Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
+ Code::Kind kind, const char* debug_name,
+ const AssemblerOptions& assembler_options,
+ SourcePositionTable* source_positions = nullptr);
+
// Run the pipeline on a machine graph and generate code. The {schedule} must
// be valid, hence the given {graph} does not need to be schedulable.
static MaybeHandle<Code> GenerateCodeForCodeStub(
@@ -76,8 +83,7 @@ class Pipeline : public AllStatic {
V8_EXPORT_PRIVATE static MaybeHandle<Code> GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
CallDescriptor* call_descriptor, Graph* graph,
- const AssemblerOptions& options, Schedule* schedule = nullptr,
- SourcePositionTable* source_positions = nullptr);
+ const AssemblerOptions& options, Schedule* schedule = nullptr);
// Run just the register allocator phases.
V8_EXPORT_PRIVATE static bool AllocateRegistersForTesting(
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 45cd95a9e0..fd2b2eefdb 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -74,6 +74,10 @@ class PPCOperandConverter final : public InstructionOperandConverter {
return Operand(constant.ToInt64());
#endif
case Constant::kExternalReference:
+ return Operand(constant.ToExternalReference());
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
@@ -513,11 +517,11 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
/* Min: The algorithm is: -((-L) + (-R)), which in case of L and R */ \
/* being different registers is most efficiently expressed */ \
/* as -((-L) - R). */ \
- __ fneg(left_reg, left_reg); \
- if (left_reg == right_reg) { \
- __ fadd(result_reg, left_reg, right_reg); \
+ __ fneg(kScratchDoubleReg, left_reg); \
+ if (kScratchDoubleReg == right_reg) { \
+ __ fadd(result_reg, kScratchDoubleReg, right_reg); \
} else { \
- __ fsub(result_reg, left_reg, right_reg); \
+ __ fsub(result_reg, kScratchDoubleReg, right_reg); \
} \
__ fneg(result_reg, result_reg); \
__ b(&done); \
@@ -660,15 +664,15 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
__ bne(&exchange, cr0); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label binop; \
- __ bind(&binop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ bin_inst(i.InputRegister(2), i.OutputRegister(), i.InputRegister(2)); \
- __ store_inst(i.InputRegister(2), operand); \
- __ bne(&binop, cr0); \
+#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst) \
+ do { \
+ MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
+ Label binop; \
+ __ bind(&binop); \
+ __ load_inst(i.OutputRegister(), operand); \
+ __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
+ __ store_inst(kScratchReg, operand); \
+ __ bne(&binop, cr0); \
} while (false)
#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, \
@@ -691,7 +695,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
Label exit; \
__ bind(&loop); \
__ load_inst(i.OutputRegister(), operand); \
- __ cmp_inst(i.OutputRegister(), i.InputRegister(2)); \
+ __ cmp_inst(i.OutputRegister(), i.InputRegister(2), cr0); \
__ bne(&exit, cr0); \
__ store_inst(i.InputRegister(3), operand); \
__ bne(&loop, cr0); \
@@ -1975,32 +1979,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
break;
+ case kPPC_Word64AtomicLoadUint8:
case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
break;
case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
break;
+ case kPPC_Word64AtomicLoadUint16:
case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
break;
+ case kPPC_Word64AtomicLoadUint32:
case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
break;
+ case kPPC_Word64AtomicLoadUint64:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ld, ldx);
+ break;
+ case kPPC_Word64AtomicStoreUint8:
case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
break;
+ case kPPC_Word64AtomicStoreUint16:
case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
break;
+ case kPPC_Word64AtomicStoreUint32:
case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
break;
+ case kPPC_Word64AtomicStoreUint64:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(std, stdx);
+ break;
case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
__ extsb(i.OutputRegister(0), i.OutputRegister(0));
break;
+ case kPPC_Word64AtomicExchangeUint8:
case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
break;
@@ -2008,44 +2025,57 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
__ extsh(i.OutputRegister(0), i.OutputRegister(0));
break;
+ case kPPC_Word64AtomicExchangeUint16:
case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
break;
+ case kPPC_Word64AtomicExchangeUint32:
case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
break;
-
+ case kPPC_Word64AtomicExchangeUint64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx);
+ break;
case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lbarx, stbcx, extsb);
break;
+ case kPPC_Word64AtomicCompareExchangeUint8:
case kWord32AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lbarx, stbcx);
break;
case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lharx, sthcx, extsh);
break;
+ case kPPC_Word64AtomicCompareExchangeUint16:
case kWord32AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lharx, sthcx);
break;
+ case kPPC_Word64AtomicCompareExchangeUint32:
case kWord32AtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx);
break;
-
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \
+ case kPPC_Word64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, ldarx, stdcx);
+ break;
+
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
+ break; \
+ case kPPC_Word64Atomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx); \
+ break; \
+ case kWord32Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
+ break; \
+ case kPPC_Word64Atomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx); \
+ break; \
+ case kPPC_Word64Atomic##op##Uint32: \
+ case kWord32Atomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -2054,6 +2084,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
+#define ATOMIC64_BINOP_CASE(op, inst) \
+ case kPPC_Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx); \
+ break;
+ ATOMIC64_BINOP_CASE(Add, add)
+ ATOMIC64_BINOP_CASE(Sub, sub)
+ ATOMIC64_BINOP_CASE(And, and_)
+ ATOMIC64_BINOP_CASE(Or, orx)
+ ATOMIC64_BINOP_CASE(Xor, xor_)
+#undef ATOMIC64_BINOP_CASE
+
case kPPC_ByteRev32: {
Register input = i.InputRegister(0);
Register output = i.OutputRegister();
@@ -2118,7 +2159,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
+ condition == kOverflow || condition == kNotOverflow) {
return;
}
@@ -2564,9 +2606,13 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kExternalReference:
__ Move(dst, src.ToExternalReference());
break;
+ case Constant::kDelayedStringConstant:
+ __ mov(dst, Operand::EmbeddedStringConstant(
+ src.ToDelayedStringConstant()));
+ break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
+ RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
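
Two of the PPC fixes above share a theme: inside a load-reserved/store-conditional retry loop (and in the fneg-based Min sequence), writing an intermediate result into an input register corrupts the inputs if the body runs again, so the code now routes results through kScratchReg/kScratchDoubleReg. The same bug expressed as a portable compare-exchange loop:

    #include <atomic>

    // Buggy shape: the addend is overwritten, so a retry adds the wrong value.
    int FetchAddBuggy(std::atomic<int>& cell, int addend) {
      int old = cell.load();
      do {
        addend = old + addend;  // clobbers the input on the first pass
      } while (!cell.compare_exchange_weak(old, addend));
      return old;
    }

    // Fixed shape, mirroring the kScratchReg version: inputs survive retries.
    int FetchAddFixed(std::atomic<int>& cell, int addend) {
      int old = cell.load();
      int scratch;
      do {
        scratch = old + addend;  // result goes to a scratch; addend is intact
      } while (!cell.compare_exchange_weak(old, scratch));
      return old;
    }
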
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index 3f3270028c..e189a18543 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -11,122 +11,158 @@ namespace compiler {
// PPC-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(PPC_And) \
- V(PPC_AndComplement) \
- V(PPC_Or) \
- V(PPC_OrComplement) \
- V(PPC_Xor) \
- V(PPC_ShiftLeft32) \
- V(PPC_ShiftLeft64) \
- V(PPC_ShiftLeftPair) \
- V(PPC_ShiftRight32) \
- V(PPC_ShiftRight64) \
- V(PPC_ShiftRightPair) \
- V(PPC_ShiftRightAlg32) \
- V(PPC_ShiftRightAlg64) \
- V(PPC_ShiftRightAlgPair) \
- V(PPC_RotRight32) \
- V(PPC_RotRight64) \
- V(PPC_Not) \
- V(PPC_RotLeftAndMask32) \
- V(PPC_RotLeftAndClear64) \
- V(PPC_RotLeftAndClearLeft64) \
- V(PPC_RotLeftAndClearRight64) \
- V(PPC_Add32) \
- V(PPC_Add64) \
- V(PPC_AddWithOverflow32) \
- V(PPC_AddPair) \
- V(PPC_AddDouble) \
- V(PPC_Sub) \
- V(PPC_SubWithOverflow32) \
- V(PPC_SubPair) \
- V(PPC_SubDouble) \
- V(PPC_Mul32) \
- V(PPC_Mul32WithHigh32) \
- V(PPC_Mul64) \
- V(PPC_MulHigh32) \
- V(PPC_MulHighU32) \
- V(PPC_MulPair) \
- V(PPC_MulDouble) \
- V(PPC_Div32) \
- V(PPC_Div64) \
- V(PPC_DivU32) \
- V(PPC_DivU64) \
- V(PPC_DivDouble) \
- V(PPC_Mod32) \
- V(PPC_Mod64) \
- V(PPC_ModU32) \
- V(PPC_ModU64) \
- V(PPC_ModDouble) \
- V(PPC_Neg) \
- V(PPC_NegDouble) \
- V(PPC_SqrtDouble) \
- V(PPC_FloorDouble) \
- V(PPC_CeilDouble) \
- V(PPC_TruncateDouble) \
- V(PPC_RoundDouble) \
- V(PPC_MaxDouble) \
- V(PPC_MinDouble) \
- V(PPC_AbsDouble) \
- V(PPC_Cntlz32) \
- V(PPC_Cntlz64) \
- V(PPC_Popcnt32) \
- V(PPC_Popcnt64) \
- V(PPC_Cmp32) \
- V(PPC_Cmp64) \
- V(PPC_CmpDouble) \
- V(PPC_Tst32) \
- V(PPC_Tst64) \
- V(PPC_Push) \
- V(PPC_PushFrame) \
- V(PPC_StoreToStackSlot) \
- V(PPC_ExtendSignWord8) \
- V(PPC_ExtendSignWord16) \
- V(PPC_ExtendSignWord32) \
- V(PPC_Uint32ToUint64) \
- V(PPC_Int64ToInt32) \
- V(PPC_Int64ToFloat32) \
- V(PPC_Int64ToDouble) \
- V(PPC_Uint64ToFloat32) \
- V(PPC_Uint64ToDouble) \
- V(PPC_Int32ToFloat32) \
- V(PPC_Int32ToDouble) \
- V(PPC_Uint32ToFloat32) \
- V(PPC_Uint32ToDouble) \
- V(PPC_Float32ToDouble) \
- V(PPC_Float64SilenceNaN) \
- V(PPC_DoubleToInt32) \
- V(PPC_DoubleToUint32) \
- V(PPC_DoubleToInt64) \
- V(PPC_DoubleToUint64) \
- V(PPC_DoubleToFloat32) \
- V(PPC_DoubleExtractLowWord32) \
- V(PPC_DoubleExtractHighWord32) \
- V(PPC_DoubleInsertLowWord32) \
- V(PPC_DoubleInsertHighWord32) \
- V(PPC_DoubleConstruct) \
- V(PPC_BitcastInt32ToFloat32) \
- V(PPC_BitcastFloat32ToInt32) \
- V(PPC_BitcastInt64ToDouble) \
- V(PPC_BitcastDoubleToInt64) \
- V(PPC_LoadWordS8) \
- V(PPC_LoadWordU8) \
- V(PPC_LoadWordS16) \
- V(PPC_LoadWordU16) \
- V(PPC_LoadWordS32) \
- V(PPC_LoadWordU32) \
- V(PPC_LoadWord64) \
- V(PPC_LoadFloat32) \
- V(PPC_LoadDouble) \
- V(PPC_StoreWord8) \
- V(PPC_StoreWord16) \
- V(PPC_StoreWord32) \
- V(PPC_StoreWord64) \
- V(PPC_StoreFloat32) \
- V(PPC_StoreDouble) \
- V(PPC_ByteRev32) \
- V(PPC_ByteRev64)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(PPC_And) \
+ V(PPC_AndComplement) \
+ V(PPC_Or) \
+ V(PPC_OrComplement) \
+ V(PPC_Xor) \
+ V(PPC_ShiftLeft32) \
+ V(PPC_ShiftLeft64) \
+ V(PPC_ShiftLeftPair) \
+ V(PPC_ShiftRight32) \
+ V(PPC_ShiftRight64) \
+ V(PPC_ShiftRightPair) \
+ V(PPC_ShiftRightAlg32) \
+ V(PPC_ShiftRightAlg64) \
+ V(PPC_ShiftRightAlgPair) \
+ V(PPC_RotRight32) \
+ V(PPC_RotRight64) \
+ V(PPC_Not) \
+ V(PPC_RotLeftAndMask32) \
+ V(PPC_RotLeftAndClear64) \
+ V(PPC_RotLeftAndClearLeft64) \
+ V(PPC_RotLeftAndClearRight64) \
+ V(PPC_Add32) \
+ V(PPC_Add64) \
+ V(PPC_AddWithOverflow32) \
+ V(PPC_AddPair) \
+ V(PPC_AddDouble) \
+ V(PPC_Sub) \
+ V(PPC_SubWithOverflow32) \
+ V(PPC_SubPair) \
+ V(PPC_SubDouble) \
+ V(PPC_Mul32) \
+ V(PPC_Mul32WithHigh32) \
+ V(PPC_Mul64) \
+ V(PPC_MulHigh32) \
+ V(PPC_MulHighU32) \
+ V(PPC_MulPair) \
+ V(PPC_MulDouble) \
+ V(PPC_Div32) \
+ V(PPC_Div64) \
+ V(PPC_DivU32) \
+ V(PPC_DivU64) \
+ V(PPC_DivDouble) \
+ V(PPC_Mod32) \
+ V(PPC_Mod64) \
+ V(PPC_ModU32) \
+ V(PPC_ModU64) \
+ V(PPC_ModDouble) \
+ V(PPC_Neg) \
+ V(PPC_NegDouble) \
+ V(PPC_SqrtDouble) \
+ V(PPC_FloorDouble) \
+ V(PPC_CeilDouble) \
+ V(PPC_TruncateDouble) \
+ V(PPC_RoundDouble) \
+ V(PPC_MaxDouble) \
+ V(PPC_MinDouble) \
+ V(PPC_AbsDouble) \
+ V(PPC_Cntlz32) \
+ V(PPC_Cntlz64) \
+ V(PPC_Popcnt32) \
+ V(PPC_Popcnt64) \
+ V(PPC_Cmp32) \
+ V(PPC_Cmp64) \
+ V(PPC_CmpDouble) \
+ V(PPC_Tst32) \
+ V(PPC_Tst64) \
+ V(PPC_Push) \
+ V(PPC_PushFrame) \
+ V(PPC_StoreToStackSlot) \
+ V(PPC_ExtendSignWord8) \
+ V(PPC_ExtendSignWord16) \
+ V(PPC_ExtendSignWord32) \
+ V(PPC_Uint32ToUint64) \
+ V(PPC_Int64ToInt32) \
+ V(PPC_Int64ToFloat32) \
+ V(PPC_Int64ToDouble) \
+ V(PPC_Uint64ToFloat32) \
+ V(PPC_Uint64ToDouble) \
+ V(PPC_Int32ToFloat32) \
+ V(PPC_Int32ToDouble) \
+ V(PPC_Uint32ToFloat32) \
+ V(PPC_Uint32ToDouble) \
+ V(PPC_Float32ToDouble) \
+ V(PPC_Float64SilenceNaN) \
+ V(PPC_DoubleToInt32) \
+ V(PPC_DoubleToUint32) \
+ V(PPC_DoubleToInt64) \
+ V(PPC_DoubleToUint64) \
+ V(PPC_DoubleToFloat32) \
+ V(PPC_DoubleExtractLowWord32) \
+ V(PPC_DoubleExtractHighWord32) \
+ V(PPC_DoubleInsertLowWord32) \
+ V(PPC_DoubleInsertHighWord32) \
+ V(PPC_DoubleConstruct) \
+ V(PPC_BitcastInt32ToFloat32) \
+ V(PPC_BitcastFloat32ToInt32) \
+ V(PPC_BitcastInt64ToDouble) \
+ V(PPC_BitcastDoubleToInt64) \
+ V(PPC_LoadWordS8) \
+ V(PPC_LoadWordU8) \
+ V(PPC_LoadWordS16) \
+ V(PPC_LoadWordU16) \
+ V(PPC_LoadWordS32) \
+ V(PPC_LoadWordU32) \
+ V(PPC_LoadWord64) \
+ V(PPC_LoadFloat32) \
+ V(PPC_LoadDouble) \
+ V(PPC_StoreWord8) \
+ V(PPC_StoreWord16) \
+ V(PPC_StoreWord32) \
+ V(PPC_StoreWord64) \
+ V(PPC_StoreFloat32) \
+ V(PPC_StoreDouble) \
+ V(PPC_ByteRev32) \
+ V(PPC_ByteRev64) \
+ V(PPC_Word64AtomicStoreUint8) \
+ V(PPC_Word64AtomicStoreUint16) \
+ V(PPC_Word64AtomicStoreUint32) \
+ V(PPC_Word64AtomicStoreUint64) \
+ V(PPC_Word64AtomicLoadUint8) \
+ V(PPC_Word64AtomicLoadUint16) \
+ V(PPC_Word64AtomicLoadUint32) \
+ V(PPC_Word64AtomicLoadUint64) \
+ V(PPC_Word64AtomicExchangeUint8) \
+ V(PPC_Word64AtomicExchangeUint16) \
+ V(PPC_Word64AtomicExchangeUint32) \
+ V(PPC_Word64AtomicExchangeUint64) \
+ V(PPC_Word64AtomicCompareExchangeUint8) \
+ V(PPC_Word64AtomicCompareExchangeUint16) \
+ V(PPC_Word64AtomicCompareExchangeUint32) \
+ V(PPC_Word64AtomicCompareExchangeUint64) \
+ V(PPC_Word64AtomicAddUint8) \
+ V(PPC_Word64AtomicAddUint16) \
+ V(PPC_Word64AtomicAddUint32) \
+ V(PPC_Word64AtomicAddUint64) \
+ V(PPC_Word64AtomicSubUint8) \
+ V(PPC_Word64AtomicSubUint16) \
+ V(PPC_Word64AtomicSubUint32) \
+ V(PPC_Word64AtomicSubUint64) \
+ V(PPC_Word64AtomicAndUint8) \
+ V(PPC_Word64AtomicAndUint16) \
+ V(PPC_Word64AtomicAndUint32) \
+ V(PPC_Word64AtomicAndUint64) \
+ V(PPC_Word64AtomicOrUint8) \
+ V(PPC_Word64AtomicOrUint16) \
+ V(PPC_Word64AtomicOrUint32) \
+ V(PPC_Word64AtomicOrUint64) \
+ V(PPC_Word64AtomicXorUint8) \
+ V(PPC_Word64AtomicXorUint16) \
+ V(PPC_Word64AtomicXorUint32) \
+ V(PPC_Word64AtomicXorUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
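The block above is an X-macro: TARGET_ARCH_OPCODE_LIST takes a macro V and applies it to every opcode name, so one list drives the opcode enum, the name table, and the scheduler switch further below (see the CASE expansion in instruction-scheduler-ppc.cc). A minimal standalone sketch of the pattern; the list and consumers here are illustrative, not V8's exact definitions:

#include <cstdio>

#define DEMO_OPCODE_LIST(V) \
  V(PPC_Add64)              \
  V(PPC_Word64AtomicAddUint64)

// Expansion 1: one enum entry per opcode.
enum DemoOpcode {
#define DECLARE(Name) kDemo##Name,
  DEMO_OPCODE_LIST(DECLARE)
#undef DECLARE
};

// Expansion 2: a name table for debug printing.
const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define CASE(Name) \
  case kDemo##Name: \
    return #Name;
    DEMO_OPCODE_LIST(CASE)
#undef CASE
  }
  return "unknown";
}

int main() { std::printf("%s\n", DemoOpcodeName(kDemoPPC_Add64)); }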
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
index 51c92e8e84..6e7284f30a 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -135,6 +135,46 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_StoreToStackSlot:
return kHasSideEffect;
+ case kPPC_Word64AtomicLoadUint8:
+ case kPPC_Word64AtomicLoadUint16:
+ case kPPC_Word64AtomicLoadUint32:
+ case kPPC_Word64AtomicLoadUint64:
+ return kIsLoadOperation;
+
+ case kPPC_Word64AtomicStoreUint8:
+ case kPPC_Word64AtomicStoreUint16:
+ case kPPC_Word64AtomicStoreUint32:
+ case kPPC_Word64AtomicStoreUint64:
+ case kPPC_Word64AtomicExchangeUint8:
+ case kPPC_Word64AtomicExchangeUint16:
+ case kPPC_Word64AtomicExchangeUint32:
+ case kPPC_Word64AtomicExchangeUint64:
+ case kPPC_Word64AtomicCompareExchangeUint8:
+ case kPPC_Word64AtomicCompareExchangeUint16:
+ case kPPC_Word64AtomicCompareExchangeUint32:
+ case kPPC_Word64AtomicCompareExchangeUint64:
+ case kPPC_Word64AtomicAddUint8:
+ case kPPC_Word64AtomicAddUint16:
+ case kPPC_Word64AtomicAddUint32:
+ case kPPC_Word64AtomicAddUint64:
+ case kPPC_Word64AtomicSubUint8:
+ case kPPC_Word64AtomicSubUint16:
+ case kPPC_Word64AtomicSubUint32:
+ case kPPC_Word64AtomicSubUint64:
+ case kPPC_Word64AtomicAndUint8:
+ case kPPC_Word64AtomicAndUint16:
+ case kPPC_Word64AtomicAndUint32:
+ case kPPC_Word64AtomicAndUint64:
+ case kPPC_Word64AtomicOrUint8:
+ case kPPC_Word64AtomicOrUint16:
+ case kPPC_Word64AtomicOrUint32:
+ case kPPC_Word64AtomicOrUint64:
+ case kPPC_Word64AtomicXorUint8:
+ case kPPC_Word64AtomicXorUint16:
+ case kPPC_Word64AtomicXorUint32:
+ case kPPC_Word64AtomicXorUint64:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
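The classification above is what the instruction scheduler consults when deciding whether two instructions may be reordered: the new atomic loads are marked kIsLoadOperation, while atomic stores and read-modify-write operations are full side effects. A rough sketch of the reordering rule, under the simplifying assumption that flags are a plain bitmask (V8's real scheduler tracks more state than this):

// Illustrative only: loads may commute with one another, but nothing
// may move across an instruction flagged with a side effect.
constexpr int kIsLoadOperation = 1 << 0;
constexpr int kHasSideEffect = 1 << 1;

bool MayReorder(int flags_a, int flags_b) {
  if ((flags_a & kHasSideEffect) || (flags_b & kHasSideEffect)) return false;
  return true;  // e.g. two kIsLoadOperation instructions
}

int main() { return MayReorder(kIsLoadOperation, kIsLoadOperation) ? 0 : 1; }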
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index e29ae8c7a5..5d336652c9 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -1188,6 +1188,10 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
VisitRR(this, kPPC_DoubleToUint64, node);
}
+
+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
+ VisitRR(this, kPPC_DoubleToInt64, node);
+}
#endif
@@ -1230,6 +1234,9 @@ void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
VisitRR(this, kPPC_Int64ToDouble, node);
}
+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
+ VisitRR(this, kPPC_Int64ToDouble, node);
+}
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
VisitRR(this, kPPC_Uint64ToFloat32, node);
@@ -1859,6 +1866,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
for (PushParameter input : (*arguments)) {
+ if (input.node == nullptr) continue;
Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
++slot;
@@ -1949,6 +1957,33 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = kPPC_Word64AtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kPPC_Word64AtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kPPC_Word64AtomicLoadUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kPPC_Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
PPCOperandGenerator g(this);
@@ -1980,12 +2015,61 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
0, nullptr, input_count, inputs);
}
-void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kPPC_Word64AtomicStoreUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kPPC_Word64AtomicStoreUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kPPC_Word64AtomicStoreUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kPPC_Word64AtomicStoreUint64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
+ inputs);
+}
+
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ PPCOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs);
+}
+
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
+ ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -2001,26 +2085,53 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
UNREACHABLE();
return;
}
+ VisitAtomicExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kPPC_Word64AtomicExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kPPC_Word64AtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kPPC_Word64AtomicExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kPPC_Word64AtomicExchangeUint64;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicExchange(this, node, opcode);
+}
+
+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ PPCOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
AddressingMode addressing_mode = kMode_MRR;
- InstructionOperand inputs[3];
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+
+ InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+
InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs);
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ selector->Emit(code, output_count, outputs, input_count, inputs);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
- PPCOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* old_value = node->InputAt(2);
- Node* new_value = node->InputAt(3);
-
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
@@ -2037,31 +2148,53 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
UNREACHABLE();
return;
}
+ VisitAtomicCompareExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ MachineType type = AtomicOpType(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint8()) {
+ opcode = kPPC_Word64AtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kPPC_Word64AtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kPPC_Word64AtomicCompareExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kPPC_Word64AtomicCompareExchangeUint64;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicCompareExchange(this, node, opcode);
+}
+
+void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ PPCOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
AddressingMode addressing_mode = kMode_MRR;
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionOperand inputs[3];
- InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(old_value);
- inputs[input_count++] = g.UseUniqueRegister(new_value);
+ inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
size_t output_count = 0;
outputs[output_count++] = g.DefineAsRegister(node);
- Emit(code, output_count, outputs, input_count, inputs);
+ selector->Emit(code, output_count, outputs, input_count, inputs);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- PPCOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
@@ -2079,20 +2212,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
return;
}
- AddressingMode addressing_mode = kMode_MRR;
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- InstructionOperand inputs[3];
-
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
-
- InstructionOperand outputs[1];
- size_t output_count = 0;
- outputs[output_count++] = g.DefineAsRegister(node);
-
- Emit(code, output_count, outputs, input_count, inputs);
+ VisitAtomicBinaryOperation(this, node, opcode);
}
#define VISIT_ATOMIC_BINOP(op) \
@@ -2109,6 +2229,39 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+ MachineType type = AtomicOpType(node->op());
+ ArchOpcode opcode = kArchNop;
+
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicBinaryOperation(this, node, opcode);
+}
+
+#define VISIT_ATOMIC64_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation( \
+ node, kPPC_Word64Atomic##op##Uint8, kPPC_Word64Atomic##op##Uint16, \
+ kPPC_Word64Atomic##op##Uint32, kPPC_Word64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC64_BINOP(Add)
+VISIT_ATOMIC64_BINOP(Sub)
+VISIT_ATOMIC64_BINOP(And)
+VISIT_ATOMIC64_BINOP(Or)
+VISIT_ATOMIC64_BINOP(Xor)
+#undef VISIT_ATOMIC64_BINOP
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
@@ -2318,6 +2471,82 @@ void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
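The Word64Atomic* visitors added above all follow one shape: read the MachineType off the operator, map it to a width-specific opcode, and defer to a shared helper. The operators being lowered behave like C++11 sequentially consistent atomics on a 64-bit cell; a host-level sketch of those semantics in standard C++ (not V8 code):

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uint64_t> cell{41};
  uint64_t old = cell.exchange(42);            // Word64AtomicExchangeUint64
  uint64_t expected = 42;
  cell.compare_exchange_strong(expected, 43);  // Word64AtomicCompareExchangeUint64
  cell.fetch_add(1);                           // Word64AtomicAddUint64
  assert(old == 41 && cell.load() == 44);      // Word64AtomicLoadUint64
}

On PPC these operations lower to load-reserve/store-conditional retry loops, which is plausibly why the helpers request UseUniqueRegister for every input: an input that aliased the output would be clobbered before the loop succeeds.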
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 42b28cfa76..31950f32f2 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -89,6 +89,7 @@ bool NeedsCheckHeapObject(Node* receiver) {
case IrOpcode::kJSCreateIterResultObject:
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateEmptyLiteralArray:
+ case IrOpcode::kJSCreateArrayFromIterable:
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateEmptyLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
@@ -208,6 +209,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
DCHECK(!it.is_dictionary_holder());
MapRef map(js_heap_broker(),
handle(it.GetHolder<HeapObject>()->map(), isolate()));
+ map.SerializeOwnDescriptors(); // TODO(neis): Remove later.
dependencies()->DependOnFieldType(map, it.GetFieldDescriptorIndex());
}
return value;
@@ -244,7 +246,8 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
MaybeHandle<Map>(),
field_type,
MachineType::TypeForRepresentation(field_representation),
- kFullWriteBarrier};
+ kFullWriteBarrier,
+ LoadSensitivity::kCritical};
if (field_representation == MachineRepresentation::kFloat64) {
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
!FLAG_unbox_double_fields) {
@@ -254,7 +257,8 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
MaybeHandle<Map>(),
Type::OtherInternal(),
MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ kPointerWriteBarrier,
+ LoadSensitivity::kCritical};
storage = *effect = graph()->NewNode(
simplified()->LoadField(storage_access), storage, *effect, *control);
field_access.offset = HeapNumber::kValueOffset;
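The LoadSensitivity::kCritical tag added to these FieldAccess descriptors marks the loads for V8's speculative-execution (Spectre) mitigation, matching the kPoisonCriticalOnly level seen elsewhere in this patch: under load poisoning, a critical load's result is masked so a value obtained on a mispredicted path cannot feed a dependent access. A conceptual sketch of the masking idea, not V8's actual implementation:

#include <cstdint>

// poison_mask is all-ones on the architecturally taken path and is
// cleared by the mitigation after a misprediction, so the loaded
// value degrades to zero instead of leaking.
uint64_t PoisonedLoad(const uint64_t* slot, uint64_t poison_mask) {
  return *slot & poison_mask;
}

int main() {
  uint64_t secret = 0xDEADBEEF;
  return static_cast<int>(PoisonedLoad(&secret, 0) != 0);  // masked to zero
}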
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 304d0e4ff1..9b4806a3a6 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -47,7 +47,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
FullUnalignedAccessSupport(),
PoisoningMitigationLevel poisoning_level =
PoisoningMitigationLevel::kPoisonCriticalOnly);
- ~RawMachineAssembler() {}
+ ~RawMachineAssembler() = default;
Isolate* isolate() const { return isolate_; }
Graph* graph() const { return graph_; }
@@ -173,15 +173,51 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// Atomic memory operations.
Node* AtomicLoad(MachineType type, Node* base, Node* index) {
+ if (type.representation() == MachineRepresentation::kWord64) {
+ if (machine()->Is64()) {
+ return AddNode(machine()->Word64AtomicLoad(type), base, index);
+ } else {
+ return AddNode(machine()->Word32AtomicPairLoad(), base, index);
+ }
+ }
return AddNode(machine()->Word32AtomicLoad(type), base, index);
}
+
+#if defined(V8_TARGET_BIG_ENDIAN)
+#define VALUE_HALVES value_high, value
+#else
+#define VALUE_HALVES value, value_high
+#endif
+
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
- Node* value) {
+ Node* value, Node* value_high) {
+ if (rep == MachineRepresentation::kWord64) {
+ if (machine()->Is64()) {
+ DCHECK_NULL(value_high);
+ return AddNode(machine()->Word64AtomicStore(rep), base, index, value);
+ } else {
+ return AddNode(machine()->Word32AtomicPairStore(), base, index,
+ VALUE_HALVES);
+ }
+ }
+ DCHECK_NULL(value_high);
return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
}
-#define ATOMIC_FUNCTION(name) \
- Node* Atomic##name(MachineType rep, Node* base, Node* index, Node* value) { \
- return AddNode(machine()->Word32Atomic##name(rep), base, index, value); \
+#define ATOMIC_FUNCTION(name) \
+ Node* Atomic##name(MachineType rep, Node* base, Node* index, Node* value, \
+ Node* value_high) { \
+ if (rep.representation() == MachineRepresentation::kWord64) { \
+ if (machine()->Is64()) { \
+ DCHECK_NULL(value_high); \
+ return AddNode(machine()->Word64Atomic##name(rep), base, index, \
+ value); \
+ } else { \
+ return AddNode(machine()->Word32AtomicPair##name(), base, index, \
+ VALUE_HALVES); \
+ } \
+ } \
+ DCHECK_NULL(value_high); \
+ return AddNode(machine()->Word32Atomic##name(rep), base, index, value); \
}
ATOMIC_FUNCTION(Exchange);
ATOMIC_FUNCTION(Add);
@@ -190,9 +226,25 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
ATOMIC_FUNCTION(Or);
ATOMIC_FUNCTION(Xor);
#undef ATOMIC_FUNCTION
+#undef VALUE_HALVES
Node* AtomicCompareExchange(MachineType rep, Node* base, Node* index,
- Node* old_value, Node* new_value) {
+ Node* old_value, Node* old_value_high,
+ Node* new_value, Node* new_value_high) {
+ if (rep.representation() == MachineRepresentation::kWord64) {
+ if (machine()->Is64()) {
+ DCHECK_NULL(old_value_high);
+ DCHECK_NULL(new_value_high);
+ return AddNode(machine()->Word64AtomicCompareExchange(rep), base, index,
+ old_value, new_value);
+ } else {
+ return AddNode(machine()->Word32AtomicPairCompareExchange(), base,
+ index, old_value, old_value_high, new_value,
+ new_value_high);
+ }
+ }
+ DCHECK_NULL(old_value_high);
+ DCHECK_NULL(new_value_high);
return AddNode(machine()->Word32AtomicCompareExchange(rep), base, index,
old_value, new_value);
}
@@ -605,12 +657,18 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* ChangeInt32ToFloat64(Node* a) {
return AddNode(machine()->ChangeInt32ToFloat64(), a);
}
+ Node* ChangeInt64ToFloat64(Node* a) {
+ return AddNode(machine()->ChangeInt64ToFloat64(), a);
+ }
Node* ChangeUint32ToFloat64(Node* a) {
return AddNode(machine()->ChangeUint32ToFloat64(), a);
}
Node* ChangeFloat64ToInt32(Node* a) {
return AddNode(machine()->ChangeFloat64ToInt32(), a);
}
+ Node* ChangeFloat64ToInt64(Node* a) {
+ return AddNode(machine()->ChangeFloat64ToInt64(), a);
+ }
Node* ChangeFloat64ToUint32(Node* a) {
return AddNode(machine()->ChangeFloat64ToUint32(), a);
}
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 5ecef0408b..fdab39fb43 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -14,7 +14,7 @@ namespace compiler {
RedundancyElimination::RedundancyElimination(Editor* editor, Zone* zone)
: AdvancedReducer(editor), node_checks_(zone), zone_(zone) {}
-RedundancyElimination::~RedundancyElimination() {}
+RedundancyElimination::~RedundancyElimination() = default;
Reduction RedundancyElimination::Reduce(Node* node) {
if (node_checks_.Get(node)) return NoChange();
@@ -32,31 +32,20 @@ Reduction RedundancyElimination::Reduce(Node* node) {
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
case IrOpcode::kCheckSymbol:
- case IrOpcode::kCheckedFloat64ToInt32:
- case IrOpcode::kCheckedInt32Add:
- case IrOpcode::kCheckedInt32Div:
- case IrOpcode::kCheckedInt32Mod:
- case IrOpcode::kCheckedInt32Mul:
- case IrOpcode::kCheckedInt32Sub:
- case IrOpcode::kCheckedInt32ToTaggedSigned:
- case IrOpcode::kCheckedTaggedSignedToInt32:
- case IrOpcode::kCheckedTaggedToFloat64:
- case IrOpcode::kCheckedTaggedToInt32:
- case IrOpcode::kCheckedTaggedToTaggedPointer:
- case IrOpcode::kCheckedTaggedToTaggedSigned:
- case IrOpcode::kCheckedTruncateTaggedToWord32:
- case IrOpcode::kCheckedUint32Div:
- case IrOpcode::kCheckedUint32Mod:
- case IrOpcode::kCheckedUint32ToInt32:
- case IrOpcode::kCheckedUint32ToTaggedSigned:
+#define SIMPLIFIED_CHECKED_OP(Opcode) case IrOpcode::k##Opcode:
+ SIMPLIFIED_CHECKED_OP_LIST(SIMPLIFIED_CHECKED_OP)
+#undef SIMPLIFIED_CHECKED_OP
return ReduceCheckNode(node);
+ case IrOpcode::kSpeculativeNumberEqual:
+ case IrOpcode::kSpeculativeNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+ return ReduceSpeculativeNumberComparison(node);
case IrOpcode::kSpeculativeNumberAdd:
case IrOpcode::kSpeculativeNumberSubtract:
case IrOpcode::kSpeculativeSafeIntegerAdd:
case IrOpcode::kSpeculativeSafeIntegerSubtract:
- // For increments and decrements by a constant, try to learn from the last
- // bounds check.
- return TryReuseBoundsCheckForFirstInput(node);
+ case IrOpcode::kSpeculativeToNumber:
+ return ReduceSpeculativeNumberOperation(node);
case IrOpcode::kEffectPhi:
return ReduceEffectPhi(node);
case IrOpcode::kDead:
@@ -140,6 +129,12 @@ bool CheckSubsumes(Node const* a, Node const* b) {
if (a->opcode() == IrOpcode::kCheckInternalizedString &&
b->opcode() == IrOpcode::kCheckString) {
// CheckInternalizedString(node) implies CheckString(node)
+ } else if (a->opcode() == IrOpcode::kCheckSmi &&
+ b->opcode() == IrOpcode::kCheckNumber) {
+ // CheckSmi(node) implies CheckNumber(node)
+ } else if (a->opcode() == IrOpcode::kCheckedTaggedSignedToInt32 &&
+ b->opcode() == IrOpcode::kCheckedTaggedToInt32) {
+ // CheckedTaggedSignedToInt32(node) implies CheckedTaggedToInt32(node)
} else if (a->opcode() != b->opcode()) {
return false;
} else {
@@ -150,11 +145,15 @@ bool CheckSubsumes(Node const* a, Node const* b) {
case IrOpcode::kCheckNumber:
break;
case IrOpcode::kCheckedInt32ToTaggedSigned:
+ case IrOpcode::kCheckedInt64ToInt32:
+ case IrOpcode::kCheckedInt64ToTaggedSigned:
case IrOpcode::kCheckedTaggedSignedToInt32:
case IrOpcode::kCheckedTaggedToTaggedPointer:
case IrOpcode::kCheckedTaggedToTaggedSigned:
case IrOpcode::kCheckedUint32ToInt32:
case IrOpcode::kCheckedUint32ToTaggedSigned:
+ case IrOpcode::kCheckedUint64ToInt32:
+ case IrOpcode::kCheckedUint64ToTaggedSigned:
break;
case IrOpcode::kCheckedFloat64ToInt32:
case IrOpcode::kCheckedTaggedToInt32: {
@@ -167,6 +166,20 @@ bool CheckSubsumes(Node const* a, Node const* b) {
}
break;
}
+ case IrOpcode::kCheckedTaggedToFloat64:
+ case IrOpcode::kCheckedTruncateTaggedToWord32: {
+ CheckTaggedInputParameters const& ap =
+ CheckTaggedInputParametersOf(a->op());
+ CheckTaggedInputParameters const& bp =
+ CheckTaggedInputParametersOf(b->op());
+ // {a} subsumes {b} if the modes are either the same, or {a} checks
+ // for Number, in which case {b} will be subsumed no matter what.
+ if (ap.mode() != bp.mode() &&
+ ap.mode() != CheckTaggedInputMode::kNumber) {
+ return false;
+ }
+ break;
+ }
default:
DCHECK(!IsCheckedWithFeedback(a->op()));
return false;
@@ -232,38 +245,6 @@ Reduction RedundancyElimination::ReduceCheckNode(Node* node) {
return UpdateChecks(node, checks->AddCheck(zone(), node));
}
-Reduction RedundancyElimination::TryReuseBoundsCheckForFirstInput(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kSpeculativeNumberAdd ||
- node->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
- node->opcode() == IrOpcode::kSpeculativeSafeIntegerAdd ||
- node->opcode() == IrOpcode::kSpeculativeSafeIntegerSubtract);
-
- DCHECK_EQ(1, node->op()->EffectInputCount());
- DCHECK_EQ(1, node->op()->EffectOutputCount());
-
- Node* const effect = NodeProperties::GetEffectInput(node);
- EffectPathChecks const* checks = node_checks_.Get(effect);
-
- // If we do not know anything about the predecessor, do not propagate just yet
- // because we will have to recompute anyway once we compute the predecessor.
- if (checks == nullptr) return NoChange();
-
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- // Only use bounds checks for increments/decrements by a constant.
- if (right->opcode() == IrOpcode::kNumberConstant) {
- if (Node* bounds_check = checks->LookupBoundsCheckFor(left)) {
- // Only use the bounds checked type if it is better.
- if (NodeProperties::GetType(bounds_check)
- .Is(NodeProperties::GetType(left))) {
- node->ReplaceInput(0, bounds_check);
- }
- }
- }
-
- return UpdateChecks(node, checks);
-}
-
Reduction RedundancyElimination::ReduceEffectPhi(Node* node) {
Node* const control = NodeProperties::GetControlInput(node);
if (control->opcode() == IrOpcode::kLoop) {
@@ -292,6 +273,97 @@ Reduction RedundancyElimination::ReduceEffectPhi(Node* node) {
return UpdateChecks(node, checks);
}
+Reduction RedundancyElimination::ReduceSpeculativeNumberComparison(Node* node) {
+ NumberOperationHint const hint = NumberOperationHintOf(node->op());
+ Node* const first = NodeProperties::GetValueInput(node, 0);
+ Type const first_type = NodeProperties::GetType(first);
+ Node* const second = NodeProperties::GetValueInput(node, 1);
+ Type const second_type = NodeProperties::GetType(second);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ EffectPathChecks const* checks = node_checks_.Get(effect);
+
+ // If we do not know anything about the predecessor, do not propagate just yet
+ // because we will have to recompute anyway once we compute the predecessor.
+ if (checks == nullptr) return NoChange();
+
+ // Avoid the potentially expensive lookups below if the {node}
+ // has seen non-Smi inputs in the past, which is a clear signal
+ // that the comparison is probably not performed on a value that
+ // already passed an array bounds check.
+ if (hint == NumberOperationHint::kSignedSmall) {
+ // Don't bother trying to find a CheckBounds for the {first} input
+ // if its type is already in UnsignedSmall range, since the bounds
+ // check is only going to narrow that range further, but the result
+ // is not going to make the representation selection any better.
+ if (!first_type.Is(Type::UnsignedSmall())) {
+ if (Node* check = checks->LookupBoundsCheckFor(first)) {
+ if (!first_type.Is(NodeProperties::GetType(check))) {
+ // Replace the {first} input with the {check}. This is safe,
+ // despite the fact that {check} can truncate -0 to 0, because
+ // the regular Number comparisons in JavaScript also identify
+ // 0 and -0 (unlike special comparisons such as Object.is).
+ NodeProperties::ReplaceValueInput(node, check, 0);
+ Reduction const reduction = ReduceSpeculativeNumberComparison(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ }
+
+ // Don't bother trying to find a CheckBounds for the {second} input
+ // if its type is already in UnsignedSmall range, since the bounds
+ // check is only going to narrow that range further, but the result
+ // is not going to make the representation selection any better.
+ if (!second_type.Is(Type::UnsignedSmall())) {
+ if (Node* check = checks->LookupBoundsCheckFor(second)) {
+ if (!second_type.Is(NodeProperties::GetType(check))) {
+ // Replace the {second} input with the {check}. This is safe,
+ // despite the fact that {check} can truncate -0 to 0, because
+ // the regular Number comparisons in JavaScript also identify
+ // 0 and -0 (unlike special comparisons such as Object.is).
+ NodeProperties::ReplaceValueInput(node, check, 1);
+ Reduction const reduction = ReduceSpeculativeNumberComparison(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ }
+ }
+
+ return UpdateChecks(node, checks);
+}
+
+Reduction RedundancyElimination::ReduceSpeculativeNumberOperation(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+ node->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
+ node->opcode() == IrOpcode::kSpeculativeSafeIntegerAdd ||
+ node->opcode() == IrOpcode::kSpeculativeSafeIntegerSubtract ||
+ node->opcode() == IrOpcode::kSpeculativeToNumber);
+ DCHECK_EQ(1, node->op()->EffectInputCount());
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+
+ Node* const first = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ EffectPathChecks const* checks = node_checks_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate just yet
+ // because we will have to recompute anyway once we compute the predecessor.
+ if (checks == nullptr) return NoChange();
+
+ // Check if there's a CheckBounds operation on {first}
+ // in the graph already, which we might be able to
+ // reuse here to improve the representation selection
+ // for the {node} later on.
+ if (Node* check = checks->LookupBoundsCheckFor(first)) {
+ // Only use the bounds {check} if its type is better
+ // than the type of the {first} node, otherwise we
+ // would end up replacing NumberConstant inputs with
+ // CheckBounds operations, which is kind of pointless.
+ if (!NodeProperties::GetType(first).Is(NodeProperties::GetType(check))) {
+ NodeProperties::ReplaceValueInput(node, check, 0);
+ }
+ }
+
+ return UpdateChecks(node, checks);
+}
+
Reduction RedundancyElimination::ReduceStart(Node* node) {
return UpdateChecks(node, EffectPathChecks::Empty(zone()));
}
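Two of the new subsumption rules in CheckSubsumes encode one-way implications between checks: CheckSmi implies CheckNumber, and CheckedTaggedSignedToInt32 implies CheckedTaggedToInt32, so the weaker later check becomes redundant. A standalone sketch of that relation, with illustrative names rather than V8's types:

#include <cassert>

enum Check {
  kCheckSmi,
  kCheckNumber,
  kCheckedTaggedSignedToInt32,
  kCheckedTaggedToInt32
};

// True when an earlier check {a} makes a later check {b} redundant.
bool Subsumes(Check a, Check b) {
  if (a == b) return true;
  if (a == kCheckSmi && b == kCheckNumber) return true;
  if (a == kCheckedTaggedSignedToInt32 && b == kCheckedTaggedToInt32)
    return true;
  return false;
}

int main() {
  assert(Subsumes(kCheckSmi, kCheckNumber));
  assert(!Subsumes(kCheckNumber, kCheckSmi));  // implication is one-way
}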
diff --git a/deps/v8/src/compiler/redundancy-elimination.h b/deps/v8/src/compiler/redundancy-elimination.h
index 05094a388e..e89a7b2649 100644
--- a/deps/v8/src/compiler/redundancy-elimination.h
+++ b/deps/v8/src/compiler/redundancy-elimination.h
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-class RedundancyElimination final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE RedundancyElimination final : public AdvancedReducer {
public:
RedundancyElimination(Editor* editor, Zone* zone);
~RedundancyElimination() final;
@@ -59,14 +59,14 @@ class RedundancyElimination final : public AdvancedReducer {
Reduction ReduceCheckNode(Node* node);
Reduction ReduceEffectPhi(Node* node);
+ Reduction ReduceSpeculativeNumberComparison(Node* node);
+ Reduction ReduceSpeculativeNumberOperation(Node* node);
Reduction ReduceStart(Node* node);
Reduction ReduceOtherNode(Node* node);
Reduction TakeChecksFromFirstEffect(Node* node);
Reduction UpdateChecks(Node* node, EffectPathChecks const* checks);
- Reduction TryReuseBoundsCheckForFirstInput(Node* node);
-
Zone* zone() const { return zone_; }
PathChecksForEffectNodes node_checks_;
diff --git a/deps/v8/src/compiler/refs-map.cc b/deps/v8/src/compiler/refs-map.cc
new file mode 100644
index 0000000000..8b176d0e4c
--- /dev/null
+++ b/deps/v8/src/compiler/refs-map.cc
@@ -0,0 +1,35 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/refs-map.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+using UnderlyingMap =
+ base::TemplateHashMapImpl<Address, ObjectData*, AddressMatcher,
+ ZoneAllocationPolicy>;
+
+RefsMap::RefsMap(uint32_t capacity, AddressMatcher match, Zone* zone)
+ : UnderlyingMap(capacity, match, ZoneAllocationPolicy(zone)) {}
+
+RefsMap::RefsMap(const RefsMap* other, Zone* zone)
+ : UnderlyingMap(other, ZoneAllocationPolicy(zone)) {}
+
+RefsMap::Entry* RefsMap::Lookup(const Address& key) const {
+ return UnderlyingMap::Lookup(key, Hash(key));
+}
+
+RefsMap::Entry* RefsMap::LookupOrInsert(const Address& key, Zone* zone) {
+ return UnderlyingMap::LookupOrInsert(key, RefsMap::Hash(key),
+ []() { return nullptr; },
+ ZoneAllocationPolicy(zone));
+}
+
+uint32_t RefsMap::Hash(Address addr) { return static_cast<uint32_t>(addr); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/refs-map.h b/deps/v8/src/compiler/refs-map.h
new file mode 100644
index 0000000000..daaf433049
--- /dev/null
+++ b/deps/v8/src/compiler/refs-map.h
@@ -0,0 +1,54 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REFS_MAP_H_
+#define V8_COMPILER_REFS_MAP_H_
+
+#include "src/base/hashmap.h"
+#include "src/globals.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ObjectData;
+
+class AddressMatcher : public base::KeyEqualityMatcher<Address> {
+ public:
+ bool operator()(uint32_t hash1, uint32_t hash2, const Address& key1,
+ const Address& key2) const {
+ return key1 == key2;
+ }
+};
+
+// This class employs our own hash map implementation for the purpose of
+// storing the mapping between canonical Addresses and allocated ObjectData.
+// It's used as the refs map in JSHeapBroker and as the snapshot in
+// PerIsolateCompilerCache, because we need a cheap copy between the two and
+// std::unordered_map doesn't satisfy this requirement: it rehashes the
+// whole map and copies all entries one by one.
+class RefsMap
+ : public base::TemplateHashMapImpl<Address, ObjectData*, AddressMatcher,
+ ZoneAllocationPolicy>,
+ public ZoneObject {
+ public:
+ RefsMap(uint32_t capacity, AddressMatcher match, Zone* zone);
+ RefsMap(const RefsMap* other, Zone* zone);
+
+ bool IsEmpty() const { return occupancy() == 0; }
+
+ // Wrappers around methods from UnderlyingMap
+ Entry* Lookup(const Address& key) const;
+ Entry* LookupOrInsert(const Address& key, Zone* zone);
+
+ private:
+ static uint32_t Hash(Address addr);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_REFS_MAP_H_
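A hedged usage sketch of the RefsMap API declared above. This fragment depends on V8-internal types (Zone, Address, ObjectData) and will not compile on its own; NewObjectData is a hypothetical factory introduced only for illustration:

// Not standalone: assumes a V8 Zone* named zone and an Address key.
RefsMap* refs = new (zone) RefsMap(16, AddressMatcher(), zone);
RefsMap::Entry* entry = refs->LookupOrInsert(key, zone);
if (entry->value == nullptr) {
  entry->value = NewObjectData(key);  // hypothetical factory
}
// The cheap-copy path that motivates the class: the backing store is
// copied wholesale instead of being rehashed entry by entry.
RefsMap* snapshot = new (zone) RefsMap(refs, zone);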
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 1938ef22b6..0649748a35 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -21,10 +21,10 @@ namespace compiler {
namespace {
-static const int kFloatRepBit =
- 1 << static_cast<int>(MachineRepresentation::kFloat32);
-static const int kSimd128RepBit =
- 1 << static_cast<int>(MachineRepresentation::kSimd128);
+static constexpr int kFloat32Bit =
+ RepresentationBit(MachineRepresentation::kFloat32);
+static constexpr int kSimd128Bit =
+ RepresentationBit(MachineRepresentation::kSimd128);
void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
auto it = std::find(v->begin(), v->end(), range);
@@ -2041,8 +2041,8 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
bool fixed_simd128_live_ranges = false;
if (!kSimpleFPAliasing) {
int mask = data()->code()->representation_mask();
- fixed_float_live_ranges = (mask & kFloatRepBit) != 0;
- fixed_simd128_live_ranges = (mask & kSimd128RepBit) != 0;
+ fixed_float_live_ranges = (mask & kFloat32Bit) != 0;
+ fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
}
for (int index = block->last_instruction_index(); index >= block_start;
@@ -2556,7 +2556,7 @@ RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
check_fp_aliasing_(false) {
if (!kSimpleFPAliasing && kind == FP_REGISTERS) {
check_fp_aliasing_ = (data->code()->representation_mask() &
- (kFloatRepBit | kSimd128RepBit)) != 0;
+ (kFloat32Bit | kSimd128Bit)) != 0;
}
}
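The renamed constants lean on V8's RepresentationBit helper: each MachineRepresentation gets one bit, so a block's representation_mask can be probed with a plain bitwise AND, as in the hunks above. A self-contained sketch of the convention; the indices are placeholders, not V8's enum values:

#include <cstdint>

constexpr int RepresentationBit(int rep_index) { return 1 << rep_index; }

constexpr int kFloat32Index = 4;  // placeholder
constexpr int kSimd128Index = 5;  // placeholder
constexpr int kFloat32Bit = RepresentationBit(kFloat32Index);
constexpr int kSimd128Bit = RepresentationBit(kSimd128Index);

static_assert(((kFloat32Bit | kSimd128Bit) & kFloat32Bit) != 0,
              "the mask test picks out the float32 bit");

int main() { return 0; }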
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index e3c49df7a6..ad4c5c916c 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -10,6 +10,7 @@
#include "src/code-factory.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/type-cache.h"
#include "src/heap/factory-inl.h"
namespace v8 {
@@ -24,8 +25,6 @@ const char* Truncation::description() const {
return "truncate-to-bool";
case TruncationKind::kWord32:
return "truncate-to-word32";
- case TruncationKind::kWord64:
- return "truncate-to-word64";
case TruncationKind::kFloat64:
switch (identify_zeros()) {
case kIdentifyZeros:
@@ -44,23 +43,22 @@ const char* Truncation::description() const {
UNREACHABLE();
}
-
// Partial order for truncations:
//
-// kWord64 kAny <-------+
-// ^ ^ |
-// \ | |
-// \ kFloat64 |
-// \ ^ |
-// \ / |
-// kWord32 kBool
-// ^ ^
-// \ /
-// \ /
-// \ /
-// \ /
-// \ /
-// kNone
+// kAny <-------+
+// ^ |
+// | |
+// kFloat64 |
+// ^ |
+// / |
+// kWord32 kBool
+// ^ ^
+// \ /
+// \ /
+// \ /
+// \ /
+// \ /
+// kNone
//
// TODO(jarin) We might consider making kBool < kFloat64.
@@ -103,10 +101,7 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
return rep2 == TruncationKind::kBool || rep2 == TruncationKind::kAny;
case TruncationKind::kWord32:
return rep2 == TruncationKind::kWord32 ||
- rep2 == TruncationKind::kWord64 ||
rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
- case TruncationKind::kWord64:
- return rep2 == TruncationKind::kWord64;
case TruncationKind::kFloat64:
return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
case TruncationKind::kAny:
@@ -130,6 +125,13 @@ bool IsWord(MachineRepresentation rep) {
} // namespace
+RepresentationChanger::RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
+ : cache_(TypeCache::Get()),
+ jsgraph_(jsgraph),
+ isolate_(isolate),
+ testing_type_errors_(false),
+ type_error_(false) {}
+
// Changes representation from {output_rep} to {use_rep}. The {truncation}
// parameter is only used for sanity checking - if the changer cannot figure
// out signedness for the word32->float64 conversion, then we check that the
@@ -238,6 +240,28 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
}
+ } else if (output_rep == MachineRepresentation::kWord64) {
+ if (output_type.Is(Type::Signed31())) {
+ // int64 -> int32 -> tagged signed
+ node = InsertTruncateInt64ToInt32(node);
+ op = simplified()->ChangeInt31ToTaggedSigned();
+ } else if (output_type.Is(Type::Signed32()) && SmiValuesAre32Bits()) {
+ // int64 -> int32 -> tagged signed
+ node = InsertTruncateInt64ToInt32(node);
+ op = simplified()->ChangeInt32ToTagged();
+ } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ if (output_type.Is(cache_.kPositiveSafeInteger)) {
+ op = simplified()->CheckedUint64ToTaggedSigned(use_info.feedback());
+ } else if (output_type.Is(cache_.kSafeInteger)) {
+ op = simplified()->CheckedInt64ToTaggedSigned(use_info.feedback());
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedSigned);
+ }
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedSigned);
+ }
} else if (output_rep == MachineRepresentation::kFloat64) {
if (output_type.Is(Type::Signed31())) {
// float64 -> int32 -> tagged signed
@@ -326,6 +350,7 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kHeapConstant:
+ case IrOpcode::kDelayedStringConstant:
return node; // No change necessary.
case IrOpcode::kInt32Constant:
case IrOpcode::kFloat64Constant:
@@ -360,6 +385,16 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
MachineRepresentation::kTaggedPointer);
}
op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else if (output_rep == MachineRepresentation::kWord64) {
+ if (output_type.Is(cache_.kSafeInteger)) {
+ // int64 -> float64 -> tagged pointer
+ op = machine()->ChangeInt64ToFloat64();
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedPointer);
+ }
} else if (output_rep == MachineRepresentation::kFloat32) {
if (output_type.Is(Type::Number())) {
// float32 -> float64 -> tagged
@@ -399,6 +434,7 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
switch (node->opcode()) {
case IrOpcode::kNumberConstant:
case IrOpcode::kHeapConstant:
+ case IrOpcode::kDelayedStringConstant:
return node; // No change necessary.
case IrOpcode::kInt32Constant:
case IrOpcode::kFloat64Constant:
@@ -440,6 +476,29 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
}
+ } else if (output_rep == MachineRepresentation::kWord64) {
+ if (output_type.Is(Type::Signed31())) {
+ // int64 -> int32 -> tagged signed
+ node = InsertTruncateInt64ToInt32(node);
+ op = simplified()->ChangeInt31ToTaggedSigned();
+ } else if (output_type.Is(Type::Signed32())) {
+ // int64 -> int32 -> tagged
+ node = InsertTruncateInt64ToInt32(node);
+ op = simplified()->ChangeInt32ToTagged();
+ } else if (output_type.Is(Type::Unsigned32())) {
+ // int64 -> uint32 -> tagged
+ node = InsertTruncateInt64ToInt32(node);
+ op = simplified()->ChangeUint32ToTagged();
+ } else if (output_type.Is(cache_.kPositiveSafeInteger)) {
+ // uint64 -> tagged
+ op = simplified()->ChangeUint64ToTagged();
+ } else if (output_type.Is(cache_.kSafeInteger)) {
+ // int64 -> tagged
+ op = simplified()->ChangeInt64ToTagged();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
} else if (output_rep ==
MachineRepresentation::kFloat32) { // float32 -> float64 -> tagged
node = InsertChangeFloat32ToFloat64(node);
@@ -526,6 +585,13 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
}
} else if (output_rep == MachineRepresentation::kFloat64) {
op = machine()->TruncateFloat64ToFloat32();
+ } else if (output_rep == MachineRepresentation::kWord64) {
+ if (output_type.Is(Type::Signed32())) {
+ // int64 -> float64 -> float32
+ op = machine()->ChangeInt64ToFloat64();
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ }
}
if (op == nullptr) {
return TypeError(node, output_rep, output_type,
@@ -559,7 +625,9 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kFloat64), node);
} else if (IsWord(output_rep)) {
- if (output_type.Is(Type::Signed32())) {
+ if (output_type.Is(Type::Signed32()) ||
+ (output_type.Is(Type::Signed32OrMinusZero()) &&
+ use_info.truncation().IdentifiesZeroAndMinusZero())) {
op = machine()->ChangeInt32ToFloat64();
} else if (output_type.Is(Type::Unsigned32()) ||
use_info.truncation().IsUsedAsWord32()) {
@@ -595,6 +663,10 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
}
} else if (output_rep == MachineRepresentation::kFloat32) {
op = machine()->ChangeFloat32ToFloat64();
+ } else if (output_rep == MachineRepresentation::kWord64) {
+ if (output_type.Is(cache_.kSafeInteger)) {
+ op = machine()->ChangeInt64ToFloat64();
+ }
}
if (op == nullptr) {
return TypeError(node, output_rep, output_type,
@@ -607,14 +679,17 @@ Node* RepresentationChanger::MakeTruncatedInt32Constant(double value) {
return jsgraph()->Int32Constant(DoubleToInt32(value));
}
-void RepresentationChanger::InsertUnconditionalDeopt(Node* node,
- DeoptimizeReason reason) {
+Node* RepresentationChanger::InsertUnconditionalDeopt(Node* node,
+ DeoptimizeReason reason) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Node* deopt =
+ effect =
jsgraph()->graph()->NewNode(simplified()->CheckIf(reason),
jsgraph()->Int32Constant(0), effect, control);
- NodeProperties::ReplaceEffectInput(node, deopt);
+ Node* unreachable = effect = jsgraph()->graph()->NewNode(
+ jsgraph()->common()->Unreachable(), effect, control);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ return unreachable;
}
Node* RepresentationChanger::GetWord32RepresentationFor(
@@ -623,6 +698,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
+ case IrOpcode::kInt64Constant:
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
UNREACHABLE();
@@ -655,9 +731,11 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
CHECK(Truncation::Any(kIdentifyZeros)
.IsLessGeneralThan(use_info.truncation()));
CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
- InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi);
+ Node* unreachable =
+ InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi);
return jsgraph()->graph()->NewNode(
- jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord32),
+ unreachable);
}
} else if (output_rep == MachineRepresentation::kFloat64) {
if (output_type.Is(Type::Signed32())) {
@@ -730,6 +808,12 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
MachineRepresentation::kWord32);
}
} else if (output_rep == MachineRepresentation::kWord32) {
+ if (use_info.truncation().IdentifiesZeroAndMinusZero()) {
+ if (output_type.Is(Type::Signed32OrMinusZero()) ||
+ output_type.Is(Type::Unsigned32OrMinusZero())) {
+ return node;
+ }
+ }
// Only the checked case should get here, the non-checked case is
// handled in GetRepresentationFor.
if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
@@ -752,6 +836,27 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
DCHECK(use_info.type_check() == TypeCheckKind::kSignedSmall ||
use_info.type_check() == TypeCheckKind::kSigned32);
return node;
+ } else if (output_rep == MachineRepresentation::kWord64) {
+ if (output_type.Is(Type::Signed32()) ||
+ output_type.Is(Type::Unsigned32())) {
+ op = machine()->TruncateInt64ToInt32();
+ } else if (output_type.Is(cache_.kSafeInteger) &&
+ use_info.truncation().IsUsedAsWord32()) {
+ op = machine()->TruncateInt64ToInt32();
+ } else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
+ use_info.type_check() == TypeCheckKind::kSigned32) {
+ if (output_type.Is(cache_.kPositiveSafeInteger)) {
+ op = simplified()->CheckedUint64ToInt32(use_info.feedback());
+ } else if (output_type.Is(cache_.kSafeInteger)) {
+ op = simplified()->CheckedInt64ToInt32(use_info.feedback());
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
+ }
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
+ }
}
if (op == nullptr) {
@@ -822,6 +927,11 @@ Node* RepresentationChanger::GetBitRepresentationFor(
jsgraph()->Int32Constant(0));
return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
jsgraph()->Int32Constant(0));
+ } else if (output_rep == MachineRepresentation::kWord64) {
+ node = jsgraph()->graph()->NewNode(machine()->Word64Equal(), node,
+ jsgraph()->Int64Constant(0));
+ return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
+ jsgraph()->Int32Constant(0));
} else if (output_rep == MachineRepresentation::kFloat32) {
node = jsgraph()->graph()->NewNode(machine()->Float32Abs(), node);
return jsgraph()->graph()->NewNode(machine()->Float32LessThan(),
@@ -839,16 +949,84 @@ Node* RepresentationChanger::GetBitRepresentationFor(
Node* RepresentationChanger::GetWord64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kInt64Constant:
+ case IrOpcode::kFloat32Constant:
+ case IrOpcode::kFloat64Constant:
+ UNREACHABLE();
+ break;
+ case IrOpcode::kNumberConstant: {
+ double const fv = OpParameter<double>(node->op());
+ int64_t const iv = static_cast<int64_t>(fv);
+ if (static_cast<double>(iv) == fv) {
+ return jsgraph()->Int64Constant(iv);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ // Select the correct X -> Word64 operator.
+ const Operator* op;
if (output_type.Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
return jsgraph()->graph()->NewNode(
- jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord64), node);
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word64
+ } else if (IsWord(output_rep)) {
+ if (output_type.Is(Type::Unsigned32())) {
+ op = machine()->ChangeUint32ToUint64();
+ } else if (output_type.Is(Type::Signed32())) {
+ op = machine()->ChangeInt32ToInt64();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord64);
+ }
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ if (output_type.Is(cache_.kInt64)) {
+ // float32 -> float64 -> int64
+ node = InsertChangeFloat32ToFloat64(node);
+ op = machine()->ChangeFloat64ToInt64();
+ } else if (output_type.Is(cache_.kUint64)) {
+ // float32 -> float64 -> uint64
+ node = InsertChangeFloat32ToFloat64(node);
+ op = machine()->ChangeFloat64ToUint64();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord64);
+ }
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ if (output_type.Is(cache_.kInt64)) {
+ op = machine()->ChangeFloat64ToInt64();
+ } else if (output_type.Is(cache_.kUint64)) {
+ op = machine()->ChangeFloat64ToUint64();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord64);
+ }
+ } else if (output_rep == MachineRepresentation::kTaggedSigned) {
+ if (output_type.Is(Type::SignedSmall())) {
+ op = simplified()->ChangeTaggedSignedToInt64();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord64);
+ }
+ } else if (CanBeTaggedPointer(output_rep)) {
+ if (output_type.Is(cache_.kInt64)) {
+ op = simplified()->ChangeTaggedToInt64();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord64);
+ }
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord64);
}
- // Can't really convert Word64 to anything else. Purported to be internal.
- return TypeError(node, output_rep, output_type,
- MachineRepresentation::kWord64);
+ return jsgraph()->graph()->NewNode(op, node);
}
const Operator* RepresentationChanger::Int32OperatorFor(
@@ -910,6 +1088,22 @@ const Operator* RepresentationChanger::Int32OverflowOperatorFor(
}
}
+const Operator* RepresentationChanger::Int64OperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kSpeculativeNumberAdd: // Fall through.
+ case IrOpcode::kSpeculativeSafeIntegerAdd:
+ case IrOpcode::kNumberAdd:
+ return machine()->Int64Add();
+ case IrOpcode::kSpeculativeNumberSubtract: // Fall through.
+ case IrOpcode::kSpeculativeSafeIntegerSubtract:
+ case IrOpcode::kNumberSubtract:
+ return machine()->Int64Sub();
+ default:
+ UNREACHABLE();
+ }
+}
+
const Operator* RepresentationChanger::TaggedSignedOperatorFor(
IrOpcode::Value opcode) {
switch (opcode) {
@@ -1124,6 +1318,10 @@ Node* RepresentationChanger::InsertChangeUint32ToFloat64(Node* node) {
return jsgraph()->graph()->NewNode(machine()->ChangeUint32ToFloat64(), node);
}
+Node* RepresentationChanger::InsertTruncateInt64ToInt32(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->TruncateInt64ToInt32(), node);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
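GetWord64RepresentationFor now folds a NumberConstant to an Int64Constant only when the double survives a round trip through int64_t, as in the fv/iv check above. A standalone sketch of that exactness test; the helper name is ours, and the inputs stay in range because out-of-range double-to-int casts are undefined behavior:

#include <cassert>
#include <cstdint>

bool FoldsToInt64(double fv, int64_t* iv_out) {
  int64_t iv = static_cast<int64_t>(fv);
  if (static_cast<double>(iv) == fv) {
    *iv_out = iv;
    return true;
  }
  return false;
}

int main() {
  int64_t iv;
  assert(FoldsToInt64(9007199254740992.0, &iv) && iv == (int64_t{1} << 53));
  assert(!FoldsToInt64(0.5, &iv));  // fractional values are not folded
}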
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 85a69d92eb..673c062d94 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -12,6 +12,9 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
+class TypeCache;
+
enum IdentifyZeros { kIdentifyZeros, kDistinguishZeros };
class Truncation final {
@@ -26,9 +29,6 @@ class Truncation final {
static Truncation Word32() {
return Truncation(TruncationKind::kWord32, kIdentifyZeros);
}
- static Truncation Word64() {
- return Truncation(TruncationKind::kWord64, kIdentifyZeros);
- }
static Truncation Float64(IdentifyZeros identify_zeros = kDistinguishZeros) {
return Truncation(TruncationKind::kFloat64, identify_zeros);
}
@@ -57,10 +57,6 @@ class Truncation final {
return LessGeneral(kind_, TruncationKind::kWord32) ||
LessGeneral(kind_, TruncationKind::kBool);
}
- bool IdentifiesUndefinedAndNaN() {
- return LessGeneral(kind_, TruncationKind::kFloat64) ||
- LessGeneral(kind_, TruncationKind::kWord64);
- }
bool IdentifiesZeroAndMinusZero() const {
return identify_zeros() == kIdentifyZeros;
}
@@ -85,7 +81,6 @@ class Truncation final {
kNone,
kBool,
kWord32,
- kWord64,
kFloat64,
kAny
};
@@ -162,8 +157,11 @@ class UseInfo {
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
- static UseInfo TruncatingWord64() {
- return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+ static UseInfo Word64() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Any());
+ }
+ static UseInfo Word() {
+ return UseInfo(MachineType::PointerRepresentation(), Truncation::Any());
}
static UseInfo Bool() {
return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
@@ -171,11 +169,10 @@ class UseInfo {
static UseInfo Float32() {
return UseInfo(MachineRepresentation::kFloat32, Truncation::Any());
}
- static UseInfo TruncatingFloat64() {
- return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
- }
- static UseInfo PointerInt() {
- return kPointerSize == 4 ? TruncatingWord32() : TruncatingWord64();
+ static UseInfo TruncatingFloat64(
+ IdentifyZeros identify_zeros = kDistinguishZeros) {
+ return UseInfo(MachineRepresentation::kFloat64,
+ Truncation::Float64(identify_zeros));
}
static UseInfo AnyTagged() {
return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
@@ -193,9 +190,11 @@ class UseInfo {
TypeCheckKind::kHeapObject);
}
static UseInfo CheckedSignedSmallAsTaggedSigned(
- const VectorSlotPair& feedback) {
- return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any(),
- TypeCheckKind::kSignedSmall, feedback);
+ const VectorSlotPair& feedback,
+ IdentifyZeros identify_zeros = kDistinguishZeros) {
+ return UseInfo(MachineRepresentation::kTaggedSigned,
+ Truncation::Any(identify_zeros), TypeCheckKind::kSignedSmall,
+ feedback);
}
static UseInfo CheckedSignedSmallAsWord32(IdentifyZeros identify_zeros,
const VectorSlotPair& feedback) {
@@ -209,17 +208,20 @@ class UseInfo {
Truncation::Any(identify_zeros), TypeCheckKind::kSigned32,
feedback);
}
- static UseInfo CheckedNumberAsFloat64(const VectorSlotPair& feedback) {
- return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
- TypeCheckKind::kNumber, feedback);
+ static UseInfo CheckedNumberAsFloat64(IdentifyZeros identify_zeros,
+ const VectorSlotPair& feedback) {
+ return UseInfo(MachineRepresentation::kFloat64,
+ Truncation::Any(identify_zeros), TypeCheckKind::kNumber,
+ feedback);
}
static UseInfo CheckedNumberAsWord32(const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
TypeCheckKind::kNumber, feedback);
}
static UseInfo CheckedNumberOrOddballAsFloat64(
- const VectorSlotPair& feedback) {
- return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
+ IdentifyZeros identify_zeros, const VectorSlotPair& feedback) {
+ return UseInfo(MachineRepresentation::kFloat64,
+ Truncation::Any(identify_zeros),
TypeCheckKind::kNumberOrOddball, feedback);
}
static UseInfo CheckedNumberOrOddballAsWord32(
@@ -263,11 +265,7 @@ class UseInfo {
// Eagerly folds any representation changes for constants.
class RepresentationChanger final {
public:
- RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
- : jsgraph_(jsgraph),
- isolate_(isolate),
- testing_type_errors_(false),
- type_error_(false) {}
+ RepresentationChanger(JSGraph* jsgraph, Isolate* isolate);
// Changes representation from {output_type} to {use_rep}. The {truncation}
// parameter is only used for sanity checking - if the changer cannot figure
@@ -278,6 +276,7 @@ class RepresentationChanger final {
UseInfo use_info);
const Operator* Int32OperatorFor(IrOpcode::Value opcode);
const Operator* Int32OverflowOperatorFor(IrOpcode::Value opcode);
+ const Operator* Int64OperatorFor(IrOpcode::Value opcode);
const Operator* TaggedSignedOperatorFor(IrOpcode::Value opcode);
const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
const Operator* Uint32OverflowOperatorFor(IrOpcode::Value opcode);
@@ -294,6 +293,7 @@ class RepresentationChanger final {
}
private:
+ TypeCache const& cache_;
JSGraph* jsgraph_;
Isolate* isolate_;
@@ -338,7 +338,8 @@ class RepresentationChanger final {
Node* InsertChangeTaggedToFloat64(Node* node);
Node* InsertChangeUint32ToFloat64(Node* node);
Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
- void InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason);
+ Node* InsertTruncateInt64ToInt32(Node* node);
+ Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason);
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const { return isolate_; }
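The Truncation edits above shrink the lattice to {None, Bool, Word32, Float64, Any}. As a rough illustration of the LessGeneral-style ordering such a lattice supports, here is a standalone sketch; the table is an assumption for exposition, not V8's exact relation.

// Sketch of a truncation lattice query: kind A is "less general" than kind B
// when every use that tolerates B also tolerates A. Table is illustrative.
#include <cstdio>

enum Kind { kNone, kBool, kWord32, kFloat64, kAny, kNumKinds };

bool LessGeneral(Kind a, Kind b) {
  // row = a, column = b; kNone is below everything, kAny above everything.
  static const bool table[kNumKinds][kNumKinds] = {
      /* kNone    */ {true, true, true, true, true},
      /* kBool    */ {false, true, false, false, true},
      /* kWord32  */ {false, false, true, false, true},
      /* kFloat64 */ {false, false, false, true, true},
      /* kAny     */ {false, false, false, false, true},
  };
  return table[a][b];
}

int main() {
  std::printf("Word32 <= Any: %d\n", LessGeneral(kWord32, kAny));
  std::printf("Float64 <= Word32: %d\n", LessGeneral(kFloat64, kWord32));
}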
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index ab9bd16e81..03a6430ef2 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -67,6 +67,10 @@ class S390OperandConverter final : public InstructionOperandConverter {
return Operand(constant.ToInt64());
#endif
case Constant::kExternalReference:
+ return Operand(constant.ToExternalReference());
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
@@ -1032,6 +1036,17 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ LoadlW(result, result); \
} while (false)
+#define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op) \
+ do { \
+ Register value = i.InputRegister(2); \
+ Register result = i.OutputRegister(0); \
+ Register addr = r1; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode); \
+ __ lay(addr, op); \
+ __ load_and_op(result, value, MemOperand(addr)); \
+ } while (false)
+
#define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end) \
do { \
Label do_cs; \
@@ -1047,6 +1062,22 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ bne(&do_cs, Label::kNear); \
} while (false)
+#define ATOMIC64_BIN_OP(bin_inst, offset, shift_amount, start, end) \
+ do { \
+ Label do_cs; \
+ __ lg(prev, MemOperand(addr, offset)); \
+ __ bind(&do_cs); \
+ __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
+ Operand(static_cast<intptr_t>(shift_amount)), \
+ true); \
+ __ bin_inst(new_val, prev, temp); \
+ __ lgr(temp, prev); \
+ __ RotateInsertSelectBits(temp, new_val, Operand(start), Operand(end), \
+ Operand::Zero(), false); \
+ __ CmpAndSwap64(prev, temp, MemOperand(addr, offset)); \
+ __ bne(&do_cs, Label::kNear); \
+ } while (false)
+
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
{ \
@@ -1066,6 +1097,15 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
extract_result(); \
}
+#define ATOMIC_BIN_OP_WORD(bin_inst, index, extract_result) \
+ { \
+ constexpr int offset = -(4 * index); \
+ constexpr int shift_amount = 32 - (index * 32); \
+ constexpr int start = 32 - shift_amount; \
+ constexpr int end = start + 31; \
+ ATOMIC64_BIN_OP(bin_inst, offset, shift_amount, start, end); \
+ extract_result(); \
+ }
#else
#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
{ \
@@ -1085,6 +1125,15 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
extract_result(); \
}
+#define ATOMIC_BIN_OP_WORD(bin_inst, index, extract_result) \
+ { \
+ constexpr int offset = -(4 * index); \
+ constexpr int shift_amount = index * 32; \
+ constexpr int start = 32 - shift_amount; \
+ constexpr int end = start + 31; \
+ ATOMIC64_BIN_OP(bin_inst, offset, shift_amount, start, end); \
+ extract_result(); \
+ }
#endif // V8_TARGET_BIG_ENDIAN
#define ASSEMBLE_ATOMIC_BINOP_HALFWORD(bin_inst, extract_result) \
@@ -1143,6 +1192,311 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ bind(&done); \
} while (false)
+#define ASSEMBLE_ATOMIC64_BINOP_BYTE(bin_inst, extract_result) \
+ do { \
+ Register value = i.InputRegister(2); \
+ Register result = i.OutputRegister(0); \
+ Register addr = i.TempRegister(0); \
+ Register prev = r0; \
+ Register new_val = r1; \
+ Register temp = kScratchReg; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode); \
+ Label done, leftmost0, leftmost1, two, three, four, five, seven; \
+ __ lay(addr, op); \
+ __ tmll(addr, Operand(7)); \
+ __ b(Condition(1), &seven); \
+ __ b(Condition(2), &leftmost1); \
+ __ b(Condition(4), &leftmost0); \
+ /* ending with 0b000 */ \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result); \
+ __ b(&done); \
+ /* ending in 0b001 to 0b011 */ \
+ __ bind(&leftmost0); \
+ __ tmll(addr, Operand(3)); \
+ __ b(Condition(1), &three); \
+ __ b(Condition(2), &two); \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result); \
+ __ b(&done); \
+ /* ending in 0b010 */ \
+ __ bind(&two); \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result); \
+ __ b(&done); \
+ /* ending in 0b011 */ \
+ __ bind(&three); \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result); \
+ __ b(&done); \
+ /* ending in 0b100 to 0b110 */ \
+ __ bind(&leftmost1); \
+ __ tmll(addr, Operand(3)); \
+ __ b(Condition(8), &four); \
+ __ b(Condition(4), &five); \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 6, extract_result); \
+ __ b(&done); \
+ /* ending in 0b100 */ \
+ __ bind(&four); \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 4, extract_result); \
+ __ b(&done); \
+ /* ending in 0b101 */ \
+ __ bind(&five); \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 5, extract_result); \
+ __ b(&done); \
+ /* ending in 0b111 */ \
+ __ bind(&seven); \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 7, extract_result); \
+ __ bind(&done); \
+ } while (false)
+
+#define ASSEMBLE_ATOMIC64_BINOP_HALFWORD(bin_inst, extract_result) \
+ do { \
+ Register value = i.InputRegister(2); \
+ Register result = i.OutputRegister(0); \
+ Register prev = i.TempRegister(0); \
+ Register new_val = r0; \
+ Register addr = r1; \
+ Register temp = kScratchReg; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode); \
+ Label done, one, two, three; \
+ __ lay(addr, op); \
+ __ tmll(addr, Operand(6)); \
+ __ b(Condition(1), &three); \
+ __ b(Condition(2), &two); \
+ __ b(Condition(4), &one); \
+ /* ending in 0b00 */ \
+ ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result); \
+ __ b(&done); \
+ /* ending in 0b01 */ \
+ __ bind(&one); \
+ ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result); \
+ __ b(&done); \
+ /* ending in 0b10 */ \
+ __ bind(&two); \
+ ATOMIC_BIN_OP_HALFWORD(bin_inst, 2, extract_result); \
+ __ b(&done); \
+ /* ending in 0b11 */ \
+ __ bind(&three); \
+ ATOMIC_BIN_OP_HALFWORD(bin_inst, 3, extract_result); \
+ __ bind(&done); \
+ } while (false)
+
+#define ASSEMBLE_ATOMIC64_BINOP_WORD(bin_inst, extract_result) \
+ do { \
+ Register value = i.InputRegister(2); \
+ Register result = i.OutputRegister(0); \
+ Register prev = i.TempRegister(0); \
+ Register new_val = r0; \
+ Register addr = r1; \
+ Register temp = kScratchReg; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode); \
+ Label done, one; \
+ __ lay(addr, op); \
+ __ tmll(addr, Operand(4)); \
+ __ b(Condition(2), &one); \
+ /* ending in 0b000 */ \
+ ATOMIC_BIN_OP_WORD(bin_inst, 0, extract_result); \
+ __ b(&done); \
+ __ bind(&one); \
+ /* ending in 0b100 */ \
+ ATOMIC_BIN_OP_WORD(bin_inst, 1, extract_result); \
+ __ bind(&done); \
+ } while (false)
+
+#define ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, offset) \
+ { \
+ __ lg(temp0, MemOperand(addr, offset)); \
+ __ lgr(temp1, temp0); \
+ __ RotateInsertSelectBits(temp0, old_val, Operand(start), \
+ Operand(end), Operand(shift_amount), false); \
+ __ RotateInsertSelectBits(temp1, new_val, Operand(start), \
+ Operand(end), Operand(shift_amount), false); \
+ __ CmpAndSwap64(temp0, temp1, MemOperand(addr, offset)); \
+    __ RotateInsertSelectBits(output, temp0, Operand(start + shift_amount), \
+        Operand(end + shift_amount), Operand(64 - shift_amount), true); \
+ }
+
+#ifdef V8_TARGET_BIG_ENDIAN
+#define ATOMIC64_COMP_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 8 * idx; \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = (7 - idx) * 8; \
+ ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx); \
+ }
+#define ATOMIC64_COMP_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 16 * idx; \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = (3 - idx) * 16; \
+ ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
+ }
+#define ATOMIC64_COMP_EXCHANGE_WORD(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 32 * idx; \
+ constexpr int end = start + 31; \
+ constexpr int shift_amount = (1 - idx) * 32; \
+ ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 4); \
+ }
+#else
+#define ATOMIC64_COMP_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 32 + 8 * (3 - idx); \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = idx * 8; \
+ ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx); \
+ }
+#define ATOMIC64_COMP_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 32 + 16 * (1 - idx); \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = idx * 16; \
+ ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
+ }
+#define ATOMIC64_COMP_EXCHANGE_WORD(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 32 * (1 - idx); \
+ constexpr int end = start + 31; \
+ constexpr int shift_amount = idx * 32; \
+ ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 4); \
+ }
+#endif
+
+#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_BYTE(load_and_ext) \
+ do { \
+ Register old_val = i.InputRegister(0); \
+ Register new_val = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ Register temp0 = r0; \
+ Register temp1 = r1; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ Label done, leftmost0, leftmost1, two, three, four, five, seven; \
+ __ lay(addr, op); \
+ __ tmll(addr, Operand(7)); \
+ __ b(Condition(1), &seven); \
+ __ b(Condition(2), &leftmost1); \
+ __ b(Condition(4), &leftmost0); \
+ /* ending with 0b000 */ \
+ ATOMIC64_COMP_EXCHANGE_BYTE(0); \
+ __ b(&done); \
+ /* ending in 0b001 to 0b011 */ \
+ __ bind(&leftmost0); \
+ __ tmll(addr, Operand(3)); \
+ __ b(Condition(1), &three); \
+ __ b(Condition(2), &two); \
+ ATOMIC64_COMP_EXCHANGE_BYTE(1); \
+ __ b(&done); \
+ /* ending in 0b010 */ \
+ __ bind(&two); \
+ ATOMIC64_COMP_EXCHANGE_BYTE(2); \
+ __ b(&done); \
+ /* ending in 0b011 */ \
+ __ bind(&three); \
+ ATOMIC64_COMP_EXCHANGE_BYTE(3); \
+ __ b(&done); \
+ /* ending in 0b100 to 0b110 */ \
+ __ bind(&leftmost1); \
+ __ tmll(addr, Operand(3)); \
+ __ b(Condition(8), &four); \
+ __ b(Condition(4), &five); \
+ ATOMIC64_COMP_EXCHANGE_BYTE(6); \
+ __ b(&done); \
+ /* ending in 0b100 */ \
+ __ bind(&four); \
+ ATOMIC64_COMP_EXCHANGE_BYTE(4); \
+ __ b(&done); \
+ /* ending in 0b101 */ \
+ __ bind(&five); \
+ ATOMIC64_COMP_EXCHANGE_BYTE(5); \
+ __ b(&done); \
+ /* ending in 0b111 */ \
+ __ bind(&seven); \
+ ATOMIC64_COMP_EXCHANGE_BYTE(7); \
+ __ bind(&done); \
+ __ load_and_ext(output, output); \
+ } while (false)
+
+#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_HALFWORD(load_and_ext) \
+ do { \
+ Register old_val = i.InputRegister(0); \
+ Register new_val = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ Register temp0 = r0; \
+ Register temp1 = r1; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ Label done, one, two, three; \
+ __ lay(addr, op); \
+ __ tmll(addr, Operand(6)); \
+ __ b(Condition(1), &three); \
+ __ b(Condition(2), &two); \
+ __ b(Condition(4), &one); \
+ /* ending in 0b00 */ \
+ ATOMIC64_COMP_EXCHANGE_HALFWORD(0); \
+ __ b(&done); \
+ /* ending in 0b01 */ \
+ __ bind(&one); \
+ ATOMIC64_COMP_EXCHANGE_HALFWORD(1); \
+ __ b(&done); \
+ /* ending in 0b10 */ \
+ __ bind(&two); \
+ ATOMIC64_COMP_EXCHANGE_HALFWORD(2); \
+ __ b(&done); \
+ /* ending in 0b11 */ \
+ __ bind(&three); \
+ ATOMIC64_COMP_EXCHANGE_HALFWORD(3); \
+ __ bind(&done); \
+ __ load_and_ext(output, output); \
+ } while (false)
+
+#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD(load_and_ext) \
+ do { \
+ Register old_val = i.InputRegister(0); \
+ Register new_val = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ Register temp0 = r0; \
+ Register temp1 = r1; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ Label done, one; \
+ __ lay(addr, op); \
+ __ tmll(addr, Operand(4)); \
+ __ b(Condition(2), &one); \
+ /* ending in 0b000 */ \
+ ATOMIC64_COMP_EXCHANGE_WORD(0); \
+ __ b(&done); \
+ __ bind(&one); \
+ /* ending in 0b100 */ \
+ ATOMIC64_COMP_EXCHANGE_WORD(1); \
+ __ bind(&done); \
+ __ load_and_ext(output, output); \
+ } while (false)
+
+#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64() \
+ do { \
+ Register new_val = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ __ lay(addr, op); \
+ __ CmpAndSwap64(output, new_val, MemOperand(addr)); \
+ } while (false)
+
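The (start, end, shift_amount) constants in the macros above follow one pattern per lane width and byte order. The following standalone check, a sketch mirroring the V8_TARGET_BIG_ENDIAN ATOMIC64_COMP_EXCHANGE_BYTE formulas (not part of the patch), recomputes the byte tuples and verifies that the selected bit fields tile the 64-bit lane disjointly.

// Recompute the big-endian byte constants and verify that the selected bit
// ranges [start, end] (IBM bit numbering, bit 0 = MSB) cover all 64 bits.
#include <cassert>
#include <cstdio>

int main() {
  unsigned long long covered = 0;
  for (int idx = 0; idx < 8; ++idx) {
    int start = 8 * idx;               // first selected bit
    int end = start + 7;               // last selected bit
    int shift_amount = (7 - idx) * 8;  // rotate needed to align the operand
    unsigned long long mask = 0xFFULL << shift_amount;
    assert((covered & mask) == 0);     // fields must not overlap
    covered |= mask;
    std::printf("idx=%d start=%d end=%d shift=%d\n", idx, start, end,
                shift_amount);
  }
  assert(covered == ~0ULL);  // together the 8 byte fields cover the lane
  return 0;
}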
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
@@ -2744,6 +3098,260 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicXorWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lax);
break;
+ case kS390_Word64AtomicLoadUint8:
+ __ LoadlB(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kS390_Word64AtomicLoadUint16:
+ __ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kS390_Word64AtomicLoadUint32:
+ __ LoadlW(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kS390_Word64AtomicLoadUint64:
+ __ lg(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kS390_Word64AtomicStoreUint8:
+ __ StoreByte(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
+ break;
+ case kS390_Word64AtomicStoreUint16:
+ __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
+ break;
+ case kS390_Word64AtomicStoreUint32:
+ __ StoreW(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
+ break;
+ case kS390_Word64AtomicStoreUint64:
+ __ stg(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
+ break;
+#define ATOMIC64_BINOP_CASE(op, inst) \
+ case kS390_Word64Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC64_BINOP_BYTE(inst, [&]() { \
+ int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
+ __ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
+ Operand(static_cast<intptr_t>(rotate_left)), \
+ true); \
+ }); \
+ break; \
+ case kS390_Word64Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC64_BINOP_HALFWORD(inst, [&]() { \
+ int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
+ __ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
+ Operand(static_cast<intptr_t>(rotate_left)), \
+ true); \
+ }); \
+ break; \
+ case kS390_Word64Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC64_BINOP_WORD(inst, [&]() { \
+ int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
+ __ RotateInsertSelectBits(result, prev, Operand(32), Operand(63), \
+ Operand(static_cast<intptr_t>(rotate_left)), \
+ true); \
+ }); \
+ break;
+ ATOMIC64_BINOP_CASE(Add, AddP)
+ ATOMIC64_BINOP_CASE(Sub, SubP)
+ ATOMIC64_BINOP_CASE(And, AndP)
+ ATOMIC64_BINOP_CASE(Or, OrP)
+ ATOMIC64_BINOP_CASE(Xor, XorP)
+#undef ATOMIC64_BINOP_CASE
+ case kS390_Word64AtomicAddUint64:
+ ASSEMBLE_ATOMIC_BINOP_WORD64(laag);
+ break;
+ case kS390_Word64AtomicSubUint64:
+ ASSEMBLE_ATOMIC_BINOP_WORD64(LoadAndSub64);
+ break;
+ case kS390_Word64AtomicAndUint64:
+ ASSEMBLE_ATOMIC_BINOP_WORD64(lang);
+ break;
+ case kS390_Word64AtomicOrUint64:
+ ASSEMBLE_ATOMIC_BINOP_WORD64(laog);
+ break;
+ case kS390_Word64AtomicXorUint64:
+ ASSEMBLE_ATOMIC_BINOP_WORD64(laxg);
+ break;
+#define ATOMIC64_EXCHANGE(start, end, shift_amount, offset) \
+ { \
+ Label do_cs; \
+ __ lg(output, MemOperand(r1, offset)); \
+ __ bind(&do_cs); \
+ __ lgr(r0, output); \
+ __ RotateInsertSelectBits(r0, value, Operand(start), Operand(end), \
+ Operand(shift_amount), false); \
+ __ csg(output, r0, MemOperand(r1, offset)); \
+ __ bne(&do_cs, Label::kNear); \
+ __ srlg(output, output, Operand(shift_amount)); \
+ }
+#ifdef V8_TARGET_BIG_ENDIAN
+#define ATOMIC64_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 8 * idx; \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = (7 - idx) * 8; \
+ ATOMIC64_EXCHANGE(start, end, shift_amount, -idx); \
+ }
+#define ATOMIC64_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 16 * idx; \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = (3 - idx) * 16; \
+ ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 2); \
+ }
+#define ATOMIC64_EXCHANGE_WORD(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 32 * idx; \
+ constexpr int end = start + 31; \
+ constexpr int shift_amount = (1 - idx) * 32; \
+ ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 4); \
+ }
+#else
+#define ATOMIC64_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 32 + 8 * (3 - idx); \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = idx * 8; \
+ ATOMIC64_EXCHANGE(start, end, shift_amount, -idx); \
+ }
+#define ATOMIC64_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 32 + 16 * (1 - idx); \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = idx * 16; \
+ ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 2); \
+ }
+#define ATOMIC64_EXCHANGE_WORD(i) \
+ { \
+ constexpr int idx = (i); \
+ constexpr int start = 32 * (1 - idx); \
+ constexpr int end = start + 31; \
+ constexpr int shift_amount = idx * 32; \
+ ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 4); \
+ }
+#endif // V8_TARGET_BIG_ENDIAN
+ case kS390_Word64AtomicExchangeUint8: {
+ Register base = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register output = i.OutputRegister();
+ Label done, leftmost0, leftmost1, two, three, four, five, seven;
+ __ la(r1, MemOperand(base, index));
+ __ tmll(r1, Operand(7));
+ __ b(Condition(1), &seven);
+ __ b(Condition(2), &leftmost1);
+ __ b(Condition(4), &leftmost0);
+ /* ending with 0b000 */
+ ATOMIC64_EXCHANGE_BYTE(0);
+ __ b(&done);
+ /* ending in 0b001 to 0b011 */
+ __ bind(&leftmost0);
+ __ tmll(r1, Operand(3));
+ __ b(Condition(1), &three);
+ __ b(Condition(2), &two);
+ ATOMIC64_EXCHANGE_BYTE(1);
+ __ b(&done);
+ /* ending in 0b010 */
+ __ bind(&two);
+ ATOMIC64_EXCHANGE_BYTE(2);
+ __ b(&done);
+ /* ending in 0b011 */
+ __ bind(&three);
+ ATOMIC64_EXCHANGE_BYTE(3);
+ __ b(&done);
+ /* ending in 0b100 to 0b110 */
+ __ bind(&leftmost1);
+ __ tmll(r1, Operand(3));
+ __ b(Condition(8), &four);
+ __ b(Condition(4), &five);
+ ATOMIC64_EXCHANGE_BYTE(6);
+ __ b(&done);
+ /* ending in 0b100 */
+ __ bind(&four);
+ ATOMIC64_EXCHANGE_BYTE(4);
+ __ b(&done);
+ /* ending in 0b101 */
+ __ bind(&five);
+ ATOMIC64_EXCHANGE_BYTE(5);
+ __ b(&done);
+ /* ending in 0b111 */
+ __ bind(&seven);
+ ATOMIC64_EXCHANGE_BYTE(7);
+ __ bind(&done);
+ break;
+ }
+ case kS390_Word64AtomicExchangeUint16: {
+ Register base = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register output = i.OutputRegister();
+ Label done, one, two, three;
+ __ la(r1, MemOperand(base, index));
+ __ tmll(r1, Operand(6));
+ __ b(Condition(1), &three);
+ __ b(Condition(2), &two);
+ __ b(Condition(4), &one);
+ /* ending in 0b00 */
+ ATOMIC64_EXCHANGE_HALFWORD(0);
+ __ b(&done);
+ /* ending in 0b01 */
+ __ bind(&one);
+ ATOMIC64_EXCHANGE_HALFWORD(1);
+ __ b(&done);
+ /* ending in 0b10 */
+ __ bind(&two);
+ ATOMIC64_EXCHANGE_HALFWORD(2);
+ __ b(&done);
+ /* ending in 0b11 */
+ __ bind(&three);
+ ATOMIC64_EXCHANGE_HALFWORD(3);
+ __ bind(&done);
+ break;
+ }
+ case kS390_Word64AtomicExchangeUint32: {
+ Register base = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register output = i.OutputRegister();
+ Label done, one;
+ __ la(r1, MemOperand(base, index));
+ __ tmll(r1, Operand(4));
+ __ b(Condition(2), &one);
+ /* ending in 0b0 */
+ ATOMIC64_EXCHANGE_WORD(0);
+ __ b(&done);
+ __ bind(&one);
+ /* ending in 0b1 */
+ ATOMIC64_EXCHANGE_WORD(1);
+ __ bind(&done);
+ break;
+ }
+ case kS390_Word64AtomicExchangeUint64: {
+ Register base = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register output = i.OutputRegister();
+ Label do_cs;
+ __ la(r1, MemOperand(base, index));
+ __ lg(output, MemOperand(r1));
+      __ bind(&do_cs);
+      __ csg(output, value, MemOperand(r1));
+      __ bne(&do_cs, Label::kNear);
+ break;
+ }
+ case kS390_Word64AtomicCompareExchangeUint8:
+ ASSEMBLE_ATOMIC64_COMP_EXCHANGE_BYTE(LoadlB);
+ break;
+ case kS390_Word64AtomicCompareExchangeUint16:
+ ASSEMBLE_ATOMIC64_COMP_EXCHANGE_HALFWORD(LoadLogicalHalfWordP);
+ break;
+ case kS390_Word64AtomicCompareExchangeUint32:
+ ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD(LoadlW);
+ break;
+ case kS390_Word64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
+ break;
default:
UNREACHABLE();
break;
@@ -2776,7 +3384,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
+ condition == kOverflow || condition == kNotOverflow) {
return;
}
@@ -3170,9 +3779,13 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kExternalReference:
__ Move(dst, src.ToExternalReference());
break;
+ case Constant::kDelayedStringConstant:
+ __ mov(dst, Operand::EmbeddedStringConstant(
+ src.ToDelayedStringConstant()));
+ break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
+ RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
index b5296f63d0..9a704f9bef 100644
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -11,157 +11,193 @@ namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(S390_Abs32) \
- V(S390_Abs64) \
- V(S390_And32) \
- V(S390_And64) \
- V(S390_Or32) \
- V(S390_Or64) \
- V(S390_Xor32) \
- V(S390_Xor64) \
- V(S390_ShiftLeft32) \
- V(S390_ShiftLeft64) \
- V(S390_ShiftLeftPair) \
- V(S390_ShiftRight32) \
- V(S390_ShiftRight64) \
- V(S390_ShiftRightPair) \
- V(S390_ShiftRightArith32) \
- V(S390_ShiftRightArith64) \
- V(S390_ShiftRightArithPair) \
- V(S390_RotRight32) \
- V(S390_RotRight64) \
- V(S390_Not32) \
- V(S390_Not64) \
- V(S390_RotLeftAndClear64) \
- V(S390_RotLeftAndClearLeft64) \
- V(S390_RotLeftAndClearRight64) \
- V(S390_Lay) \
- V(S390_Add32) \
- V(S390_Add64) \
- V(S390_AddPair) \
- V(S390_AddFloat) \
- V(S390_AddDouble) \
- V(S390_Sub32) \
- V(S390_Sub64) \
- V(S390_SubFloat) \
- V(S390_SubDouble) \
- V(S390_SubPair) \
- V(S390_MulPair) \
- V(S390_Mul32) \
- V(S390_Mul32WithOverflow) \
- V(S390_Mul64) \
- V(S390_MulHigh32) \
- V(S390_MulHighU32) \
- V(S390_MulFloat) \
- V(S390_MulDouble) \
- V(S390_Div32) \
- V(S390_Div64) \
- V(S390_DivU32) \
- V(S390_DivU64) \
- V(S390_DivFloat) \
- V(S390_DivDouble) \
- V(S390_Mod32) \
- V(S390_Mod64) \
- V(S390_ModU32) \
- V(S390_ModU64) \
- V(S390_ModDouble) \
- V(S390_Neg32) \
- V(S390_Neg64) \
- V(S390_NegDouble) \
- V(S390_NegFloat) \
- V(S390_SqrtFloat) \
- V(S390_FloorFloat) \
- V(S390_CeilFloat) \
- V(S390_TruncateFloat) \
- V(S390_AbsFloat) \
- V(S390_SqrtDouble) \
- V(S390_FloorDouble) \
- V(S390_CeilDouble) \
- V(S390_TruncateDouble) \
- V(S390_RoundDouble) \
- V(S390_MaxFloat) \
- V(S390_MaxDouble) \
- V(S390_MinFloat) \
- V(S390_MinDouble) \
- V(S390_AbsDouble) \
- V(S390_Cntlz32) \
- V(S390_Cntlz64) \
- V(S390_Popcnt32) \
- V(S390_Popcnt64) \
- V(S390_Cmp32) \
- V(S390_Cmp64) \
- V(S390_CmpFloat) \
- V(S390_CmpDouble) \
- V(S390_Tst32) \
- V(S390_Tst64) \
- V(S390_Push) \
- V(S390_PushFrame) \
- V(S390_StackClaim) \
- V(S390_StoreToStackSlot) \
- V(S390_SignExtendWord8ToInt32) \
- V(S390_SignExtendWord16ToInt32) \
- V(S390_SignExtendWord8ToInt64) \
- V(S390_SignExtendWord16ToInt64) \
- V(S390_SignExtendWord32ToInt64) \
- V(S390_Uint32ToUint64) \
- V(S390_Int64ToInt32) \
- V(S390_Int64ToFloat32) \
- V(S390_Int64ToDouble) \
- V(S390_Uint64ToFloat32) \
- V(S390_Uint64ToDouble) \
- V(S390_Int32ToFloat32) \
- V(S390_Int32ToDouble) \
- V(S390_Uint32ToFloat32) \
- V(S390_Uint32ToDouble) \
- V(S390_Float32ToInt64) \
- V(S390_Float32ToUint64) \
- V(S390_Float32ToInt32) \
- V(S390_Float32ToUint32) \
- V(S390_Float32ToDouble) \
- V(S390_Float64SilenceNaN) \
- V(S390_DoubleToInt32) \
- V(S390_DoubleToUint32) \
- V(S390_DoubleToInt64) \
- V(S390_DoubleToUint64) \
- V(S390_DoubleToFloat32) \
- V(S390_DoubleExtractLowWord32) \
- V(S390_DoubleExtractHighWord32) \
- V(S390_DoubleInsertLowWord32) \
- V(S390_DoubleInsertHighWord32) \
- V(S390_DoubleConstruct) \
- V(S390_BitcastInt32ToFloat32) \
- V(S390_BitcastFloat32ToInt32) \
- V(S390_BitcastInt64ToDouble) \
- V(S390_BitcastDoubleToInt64) \
- V(S390_LoadWordS8) \
- V(S390_LoadWordU8) \
- V(S390_LoadWordS16) \
- V(S390_LoadWordU16) \
- V(S390_LoadWordS32) \
- V(S390_LoadWordU32) \
- V(S390_LoadAndTestWord32) \
- V(S390_LoadAndTestWord64) \
- V(S390_LoadAndTestFloat32) \
- V(S390_LoadAndTestFloat64) \
- V(S390_LoadReverse16RR) \
- V(S390_LoadReverse32RR) \
- V(S390_LoadReverse64RR) \
- V(S390_LoadReverse16) \
- V(S390_LoadReverse32) \
- V(S390_LoadReverse64) \
- V(S390_LoadWord64) \
- V(S390_LoadFloat32) \
- V(S390_LoadDouble) \
- V(S390_StoreWord8) \
- V(S390_StoreWord16) \
- V(S390_StoreWord32) \
- V(S390_StoreWord64) \
- V(S390_StoreReverse16) \
- V(S390_StoreReverse32) \
- V(S390_StoreReverse64) \
- V(S390_StoreFloat32) \
- V(S390_StoreDouble)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(S390_Abs32) \
+ V(S390_Abs64) \
+ V(S390_And32) \
+ V(S390_And64) \
+ V(S390_Or32) \
+ V(S390_Or64) \
+ V(S390_Xor32) \
+ V(S390_Xor64) \
+ V(S390_ShiftLeft32) \
+ V(S390_ShiftLeft64) \
+ V(S390_ShiftLeftPair) \
+ V(S390_ShiftRight32) \
+ V(S390_ShiftRight64) \
+ V(S390_ShiftRightPair) \
+ V(S390_ShiftRightArith32) \
+ V(S390_ShiftRightArith64) \
+ V(S390_ShiftRightArithPair) \
+ V(S390_RotRight32) \
+ V(S390_RotRight64) \
+ V(S390_Not32) \
+ V(S390_Not64) \
+ V(S390_RotLeftAndClear64) \
+ V(S390_RotLeftAndClearLeft64) \
+ V(S390_RotLeftAndClearRight64) \
+ V(S390_Lay) \
+ V(S390_Add32) \
+ V(S390_Add64) \
+ V(S390_AddPair) \
+ V(S390_AddFloat) \
+ V(S390_AddDouble) \
+ V(S390_Sub32) \
+ V(S390_Sub64) \
+ V(S390_SubFloat) \
+ V(S390_SubDouble) \
+ V(S390_SubPair) \
+ V(S390_MulPair) \
+ V(S390_Mul32) \
+ V(S390_Mul32WithOverflow) \
+ V(S390_Mul64) \
+ V(S390_MulHigh32) \
+ V(S390_MulHighU32) \
+ V(S390_MulFloat) \
+ V(S390_MulDouble) \
+ V(S390_Div32) \
+ V(S390_Div64) \
+ V(S390_DivU32) \
+ V(S390_DivU64) \
+ V(S390_DivFloat) \
+ V(S390_DivDouble) \
+ V(S390_Mod32) \
+ V(S390_Mod64) \
+ V(S390_ModU32) \
+ V(S390_ModU64) \
+ V(S390_ModDouble) \
+ V(S390_Neg32) \
+ V(S390_Neg64) \
+ V(S390_NegDouble) \
+ V(S390_NegFloat) \
+ V(S390_SqrtFloat) \
+ V(S390_FloorFloat) \
+ V(S390_CeilFloat) \
+ V(S390_TruncateFloat) \
+ V(S390_AbsFloat) \
+ V(S390_SqrtDouble) \
+ V(S390_FloorDouble) \
+ V(S390_CeilDouble) \
+ V(S390_TruncateDouble) \
+ V(S390_RoundDouble) \
+ V(S390_MaxFloat) \
+ V(S390_MaxDouble) \
+ V(S390_MinFloat) \
+ V(S390_MinDouble) \
+ V(S390_AbsDouble) \
+ V(S390_Cntlz32) \
+ V(S390_Cntlz64) \
+ V(S390_Popcnt32) \
+ V(S390_Popcnt64) \
+ V(S390_Cmp32) \
+ V(S390_Cmp64) \
+ V(S390_CmpFloat) \
+ V(S390_CmpDouble) \
+ V(S390_Tst32) \
+ V(S390_Tst64) \
+ V(S390_Push) \
+ V(S390_PushFrame) \
+ V(S390_StackClaim) \
+ V(S390_StoreToStackSlot) \
+ V(S390_SignExtendWord8ToInt32) \
+ V(S390_SignExtendWord16ToInt32) \
+ V(S390_SignExtendWord8ToInt64) \
+ V(S390_SignExtendWord16ToInt64) \
+ V(S390_SignExtendWord32ToInt64) \
+ V(S390_Uint32ToUint64) \
+ V(S390_Int64ToInt32) \
+ V(S390_Int64ToFloat32) \
+ V(S390_Int64ToDouble) \
+ V(S390_Uint64ToFloat32) \
+ V(S390_Uint64ToDouble) \
+ V(S390_Int32ToFloat32) \
+ V(S390_Int32ToDouble) \
+ V(S390_Uint32ToFloat32) \
+ V(S390_Uint32ToDouble) \
+ V(S390_Float32ToInt64) \
+ V(S390_Float32ToUint64) \
+ V(S390_Float32ToInt32) \
+ V(S390_Float32ToUint32) \
+ V(S390_Float32ToDouble) \
+ V(S390_Float64SilenceNaN) \
+ V(S390_DoubleToInt32) \
+ V(S390_DoubleToUint32) \
+ V(S390_DoubleToInt64) \
+ V(S390_DoubleToUint64) \
+ V(S390_DoubleToFloat32) \
+ V(S390_DoubleExtractLowWord32) \
+ V(S390_DoubleExtractHighWord32) \
+ V(S390_DoubleInsertLowWord32) \
+ V(S390_DoubleInsertHighWord32) \
+ V(S390_DoubleConstruct) \
+ V(S390_BitcastInt32ToFloat32) \
+ V(S390_BitcastFloat32ToInt32) \
+ V(S390_BitcastInt64ToDouble) \
+ V(S390_BitcastDoubleToInt64) \
+ V(S390_LoadWordS8) \
+ V(S390_LoadWordU8) \
+ V(S390_LoadWordS16) \
+ V(S390_LoadWordU16) \
+ V(S390_LoadWordS32) \
+ V(S390_LoadWordU32) \
+ V(S390_LoadAndTestWord32) \
+ V(S390_LoadAndTestWord64) \
+ V(S390_LoadAndTestFloat32) \
+ V(S390_LoadAndTestFloat64) \
+ V(S390_LoadReverse16RR) \
+ V(S390_LoadReverse32RR) \
+ V(S390_LoadReverse64RR) \
+ V(S390_LoadReverse16) \
+ V(S390_LoadReverse32) \
+ V(S390_LoadReverse64) \
+ V(S390_LoadWord64) \
+ V(S390_LoadFloat32) \
+ V(S390_LoadDouble) \
+ V(S390_StoreWord8) \
+ V(S390_StoreWord16) \
+ V(S390_StoreWord32) \
+ V(S390_StoreWord64) \
+ V(S390_StoreReverse16) \
+ V(S390_StoreReverse32) \
+ V(S390_StoreReverse64) \
+ V(S390_StoreFloat32) \
+ V(S390_StoreDouble) \
+ V(S390_Word64AtomicLoadUint8) \
+ V(S390_Word64AtomicLoadUint16) \
+ V(S390_Word64AtomicLoadUint32) \
+ V(S390_Word64AtomicLoadUint64) \
+ V(S390_Word64AtomicStoreUint8) \
+ V(S390_Word64AtomicStoreUint16) \
+ V(S390_Word64AtomicStoreUint32) \
+ V(S390_Word64AtomicStoreUint64) \
+ V(S390_Word64AtomicExchangeUint8) \
+ V(S390_Word64AtomicExchangeUint16) \
+ V(S390_Word64AtomicExchangeUint32) \
+ V(S390_Word64AtomicExchangeUint64) \
+ V(S390_Word64AtomicCompareExchangeUint8) \
+ V(S390_Word64AtomicCompareExchangeUint16) \
+ V(S390_Word64AtomicCompareExchangeUint32) \
+ V(S390_Word64AtomicCompareExchangeUint64) \
+ V(S390_Word64AtomicAddUint8) \
+ V(S390_Word64AtomicAddUint16) \
+ V(S390_Word64AtomicAddUint32) \
+ V(S390_Word64AtomicAddUint64) \
+ V(S390_Word64AtomicSubUint8) \
+ V(S390_Word64AtomicSubUint16) \
+ V(S390_Word64AtomicSubUint32) \
+ V(S390_Word64AtomicSubUint64) \
+ V(S390_Word64AtomicAndUint8) \
+ V(S390_Word64AtomicAndUint16) \
+ V(S390_Word64AtomicAndUint32) \
+ V(S390_Word64AtomicAndUint64) \
+ V(S390_Word64AtomicOrUint8) \
+ V(S390_Word64AtomicOrUint16) \
+ V(S390_Word64AtomicOrUint32) \
+ V(S390_Word64AtomicOrUint64) \
+ V(S390_Word64AtomicXorUint8) \
+ V(S390_Word64AtomicXorUint16) \
+ V(S390_Word64AtomicXorUint32) \
+ V(S390_Word64AtomicXorUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
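The opcode list above is a classic X-macro: one V(...) list that is expanded several ways (enum values, name strings, switch cases). A minimal sketch of the technique, with hypothetical names unrelated to the real V8 expansion sites:

// X-macro sketch: expand one list into both an enum and a name table.
#include <cstdio>

#define DEMO_OPCODE_LIST(V) \
  V(Demo_Add64)             \
  V(Demo_Sub64)             \
  V(Demo_Xor64)

#define DECLARE_ENUM(Name) k##Name,
enum DemoOpcode { DEMO_OPCODE_LIST(DECLARE_ENUM) kDemoOpcodeCount };
#undef DECLARE_ENUM

#define DECLARE_NAME(Name) #Name,
const char* const kDemoOpcodeNames[] = {DEMO_OPCODE_LIST(DECLARE_NAME)};
#undef DECLARE_NAME

int main() {
  for (int i = 0; i < kDemoOpcodeCount; ++i)
    std::printf("%d -> %s\n", i, kDemoOpcodeNames[i]);
}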
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index fd388a219a..fbd81e17c4 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -169,6 +169,46 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_StackClaim:
return kHasSideEffect;
+ case kS390_Word64AtomicLoadUint8:
+ case kS390_Word64AtomicLoadUint16:
+ case kS390_Word64AtomicLoadUint32:
+ case kS390_Word64AtomicLoadUint64:
+ return kIsLoadOperation;
+
+ case kS390_Word64AtomicStoreUint8:
+ case kS390_Word64AtomicStoreUint16:
+ case kS390_Word64AtomicStoreUint32:
+ case kS390_Word64AtomicStoreUint64:
+ case kS390_Word64AtomicExchangeUint8:
+ case kS390_Word64AtomicExchangeUint16:
+ case kS390_Word64AtomicExchangeUint32:
+ case kS390_Word64AtomicExchangeUint64:
+ case kS390_Word64AtomicCompareExchangeUint8:
+ case kS390_Word64AtomicCompareExchangeUint16:
+ case kS390_Word64AtomicCompareExchangeUint32:
+ case kS390_Word64AtomicCompareExchangeUint64:
+ case kS390_Word64AtomicAddUint8:
+ case kS390_Word64AtomicAddUint16:
+ case kS390_Word64AtomicAddUint32:
+ case kS390_Word64AtomicAddUint64:
+ case kS390_Word64AtomicSubUint8:
+ case kS390_Word64AtomicSubUint16:
+ case kS390_Word64AtomicSubUint32:
+ case kS390_Word64AtomicSubUint64:
+ case kS390_Word64AtomicAndUint8:
+ case kS390_Word64AtomicAndUint16:
+ case kS390_Word64AtomicAndUint32:
+ case kS390_Word64AtomicAndUint64:
+ case kS390_Word64AtomicOrUint8:
+ case kS390_Word64AtomicOrUint16:
+ case kS390_Word64AtomicOrUint32:
+ case kS390_Word64AtomicOrUint64:
+ case kS390_Word64AtomicXorUint8:
+ case kS390_Word64AtomicXorUint16:
+ case kS390_Word64AtomicXorUint32:
+ case kS390_Word64AtomicXorUint64:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 8174551777..018c288939 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -1409,8 +1409,11 @@ static inline bool TryMatchDoubleConstructFromInsert(
FLOAT_UNARY_OP_LIST_32(V) \
V(Float64, ChangeFloat64ToUint64, kS390_DoubleToUint64, OperandMode::kNone, \
null) \
+ V(Float64, ChangeFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone, \
+ null) \
V(Float64, BitcastFloat64ToInt64, kS390_BitcastDoubleToInt64, \
OperandMode::kNone, null)
+
#define WORD32_UNARY_OP_LIST(V) \
WORD32_UNARY_OP_LIST_32(V) \
V(Word32, ChangeInt32ToInt64, kS390_SignExtendWord32ToInt64, \
@@ -1489,6 +1492,8 @@ static inline bool TryMatchDoubleConstructFromInsert(
null) \
V(Word64, RoundInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone, \
null) \
+ V(Word64, ChangeInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone, \
+ null) \
V(Word64, RoundUint64ToFloat32, kS390_Uint64ToFloat32, OperandMode::kNone, \
null) \
V(Word64, RoundUint64ToFloat64, kS390_Uint64ToDouble, OperandMode::kNone, \
@@ -2157,6 +2162,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
for (PushParameter input : (*arguments)) {
+ if (input.node == nullptr) continue;
Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
++slot;
@@ -2250,11 +2256,26 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
inputs);
}
-void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- S390OperandGenerator g(this);
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs);
+}
+
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
@@ -2271,42 +2292,34 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
UNREACHABLE();
return;
}
-
- AddressingMode addressing_mode = kMode_MRR;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs);
+ VisitAtomicExchange(this, node, opcode);
}
-void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
- S390OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* old_value = node->InputAt(2);
- Node* new_value = node->InputAt(3);
-
- MachineType type = AtomicOpType(node->op());
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
- if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
- } else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
- } else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kS390_Word64AtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
- } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kS390_Word64AtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kS390_Word64AtomicExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kS390_Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
return;
}
+ VisitAtomicExchange(this, node, opcode);
+}
+
+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ S390OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
InstructionOperand inputs[4];
size_t input_count = 0;
@@ -2328,34 +2341,53 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
outputs[output_count++] = g.DefineSameAsFirst(node);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, output_count, outputs, input_count, inputs);
+ selector->Emit(code, output_count, outputs, input_count, inputs);
}
-void InstructionSelector::VisitWord32AtomicBinaryOperation(
- Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
- ArchOpcode uint16_op, ArchOpcode word32_op) {
- S390OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
-
if (type == MachineType::Int8()) {
- opcode = int8_op;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = uint8_op;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = int16_op;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = uint16_op;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = word32_op;
+ opcode = kWord32AtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicCompareExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ MachineType type = AtomicOpType(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint8()) {
+ opcode = kS390_Word64AtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kS390_Word64AtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kS390_Word64AtomicCompareExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kS390_Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
return;
}
+ VisitAtomicCompareExchange(this, node, opcode);
+}
+
+void VisitAtomicBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ S390OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -2381,7 +2413,31 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
temps[temp_count++] = g.TempRegister();
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, output_count, outputs, input_count, inputs, temp_count, temps);
+ selector->Emit(code, output_count, outputs, input_count, inputs, temp_count,
+ temps);
+}
+
+void InstructionSelector::VisitWord32AtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
+ MachineType type = AtomicOpType(node->op());
+ ArchOpcode opcode = kArchNop;
+
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicBinop(this, node, opcode);
}
#define VISIT_ATOMIC_BINOP(op) \
@@ -2398,6 +2454,101 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode word32_op,
+ ArchOpcode word64_op) {
+ MachineType type = AtomicOpType(node->op());
+ ArchOpcode opcode = kArchNop;
+
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = word64_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicBinop(this, node, opcode);
+}
+
+#define VISIT_ATOMIC64_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation( \
+ node, kS390_Word64Atomic##op##Uint8, kS390_Word64Atomic##op##Uint16, \
+ kS390_Word64Atomic##op##Uint32, kS390_Word64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC64_BINOP(Add)
+VISIT_ATOMIC64_BINOP(Sub)
+VISIT_ATOMIC64_BINOP(And)
+VISIT_ATOMIC64_BINOP(Or)
+VISIT_ATOMIC64_BINOP(Xor)
+#undef VISIT_ATOMIC64_BINOP
+
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ S390OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = kS390_Word64AtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kS390_Word64AtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kS390_Word64AtomicLoadUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kS390_Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ S390OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kS390_Word64AtomicStoreUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kS390_Word64AtomicStoreUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kS390_Word64AtomicStoreUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kS390_Word64AtomicStoreUint64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
+ inputs);
+}
+
void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
@@ -2598,6 +2749,82 @@ void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 01034ffb73..7632d3cc8c 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -42,48 +42,36 @@ bool BasicBlock::LoopContains(BasicBlock* block) const {
block->rpo_number_ < loop_end_->rpo_number_;
}
-
void BasicBlock::AddSuccessor(BasicBlock* successor) {
successors_.push_back(successor);
}
-
void BasicBlock::AddPredecessor(BasicBlock* predecessor) {
predecessors_.push_back(predecessor);
}
-
void BasicBlock::AddNode(Node* node) { nodes_.push_back(node); }
-
-void BasicBlock::set_control(Control control) {
- control_ = control;
-}
-
+void BasicBlock::set_control(Control control) { control_ = control; }
void BasicBlock::set_control_input(Node* control_input) {
control_input_ = control_input;
}
-
void BasicBlock::set_loop_depth(int32_t loop_depth) {
loop_depth_ = loop_depth;
}
-
void BasicBlock::set_rpo_number(int32_t rpo_number) {
rpo_number_ = rpo_number;
}
-
void BasicBlock::set_loop_end(BasicBlock* loop_end) { loop_end_ = loop_end; }
-
void BasicBlock::set_loop_header(BasicBlock* loop_header) {
loop_header_ = loop_header;
}
-
// static
BasicBlock* BasicBlock::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
while (b1 != b2) {
@@ -141,12 +129,10 @@ std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
UNREACHABLE();
}
-
std::ostream& operator<<(std::ostream& os, const BasicBlock::Id& id) {
return os << id.ToSize();
}
-
Schedule::Schedule(Zone* zone, size_t node_count_hint)
: zone_(zone),
all_blocks_(zone),
@@ -157,7 +143,6 @@ Schedule::Schedule(Zone* zone, size_t node_count_hint)
nodeid_to_block_.reserve(node_count_hint);
}
-
BasicBlock* Schedule::block(Node* node) const {
if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
return nodeid_to_block_[node->id()];
@@ -165,25 +150,21 @@ BasicBlock* Schedule::block(Node* node) const {
return nullptr;
}
-
bool Schedule::IsScheduled(Node* node) {
if (node->id() >= nodeid_to_block_.size()) return false;
return nodeid_to_block_[node->id()] != nullptr;
}
-
BasicBlock* Schedule::GetBlockById(BasicBlock::Id block_id) {
DCHECK(block_id.ToSize() < all_blocks_.size());
return all_blocks_[block_id.ToSize()];
}
-
bool Schedule::SameBasicBlock(Node* a, Node* b) const {
BasicBlock* block = this->block(a);
return block != nullptr && block == this->block(b);
}
-
BasicBlock* Schedule::NewBasicBlock() {
BasicBlock* block = new (zone_)
BasicBlock(zone_, BasicBlock::Id::FromSize(all_blocks_.size()));
@@ -191,7 +172,6 @@ BasicBlock* Schedule::NewBasicBlock() {
return block;
}
-
void Schedule::PlanNode(BasicBlock* block, Node* node) {
if (FLAG_trace_turbo_scheduler) {
StdoutStream{} << "Planning #" << node->id() << ":"
@@ -202,7 +182,6 @@ void Schedule::PlanNode(BasicBlock* block, Node* node) {
SetBlockForNode(block, node);
}
-
void Schedule::AddNode(BasicBlock* block, Node* node) {
if (FLAG_trace_turbo_scheduler) {
StdoutStream{} << "Adding #" << node->id() << ":" << node->op()->mnemonic()
@@ -213,7 +192,6 @@ void Schedule::AddNode(BasicBlock* block, Node* node) {
SetBlockForNode(block, node);
}
-
void Schedule::AddGoto(BasicBlock* block, BasicBlock* succ) {
DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kGoto);
@@ -249,7 +227,6 @@ void Schedule::AddCall(BasicBlock* block, Node* call, BasicBlock* success_block,
SetControlInput(block, call);
}
-
void Schedule::AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
BasicBlock* fblock) {
DCHECK_EQ(BasicBlock::kNone, block->control());
@@ -260,7 +237,6 @@ void Schedule::AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
SetControlInput(block, branch);
}
-
void Schedule::AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
size_t succ_count) {
DCHECK_EQ(BasicBlock::kNone, block->control());
@@ -272,7 +248,6 @@ void Schedule::AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
SetControlInput(block, sw);
}
-
void Schedule::AddTailCall(BasicBlock* block, Node* input) {
DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kTailCall);
@@ -280,7 +255,6 @@ void Schedule::AddTailCall(BasicBlock* block, Node* input) {
if (block != end()) AddSuccessor(block, end());
}
-
void Schedule::AddReturn(BasicBlock* block, Node* input) {
DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kReturn);
@@ -288,7 +262,6 @@ void Schedule::AddReturn(BasicBlock* block, Node* input) {
if (block != end()) AddSuccessor(block, end());
}
-
void Schedule::AddDeoptimize(BasicBlock* block, Node* input) {
DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kDeoptimize);
@@ -296,7 +269,6 @@ void Schedule::AddDeoptimize(BasicBlock* block, Node* input) {
if (block != end()) AddSuccessor(block, end());
}
-
void Schedule::AddThrow(BasicBlock* block, Node* input) {
DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kThrow);
@@ -304,7 +276,6 @@ void Schedule::AddThrow(BasicBlock* block, Node* input) {
if (block != end()) AddSuccessor(block, end());
}
-
void Schedule::InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
BasicBlock* tblock, BasicBlock* fblock) {
DCHECK_NE(BasicBlock::kNone, block->control());
@@ -320,7 +291,6 @@ void Schedule::InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
SetControlInput(block, branch);
}
-
void Schedule::InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
BasicBlock** succ_blocks, size_t succ_count) {
DCHECK_NE(BasicBlock::kNone, block->control());
@@ -343,7 +313,7 @@ void Schedule::EnsureCFGWellFormedness() {
BasicBlockVector all_blocks_copy(all_blocks_);
// Insert missing split edge blocks.
- for (auto block : all_blocks_copy) {
+ for (BasicBlock* block : all_blocks_copy) {
if (block->PredecessorCount() > 1) {
if (block != end_) {
EnsureSplitEdgeForm(block);
@@ -351,24 +321,42 @@ void Schedule::EnsureCFGWellFormedness() {
if (block->deferred()) {
EnsureDeferredCodeSingleEntryPoint(block);
}
- } else {
- EliminateNoopPhiNodes(block);
}
}
+
+ EliminateRedundantPhiNodes();
}
-void Schedule::EliminateNoopPhiNodes(BasicBlock* block) {
- // Ensure that useless phi nodes in blocks that only have a single predecessor
+void Schedule::EliminateRedundantPhiNodes() {
+  // Ensure that useless phi nodes that have only a single input, identical
+  // inputs, or are self-referential loop phis
// -- which can happen with the automatically generated code in the CSA and
// torque -- are pruned.
- if (block->PredecessorCount() == 1) {
- for (size_t i = 0; i < block->NodeCount();) {
- Node* node = block->NodeAt(i);
- if (node->opcode() == IrOpcode::kPhi) {
- node->ReplaceUses(node->InputAt(0));
- block->RemoveNode(block->begin() + i);
- } else {
- ++i;
+  // Since we have structured control flow, this is enough to minimize the number
+ // of phi nodes.
+ bool reached_fixed_point = false;
+ while (!reached_fixed_point) {
+ reached_fixed_point = true;
+ for (BasicBlock* block : all_blocks_) {
+ int predecessor_count = static_cast<int>(block->PredecessorCount());
+ for (size_t node_pos = 0; node_pos < block->NodeCount(); ++node_pos) {
+ Node* node = block->NodeAt(node_pos);
+ if (node->opcode() == IrOpcode::kPhi) {
+ Node* first_input = node->InputAt(0);
+ bool inputs_equal = true;
+ for (int i = 1; i < predecessor_count; ++i) {
+ Node* input = node->InputAt(i);
+ if (input != first_input && input != node) {
+ inputs_equal = false;
+ break;
+ }
+ }
+ if (!inputs_equal) continue;
+ node->ReplaceUses(first_input);
+ block->RemoveNode(block->begin() + node_pos);
+ --node_pos;
+ reached_fixed_point = false;
+ }
}
}
}
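The pruning rule above boils down to a small predicate. Below is a minimal standalone sketch of that test, using a hypothetical ToyPhi struct rather than V8's Node/BasicBlock classes: a phi is redundant exactly when every input is either the first input or the phi itself. Because rewiring one phi's uses can collapse another phi's inputs, the pass above repeats the scan until it reaches a fixed point.

#include <cstddef>
#include <vector>

// Hypothetical stand-in for a phi node; not V8's Node class.
struct ToyPhi {
  std::vector<ToyPhi*> inputs;  // one value input per predecessor
};

// Returns the value all uses can be rewired to, or nullptr if the phi
// is not redundant. Mirrors the inputs_equal loop in the hunk above.
ToyPhi* RedundantPhiReplacement(ToyPhi* phi) {
  ToyPhi* first = phi->inputs[0];
  for (size_t i = 1; i < phi->inputs.size(); ++i) {
    ToyPhi* input = phi->inputs[i];
    if (input != first && input != phi) return nullptr;
  }
  return first;  // single input, identical inputs, or self-loop phi
}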
@@ -481,7 +469,6 @@ void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
succ->AddPredecessor(block);
}
-
void Schedule::MoveSuccessors(BasicBlock* from, BasicBlock* to) {
for (BasicBlock* const successor : from->successors()) {
to->AddSuccessor(successor);
@@ -492,13 +479,11 @@ void Schedule::MoveSuccessors(BasicBlock* from, BasicBlock* to) {
from->ClearSuccessors();
}
-
void Schedule::SetControlInput(BasicBlock* block, Node* node) {
block->set_control_input(node);
SetBlockForNode(block, node);
}
-
void Schedule::SetBlockForNode(BasicBlock* block, Node* node) {
if (node->id() >= nodeid_to_block_.size()) {
nodeid_to_block_.resize(node->id() + 1);
@@ -506,7 +491,6 @@ void Schedule::SetBlockForNode(BasicBlock* block, Node* node) {
nodeid_to_block_[node->id()] = block;
}
-
std::ostream& operator<<(std::ostream& os, const Schedule& s) {
for (BasicBlock* block :
((s.RpoBlockCount() == 0) ? *s.all_blocks() : *s.rpo_order())) {
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index 74e51c5341..8d2079787e 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -166,12 +166,12 @@ class V8_EXPORT_PRIVATE BasicBlock final
BasicBlock* loop_header_; // Pointer to dominating loop header basic block,
// nullptr if none. For loop headers, this points to
// enclosing loop header.
- BasicBlock* loop_end_; // end of the loop, if this block is a loop header.
- int32_t loop_depth_; // loop nesting, 0 is top-level
+ BasicBlock* loop_end_; // end of the loop, if this block is a loop header.
+ int32_t loop_depth_; // loop nesting, 0 is top-level
- Control control_; // Control at the end of the block.
- Node* control_input_; // Input value for control.
- NodeVector nodes_; // nodes of this block in forward order.
+ Control control_; // Control at the end of the block.
+ Node* control_input_; // Input value for control.
+ NodeVector nodes_; // nodes of this block in forward order.
BasicBlockVector successors_;
BasicBlockVector predecessors_;
@@ -187,7 +187,6 @@ std::ostream& operator<<(std::ostream&, const BasicBlock&);
std::ostream& operator<<(std::ostream&, const BasicBlock::Control&);
std::ostream& operator<<(std::ostream&, const BasicBlock::Id&);
-
// A schedule represents the result of assigning nodes to basic blocks
// and ordering them within basic blocks. Prior to computing a schedule,
// a graph has no notion of control flow ordering other than that induced
@@ -272,12 +271,13 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
friend class BasicBlockInstrumentor;
friend class RawMachineAssembler;
- // Ensure properties of the CFG assumed by further stages.
+ // For CSA/Torque: Ensure properties of the CFG assumed by further stages.
void EnsureCFGWellFormedness();
- // Eliminates no-op phi nodes added for blocks that only have a single
- // predecessor. This ensures the property required for SSA deconstruction that
- // the target block of a control flow split has no phis.
- void EliminateNoopPhiNodes(BasicBlock* block);
+ // For CSA/Torque: Eliminates unnecessary phi nodes, including phis with a
+ // single input. The latter is necessary to ensure the property required for
+ // SSA deconstruction that the target block of a control flow split has no
+ // phis.
+ void EliminateRedundantPhiNodes();
// Ensure split-edge form for a hand-assembled schedule.
void EnsureSplitEdgeForm(BasicBlock* block);
// Ensure entry into a deferred block happens from a single hot block.
@@ -294,9 +294,9 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
void SetBlockForNode(BasicBlock* block, Node* node);
Zone* zone_;
- BasicBlockVector all_blocks_; // All basic blocks in the schedule.
- BasicBlockVector nodeid_to_block_; // Map from node to containing block.
- BasicBlockVector rpo_order_; // Reverse-post-order block list.
+ BasicBlockVector all_blocks_; // All basic blocks in the schedule.
+ BasicBlockVector nodeid_to_block_; // Map from node to containing block.
+ BasicBlockVector rpo_order_; // Reverse-post-order block list.
BasicBlock* start_;
BasicBlock* end_;
diff --git a/deps/v8/src/compiler/select-lowering.cc b/deps/v8/src/compiler/select-lowering.cc
index b1a230962f..4d5bb99053 100644
--- a/deps/v8/src/compiler/select-lowering.cc
+++ b/deps/v8/src/compiler/select-lowering.cc
@@ -17,7 +17,7 @@ namespace compiler {
SelectLowering::SelectLowering(Graph* graph, CommonOperatorBuilder* common)
: common_(common), graph_(graph) {}
-SelectLowering::~SelectLowering() {}
+SelectLowering::~SelectLowering() = default;
Reduction SelectLowering::Reduce(Node* node) {
diff --git a/deps/v8/src/compiler/select-lowering.h b/deps/v8/src/compiler/select-lowering.h
index b66f69f986..d8c12d4d54 100644
--- a/deps/v8/src/compiler/select-lowering.h
+++ b/deps/v8/src/compiler/select-lowering.h
@@ -20,7 +20,7 @@ class Graph;
class SelectLowering final : public Reducer {
public:
SelectLowering(Graph* graph, CommonOperatorBuilder* common);
- ~SelectLowering();
+ ~SelectLowering() override;
const char* reducer_name() const override { return "SelectLowering"; }
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 882e3b9d6e..32de83061d 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -38,7 +38,8 @@ SimdScalarLowering::SimdScalarLowering(
DCHECK_NOT_NULL(graph());
DCHECK_NOT_NULL(graph()->end());
replacements_ = zone()->NewArray<Replacement>(graph()->NodeCount());
- memset(replacements_, 0, sizeof(Replacement) * graph()->NodeCount());
+ memset(static_cast<void*>(replacements_), 0,
+ sizeof(Replacement) * graph()->NodeCount());
}
void SimdScalarLowering::LowerGraph() {
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 2d82fc99bc..739f81f90d 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -107,8 +107,9 @@ UseInfo CheckedUseInfoAsWord32FromHint(
UNREACHABLE();
}
-UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint,
- const VectorSlotPair& feedback) {
+UseInfo CheckedUseInfoAsFloat64FromHint(
+ NumberOperationHint hint, const VectorSlotPair& feedback,
+ IdentifyZeros identify_zeros = kDistinguishZeros) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
@@ -117,9 +118,9 @@ UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint,
UNREACHABLE();
break;
case NumberOperationHint::kNumber:
- return UseInfo::CheckedNumberAsFloat64(feedback);
+ return UseInfo::CheckedNumberAsFloat64(identify_zeros, feedback);
case NumberOperationHint::kNumberOrOddball:
- return UseInfo::CheckedNumberOrOddballAsFloat64(feedback);
+ return UseInfo::CheckedNumberOrOddballAsFloat64(identify_zeros, feedback);
}
UNREACHABLE();
}
@@ -140,7 +141,7 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kWord32:
return UseInfo::TruncatingWord32();
case MachineRepresentation::kWord64:
- return UseInfo::TruncatingWord64();
+ return UseInfo::Word64();
case MachineRepresentation::kBit:
return UseInfo::Bool();
case MachineRepresentation::kSimd128:
@@ -151,11 +152,11 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
}
UseInfo UseInfoForBasePointer(const FieldAccess& access) {
- return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
+ return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::Word();
}
UseInfo UseInfoForBasePointer(const ElementAccess& access) {
- return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
+ return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::Word();
}
void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
@@ -186,32 +187,6 @@ void ChangeToPureOp(Node* node, const Operator* new_op) {
NodeProperties::ChangeOp(node, new_op);
}
-#ifdef DEBUG
-// Helpers for monotonicity checking.
-class InputUseInfos {
- public:
- explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}
-
- void SetAndCheckInput(Node* node, int index, UseInfo use_info) {
- if (input_use_infos_.empty()) {
- input_use_infos_.resize(node->InputCount(), UseInfo::None());
- }
- // Check that the new use informatin is a super-type of the old
- // one.
- DCHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
- input_use_infos_[index] = use_info;
- }
-
- private:
- ZoneVector<UseInfo> input_use_infos_;
-
- static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
- return use1.truncation().IsLessGeneralThan(use2.truncation());
- }
-};
-
-#endif // DEBUG
-
bool CanOverflowSigned32(const Operator* op, Type left, Type right,
Zone* type_zone) {
// We assume the inputs are checked Signed32 (or known statically
@@ -241,6 +216,32 @@ bool IsSomePositiveOrderedNumber(Type type) {
} // namespace
+#ifdef DEBUG
+// Helpers for monotonicity checking.
+class InputUseInfos {
+ public:
+ explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}
+
+ void SetAndCheckInput(Node* node, int index, UseInfo use_info) {
+ if (input_use_infos_.empty()) {
+ input_use_infos_.resize(node->InputCount(), UseInfo::None());
+ }
+  // Check that the new use information is a super-type of the old
+ // one.
+ DCHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
+ input_use_infos_[index] = use_info;
+ }
+
+ private:
+ ZoneVector<UseInfo> input_use_infos_;
+
+ static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
+ return use1.truncation().IsLessGeneralThan(use2.truncation());
+ }
+};
+
+#endif // DEBUG
+
class RepresentationSelector {
public:
// Information for each node tracked during the fixpoint.
@@ -306,7 +307,7 @@ class RepresentationSelector {
source_positions_(source_positions),
node_origins_(node_origins),
type_cache_(TypeCache::Get()),
- op_typer_(jsgraph->isolate(), js_heap_broker, graph_zone()) {
+ op_typer_(js_heap_broker, graph_zone()) {
}
// Forward propagation of types from type feedback.
@@ -430,64 +431,71 @@ class RepresentationSelector {
}
}
+ // We preload these values here to avoid increasing the binary size too
+ // much, which happens if we inline the calls into the macros below.
+ Type input0_type;
+ if (node->InputCount() > 0) input0_type = FeedbackTypeOf(node->InputAt(0));
+ Type input1_type;
+ if (node->InputCount() > 1) input1_type = FeedbackTypeOf(node->InputAt(1));
+
switch (node->opcode()) {
-#define DECLARE_CASE(Name) \
- case IrOpcode::k##Name: { \
- new_type = op_typer_.Name(FeedbackTypeOf(node->InputAt(0)), \
- FeedbackTypeOf(node->InputAt(1))); \
- break; \
+#define DECLARE_CASE(Name) \
+ case IrOpcode::k##Name: { \
+ new_type = op_typer_.Name(input0_type, input1_type); \
+ break; \
}
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
DECLARE_CASE(SameValue)
#undef DECLARE_CASE
-#define DECLARE_CASE(Name) \
- case IrOpcode::k##Name: { \
- new_type = \
- Type::Intersect(op_typer_.Name(FeedbackTypeOf(node->InputAt(0)), \
- FeedbackTypeOf(node->InputAt(1))), \
- info->restriction_type(), graph_zone()); \
- break; \
+#define DECLARE_CASE(Name) \
+ case IrOpcode::k##Name: { \
+ new_type = Type::Intersect(op_typer_.Name(input0_type, input1_type), \
+ info->restriction_type(), graph_zone()); \
+ break; \
}
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
-#define DECLARE_CASE(Name) \
- case IrOpcode::k##Name: { \
- new_type = op_typer_.Name(FeedbackTypeOf(node->InputAt(0))); \
- break; \
+#define DECLARE_CASE(Name) \
+ case IrOpcode::k##Name: { \
+ new_type = op_typer_.Name(input0_type); \
+ break; \
}
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
-#define DECLARE_CASE(Name) \
- case IrOpcode::k##Name: { \
- new_type = \
- Type::Intersect(op_typer_.Name(FeedbackTypeOf(node->InputAt(0))), \
- info->restriction_type(), graph_zone()); \
- break; \
+#define DECLARE_CASE(Name) \
+ case IrOpcode::k##Name: { \
+ new_type = Type::Intersect(op_typer_.Name(input0_type), \
+ info->restriction_type(), graph_zone()); \
+ break; \
}
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
case IrOpcode::kConvertReceiver:
- new_type = op_typer_.ConvertReceiver(FeedbackTypeOf(node->InputAt(0)));
+ new_type = op_typer_.ConvertReceiver(input0_type);
break;
case IrOpcode::kPlainPrimitiveToNumber:
- new_type = op_typer_.ToNumber(FeedbackTypeOf(node->InputAt(0)));
+ new_type = op_typer_.ToNumber(input0_type);
+ break;
+
+ case IrOpcode::kCheckBounds:
+ new_type =
+ Type::Intersect(op_typer_.CheckBounds(input0_type, input1_type),
+ info->restriction_type(), graph_zone());
break;
case IrOpcode::kCheckFloat64Hole:
- new_type = Type::Intersect(
- op_typer_.CheckFloat64Hole(FeedbackTypeOf(node->InputAt(0))),
- info->restriction_type(), graph_zone());
+ new_type = Type::Intersect(op_typer_.CheckFloat64Hole(input0_type),
+ info->restriction_type(), graph_zone());
break;
case IrOpcode::kCheckNumber:
- new_type = Type::Intersect(
- op_typer_.CheckNumber(FeedbackTypeOf(node->InputAt(0))),
- info->restriction_type(), graph_zone());
+ new_type = Type::Intersect(op_typer_.CheckNumber(input0_type),
+ info->restriction_type(), graph_zone());
break;
case IrOpcode::kPhi: {
@@ -913,6 +921,9 @@ class RepresentationSelector {
VisitBinop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
}
+ void VisitInt64Binop(Node* node) {
+ VisitBinop(node, UseInfo::Word64(), MachineRepresentation::kWord64);
+ }
void VisitWord32TruncatingBinop(Node* node) {
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
@@ -1067,16 +1078,15 @@ class RepresentationSelector {
if (type.IsNone()) {
return MachineType::None();
}
- // TODO(turbofan): Special treatment for ExternalPointer here,
- // to avoid incompatible truncations. We really need a story
- // for the JSFunction::entry field.
- if (type.Is(Type::ExternalPointer())) {
- return MachineType::Pointer();
- }
// Do not distinguish between various Tagged variations.
if (IsAnyTagged(rep)) {
return MachineType::AnyTagged();
}
+ // Word64 representation is only valid for safe integer values.
+ if (rep == MachineRepresentation::kWord64) {
+ DCHECK(type.Is(TypeCache::Get().kSafeInteger));
+ return MachineType(rep, MachineSemantic::kInt64);
+ }
MachineType machine_type(rep, DeoptValueSemanticOf(type));
DCHECK(machine_type.representation() != MachineRepresentation::kWord32 ||
machine_type.semantic() == MachineSemantic::kInt32 ||
@@ -1174,6 +1184,10 @@ class RepresentationSelector {
return changer_->Int32OverflowOperatorFor(node->opcode());
}
+ const Operator* Int64Op(Node* node) {
+ return changer_->Int64OperatorFor(node->opcode());
+ }
+
const Operator* Uint32Op(Node* node) {
return changer_->Uint32OperatorFor(node->opcode());
}
@@ -1205,7 +1219,7 @@ class RepresentationSelector {
return kNoWriteBarrier;
}
if (value_type.IsHeapConstant()) {
- Heap::RootListIndex root_index;
+ RootIndex root_index;
Heap* heap = jsgraph_->isolate()->heap();
if (heap->IsRootHandle(value_type.AsHeapConstant()->Value(),
&root_index)) {
@@ -1366,7 +1380,9 @@ class RepresentationSelector {
}
// default case => Float64Add/Sub
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
+ VisitBinop(node,
+ UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
+ VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) {
ChangeToPureOp(node, Float64Op(node));
@@ -1424,17 +1440,24 @@ class RepresentationSelector {
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
// If the result is truncated, we only need to check the inputs.
+ // For the left hand side we just propagate the identify zeros
+ // mode of the {truncation}; and for modulus the sign of the
+ // right hand side doesn't matter anyways, so in particular there's
+ // no observable difference between a 0 and a -0 then.
+ UseInfo const lhs_use = CheckedUseInfoAsWord32FromHint(
+ hint, VectorSlotPair(), truncation.identify_zeros());
+ UseInfo const rhs_use = CheckedUseInfoAsWord32FromHint(
+ hint, VectorSlotPair(), kIdentifyZeros);
if (truncation.IsUsedAsWord32()) {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32);
+ VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
} else if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN())) {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32, Type::Unsigned32());
+ VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kWord32,
+ Type::Unsigned32());
if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
} else {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32, Type::Signed32());
+ VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kWord32,
+ Type::Signed32());
if (lower()) ChangeToInt32OverflowOp(node);
}
return;
@@ -1444,10 +1467,7 @@ class RepresentationSelector {
TypeOf(node->InputAt(1)).Is(Type::Unsigned32()) &&
(truncation.IsUsedAsWord32() ||
NodeProperties::GetType(node).Is(Type::Unsigned32()))) {
- // We can only promise Float64 truncation here, as the decision is
- // based on the feedback types of the inputs.
- VisitBinop(node,
- UseInfo(MachineRepresentation::kWord32, Truncation::Float64()),
+ VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32, Type::Number());
if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
return;
@@ -1456,21 +1476,51 @@ class RepresentationSelector {
TypeOf(node->InputAt(1)).Is(Type::Signed32()) &&
(truncation.IsUsedAsWord32() ||
NodeProperties::GetType(node).Is(Type::Signed32()))) {
- // We can only promise Float64 truncation here, as the decision is
- // based on the feedback types of the inputs.
- VisitBinop(node,
- UseInfo(MachineRepresentation::kWord32, Truncation::Float64()),
+ VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32, Type::Number());
if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
return;
}
+
// default case => Float64Mod
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
- MachineRepresentation::kFloat64, Type::Number());
+ // For the left hand side we just propagate the identify zeros
+ // mode of the {truncation}; and for modulus the sign of the
+ // right hand side doesn't matter anyways, so in particular there's
+ // no observable difference between a 0 and a -0 then.
+ UseInfo const lhs_use = UseInfo::CheckedNumberOrOddballAsFloat64(
+ truncation.identify_zeros(), VectorSlotPair());
+ UseInfo const rhs_use = UseInfo::CheckedNumberOrOddballAsFloat64(
+ kIdentifyZeros, VectorSlotPair());
+ VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kFloat64,
+ Type::Number());
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
}
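As a quick sanity check of the comment above, C++'s std::fmod follows the same sign convention as JavaScript's % operator, so it can illustrate why only the left-hand side's zero sign is observable through modulus:

#include <cassert>
#include <cmath>

int main() {
  assert(std::signbit(std::fmod(-5.0, 2.0)));    // result is -1: sign of lhs
  assert(!std::signbit(std::fmod(5.0, -2.0)));   // result is +1: rhs sign ignored
  assert(std::signbit(std::fmod(-0.0, 2.0)));    // lhs zero sign survives, though
  // A 0 or -0 divisor yields NaN either way, so the two are indistinguishable.
  assert(std::isnan(std::fmod(5.0, 0.0)) && std::isnan(std::fmod(5.0, -0.0)));
  return 0;
}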
+ void InsertUnreachableIfNecessary(Node* node) {
+ DCHECK(lower());
+    // If the node is effectful and produces an impossible value, then we
+    // insert an Unreachable node after it.
+ if (node->op()->ValueOutputCount() > 0 &&
+ node->op()->EffectOutputCount() > 0 &&
+ node->opcode() != IrOpcode::kUnreachable && TypeOf(node).IsNone()) {
+ Node* control = node->op()->ControlOutputCount() > 0
+ ? node
+ : NodeProperties::GetControlInput(node, 0);
+
+ Node* unreachable =
+ graph()->NewNode(common()->Unreachable(), node, control);
+
+      // Insert the unreachable node and replace all the effect uses of the {node}
+ // with the new unreachable node.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge) && edge.from() != unreachable) {
+ edge.UpdateTo(unreachable);
+ }
+ }
+ }
+ }
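The trigger condition for this splice can be read as a standalone predicate. The sketch below restates it over a hypothetical OpInfo record (not a real V8 type): only effectful nodes that claim to produce a value of the impossible type None, and are not already Unreachable, get the treatment.

#include <cassert>

// Hypothetical summary of the fields consulted above; not a V8 type.
struct OpInfo {
  int value_outputs;
  int effect_outputs;
  bool is_unreachable_op;
  bool type_is_none;  // TypeOf(node).IsNone()
};

bool NeedsUnreachable(const OpInfo& op) {
  return op.value_outputs > 0 && op.effect_outputs > 0 &&
         !op.is_unreachable_op && op.type_is_none;
}

int main() {
  assert(NeedsUnreachable({1, 1, false, true}));    // impossible effectful value
  assert(!NeedsUnreachable({1, 0, false, true}));   // pure: no effect chain to cut
  assert(!NeedsUnreachable({1, 1, false, false}));  // possible value: leave alone
  return 0;
}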
+
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
@@ -1485,9 +1535,12 @@ class RepresentationSelector {
// a sane state still) and we would afterwards replace that use with
// Dead as well.
if (node->op()->ValueInputCount() > 0 &&
- node->op()->HasProperty(Operator::kPure)) {
- if (truncation.IsUnused()) return VisitUnused(node);
+ node->op()->HasProperty(Operator::kPure) && truncation.IsUnused()) {
+ return VisitUnused(node);
}
+
+ if (lower()) InsertUnreachableIfNecessary(node);
+
switch (node->opcode()) {
//------------------------------------------------------------------
// Common operators.
@@ -1521,6 +1574,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kHeapConstant:
+ case IrOpcode::kDelayedStringConstant:
return VisitLeaf(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kPointerConstant: {
VisitLeaf(node, MachineType::PointerRepresentation());
@@ -1609,9 +1663,13 @@ class RepresentationSelector {
case IrOpcode::kNumberEqual: {
Type const lhs_type = TypeOf(node->InputAt(0));
Type const rhs_type = TypeOf(node->InputAt(1));
- // Number comparisons reduce to integer comparisons for integer inputs.
- if ((lhs_type.Is(Type::Unsigned32()) &&
- rhs_type.Is(Type::Unsigned32())) ||
+ // Regular number comparisons in JavaScript generally identify zeros,
+ // so we always pass kIdentifyZeros for the inputs, and in addition
+ // we can truncate -0 to 0 for otherwise Unsigned32 or Signed32 inputs.
+ // For equality we also handle the case that one side is non-zero, in
+        // which case we can truncate NaN to 0 on the other side.
+ if ((lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
+ rhs_type.Is(Type::Unsigned32OrMinusZero())) ||
(lhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
rhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
OneInputCannotBe(node, type_cache_.kZeroish))) {
@@ -1621,7 +1679,8 @@ class RepresentationSelector {
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
return;
}
- if ((lhs_type.Is(Type::Signed32()) && rhs_type.Is(Type::Signed32())) ||
+ if ((lhs_type.Is(Type::Signed32OrMinusZero()) &&
+ rhs_type.Is(Type::Signed32OrMinusZero())) ||
(lhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) &&
rhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) &&
OneInputCannotBe(node, type_cache_.kZeroish))) {
@@ -1632,29 +1691,33 @@ class RepresentationSelector {
return;
}
// => Float64Cmp
- VisitBinop(node, UseInfo::TruncatingFloat64(),
+ VisitBinop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kBit);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberLessThan:
case IrOpcode::kNumberLessThanOrEqual: {
- // Number comparisons reduce to integer comparisons for integer inputs.
- if (TypeOf(node->InputAt(0)).Is(Type::Unsigned32()) &&
- TypeOf(node->InputAt(1)).Is(Type::Unsigned32())) {
+ Type const lhs_type = TypeOf(node->InputAt(0));
+ Type const rhs_type = TypeOf(node->InputAt(1));
+ // Regular number comparisons in JavaScript generally identify zeros,
+ // so we always pass kIdentifyZeros for the inputs, and in addition
+ // we can truncate -0 to 0 for otherwise Unsigned32 or Signed32 inputs.
+ if (lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
+ rhs_type.Is(Type::Unsigned32OrMinusZero())) {
// => unsigned Int32Cmp
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
- } else if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) &&
- TypeOf(node->InputAt(1)).Is(Type::Signed32())) {
+ } else if (lhs_type.Is(Type::Signed32OrMinusZero()) &&
+ rhs_type.Is(Type::Signed32OrMinusZero())) {
// => signed Int32Cmp
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
} else {
// => Float64Cmp
- VisitBinop(node, UseInfo::TruncatingFloat64(),
+ VisitBinop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kBit);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
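Both comparison cases lean on two IEEE-754 facts that C++ doubles share with JavaScript numbers; a minimal check:

#include <cassert>
#include <limits>

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  assert(0.0 == -0.0);     // equality identifies zeros
  assert(!(-0.0 < 0.0));   // ordering identifies them too
  assert(!(nan == 42.0));  // NaN compares false against a non-zero value,
  assert(!(0.0 == 42.0));  // and so does 0, so NaN -> 0 is safe on that side
  return 0;
}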
@@ -1672,16 +1735,20 @@ class RepresentationSelector {
case IrOpcode::kSpeculativeNumberLessThan:
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
case IrOpcode::kSpeculativeNumberEqual: {
- // Number comparisons reduce to integer comparisons for integer inputs.
- if (TypeOf(node->InputAt(0)).Is(Type::Unsigned32()) &&
- TypeOf(node->InputAt(1)).Is(Type::Unsigned32())) {
+ Type const lhs_type = TypeOf(node->InputAt(0));
+ Type const rhs_type = TypeOf(node->InputAt(1));
+ // Regular number comparisons in JavaScript generally identify zeros,
+ // so we always pass kIdentifyZeros for the inputs, and in addition
+ // we can truncate -0 to 0 for otherwise Unsigned32 or Signed32 inputs.
+ if (lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
+ rhs_type.Is(Type::Unsigned32OrMinusZero())) {
// => unsigned Int32Cmp
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
if (lower()) ChangeToPureOp(node, Uint32Op(node));
return;
- } else if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) &&
- TypeOf(node->InputAt(1)).Is(Type::Signed32())) {
+ } else if (lhs_type.Is(Type::Signed32OrMinusZero()) &&
+ rhs_type.Is(Type::Signed32OrMinusZero())) {
// => signed Int32Cmp
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
@@ -1694,7 +1761,9 @@ class RepresentationSelector {
case NumberOperationHint::kSigned32:
case NumberOperationHint::kSignedSmall:
if (propagate()) {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ VisitBinop(node,
+ CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(),
+ kIdentifyZeros),
MachineRepresentation::kBit);
} else if (retype()) {
SetOutput(node, MachineRepresentation::kBit, Type::Any());
@@ -1704,15 +1773,17 @@ class RepresentationSelector {
Node* rhs = node->InputAt(1);
if (IsNodeRepresentationTagged(lhs) &&
IsNodeRepresentationTagged(rhs)) {
- VisitBinop(
- node,
- UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair()),
- MachineRepresentation::kBit);
+ VisitBinop(node,
+ UseInfo::CheckedSignedSmallAsTaggedSigned(
+ VectorSlotPair(), kIdentifyZeros),
+ MachineRepresentation::kBit);
ChangeToPureOp(
node, changer_->TaggedSignedOperatorFor(node->opcode()));
} else {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ VisitBinop(node,
+ CheckedUseInfoAsWord32FromHint(
+ hint, VectorSlotPair(), kIdentifyZeros),
MachineRepresentation::kBit);
ChangeToPureOp(node, Int32Op(node));
}
@@ -1729,7 +1800,8 @@ class RepresentationSelector {
V8_FALLTHROUGH;
case NumberOperationHint::kNumber:
VisitBinop(node,
- CheckedUseInfoAsFloat64FromHint(hint, VectorSlotPair()),
+ CheckedUseInfoAsFloat64FromHint(hint, VectorSlotPair(),
+ kIdentifyZeros),
MachineRepresentation::kBit);
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
@@ -1740,13 +1812,22 @@ class RepresentationSelector {
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract: {
- if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
- (GetUpperBound(node).Is(Type::Signed32()) ||
- GetUpperBound(node).Is(Type::Unsigned32()) ||
+ if (TypeOf(node->InputAt(0))
+ .Is(type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+ TypeOf(node->InputAt(1))
+ .Is(type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+ (TypeOf(node).Is(Type::Signed32()) ||
+ TypeOf(node).Is(Type::Unsigned32()) ||
truncation.IsUsedAsWord32())) {
// => Int32Add/Sub
VisitWord32TruncatingBinop(node);
if (lower()) ChangeToPureOp(node, Int32Op(node));
+ } else if (jsgraph_->machine()->Is64() &&
+ BothInputsAre(node, type_cache_.kSafeInteger) &&
+ GetUpperBound(node).Is(type_cache_.kSafeInteger)) {
+ // => Int64Add/Sub
+ VisitInt64Binop(node);
+ if (lower()) ChangeToPureOp(node, Int64Op(node));
} else {
// => Float64Add/Sub
VisitFloat64Binop(node);
@@ -1803,18 +1884,19 @@ class RepresentationSelector {
// Checked float64 x float64 => float64
VisitBinop(node,
- UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
+ UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
+ VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberMultiply: {
- if (BothInputsAre(node, Type::Integral32()) &&
- (NodeProperties::GetType(node).Is(Type::Signed32()) ||
- NodeProperties::GetType(node).Is(Type::Unsigned32()) ||
+ if (TypeOf(node->InputAt(0)).Is(Type::Integral32()) &&
+ TypeOf(node->InputAt(1)).Is(Type::Integral32()) &&
+ (TypeOf(node).Is(Type::Signed32()) ||
+ TypeOf(node).Is(Type::Unsigned32()) ||
(truncation.IsUsedAsWord32() &&
- NodeProperties::GetType(node).Is(
- type_cache_.kSafeIntegerOrMinusZero)))) {
+ TypeOf(node).Is(type_cache_.kSafeIntegerOrMinusZero)))) {
// Multiply reduces to Int32Mul if the inputs are integers, and
// (a) the output is either known to be Signed32, or
// (b) the output is known to be Unsigned32, or
@@ -1898,31 +1980,30 @@ class RepresentationSelector {
// default case => Float64Div
VisitBinop(node,
- UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
+ UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
+ VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberDivide: {
- if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) {
+ if (TypeOf(node->InputAt(0)).Is(Type::Unsigned32()) &&
+ TypeOf(node->InputAt(1)).Is(Type::Unsigned32()) &&
+ (truncation.IsUsedAsWord32() ||
+ TypeOf(node).Is(Type::Unsigned32()))) {
// => unsigned Uint32Div
VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
return;
}
- if (BothInputsAreSigned32(node)) {
- if (NodeProperties::GetType(node).Is(Type::Signed32())) {
- // => signed Int32Div
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Div(node));
- return;
- }
- if (truncation.IsUsedAsWord32()) {
- // => signed Int32Div
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Div(node));
- return;
- }
+ if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) &&
+ TypeOf(node->InputAt(1)).Is(Type::Signed32()) &&
+ (truncation.IsUsedAsWord32() ||
+ TypeOf(node).Is(Type::Signed32()))) {
+ // => signed Int32Div
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ return;
}
// Number x Number => Float64Div
VisitFloat64Binop(node);
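The word32-truncation reasoning in this case can be checked directly: JavaScript's 7 / 2 is 3.5, but a use that truncates to word32 (for example x | 0) observes only ToInt32(3.5) == 3, which is what Int32Div computes. C++ casts and integer division model both steps on this range:

#include <cassert>
#include <cstdint>

int main() {
  double js_quotient = 7.0 / 2.0;  // NumberDivide result: 3.5
  // ToInt32 truncates toward zero for values in int32 range.
  assert(static_cast<int32_t>(js_quotient) == 7 / 2);  // Int32Div gives 3
  assert(static_cast<int32_t>(-7.0 / 2.0) == -7 / 2);  // -3 either way
  return 0;
}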
@@ -1932,48 +2013,36 @@ class RepresentationSelector {
case IrOpcode::kSpeculativeNumberModulus:
return VisitSpeculativeNumberModulus(node, truncation, lowering);
case IrOpcode::kNumberModulus: {
- if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
+ Type const lhs_type = TypeOf(node->InputAt(0));
+ Type const rhs_type = TypeOf(node->InputAt(1));
+ if ((lhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
+ rhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN())) &&
(truncation.IsUsedAsWord32() ||
- NodeProperties::GetType(node).Is(Type::Unsigned32()))) {
+ TypeOf(node).Is(Type::Unsigned32()))) {
// => unsigned Uint32Mod
VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
return;
}
- if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
- (truncation.IsUsedAsWord32() ||
- NodeProperties::GetType(node).Is(Type::Signed32()))) {
+ if ((lhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) &&
+ rhs_type.Is(Type::Signed32OrMinusZeroOrNaN())) &&
+ (truncation.IsUsedAsWord32() || TypeOf(node).Is(Type::Signed32()) ||
+ (truncation.IdentifiesZeroAndMinusZero() &&
+ TypeOf(node).Is(Type::Signed32OrMinusZero())))) {
// => signed Int32Mod
VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
return;
}
- if (TypeOf(node->InputAt(0)).Is(Type::Unsigned32()) &&
- TypeOf(node->InputAt(1)).Is(Type::Unsigned32()) &&
- (truncation.IsUsedAsWord32() ||
- NodeProperties::GetType(node).Is(Type::Unsigned32()))) {
- // We can only promise Float64 truncation here, as the decision is
- // based on the feedback types of the inputs.
- VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
- Truncation::Float64()),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
- return;
- }
- if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) &&
- TypeOf(node->InputAt(1)).Is(Type::Signed32()) &&
- (truncation.IsUsedAsWord32() ||
- NodeProperties::GetType(node).Is(Type::Signed32()))) {
- // We can only promise Float64 truncation here, as the decision is
- // based on the feedback types of the inputs.
- VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
- Truncation::Float64()),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- return;
- }
- // default case => Float64Mod
- VisitFloat64Binop(node);
+ // => Float64Mod
+ // For the left hand side we just propagate the identify zeros
+ // mode of the {truncation}; and for modulus the sign of the
+ // right hand side doesn't matter anyways, so in particular there's
+ // no observable difference between a 0 and a -0 then.
+ UseInfo const lhs_use =
+ UseInfo::TruncatingFloat64(truncation.identify_zeros());
+ UseInfo const rhs_use = UseInfo::TruncatingFloat64(kIdentifyZeros);
+ VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kFloat64);
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
}
@@ -2096,21 +2165,24 @@ class RepresentationSelector {
return;
}
case IrOpcode::kNumberAbs: {
- if (TypeOf(node->InputAt(0)).Is(Type::Unsigned32())) {
+ // NumberAbs maps both 0 and -0 to 0, so we can generally
+ // pass the kIdentifyZeros truncation to its input, and
+ // choose to ignore minus zero in all cases.
+ Type const input_type = TypeOf(node->InputAt(0));
+ if (input_type.Is(Type::Unsigned32OrMinusZero())) {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if (TypeOf(node->InputAt(0)).Is(Type::Signed32())) {
+ } else if (input_type.Is(Type::Signed32OrMinusZero())) {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, lowering->Int32Abs(node));
- } else if (TypeOf(node->InputAt(0))
- .Is(type_cache_.kPositiveIntegerOrMinusZeroOrNaN)) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
+ } else if (input_type.Is(type_cache_.kPositiveIntegerOrNaN)) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
+ VisitUnop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kFloat64);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
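A one-line justification for the kIdentifyZeros choices above: fabs folds both zeros to +0, so NumberAbs can never expose which zero it received.

#include <cassert>
#include <cmath>

int main() {
  assert(!std::signbit(std::fabs(-0.0)));  // |-0| is +0
  assert(!std::signbit(std::fabs(0.0)));   // |0| is +0 as well
  return 0;
}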
@@ -2138,32 +2210,45 @@ class RepresentationSelector {
// It is safe to use the feedback types for left and right hand side
// here, since we can only narrow those types and thus we can only
// promise a more specific truncation.
+ // For NumberMax we generally propagate whether the truncation
+ // identifies zeros to the inputs, and we choose to ignore minus
+ // zero in those cases.
Type const lhs_type = TypeOf(node->InputAt(0));
Type const rhs_type = TypeOf(node->InputAt(1));
- if (lhs_type.Is(Type::Unsigned32()) &&
- rhs_type.Is(Type::Unsigned32())) {
+ if ((lhs_type.Is(Type::Unsigned32()) &&
+ rhs_type.Is(Type::Unsigned32())) ||
+ (lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
+ rhs_type.Is(Type::Unsigned32OrMinusZero()) &&
+ truncation.IdentifiesZeroAndMinusZero())) {
VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMax(node, lowering->machine()->Uint32LessThan(),
MachineRepresentation::kWord32);
}
- } else if (lhs_type.Is(Type::Signed32()) &&
- rhs_type.Is(Type::Signed32())) {
+ } else if ((lhs_type.Is(Type::Signed32()) &&
+ rhs_type.Is(Type::Signed32())) ||
+ (lhs_type.Is(Type::Signed32OrMinusZero()) &&
+ rhs_type.Is(Type::Signed32OrMinusZero()) &&
+ truncation.IdentifiesZeroAndMinusZero())) {
VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMax(node, lowering->machine()->Int32LessThan(),
MachineRepresentation::kWord32);
}
- } else if (lhs_type.Is(Type::PlainNumber()) &&
- rhs_type.Is(Type::PlainNumber())) {
- VisitFloat64Binop(node);
+ } else {
+ VisitBinop(node,
+ UseInfo::TruncatingFloat64(truncation.identify_zeros()),
+ MachineRepresentation::kFloat64);
if (lower()) {
- lowering->DoMax(node, lowering->machine()->Float64LessThan(),
- MachineRepresentation::kFloat64);
+ if (truncation.IdentifiesZeroAndMinusZero() ||
+ (lhs_type.Is(Type::PlainNumber()) &&
+ rhs_type.Is(Type::PlainNumber()))) {
+ lowering->DoMax(node, lowering->machine()->Float64LessThan(),
+ MachineRepresentation::kFloat64);
+ } else {
+ NodeProperties::ChangeOp(node, Float64Op(node));
+ }
}
- } else {
- VisitFloat64Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
return;
}
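The reason the compare-and-select lowering (DoMax) needs either zero identification or PlainNumber inputs can be seen with plain doubles: Math.max(-0, 0) must be +0, but a naive select keeps -0 because -0 < 0 is false. The same caveat applies symmetrically to the NumberMin case below.

#include <cassert>
#include <cmath>

double select_max(double a, double b) { return a < b ? b : a; }  // DoMax shape

int main() {
  double r = select_max(-0.0, 0.0);  // Math.max(-0, 0) should be +0
  assert(r == 0.0);                  // numerically indistinguishable from 0,
  assert(std::signbit(r));           // but it is -0: wrong unless zeros are identified
  return 0;
}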
@@ -2171,32 +2256,45 @@ class RepresentationSelector {
// It is safe to use the feedback types for left and right hand side
// here, since we can only narrow those types and thus we can only
// promise a more specific truncation.
+ // For NumberMin we generally propagate whether the truncation
+ // identifies zeros to the inputs, and we choose to ignore minus
+ // zero in those cases.
Type const lhs_type = TypeOf(node->InputAt(0));
Type const rhs_type = TypeOf(node->InputAt(1));
- if (lhs_type.Is(Type::Unsigned32()) &&
- rhs_type.Is(Type::Unsigned32())) {
+ if ((lhs_type.Is(Type::Unsigned32()) &&
+ rhs_type.Is(Type::Unsigned32())) ||
+ (lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
+ rhs_type.Is(Type::Unsigned32OrMinusZero()) &&
+ truncation.IdentifiesZeroAndMinusZero())) {
VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMin(node, lowering->machine()->Uint32LessThan(),
MachineRepresentation::kWord32);
}
- } else if (lhs_type.Is(Type::Signed32()) &&
- rhs_type.Is(Type::Signed32())) {
+ } else if ((lhs_type.Is(Type::Signed32()) &&
+ rhs_type.Is(Type::Signed32())) ||
+ (lhs_type.Is(Type::Signed32OrMinusZero()) &&
+ rhs_type.Is(Type::Signed32OrMinusZero()) &&
+ truncation.IdentifiesZeroAndMinusZero())) {
VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMin(node, lowering->machine()->Int32LessThan(),
MachineRepresentation::kWord32);
}
- } else if (lhs_type.Is(Type::PlainNumber()) &&
- rhs_type.Is(Type::PlainNumber())) {
- VisitFloat64Binop(node);
+ } else {
+ VisitBinop(node,
+ UseInfo::TruncatingFloat64(truncation.identify_zeros()),
+ MachineRepresentation::kFloat64);
if (lower()) {
- lowering->DoMin(node, lowering->machine()->Float64LessThan(),
- MachineRepresentation::kFloat64);
+ if (truncation.IdentifiesZeroAndMinusZero() ||
+ (lhs_type.Is(Type::PlainNumber()) &&
+ rhs_type.Is(Type::PlainNumber()))) {
+ lowering->DoMin(node, lowering->machine()->Float64LessThan(),
+ MachineRepresentation::kFloat64);
+ } else {
+ NodeProperties::ChangeOp(node, Float64Op(node));
+ }
}
- } else {
- VisitFloat64Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
return;
}
@@ -2207,18 +2305,38 @@ class RepresentationSelector {
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
}
+ case IrOpcode::kNumberCeil:
+ case IrOpcode::kNumberFloor:
+ case IrOpcode::kNumberRound:
+ case IrOpcode::kNumberTrunc: {
+ // For NumberCeil, NumberFloor, NumberRound and NumberTrunc we propagate
+ // the zero identification part of the truncation, and we turn them into
+ // no-ops if we figure out (late) that their input is already an
+ // integer, NaN or -0.
+ Type const input_type = TypeOf(node->InputAt(0));
+ VisitUnop(node, UseInfo::TruncatingFloat64(truncation.identify_zeros()),
+ MachineRepresentation::kFloat64);
+ if (lower()) {
+ if (input_type.Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ DeferReplacement(node, node->InputAt(0));
+ } else if (node->opcode() == IrOpcode::kNumberRound) {
+ DeferReplacement(node, lowering->Float64Round(node));
+ } else {
+ NodeProperties::ChangeOp(node, Float64Op(node));
+ }
+ }
+ return;
+ }
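The late no-op replacement above is sound because ceil/floor/round/trunc act as the identity on inputs that are already integral, NaN, or -0; the C library counterparts of the Float64 machine operators confirm this:

#include <cassert>
#include <cmath>

int main() {
  assert(std::floor(7.0) == 7.0);                // integers are fixed points
  assert(std::isnan(std::trunc(std::nan(""))));  // NaN stays NaN
  double z = std::ceil(-0.0);
  assert(z == 0.0 && std::signbit(z));           // -0 stays -0
  return 0;
}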
case IrOpcode::kNumberAcos:
case IrOpcode::kNumberAcosh:
case IrOpcode::kNumberAsin:
case IrOpcode::kNumberAsinh:
case IrOpcode::kNumberAtan:
case IrOpcode::kNumberAtanh:
- case IrOpcode::kNumberCeil:
case IrOpcode::kNumberCos:
case IrOpcode::kNumberCosh:
case IrOpcode::kNumberExp:
case IrOpcode::kNumberExpm1:
- case IrOpcode::kNumberFloor:
case IrOpcode::kNumberLog:
case IrOpcode::kNumberLog1p:
case IrOpcode::kNumberLog2:
@@ -2227,19 +2345,12 @@ class RepresentationSelector {
case IrOpcode::kNumberSin:
case IrOpcode::kNumberSinh:
case IrOpcode::kNumberTan:
- case IrOpcode::kNumberTanh:
- case IrOpcode::kNumberTrunc: {
+ case IrOpcode::kNumberTanh: {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
}
- case IrOpcode::kNumberRound: {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, lowering->Float64Round(node));
- return;
- }
case IrOpcode::kNumberSign: {
if (InputIs(node, Type::Signed32())) {
VisitUnop(node, UseInfo::TruncatingWord32(),
@@ -2252,6 +2363,20 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kNumberSilenceNaN: {
+ Type const input_type = TypeOf(node->InputAt(0));
+ if (input_type.Is(Type::OrderedNumber())) {
+ // No need to silence anything if the input cannot be NaN.
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ }
+ return;
+ }
case IrOpcode::kNumberSqrt: {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
@@ -2259,17 +2384,22 @@ class RepresentationSelector {
return;
}
case IrOpcode::kNumberToBoolean: {
+ // For NumberToBoolean we don't care whether the input is 0 or
+ // -0, since both of them are mapped to false anyways, so we
+ // can generally pass kIdentifyZeros truncation.
Type const input_type = TypeOf(node->InputAt(0));
- if (input_type.Is(Type::Integral32())) {
+ if (input_type.Is(Type::Integral32OrMinusZeroOrNaN())) {
+ // 0, -0 and NaN all map to false, so we can safely truncate
+ // all of them to zero here.
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
if (lower()) lowering->DoIntegral32ToBit(node);
} else if (input_type.Is(Type::OrderedNumber())) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
+ VisitUnop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kBit);
if (lower()) lowering->DoOrderedNumberToBit(node);
} else {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
+ VisitUnop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kBit);
if (lower()) lowering->DoNumberToBit(node);
}
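The kIdentifyZeros choice above rests on ToBoolean's number rule, sketched here as a plain C++ function: 0, -0 and NaN are exactly the falsy numbers, so the operator cannot tell the two zeros apart.

#include <cassert>
#include <cmath>

// ECMAScript ToBoolean restricted to numbers, as a plain function.
bool NumberToBoolean(double x) { return x == x && x != 0.0; }

int main() {
  assert(!NumberToBoolean(0.0));
  assert(!NumberToBoolean(-0.0));          // identical to 0 here
  assert(!NumberToBoolean(std::nan("")));  // x == x fails for NaN
  assert(NumberToBoolean(-3.5));
  return 0;
}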
@@ -2337,6 +2467,18 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
}
case IrOpcode::kNewConsString: {
+ ProcessInput(node, 0, UseInfo::TruncatingWord32()); // length
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // first
+ ProcessInput(node, 2, UseInfo::AnyTagged()); // second
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
+ return;
+ }
+ case IrOpcode::kStringConcat: {
+ // TODO(turbofan): We currently depend on having this first length input
+ // to make sure that the overflow check is properly scheduled before the
+ // actual string concatenation. We should also use the length to pass it
+ // to the builtin or decide in optimized code how to construct the
+ // resulting string (i.e. cons string or sequential string).
ProcessInput(node, 0, UseInfo::TaggedSigned()); // length
ProcessInput(node, 1, UseInfo::AnyTagged()); // first
ProcessInput(node, 2, UseInfo::AnyTagged()); // second
@@ -2350,13 +2492,11 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
}
case IrOpcode::kStringCharCodeAt: {
- return VisitBinop(node, UseInfo::AnyTagged(),
- UseInfo::TruncatingWord32(),
+ return VisitBinop(node, UseInfo::AnyTagged(), UseInfo::Word(),
MachineRepresentation::kWord32);
}
case IrOpcode::kStringCodePointAt: {
- return VisitBinop(node, UseInfo::AnyTagged(),
- UseInfo::TruncatingWord32(),
+ return VisitBinop(node, UseInfo::AnyTagged(), UseInfo::Word(),
MachineRepresentation::kTaggedSigned);
}
case IrOpcode::kStringFromSingleCharCode: {
@@ -2380,8 +2520,7 @@ class RepresentationSelector {
// TODO(bmeurer): The input representation should be TaggedPointer.
// Fix this once we have a dedicated StringConcat/JSStringAdd
// operator, which marks its output as TaggedPointer properly.
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedSigned);
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kWord32);
return;
}
case IrOpcode::kStringSubstring: {
@@ -2491,7 +2630,7 @@ class RepresentationSelector {
}
case IrOpcode::kAllocate: {
- ProcessInput(node, 0, UseInfo::TruncatingWord32());
+ ProcessInput(node, 0, UseInfo::Word());
ProcessRemainingInputs(node, 1);
SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
@@ -2543,8 +2682,7 @@ class RepresentationSelector {
case IrOpcode::kLoadElement: {
if (truncation.IsUnused()) return VisitUnused(node);
ElementAccess access = ElementAccessOf(node->op());
- VisitBinop(node, UseInfoForBasePointer(access),
- UseInfo::TruncatingWord32(),
+ VisitBinop(node, UseInfoForBasePointer(access), UseInfo::Word(),
access.machine_type.representation());
return;
}
@@ -2564,7 +2702,7 @@ class RepresentationSelector {
access.base_is_tagged, element_representation, access.type,
input_info->representation(), value_node);
ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
- ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 1, UseInfo::Word()); // index
ProcessInput(node, 2,
TruncatingUseInfoFromRepresentation(
element_representation)); // value
@@ -2587,8 +2725,8 @@ class RepresentationSelector {
case IrOpcode::kTransitionAndStoreElement: {
Type value_type = TypeOf(node->InputAt(2));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // array
- ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // array
+ ProcessInput(node, 1, UseInfo::Word()); // index
if (value_type.Is(Type::SignedSmall())) {
ProcessInput(node, 2, UseInfo::TruncatingWord32()); // value
@@ -2623,10 +2761,10 @@ class RepresentationSelector {
case IrOpcode::kLoadTypedElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
- ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
- ProcessInput(node, 2, UseInfo::PointerInt()); // external pointer
- ProcessInput(node, 3, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
+ ProcessInput(node, 2, UseInfo::Word()); // external pointer
+ ProcessInput(node, 3, UseInfo::Word()); // index
ProcessRemainingInputs(node, 4);
SetOutput(node, rep);
return;
@@ -2634,10 +2772,10 @@ class RepresentationSelector {
case IrOpcode::kLoadDataViewElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
- ProcessInput(node, 1, UseInfo::PointerInt()); // external pointer
- ProcessInput(node, 2, UseInfo::TruncatingWord32()); // index
- ProcessInput(node, 3, UseInfo::Bool()); // little-endian
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
+ ProcessInput(node, 1, UseInfo::Word()); // external pointer
+ ProcessInput(node, 2, UseInfo::Word()); // index
+ ProcessInput(node, 3, UseInfo::Bool()); // little-endian
ProcessRemainingInputs(node, 4);
SetOutput(node, rep);
return;
@@ -2645,10 +2783,10 @@ class RepresentationSelector {
case IrOpcode::kStoreTypedElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
- ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
- ProcessInput(node, 2, UseInfo::PointerInt()); // external pointer
- ProcessInput(node, 3, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
+ ProcessInput(node, 2, UseInfo::Word()); // external pointer
+ ProcessInput(node, 3, UseInfo::Word()); // index
ProcessInput(node, 4,
TruncatingUseInfoFromRepresentation(rep)); // value
ProcessRemainingInputs(node, 5);
@@ -2659,8 +2797,8 @@ class RepresentationSelector {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
- ProcessInput(node, 1, UseInfo::PointerInt()); // external pointer
- ProcessInput(node, 2, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 1, UseInfo::Word()); // external pointer
+ ProcessInput(node, 2, UseInfo::Word()); // index
ProcessInput(node, 3,
TruncatingUseInfoFromRepresentation(rep)); // value
ProcessInput(node, 4, UseInfo::Bool()); // little-endian
@@ -2949,8 +3087,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kArgumentsLength: {
- VisitUnop(node, UseInfo::PointerInt(),
- MachineRepresentation::kTaggedSigned);
+ VisitUnop(node, UseInfo::Word(), MachineRepresentation::kTaggedSigned);
return;
}
case IrOpcode::kNewDoubleElements:
@@ -2960,14 +3097,10 @@ class RepresentationSelector {
return;
}
case IrOpcode::kNewArgumentsElements: {
- VisitBinop(node, UseInfo::PointerInt(), UseInfo::TaggedSigned(),
+ VisitBinop(node, UseInfo::Word(), UseInfo::TaggedSigned(),
MachineRepresentation::kTaggedPointer);
return;
}
- case IrOpcode::kArrayBufferWasNeutered: {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
- return;
- }
case IrOpcode::kCheckFloat64Hole: {
Type const input_type = TypeOf(node->InputAt(0));
if (input_type.Is(Type::Number())) {
@@ -3045,20 +3178,28 @@ class RepresentationSelector {
return VisitBinop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
case IrOpcode::kMaybeGrowFastElements: {
+ Type const index_type = TypeOf(node->InputAt(2));
+ Type const length_type = TypeOf(node->InputAt(3));
ProcessInput(node, 0, UseInfo::AnyTagged()); // object
ProcessInput(node, 1, UseInfo::AnyTagged()); // elements
ProcessInput(node, 2, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 3, UseInfo::TruncatingWord32()); // length
ProcessRemainingInputs(node, 4);
SetOutput(node, MachineRepresentation::kTaggedPointer);
+ if (lower()) {
+ // If the index is known to be less than the length (or if
+ // we're in dead code), we know that we don't need to grow
+ // the elements, so we can just remove this operation all
+ // together and replace it with the elements that we have
+ // on the inputs.
+ if (index_type.IsNone() || length_type.IsNone() ||
+ index_type.Max() < length_type.Min()) {
+ DeferReplacement(node, node->InputAt(1));
+ }
+ }
return;
}
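The replacement condition above is a simple interval argument; the sketch below restates it with a hypothetical Range type (standing in for V8's type bounds): if the largest possible index is below the smallest possible length, growth can never be required.

#include <cassert>

// Hypothetical numeric range, standing in for Type::Min()/Type::Max().
struct Range {
  double min;
  double max;
};

bool GrowthProvablyUnnecessary(Range index, Range length) {
  return index.max < length.min;  // every index < every length
}

int main() {
  assert(GrowthProvablyUnnecessary({0, 7}, {8, 16}));   // always in bounds
  assert(!GrowthProvablyUnnecessary({0, 8}, {8, 16}));  // index 8 may equal length 8
  return 0;
}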
- case IrOpcode::kNumberSilenceNaN:
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
- return;
case IrOpcode::kDateNow:
VisitInputs(node);
return SetOutput(node, MachineRepresentation::kTaggedPointer);
@@ -3104,7 +3245,7 @@ class RepresentationSelector {
Type const key_type = TypeOf(node->InputAt(1));
if (key_type.Is(Type::Signed32OrMinusZero())) {
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
+ MachineType::PointerRepresentation());
if (lower()) {
NodeProperties::ChangeOp(
node,
@@ -3151,7 +3292,6 @@ class RepresentationSelector {
case IrOpcode::kJSDecrement:
case IrOpcode::kJSIncrement:
case IrOpcode::kJSNegate:
- case IrOpcode::kJSToInteger:
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToObject:
@@ -3950,7 +4090,7 @@ Node* SimplifiedLowering::ToNumberConvertBigIntCode() {
Node* SimplifiedLowering::ToNumericCode() {
if (!to_numeric_code_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
- to_number_code_.set(jsgraph()->HeapConstant(callable.code()));
+ to_numeric_code_.set(jsgraph()->HeapConstant(callable.code()));
}
return to_numeric_code_.get();
}
@@ -3959,9 +4099,10 @@ Operator const* SimplifiedLowering::ToNumberOperator() {
if (!to_number_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- auto call_descriptor =
- Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(),
- 0, flags, Operator::kNoProperties);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags,
+ Operator::kNoProperties);
to_number_operator_.set(common()->Call(call_descriptor));
}
return to_number_operator_.get();
@@ -3972,9 +4113,10 @@ Operator const* SimplifiedLowering::ToNumberConvertBigIntOperator() {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kToNumberConvertBigInt);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- auto call_descriptor =
- Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(),
- 0, flags, Operator::kNoProperties);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags,
+ Operator::kNoProperties);
to_number_convert_big_int_operator_.set(common()->Call(call_descriptor));
}
return to_number_convert_big_int_operator_.get();
@@ -3984,9 +4126,10 @@ Operator const* SimplifiedLowering::ToNumericOperator() {
if (!to_numeric_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- auto call_descriptor =
- Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(),
- 0, flags, Operator::kNoProperties);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags,
+ Operator::kNoProperties);
to_numeric_operator_.set(common()->Call(call_descriptor));
}
return to_numeric_operator_.get();
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 7b21b07813..ba7c9b68b7 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -27,7 +27,7 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level);
- ~SimplifiedLowering() {}
+ ~SimplifiedLowering() = default;
void LowerAllNodes();
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index ce7d18f34a..851d7927bd 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -38,7 +38,7 @@ SimplifiedOperatorReducer::SimplifiedOperatorReducer(
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker) {}
-SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
+SimplifiedOperatorReducer::~SimplifiedOperatorReducer() = default;
Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 0c331bce5e..3aa3d30a27 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -79,7 +79,7 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
#endif
os << access.type << ", " << access.machine_type << ", "
<< access.write_barrier_kind;
- if (FLAG_untrusted_code_mitigations || FLAG_branch_load_poisoning) {
+ if (FLAG_untrusted_code_mitigations) {
os << ", " << access.load_sensitivity;
}
os << "]";
@@ -118,7 +118,7 @@ std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", "
<< access.type << ", " << access.machine_type << ", "
<< access.write_barrier_kind;
- if (FLAG_untrusted_code_mitigations || FLAG_branch_load_poisoning) {
+ if (FLAG_untrusted_code_mitigations) {
os << ", " << access.load_sensitivity;
}
return os;
@@ -709,6 +709,7 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(StringConcat, Operator::kNoProperties, 3, 0) \
V(StringToNumber, Operator::kNoProperties, 1, 0) \
V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
@@ -720,14 +721,18 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
@@ -798,11 +803,15 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(CheckSmi, 1, 1) \
V(CheckString, 1, 1) \
V(CheckedInt32ToTaggedSigned, 1, 1) \
+ V(CheckedInt64ToInt32, 1, 1) \
+ V(CheckedInt64ToTaggedSigned, 1, 1) \
V(CheckedTaggedSignedToInt32, 1, 1) \
V(CheckedTaggedToTaggedPointer, 1, 1) \
V(CheckedTaggedToTaggedSigned, 1, 1) \
V(CheckedUint32ToInt32, 1, 1) \
- V(CheckedUint32ToTaggedSigned, 1, 1)
+ V(CheckedUint32ToTaggedSigned, 1, 1) \
+ V(CheckedUint64ToInt32, 1, 1) \
+ V(CheckedUint64ToTaggedSigned, 1, 1)
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, value_input_count, control_input_count) \
@@ -891,13 +900,6 @@ struct SimplifiedOperatorGlobalCache final {
StringFromSingleCodePointOperator<UnicodeEncoding::UTF32>
kStringFromSingleCodePointOperatorUTF32;
- struct ArrayBufferWasNeuteredOperator final : public Operator {
- ArrayBufferWasNeuteredOperator()
- : Operator(IrOpcode::kArrayBufferWasNeutered, Operator::kEliminatable,
- "ArrayBufferWasNeutered", 1, 1, 1, 1, 1, 0) {}
- };
- ArrayBufferWasNeuteredOperator kArrayBufferWasNeutered;
-
struct FindOrderedHashMapEntryOperator final : public Operator {
FindOrderedHashMapEntryOperator()
: Operator(IrOpcode::kFindOrderedHashMapEntry, Operator::kEliminatable,
@@ -1124,7 +1126,6 @@ SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
PURE_OP_LIST(GET_FROM_CACHE)
EFFECT_DEPENDENT_OP_LIST(GET_FROM_CACHE)
CHECKED_OP_LIST(GET_FROM_CACHE)
-GET_FROM_CACHE(ArrayBufferWasNeutered)
GET_FROM_CACHE(ArgumentsFrame)
GET_FROM_CACHE(FindOrderedHashMapEntry)
GET_FROM_CACHE(FindOrderedHashMapEntryForInt32Key)
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index df44e899cd..df823fb0b0 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -228,7 +228,7 @@ enum class CheckTaggedInputMode : uint8_t {
size_t hash_value(CheckTaggedInputMode);
-std::ostream& operator<<(std::ostream&, CheckTaggedInputMode);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, CheckTaggedInputMode);
class CheckTaggedInputParameters {
public:
@@ -616,6 +616,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ToBoolean();
+ const Operator* StringConcat();
const Operator* StringEqual();
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
@@ -641,13 +642,17 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* PlainPrimitiveToFloat64();
const Operator* ChangeTaggedSignedToInt32();
+ const Operator* ChangeTaggedSignedToInt64();
const Operator* ChangeTaggedToInt32();
+ const Operator* ChangeTaggedToInt64();
const Operator* ChangeTaggedToUint32();
const Operator* ChangeTaggedToFloat64();
const Operator* ChangeTaggedToTaggedSigned();
const Operator* ChangeInt31ToTaggedSigned();
const Operator* ChangeInt32ToTagged();
+ const Operator* ChangeInt64ToTagged();
const Operator* ChangeUint32ToTagged();
+ const Operator* ChangeUint64ToTagged();
const Operator* ChangeFloat64ToTagged(CheckForMinusZeroMode);
const Operator* ChangeFloat64ToTaggedPointer();
const Operator* ChangeTaggedToBit();
@@ -686,6 +691,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
const Operator* CheckedInt32Sub();
const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckedInt64ToInt32(const VectorSlotPair& feedback);
+ const Operator* CheckedInt64ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* CheckedTaggedSignedToInt32(const VectorSlotPair& feedback);
const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode,
const VectorSlotPair& feedback);
@@ -699,6 +706,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckedUint32Mod();
const Operator* CheckedUint32ToInt32(const VectorSlotPair& feedback);
const Operator* CheckedUint32ToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckedUint64ToInt32(const VectorSlotPair& feedback);
+ const Operator* CheckedUint64ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* ConvertReceiver(ConvertReceiverMode);
@@ -741,9 +750,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// new-cons-string length, first, second
const Operator* NewConsString();
- // array-buffer-was-neutered buffer
- const Operator* ArrayBufferWasNeutered();
-
// ensure-writable-fast-elements object, elements
const Operator* EnsureWritableFastElements();
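
The new CheckedInt64ToInt32, CheckedUint64ToInt32, and the TaggedSigned variants pair with the Change*64* operators above: a checked conversion keeps the value only when it is representable in the target type and otherwise deoptimizes, which is why each takes a VectorSlotPair for feedback. A sketch of the semantics only, with a plain exception standing in for a deopt:

    #include <cstdint>
    #include <limits>
    #include <stdexcept>

    // Succeed only when the value is representable as int32; otherwise bail
    // out (TurboFan would deoptimize and record feedback; here we throw).
    int32_t CheckedInt64ToInt32(int64_t value) {
      if (value < std::numeric_limits<int32_t>::min() ||
          value > std::numeric_limits<int32_t>::max()) {
        throw std::runtime_error("deopt: value out of int32 range");
      }
      return static_cast<int32_t>(value);
    }

    int main() {
      return CheckedInt64ToInt32(7);  // in range: plain narrowing
    }
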
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index ce2c71a3d7..589e3948b9 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -32,7 +32,7 @@ namespace compiler {
if (V8_UNLIKELY(!(condition))) { \
FATAL("Check failed: %s. Extra info: " fmt, #condition, ##__VA_ARGS__); \
} \
- } while (0)
+ } while (false)
#ifdef DEBUG
#define DCHECK_EXTRA(condition, fmt, ...) \
@@ -100,9 +100,9 @@ class UnobservablesSet final {
static UnobservablesSet Unvisited();
static UnobservablesSet VisitedEmpty(Zone* zone);
UnobservablesSet(); // unvisited
- UnobservablesSet(const UnobservablesSet& other) : set_(other.set_) {}
+ UnobservablesSet(const UnobservablesSet& other) = default;
- UnobservablesSet Intersect(UnobservablesSet other, Zone* zone) const;
+ UnobservablesSet Intersect(const UnobservablesSet& other, Zone* zone) const;
UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const;
@@ -140,7 +140,7 @@ class RedundantStoreFinder final {
private:
void VisitEffectfulNode(Node* node);
UnobservablesSet RecomputeUseIntersection(Node* node);
- UnobservablesSet RecomputeSet(Node* node, UnobservablesSet uses);
+ UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses);
static bool CannotObserveStoreField(Node* node);
void MarkForRevisit(Node* node);
@@ -252,8 +252,8 @@ void StoreStoreElimination::Run(JSGraph* js_graph, Zone* temp_zone) {
// Recompute unobservables-set for a node. Will also mark superfluous nodes
// as to be removed.
-UnobservablesSet RedundantStoreFinder::RecomputeSet(Node* node,
- UnobservablesSet uses) {
+UnobservablesSet RedundantStoreFinder::RecomputeSet(
+ Node* node, const UnobservablesSet& uses) {
switch (node->op()->opcode()) {
case IrOpcode::kStoreField: {
Node* stored_to = node->InputAt(0);
@@ -472,7 +472,7 @@ UnobservablesSet UnobservablesSet::VisitedEmpty(Zone* zone) {
// Computes the intersection of two UnobservablesSets. May return
// UnobservablesSet::Unvisited() instead of an empty UnobservablesSet for
// speed.
-UnobservablesSet UnobservablesSet::Intersect(UnobservablesSet other,
+UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other,
Zone* zone) const {
if (IsEmpty() || other.IsEmpty()) {
return Unvisited();
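
These hunks change UnobservablesSet parameters from pass-by-value to pass-by-const-reference. The class wraps a zone-allocated set, so passing it by value copied the container on every Intersect and RecomputeSet call; the reference version removes the copies while leaving call sites untouched. The shape of the change on a toy type:

    #include <set>

    // Illustrative stand-in for a set-wrapping value type.
    struct IntSet {
      std::set<int> values;
    };

    // before: IntSet Intersect(IntSet a, IntSet b);  // copies both sets
    // after: const& parameters, no copies, same call sites
    IntSet Intersect(const IntSet& a, const IntSet& b) {
      IntSet out;
      for (int v : a.values)
        if (b.values.count(v)) out.values.insert(v);
      return out;
    }

    int main() {
      IntSet a{{1, 2, 3}}, b{{2, 3, 4}};
      return Intersect(a, b).values.size() == 2 ? 0 : 1;
    }
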
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 7a02c9a37c..251ea08751 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -8,6 +8,7 @@
#include "src/compiler/types.h"
#include "src/date.h"
#include "src/objects/code.h"
+#include "src/objects/js-array-buffer.h"
#include "src/objects/string.h"
namespace v8 {
@@ -34,6 +35,8 @@ class TypeCache final {
Type const kUint16 = CreateRange<uint16_t>();
Type const kInt32 = Type::Signed32();
Type const kUint32 = Type::Unsigned32();
+ Type const kInt64 = CreateRange<int64_t>();
+ Type const kUint64 = CreateRange<uint64_t>();
Type const kFloat32 = Type::Number();
Type const kFloat64 = Type::Number();
Type const kBigInt64 = Type::BigInt();
@@ -96,6 +99,20 @@ class TypeCache final {
// [0, kMaxUInt32].
Type const kJSArrayLengthType = Type::Unsigned32();
+ // The JSArrayBuffer::byte_length property is limited to safe integer range
+ // per specification, but on 32-bit architectures it is implemented as a
+ // uint32_t field, so it's in the [0, kMaxUInt32] range in that case.
+ Type const kJSArrayBufferByteLengthType =
+ CreateRange(0.0, JSArrayBuffer::kMaxByteLength);
+
+ // The type for the JSArrayBufferView::byte_length property is the same as
+ // JSArrayBuffer::byte_length above.
+ Type const kJSArrayBufferViewByteLengthType = kJSArrayBufferByteLengthType;
+
+ // The type for the JSArrayBufferView::byte_offset property is the same as
+ // JSArrayBuffer::byte_length above.
+ Type const kJSArrayBufferViewByteOffsetType = kJSArrayBufferByteLengthType;
+
// The JSTypedArray::length property always contains a tagged number in the
// range [0, kMaxSmiValue].
Type const kJSTypedArrayLengthType = Type::UnsignedSmall();
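
The new kInt64 and kUint64 entries use the same CreateRange<T>() helper as the smaller integer caches, and kJSArrayBufferByteLengthType bounds its range by JSArrayBuffer::kMaxByteLength. A range type is a conservative [min, max] interval stored as doubles, so the 64-bit endpoints are only approximately representable, which is acceptable for a type bound; a sketch of the helper's idea:

    #include <cstdint>
    #include <limits>

    // Stand-in for a compiler range type: just the interval endpoints.
    struct Range { double min, max; };

    template <typename T>
    Range CreateRange() {
      return {static_cast<double>(std::numeric_limits<T>::min()),
              static_cast<double>(std::numeric_limits<T>::max())};
    }

    int main() {
      Range i64 = CreateRange<int64_t>();   // ~[-9.22e18, 9.22e18]
      Range u64 = CreateRange<uint64_t>();  // ~[0, 1.84e19]
      return (i64.min < 0 && u64.min == 0) ? 0 : 1;
    }
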
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.cc b/deps/v8/src/compiler/type-narrowing-reducer.cc
index 01afdcb911..c0343f70e7 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.cc
+++ b/deps/v8/src/compiler/type-narrowing-reducer.cc
@@ -15,9 +15,9 @@ TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- op_typer_(jsgraph->isolate(), js_heap_broker, zone()) {}
+ op_typer_(js_heap_broker, zone()) {}
-TypeNarrowingReducer::~TypeNarrowingReducer() {}
+TypeNarrowingReducer::~TypeNarrowingReducer() = default;
Reduction TypeNarrowingReducer::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index b77fc97859..6a9430bd29 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -32,7 +32,7 @@ TypedOptimization::TypedOptimization(Editor* editor,
graph()->zone())),
type_cache_(TypeCache::Get()) {}
-TypedOptimization::~TypedOptimization() {}
+TypedOptimization::~TypedOptimization() = default;
Reduction TypedOptimization::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
@@ -61,6 +61,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
return ReduceNumberRoundop(node);
case IrOpcode::kNumberFloor:
return ReduceNumberFloor(node);
+ case IrOpcode::kNumberSilenceNaN:
+ return ReduceNumberSilenceNaN(node);
case IrOpcode::kNumberToUint8Clamped:
return ReduceNumberToUint8Clamped(node);
case IrOpcode::kPhi:
@@ -71,6 +73,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual:
return ReduceStringComparison(node);
+ case IrOpcode::kStringLength:
+ return ReduceStringLength(node);
case IrOpcode::kSameValue:
return ReduceSameValue(node);
case IrOpcode::kSelect:
@@ -272,6 +276,15 @@ Reduction TypedOptimization::ReduceNumberRoundop(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceNumberSilenceNaN(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type const input_type = NodeProperties::GetType(input);
+ if (input_type.Is(Type::OrderedNumber())) {
+ return Replace(input);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceNumberToUint8Clamped(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type const input_type = NodeProperties::GetType(input);
@@ -454,6 +467,30 @@ Reduction TypedOptimization::ReduceStringComparison(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceStringLength(Node* node) {
+ DCHECK_EQ(IrOpcode::kStringLength, node->opcode());
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ switch (input->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ // Constant-fold the String::length of the {input}.
+ HeapObjectMatcher m(input);
+ if (m.Ref(js_heap_broker()).IsString()) {
+ uint32_t const length = m.Ref(js_heap_broker()).AsString().length();
+ Node* value = jsgraph()->Constant(length);
+ return Replace(value);
+ }
+ break;
+ }
+ case IrOpcode::kStringConcat: {
+ // The first value input to the {input} is the resulting length.
+ return Replace(input->InputAt(0));
+ }
+ default:
+ break;
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceSameValue(Node* node) {
DCHECK_EQ(IrOpcode::kSameValue, node->opcode());
Node* const lhs = NodeProperties::GetValueInput(node, 0);
@@ -578,8 +615,6 @@ Reduction TypedOptimization::ReduceTypeOf(Node* node) {
} else if (type.Is(Type::Function())) {
return Replace(
jsgraph()->Constant(ObjectRef(js_heap_broker(), f->function_string())));
- } else if (type.IsHeapConstant()) {
- return Replace(jsgraph()->Constant(type.AsHeapConstant()->Ref().TypeOf()));
}
return NoChange();
}
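
ReduceStringLength above exploits the new StringConcat operator's shape: value input 0 is the precomputed result length, so StringLength(StringConcat(len, a, b)) reduces to len, while the length of a constant string folds outright. A toy restatement of the two cases; Node here is a stand-in, not TurboFan's:

    #include <cstddef>
    #include <optional>
    #include <string>

    struct Node {
      enum Kind { kConstant, kConcat } kind;
      std::string constant;      // payload for kConstant
      size_t concat_length = 0;  // stands in for concat's length input 0
    };

    std::optional<size_t> ReduceStringLength(const Node& input) {
      switch (input.kind) {
        case Node::kConstant:
          return input.constant.size();  // constant-fold String::length
        case Node::kConcat:
          return input.concat_length;    // forward the precomputed length
      }
      return std::nullopt;               // anything else: no change
    }

    int main() {
      Node s{Node::kConstant, "hello"};
      return *ReduceStringLength(s) == 5 ? 0 : 1;
    }
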
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index baee65dd4e..b49982b4e6 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -29,7 +29,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
public:
TypedOptimization(Editor* editor, CompilationDependencies* dependencies,
JSGraph* jsgraph, JSHeapBroker* js_heap_broker);
- ~TypedOptimization();
+ ~TypedOptimization() override;
const char* reducer_name() const override { return "TypedOptimization"; }
@@ -46,10 +46,12 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceLoadField(Node* node);
Reduction ReduceNumberFloor(Node* node);
Reduction ReduceNumberRoundop(Node* node);
+ Reduction ReduceNumberSilenceNaN(Node* node);
Reduction ReduceNumberToUint8Clamped(Node* node);
Reduction ReducePhi(Node* node);
Reduction ReduceReferenceEqual(Node* node);
Reduction ReduceStringComparison(Node* node);
+ Reduction ReduceStringLength(Node* node);
Reduction ReduceSameValue(Node* node);
Reduction ReduceSelect(Node* node);
Reduction ReduceSpeculativeToNumber(Node* node);
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 7627d27b08..eb357054e0 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -19,6 +19,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/objects-inl.h"
+#include "src/objects/builtin-function-id.h"
namespace v8 {
namespace internal {
@@ -33,14 +34,13 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-Typer::Typer(Isolate* isolate, JSHeapBroker* js_heap_broker, Flags flags,
- Graph* graph)
+Typer::Typer(JSHeapBroker* js_heap_broker, Flags flags, Graph* graph)
: flags_(flags),
graph_(graph),
decorator_(nullptr),
cache_(TypeCache::Get()),
js_heap_broker_(js_heap_broker),
- operation_typer_(isolate, js_heap_broker, zone()) {
+ operation_typer_(js_heap_broker, zone()) {
singleton_false_ = operation_typer_.singleton_false();
singleton_true_ = operation_typer_.singleton_true();
@@ -1114,10 +1114,6 @@ Type Typer::Visitor::TypeToBoolean(Node* node) {
return TypeUnaryOp(node, ToBoolean);
}
-Type Typer::Visitor::TypeJSToInteger(Node* node) {
- return TypeUnaryOp(node, ToInteger);
-}
-
Type Typer::Visitor::TypeJSToLength(Node* node) {
return TypeUnaryOp(node, ToLength);
}
@@ -1215,6 +1211,10 @@ Type Typer::Visitor::TypeJSCreateEmptyLiteralArray(Node* node) {
return Type::Array();
}
+Type Typer::Visitor::TypeJSCreateArrayFromIterable(Node* node) {
+ return Type::Array();
+}
+
Type Typer::Visitor::TypeJSCreateLiteralObject(Node* node) {
return Type::OtherObject();
}
@@ -1433,7 +1433,6 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
// Unary math functions.
case BuiltinFunctionId::kMathAbs:
case BuiltinFunctionId::kMathExp:
- case BuiltinFunctionId::kMathExpm1:
return Type::Union(Type::PlainNumber(), Type::NaN(), t->zone());
case BuiltinFunctionId::kMathAcos:
case BuiltinFunctionId::kMathAcosh:
@@ -1443,6 +1442,7 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
case BuiltinFunctionId::kMathAtanh:
case BuiltinFunctionId::kMathCbrt:
case BuiltinFunctionId::kMathCos:
+ case BuiltinFunctionId::kMathExpm1:
case BuiltinFunctionId::kMathFround:
case BuiltinFunctionId::kMathLog:
case BuiltinFunctionId::kMathLog1p:
@@ -1490,6 +1490,10 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
// Symbol functions.
case BuiltinFunctionId::kSymbolConstructor:
return Type::Symbol();
+ case BuiltinFunctionId::kSymbolPrototypeToString:
+ return Type::String();
+ case BuiltinFunctionId::kSymbolPrototypeValueOf:
+ return Type::Symbol();
// BigInt functions.
case BuiltinFunctionId::kBigIntConstructor:
@@ -1714,16 +1718,11 @@ Type Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineIsSmi:
return TypeUnaryOp(node, ObjectIsSmi);
case Runtime::kInlineIsArray:
- case Runtime::kInlineIsDate:
case Runtime::kInlineIsTypedArray:
case Runtime::kInlineIsRegExp:
return Type::Boolean();
case Runtime::kInlineCreateIterResultObject:
return Type::OtherObject();
- case Runtime::kInlineStringCharFromCode:
- return Type::String();
- case Runtime::kInlineToInteger:
- return TypeUnaryOp(node, ToInteger);
case Runtime::kInlineToLength:
return TypeUnaryOp(node, ToLength);
case Runtime::kInlineToNumber:
@@ -1855,6 +1854,8 @@ Type Typer::Visitor::TypeSpeculativeNumberLessThanOrEqual(Node* node) {
return TypeBinaryOp(node, NumberLessThanOrEqualTyper);
}
+Type Typer::Visitor::TypeStringConcat(Node* node) { return Type::String(); }
+
Type Typer::Visitor::TypeStringToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
@@ -1947,18 +1948,8 @@ Type Typer::Visitor::TypePoisonIndex(Node* node) {
}
Type Typer::Visitor::TypeCheckBounds(Node* node) {
- Type index = Operand(node, 0);
- Type length = Operand(node, 1);
- DCHECK(length.Is(Type::Unsigned31()));
- if (index.Maybe(Type::MinusZero())) {
- index = Type::Union(index, typer_->cache_.kSingletonZero, zone());
- }
- index = Type::Intersect(index, Type::Integral32(), zone());
- if (index.IsNone() || length.IsNone()) return Type::None();
- double min = std::max(index.Min(), 0.0);
- double max = std::min(index.Max(), length.Max() - 1);
- if (max < min) return Type::None();
- return Type::Range(min, max, zone());
+ return typer_->operation_typer_.CheckBounds(Operand(node, 0),
+ Operand(node, 1));
}
Type Typer::Visitor::TypeCheckHeapObject(Node* node) {
@@ -2191,8 +2182,8 @@ Type Typer::Visitor::TypeNewArgumentsElements(Node* node) {
Type Typer::Visitor::TypeNewConsString(Node* node) { return Type::String(); }
-Type Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
- return Type::Boolean();
+Type Typer::Visitor::TypeDelayedStringConstant(Node* node) {
+ return Type::String();
}
Type Typer::Visitor::TypeFindOrderedHashMapEntry(Node* node) {
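
TypeCheckBounds now delegates to OperationTyper::CheckBounds, but the deleted body documents the computation: widen a possibly minus-zero index to include 0, intersect with Integral32, then clamp the index range to [0, length.Max() - 1], returning Type::None() when nothing survives. The core clamp on bare ranges:

    #include <algorithm>

    struct Range {
      double min, max;
      bool IsNone() const { return max < min; }  // empty interval
    };

    // Type of a bounds-checked index, per the removed TypeCheckBounds body
    // (minus-zero and Integral32 handling folded into the clamp for brevity).
    Range CheckBounds(Range index, Range length) {
      double min = std::max(index.min, 0.0);
      double max = std::min(index.max, length.max - 1);
      return {min, max};  // max < min encodes Type::None()
    }

    int main() {
      Range r = CheckBounds({-1, 10}, {0, 4});
      return (r.min == 0 && r.max == 3) ? 0 : 1;
    }
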
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 741ca481c2..f6703fe366 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -25,8 +25,7 @@ class V8_EXPORT_PRIVATE Typer {
};
typedef base::Flags<Flag> Flags;
- Typer(Isolate* isolate, JSHeapBroker* js_heap_broker, Flags flags,
- Graph* graph);
+ Typer(JSHeapBroker* js_heap_broker, Flags flags, Graph* graph);
~Typer();
void Run();
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 968d788fcc..e8954f9202 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -132,8 +132,11 @@ Type::bitset Type::BitsetLub() const {
UNREACHABLE();
}
-Type::bitset BitsetType::Lub(HeapObjectType const& type) {
- switch (type.instance_type()) {
+// TODO(neis): Once the broker mode kDisabled is gone, change the input type to
+// MapRef and get rid of the HeapObjectType class.
+template <typename MapRefLike>
+Type::bitset BitsetType::Lub(const MapRefLike& map) {
+ switch (map.instance_type()) {
case CONS_STRING_TYPE:
case CONS_ONE_BYTE_STRING_TYPE:
case THIN_STRING_TYPE:
@@ -143,18 +146,18 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case EXTERNAL_STRING_TYPE:
case EXTERNAL_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE:
- case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case UNCACHED_EXTERNAL_STRING_TYPE:
+ case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
case STRING_TYPE:
case ONE_BYTE_STRING_TYPE:
return kString;
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case INTERNALIZED_STRING_TYPE:
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
return kInternalizedString;
@@ -163,7 +166,7 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case BIGINT_TYPE:
return kBigInt;
case ODDBALL_TYPE:
- switch (type.oddball_type()) {
+ switch (map.oddball_type()) {
case OddballType::kNone:
break;
case OddballType::kHole:
@@ -189,15 +192,15 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case JS_GLOBAL_PROXY_TYPE:
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
- if (type.is_undetectable()) {
+ if (map.is_undetectable()) {
// Currently we assume that every undetectable receiver is also
// callable, which is what we need to support document.all. We
// could add another Type bit to support other use cases in the
// future if necessary.
- DCHECK(type.is_callable());
+ DCHECK(map.is_callable());
return kOtherUndetectable;
}
- if (type.is_callable()) {
+ if (map.is_callable()) {
return kOtherCallable;
}
return kOtherObject;
@@ -207,11 +210,15 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
case JS_INTL_LIST_FORMAT_TYPE:
case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_NUMBER_FORMAT_TYPE:
case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENTER_TYPE:
#endif // V8_INTL_SUPPORT
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -235,23 +242,24 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
- case WASM_MODULE_TYPE:
+ case WASM_EXCEPTION_TYPE:
case WASM_GLOBAL_TYPE:
case WASM_INSTANCE_TYPE:
case WASM_MEMORY_TYPE:
+ case WASM_MODULE_TYPE:
case WASM_TABLE_TYPE:
- DCHECK(!type.is_callable());
- DCHECK(!type.is_undetectable());
+ DCHECK(!map.is_callable());
+ DCHECK(!map.is_undetectable());
return kOtherObject;
case JS_BOUND_FUNCTION_TYPE:
- DCHECK(!type.is_undetectable());
+ DCHECK(!map.is_undetectable());
return kBoundFunction;
case JS_FUNCTION_TYPE:
- DCHECK(!type.is_undetectable());
+ DCHECK(!map.is_undetectable());
return kFunction;
case JS_PROXY_TYPE:
- DCHECK(!type.is_undetectable());
- if (type.is_callable()) return kCallableProxy;
+ DCHECK(!map.is_undetectable());
+ if (map.is_callable()) return kCallableProxy;
return kOtherProxy;
case MAP_TYPE:
case ALLOCATION_SITE_TYPE:
@@ -285,6 +293,7 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case FOREIGN_TYPE:
case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
+ case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -299,6 +308,7 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case PROPERTY_CELL_TYPE:
case MODULE_TYPE:
case MODULE_INFO_ENTRY_TYPE:
+ case MICROTASK_QUEUE_TYPE:
case CELL_TYPE:
case PRE_PARSED_SCOPE_DATA_TYPE:
case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
@@ -347,6 +357,9 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
UNREACHABLE();
}
+// Explicit instantiation.
+template Type::bitset BitsetType::Lub<MapRef>(const MapRef& map);
+
Type::bitset BitsetType::Lub(double value) {
DisallowHeapAllocation no_allocation;
if (IsMinusZero(value)) return kMinusZero;
@@ -1009,14 +1022,16 @@ void Type::PrintTo(std::ostream& os) const {
os << "(";
for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
Type type_i = this->AsUnion()->Get(i);
- if (i > 0) os << " | " << type_i;
+ if (i > 0) os << " | ";
+ os << type_i;
}
os << ")";
} else if (this->IsTuple()) {
os << "<";
for (int i = 0, n = this->AsTuple()->Arity(); i < n; ++i) {
Type type_i = this->AsTuple()->Element(i);
- if (i > 0) os << ", " << type_i;
+ if (i > 0) os << ", ";
+ os << type_i;
}
os << ">";
} else {
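
The PrintTo fix is easy to miss: the old loop attached the element to the separator (if (i > 0) os << " | " << type_i;), so the first union or tuple member was never printed. Separator and element now print independently; the idiom in isolation:

    #include <iostream>
    #include <vector>

    void PrintJoined(const std::vector<int>& xs) {
      std::cout << "(";
      for (size_t i = 0; i < xs.size(); ++i) {
        // buggy: if (i > 0) std::cout << " | " << xs[i];  // drops xs[0]
        if (i > 0) std::cout << " | ";
        std::cout << xs[i];  // always print the element itself
      }
      std::cout << ")\n";
    }

    int main() { PrintJoined({1, 2, 3}); }  // prints (1 | 2 | 3)
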
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index d27f6e3e75..27f38edae7 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -169,7 +169,8 @@ namespace compiler {
kNumber | kNullOrUndefined | kBoolean) \
V(PlainPrimitive, kNumber | kString | kBoolean | \
kNullOrUndefined) \
- V(Primitive, kSymbol | kBigInt | kPlainPrimitive) \
+ V(NonBigIntPrimitive, kSymbol | kPlainPrimitive) \
+ V(Primitive, kBigInt | kNonBigIntPrimitive) \
V(OtherUndetectableOrUndefined, kOtherUndetectable | kUndefined) \
V(Proxy, kCallableProxy | kOtherProxy) \
V(ArrayOrOtherObject, kArray | kOtherObject) \
@@ -193,7 +194,8 @@ namespace compiler {
kUndefined | kReceiver) \
V(Internal, kHole | kExternalPointer | kOtherInternal) \
V(NonInternal, kPrimitive | kReceiver) \
- V(NonNumber, kUnique | kString | kInternal) \
+ V(NonBigInt, kNonBigIntPrimitive | kReceiver) \
+ V(NonNumber, kBigInt | kUnique | kString | kInternal) \
V(Any, 0xfffffffeu)
// clang-format on
@@ -251,7 +253,10 @@ class V8_EXPORT_PRIVATE BitsetType {
static double Max(bitset);
static bitset Glb(double min, double max);
- static bitset Lub(HeapObjectType const& type);
+ static bitset Lub(HeapObjectType const& type) {
+ return Lub<HeapObjectType>(type);
+ }
+ static bitset Lub(MapRef const& map) { return Lub<MapRef>(map); }
static bitset Lub(double value);
static bitset Lub(double min, double max);
static bitset ExpandInternals(bitset bits);
@@ -273,6 +278,9 @@ class V8_EXPORT_PRIVATE BitsetType {
static const Boundary BoundariesArray[];
static inline const Boundary* Boundaries();
static inline size_t BoundariesSize();
+
+ template <typename MapRefLike>
+ static bitset Lub(MapRefLike const& map);
};
// -----------------------------------------------------------------------------
@@ -377,8 +385,10 @@ class V8_EXPORT_PRIVATE Type {
static Type Union(Type type1, Type type2, Zone* zone);
static Type Intersect(Type type1, Type type2, Zone* zone);
- static Type For(JSHeapBroker* js_heap_broker, Handle<i::Map> map) {
- HeapObjectType type = js_heap_broker->HeapObjectTypeFromMap(map);
+ static Type For(HeapObjectType const& type) {
+ return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(type)));
+ }
+ static Type For(MapRef const& type) {
return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(type)));
}
@@ -545,7 +555,7 @@ class V8_EXPORT_PRIVATE HeapConstantType : public NON_EXPORTED_BASE(TypeBase) {
static HeapConstantType* New(const HeapObjectRef& heap_ref, Zone* zone) {
DCHECK(!heap_ref.IsHeapNumber());
DCHECK_IMPLIES(heap_ref.IsString(), heap_ref.IsInternalizedString());
- BitsetType::bitset bitset = BitsetType::Lub(heap_ref.type());
+ BitsetType::bitset bitset = BitsetType::Lub(heap_ref.GetHeapObjectType());
return new (zone->New(sizeof(HeapConstantType)))
HeapConstantType(bitset, heap_ref);
}
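
Per the TODO in types.cc, BitsetType::Lub becomes a template over MapRefLike so HeapObjectType (for the disabled broker mode) and MapRef share one implementation; the two inline overloads above dispatch to it, and an explicit instantiation next to the definition in the .cc file lets other translation units link against the MapRef variant. The pattern in miniature, with illustrative types:

    // header (sketch): declaration only
    struct MapA { int instance_type() const { return 1; } };
    struct MapB { int instance_type() const { return 2; } };

    template <typename MapRefLike>
    int Lub(const MapRefLike& map);

    // .cc file (sketch): shared definition plus explicit instantiations
    template <typename MapRefLike>
    int Lub(const MapRefLike& map) {
      return map.instance_type();  // one implementation for both types
    }

    template int Lub<MapA>(const MapA&);
    template int Lub<MapB>(const MapB&);

    int main() { return Lub(MapA{}) + Lub(MapB{}) == 3 ? 0 : 1; }
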
diff --git a/deps/v8/src/compiler/value-numbering-reducer.cc b/deps/v8/src/compiler/value-numbering-reducer.cc
index 864d33bc20..af0bc99746 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.cc
+++ b/deps/v8/src/compiler/value-numbering-reducer.cc
@@ -21,7 +21,7 @@ ValueNumberingReducer::ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone)
temp_zone_(temp_zone),
graph_zone_(graph_zone) {}
-ValueNumberingReducer::~ValueNumberingReducer() {}
+ValueNumberingReducer::~ValueNumberingReducer() = default;
Reduction ValueNumberingReducer::Reduce(Node* node) {
diff --git a/deps/v8/src/compiler/value-numbering-reducer.h b/deps/v8/src/compiler/value-numbering-reducer.h
index 44195468c3..489ab71d74 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.h
+++ b/deps/v8/src/compiler/value-numbering-reducer.h
@@ -17,7 +17,7 @@ class V8_EXPORT_PRIVATE ValueNumberingReducer final
: public NON_EXPORTED_BASE(Reducer) {
public:
explicit ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone);
- ~ValueNumberingReducer();
+ ~ValueNumberingReducer() override;
const char* reducer_name() const override { return "ValueNumberingReducer"; }
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 55eaf07711..913a2631dc 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -617,10 +617,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is Boolean.
CheckTypeIs(node, Type::Boolean());
break;
- case IrOpcode::kJSToInteger:
- // Type is OrderedNumber.
- CheckTypeIs(node, Type::OrderedNumber());
- break;
case IrOpcode::kJSToLength:
CheckTypeIs(node, Type::Range(0, kMaxSafeInteger, zone));
break;
@@ -715,6 +711,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is Array.
CheckTypeIs(node, Type::Array());
break;
+ case IrOpcode::kJSCreateArrayFromIterable:
+ // Type is Array.
+ CheckTypeIs(node, Type::Array());
+ break;
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateEmptyLiteralObject:
case IrOpcode::kJSCloneObject:
@@ -730,7 +730,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kJSLoadNamed:
// Type can be anything.
CheckTypeIs(node, Type::Any());
- CHECK(NamedAccessOf(node->op()).feedback().IsValid());
break;
case IrOpcode::kJSLoadGlobal:
// Type can be anything.
@@ -745,7 +744,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kJSStoreNamed:
// Type is empty.
CheckNotTyped(node);
- CHECK(NamedAccessOf(node->op()).feedback().IsValid());
break;
case IrOpcode::kJSStoreGlobal:
// Type is empty.
@@ -1090,6 +1088,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::PlainPrimitive());
CheckTypeIs(node, Type::Number());
break;
+ case IrOpcode::kStringConcat:
+ CheckValueInputIs(node, 0, TypeCache::Get().kStringLengthType);
+ CheckValueInputIs(node, 1, Type::String());
+ CheckValueInputIs(node, 2, Type::String());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual:
@@ -1173,7 +1177,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kObjectIsString:
case IrOpcode::kObjectIsSymbol:
case IrOpcode::kObjectIsUndetectable:
- case IrOpcode::kArrayBufferWasNeutered:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
@@ -1243,6 +1246,9 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 2, Type::String());
CheckTypeIs(node, Type::String());
break;
+ case IrOpcode::kDelayedStringConstant:
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kAllocate:
CheckValueInputIs(node, 0, Type::PlainNumber());
break;
@@ -1275,6 +1281,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kChangeTaggedSignedToInt64:
+ break;
case IrOpcode::kChangeTaggedToInt32: {
// Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1284,6 +1292,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kChangeTaggedToInt64:
+ break;
case IrOpcode::kChangeTaggedToUint32: {
// Unsigned32 /\ Tagged -> Unsigned32 /\ UntaggedInt32
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1332,6 +1342,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kChangeInt64ToTagged:
+ break;
case IrOpcode::kChangeUint32ToTagged: {
// Unsigned32 /\ UntaggedInt32 -> Unsigned32 /\ Tagged
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1341,6 +1353,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kChangeUint64ToTagged:
+ break;
case IrOpcode::kChangeFloat64ToTagged: {
// Number /\ UntaggedFloat64 -> Number /\ Tagged
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1430,7 +1444,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Symbol());
break;
-
case IrOpcode::kConvertReceiver:
// (Any, Any) -> Receiver
CheckValueInputIs(node, 0, Type::Any());
@@ -1446,8 +1459,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kCheckedUint32Mod:
case IrOpcode::kCheckedInt32Mul:
case IrOpcode::kCheckedInt32ToTaggedSigned:
+ case IrOpcode::kCheckedInt64ToInt32:
+ case IrOpcode::kCheckedInt64ToTaggedSigned:
case IrOpcode::kCheckedUint32ToInt32:
case IrOpcode::kCheckedUint32ToTaggedSigned:
+ case IrOpcode::kCheckedUint64ToInt32:
+ case IrOpcode::kCheckedUint64ToTaggedSigned:
case IrOpcode::kCheckedFloat64ToInt32:
case IrOpcode::kCheckedTaggedSignedToInt32:
case IrOpcode::kCheckedTaggedToInt32:
@@ -1689,9 +1706,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kChangeInt32ToInt64:
case IrOpcode::kChangeUint32ToUint64:
case IrOpcode::kChangeInt32ToFloat64:
+ case IrOpcode::kChangeInt64ToFloat64:
case IrOpcode::kChangeUint32ToFloat64:
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
+ case IrOpcode::kChangeFloat64ToInt64:
case IrOpcode::kChangeFloat64ToUint32:
case IrOpcode::kChangeFloat64ToUint64:
case IrOpcode::kFloat64SilenceNaN:
@@ -1747,13 +1766,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32AtomicPairXor:
case IrOpcode::kWord32AtomicPairExchange:
case IrOpcode::kWord32AtomicPairCompareExchange:
- case IrOpcode::kWord64AtomicNarrowAdd:
- case IrOpcode::kWord64AtomicNarrowSub:
- case IrOpcode::kWord64AtomicNarrowAnd:
- case IrOpcode::kWord64AtomicNarrowOr:
- case IrOpcode::kWord64AtomicNarrowXor:
- case IrOpcode::kWord64AtomicNarrowExchange:
- case IrOpcode::kWord64AtomicNarrowCompareExchange:
case IrOpcode::kSpeculationFence:
case IrOpcode::kSignExtendWord8ToInt32:
case IrOpcode::kSignExtendWord16ToInt32:
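
The verifier changes mirror the operator changes: StringConcat takes a length plus two strings and yields a string, the removed JSToInteger and ArrayBufferWasNeutered cases disappear, and the new 64-bit change/checked opcodes are accepted. The StringConcat rule restated as plain assertions on a toy node:

    #include <cassert>

    enum class Ty { kStringLength, kString, kOther };

    struct ToyNode { Ty inputs[3]; Ty type; };

    void CheckStringConcat(const ToyNode& n) {
      assert(n.inputs[0] == Ty::kStringLength);  // resulting length
      assert(n.inputs[1] == Ty::kString);        // first part
      assert(n.inputs[2] == Ty::kString);        // second part
      assert(n.type == Ty::kString);             // concat yields a string
    }

    int main() {
      ToyNode n{{Ty::kStringLength, Ty::kString, Ty::kString}, Ty::kString};
      CheckStringConcat(n);
    }
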
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index f544c2eb10..4f35476dfb 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -15,7 +15,6 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/compiler.h"
-#include "src/compiler/access-builder.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -43,6 +42,7 @@
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/memory-tracing.h"
+#include "src/wasm/object-access.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-linkage.h"
@@ -55,6 +55,8 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
// TODO(titzer): pull WASM_64 up to a common header.
#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
#define WASM_64 1
@@ -66,27 +68,34 @@ namespace compiler {
FATAL("Unsupported opcode 0x%x:%s", (opcode), \
wasm::WasmOpcodes::OpcodeName(opcode));
+MachineType assert_size(int expected_size, MachineType type) {
+ DCHECK_EQ(expected_size, ElementSizeInBytes(type.representation()));
+ return type;
+}
+
+#define WASM_INSTANCE_OBJECT_SIZE(name) \
+ (WasmInstanceObject::k##name##OffsetEnd - \
+ WasmInstanceObject::k##name##Offset + 1) // NOLINT(whitespace/indent)
+
#define WASM_INSTANCE_OBJECT_OFFSET(name) \
- (WasmInstanceObject::k##name##Offset - kHeapObjectTag)
+ wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
#define LOAD_INSTANCE_FIELD(name, type) \
SetEffect(graph()->NewNode( \
- mcgraph()->machine()->Load(type), instance_node_.get(), \
+ mcgraph()->machine()->Load( \
+ assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type)), \
+ instance_node_.get(), \
mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), Effect(), \
Control()))
-#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
- SetEffect(graph()->NewNode( \
- mcgraph()->machine()->Load(MachineType::TaggedPointer()), array_node, \
- mcgraph()->Int32Constant(FixedArrayOffsetMinusTag(index)), Effect(), \
- Control()))
+#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
+ SetEffect(graph()->NewNode( \
+ mcgraph()->machine()->Load(MachineType::TaggedPointer()), base_pointer, \
+ mcgraph()->Int32Constant(byte_offset), Effect(), Control()))
-int FixedArrayOffsetMinusTag(uint32_t index) {
- auto access = AccessBuilder::ForFixedArraySlot(index);
- return access.offset - access.tag();
-}
-
-namespace {
+#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
+ LOAD_TAGGED_POINTER( \
+ array_node, wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index))
constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2;
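
LOAD_INSTANCE_FIELD now routes its machine type through assert_size, so a load whose width disagrees with the instance field's declared k##name##Offset..OffsetEnd span fails a DCHECK while the graph is built rather than silently miscompiling. The helper's behavior in isolation, with MachineType reduced to a byte width:

    #include <cassert>

    struct MachineType { int size_in_bytes; };

    MachineType assert_size(int expected_size, MachineType type) {
      assert(expected_size == type.size_in_bytes);  // DCHECK_EQ originally
      return type;  // pass-through, so it wraps the Load() argument inline
    }

    int main() {
      MachineType tagged{8};   // e.g. a tagged pointer on a 64-bit target
      assert_size(8, tagged);  // ok; a mismatch would trap in debug builds
    }
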
@@ -2001,7 +2010,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
}
Node* WasmGraphBuilder::GrowMemory(Node* input) {
- SetNeedsStackCheck();
+ needs_stack_check_ = true;
WasmGrowMemoryDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -2034,16 +2043,17 @@ uint32_t WasmGraphBuilder::GetExceptionEncodedSize(
return encoded_size;
}
-Node* WasmGraphBuilder::Throw(uint32_t tag,
+Node* WasmGraphBuilder::Throw(uint32_t exception_index,
const wasm::WasmException* exception,
const Vector<Node*> values) {
- SetNeedsStackCheck();
+ needs_stack_check_ = true;
uint32_t encoded_size = GetExceptionEncodedSize(exception);
Node* create_parameters[] = {
- BuildChangeUint31ToSmi(ConvertExceptionTagToRuntimeId(tag)),
+ LoadExceptionTagFromTable(exception_index),
BuildChangeUint31ToSmi(Uint32Constant(encoded_size))};
- BuildCallToRuntime(Runtime::kWasmThrowCreate, create_parameters,
- arraysize(create_parameters));
+ Node* except_obj =
+ BuildCallToRuntime(Runtime::kWasmThrowCreate, create_parameters,
+ arraysize(create_parameters));
uint32_t index = 0;
const wasm::WasmExceptionSig* sig = exception->sig;
MachineOperatorBuilder* m = mcgraph()->machine();
@@ -2054,7 +2064,7 @@ Node* WasmGraphBuilder::Throw(uint32_t tag,
value = graph()->NewNode(m->BitcastFloat32ToInt32(), value);
V8_FALLTHROUGH;
case wasm::kWasmI32:
- BuildEncodeException32BitValue(&index, value);
+ BuildEncodeException32BitValue(except_obj, &index, value);
break;
case wasm::kWasmF64:
value = graph()->NewNode(m->BitcastFloat64ToInt64(), value);
@@ -2063,9 +2073,9 @@ Node* WasmGraphBuilder::Throw(uint32_t tag,
Node* upper32 = graph()->NewNode(
m->TruncateInt64ToInt32(),
Binop(wasm::kExprI64ShrU, value, Int64Constant(32)));
- BuildEncodeException32BitValue(&index, upper32);
+ BuildEncodeException32BitValue(except_obj, &index, upper32);
Node* lower32 = graph()->NewNode(m->TruncateInt64ToInt32(), value);
- BuildEncodeException32BitValue(&index, lower32);
+ BuildEncodeException32BitValue(except_obj, &index, lower32);
break;
}
default:
@@ -2073,14 +2083,24 @@ Node* WasmGraphBuilder::Throw(uint32_t tag,
}
}
DCHECK_EQ(encoded_size, index);
- return BuildCallToRuntime(Runtime::kWasmThrow, nullptr, 0);
+ WasmThrowDescriptor interface_descriptor;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmThrow, RelocInfo::WASM_STUB_CALL);
+ return SetEffect(SetControl(
+ graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
+ except_obj, Effect(), Control())));
}
-void WasmGraphBuilder::BuildEncodeException32BitValue(uint32_t* index,
+void WasmGraphBuilder::BuildEncodeException32BitValue(Node* except_obj,
+ uint32_t* index,
Node* value) {
MachineOperatorBuilder* machine = mcgraph()->machine();
Node* upper_parameters[] = {
- BuildChangeUint31ToSmi(Int32Constant(*index)),
+ except_obj, BuildChangeUint31ToSmi(Int32Constant(*index)),
BuildChangeUint31ToSmi(
graph()->NewNode(machine->Word32Shr(), value, Int32Constant(16))),
};
@@ -2088,7 +2108,7 @@ void WasmGraphBuilder::BuildEncodeException32BitValue(uint32_t* index,
arraysize(upper_parameters));
++(*index);
Node* lower_parameters[] = {
- BuildChangeUint31ToSmi(Int32Constant(*index)),
+ except_obj, BuildChangeUint31ToSmi(Int32Constant(*index)),
BuildChangeUint31ToSmi(graph()->NewNode(machine->Word32And(), value,
Int32Constant(0xFFFFu))),
};
@@ -2109,26 +2129,40 @@ Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* const* values,
return value;
}
-Node* WasmGraphBuilder::Rethrow() {
- SetNeedsStackCheck();
- Node* result = BuildCallToRuntime(Runtime::kWasmThrow, nullptr, 0);
- return result;
+Node* WasmGraphBuilder::Rethrow(Node* except_obj) {
+ needs_stack_check_ = true;
+ WasmThrowDescriptor interface_descriptor;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmThrow, RelocInfo::WASM_STUB_CALL);
+ return SetEffect(SetControl(
+ graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
+ except_obj, Effect(), Control())));
}
-Node* WasmGraphBuilder::ConvertExceptionTagToRuntimeId(uint32_t tag) {
- // TODO(kschimpf): Handle exceptions from different modules, when they are
- // linked at runtime.
- return Uint32Constant(tag);
+Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
+ Node* expected_tag) {
+ MachineOperatorBuilder* machine = mcgraph()->machine();
+ return graph()->NewNode(machine->WordEqual(), caught_tag, expected_tag);
}
-Node* WasmGraphBuilder::GetExceptionRuntimeId() {
- SetNeedsStackCheck();
- return BuildChangeSmiToInt32(
- BuildCallToRuntime(Runtime::kWasmGetExceptionRuntimeId, nullptr, 0));
+Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
+ Node* exceptions_table =
+ LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer());
+ Node* tag = LOAD_FIXED_ARRAY_SLOT(exceptions_table, exception_index);
+ return tag;
+}
+
+Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
+ needs_stack_check_ = true;
+ return BuildCallToRuntime(Runtime::kWasmExceptionGetTag, &except_obj, 1);
}
Node** WasmGraphBuilder::GetExceptionValues(
- const wasm::WasmException* except_decl) {
+ Node* except_obj, const wasm::WasmException* except_decl) {
// TODO(kschimpf): We need to move this code to the function-body-decoder.cc
// in order to build landing-pad (exception) edges in case the runtime
// call causes an exception.
@@ -2137,7 +2171,8 @@ Node** WasmGraphBuilder::GetExceptionValues(
uint32_t encoded_size = GetExceptionEncodedSize(except_decl);
Node** values = Buffer(encoded_size);
for (uint32_t i = 0; i < encoded_size; ++i) {
- Node* parameters[] = {BuildChangeUint31ToSmi(Uint32Constant(i))};
+ Node* parameters[] = {except_obj,
+ BuildChangeUint31ToSmi(Uint32Constant(i))};
values[i] = BuildCallToRuntime(Runtime::kWasmExceptionGetElement,
parameters, arraysize(parameters));
}
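
BuildEncodeException32BitValue, which now also takes the freshly created except_obj, stores each 32-bit value as two 16-bit halves, upper half first, so every element of the exception's values array fits in a Smi (hence kBytesPerExceptionValuesArrayElement == 2); BuildDecodeException32BitValue reverses it. The round trip in plain C++:

    #include <cassert>
    #include <cstdint>

    void Encode32(uint32_t value, uint16_t out[2]) {
      out[0] = static_cast<uint16_t>(value >> 16);      // upper half first
      out[1] = static_cast<uint16_t>(value & 0xFFFFu);  // then lower half
    }

    uint32_t Decode32(const uint16_t in[2]) {
      return (static_cast<uint32_t>(in[0]) << 16) | in[1];
    }

    int main() {
      uint16_t halves[2];
      Encode32(0xDEADBEEFu, halves);
      assert(Decode32(halves) == 0xDEADBEEFu);
    }
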
@@ -2517,7 +2552,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
DCHECK_NOT_NULL(instance_node_);
instance_node = instance_node_.get();
}
- SetNeedsStackCheck();
+ needs_stack_check_ = true;
const size_t params = sig->parameter_count();
const size_t extra = 3; // instance_node, effect, and control.
const size_t count = 1 + params + extra;
@@ -2557,10 +2592,10 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
return call;
}
-Node* WasmGraphBuilder::BuildImportWasmCall(wasm::FunctionSig* sig, Node** args,
- Node*** rets,
- wasm::WasmCodePosition position,
- int func_index) {
+Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
+ Node*** rets,
+ wasm::WasmCodePosition position,
+ int func_index) {
// Load the instance from the imported_instances array at a known offset.
Node* imported_instances = LOAD_INSTANCE_FIELD(ImportedFunctionInstances,
MachineType::TaggedPointer());
@@ -2578,17 +2613,18 @@ Node* WasmGraphBuilder::BuildImportWasmCall(wasm::FunctionSig* sig, Node** args,
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
}
-Node* WasmGraphBuilder::BuildImportWasmCall(wasm::FunctionSig* sig, Node** args,
- Node*** rets,
- wasm::WasmCodePosition position,
- Node* func_index) {
+Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
+ Node*** rets,
+ wasm::WasmCodePosition position,
+ Node* func_index) {
// Load the instance from the imported_instances array.
Node* imported_instances = LOAD_INSTANCE_FIELD(ImportedFunctionInstances,
MachineType::TaggedPointer());
// Access fixed array at {header_size - tag + func_index * kPointerSize}.
- Node* imported_instances_data =
- graph()->NewNode(mcgraph()->machine()->IntAdd(), imported_instances,
- mcgraph()->IntPtrConstant(FixedArrayOffsetMinusTag(0)));
+ Node* imported_instances_data = graph()->NewNode(
+ mcgraph()->machine()->IntAdd(), imported_instances,
+ mcgraph()->IntPtrConstant(
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
Node* func_index_times_pointersize = graph()->NewNode(
mcgraph()->machine()->IntMul(), Uint32ToUintptr(func_index),
mcgraph()->Int32Constant(kPointerSize));
@@ -2616,7 +2652,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
if (env_ && index < env_->module->num_imported_functions) {
// Call to an imported function.
- return BuildImportWasmCall(sig, args, rets, position, index);
+ return BuildImportCall(sig, args, rets, position, index);
}
// A direct call to a wasm function defined in this module.
@@ -2687,11 +2723,11 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
SetEffect(graph()->NewNode(machine->Load(MachineType::Pointer()),
ift_targets, scaled_key, Effect(), Control()));
- auto access = AccessBuilder::ForFixedArrayElement();
Node* target_instance = SetEffect(graph()->NewNode(
machine->Load(MachineType::TaggedPointer()),
graph()->NewNode(machine->IntAdd(), ift_instances, scaled_key),
- Int32Constant(access.header_size - access.tag()), Effect(), Control()));
+ Int32Constant(wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)),
+ Effect(), Control()));
args[0] = target;
@@ -2792,11 +2828,8 @@ void WasmGraphBuilder::InitInstanceCache(
if (untrusted_code_mitigations_) {
// Load the memory mask.
- instance_cache->mem_mask = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::UintPtr()),
- instance_node_.get(),
- mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemoryMask)),
- Effect(), Control()));
+ instance_cache->mem_mask =
+ LOAD_INSTANCE_FIELD(MemoryMask, MachineType::UintPtr());
} else {
// Explicitly set to nullptr to ensure a SEGV when we try to use it.
instance_cache->mem_mask = nullptr;
@@ -2810,9 +2843,9 @@ void WasmGraphBuilder::PrepareInstanceCacheForLoop(
instance_cache->field, control);
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
- INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
+ INTRODUCE_PHI(mem_size, MachineType::PointerRepresentation());
if (untrusted_code_mitigations_) {
- INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
+ INTRODUCE_PHI(mem_mask, MachineType::PointerRepresentation());
}
#undef INTRODUCE_PHI
@@ -2958,6 +2991,14 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
return result;
}
+Node* WasmGraphBuilder::BuildLoadBuiltinFromInstance(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ Node* roots =
+ LOAD_INSTANCE_FIELD(RootsArrayAddress, MachineType::TaggedPointer());
+ return LOAD_TAGGED_POINTER(
+ roots, Heap::roots_to_builtins_offset() + builtin_index * kPointerSize);
+}
+
// Only call this function for code which is not reused across instantiations,
// as we do not patch the embedded js_context.
Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
@@ -3226,7 +3267,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
}
}
- if (FLAG_wasm_trace_memory) {
+ if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, memtype.representation(), index, offset,
position);
}
@@ -3271,7 +3312,7 @@ Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
SetEffect(store);
- if (FLAG_wasm_trace_memory) {
+ if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(true, mem_rep, index, offset, position);
}
@@ -4346,8 +4387,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildLoadFunctionDataFromExportedFunction(Node* closure) {
Node* shared = SetEffect(graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->Int32Constant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->Int32Constant(
+ wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()),
Effect(), Control()));
return SetEffect(graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), shared,
@@ -4437,8 +4478,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Load function index from {WasmExportedFunctionData}.
Node* function_index =
BuildLoadFunctionIndexFromExportedFunctionData(function_data);
- BuildImportWasmCall(sig_, args, &rets, wasm::kNoCodePosition,
- function_index);
+ BuildImportCall(sig_, args, &rets, wasm::kNoCodePosition, function_index);
} else {
// Call to a wasm function defined in this module.
// The call target is the jump table slot for that function.
@@ -4462,9 +4502,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Return(jsval);
}
- bool BuildWasmToJSWrapper(Handle<JSReceiver> target, int index) {
- DCHECK(target->IsCallable());
-
+ bool BuildWasmImportCallWrapper(WasmImportCallKind kind, int func_index) {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
@@ -4473,16 +4511,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Create the instance_node from the passed parameter.
instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
- Node* callables_node = LOAD_INSTANCE_FIELD(ImportedFunctionCallables,
- MachineType::TaggedPointer());
- Node* callable_node = LOAD_FIXED_ARRAY_SLOT(callables_node, index);
- Node* undefined_node =
- LOAD_INSTANCE_FIELD(UndefinedValue, MachineType::TaggedPointer());
Node* native_context =
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer());
- if (!wasm::IsJSCompatibleSignature(sig_)) {
- // Throw a TypeError.
+ if (kind == WasmImportCallKind::kRuntimeTypeError) {
+ // =======================================================================
+ // === Runtime TypeError =================================================
+ // =======================================================================
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
native_context, nullptr, 0);
// We don't need to return a value here, as the runtime call will not
@@ -4491,114 +4526,156 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return false;
}
- CallDescriptor* call_descriptor;
- Node** args = Buffer(wasm_count + 9);
+ Node* callables_node = LOAD_INSTANCE_FIELD(ImportedFunctionCallables,
+ MachineType::TaggedPointer());
+ Node* callable_node = LOAD_FIXED_ARRAY_SLOT(callables_node, func_index);
+ Node* undefined_node =
+ LOAD_INSTANCE_FIELD(UndefinedValue, MachineType::TaggedPointer());
+
Node* call = nullptr;
+ bool sloppy_receiver = true;
+
+ BuildModifyThreadInWasmFlag(false); // exiting WASM via call.
+
+ switch (kind) {
+ // =======================================================================
+ // === JS Functions with matching arity ==================================
+ // =======================================================================
+ case WasmImportCallKind::kJSFunctionArityMatch:
+ sloppy_receiver = false;
+ V8_FALLTHROUGH; // fallthru
+ case WasmImportCallKind::kJSFunctionArityMatchSloppy: {
+ Node** args = Buffer(wasm_count + 9);
+ int pos = 0;
+ Node* function_context = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::TaggedPointer()),
+ callable_node,
+ mcgraph()->Int32Constant(
+ wasm::ObjectAccess::ContextOffsetInTaggedJSFunction()),
+ Effect(), Control()));
+ args[pos++] = callable_node; // target callable.
+ // Receiver.
+ if (sloppy_receiver) {
+ Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+ native_context, Context::GLOBAL_PROXY_INDEX);
+ args[pos++] = global_proxy;
+ } else {
+ args[pos++] = undefined_node;
+ }
- BuildModifyThreadInWasmFlag(false);
+ auto call_descriptor = Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
- if (target->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(target);
- FieldAccess field_access = AccessBuilder::ForJSFunctionContext();
- Node* function_context = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::TaggedPointer()),
- callable_node,
- mcgraph()->Int32Constant(field_access.offset - field_access.tag()),
- Effect(), Control()));
-
- if (!IsClassConstructor(function->shared()->kind())) {
- if (function->shared()->internal_formal_parameter_count() ==
- wasm_count) {
- int pos = 0;
- args[pos++] = callable_node; // target callable.
- // Receiver.
- if (is_sloppy(function->shared()->language_mode()) &&
- !function->shared()->native()) {
- Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
- native_context, Context::GLOBAL_PROXY_INDEX);
- args[pos++] = global_proxy;
- } else {
- args[pos++] = undefined_node;
- }
-
- call_descriptor = Linkage::GetJSCallDescriptor(
- graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
-
- // Convert wasm numbers to JS values.
- pos = AddArgumentNodes(args, pos, wasm_count, sig_);
-
- args[pos++] = undefined_node; // new target
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
- args[pos++] = function_context;
- args[pos++] = Effect();
- args[pos++] = Control();
-
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- pos, args);
- } else if (function->shared()->internal_formal_parameter_count() >= 0) {
- int pos = 0;
- args[pos++] = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmArgumentsAdaptor, RelocInfo::WASM_STUB_CALL);
- args[pos++] = callable_node; // target callable
- args[pos++] = undefined_node; // new target
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
- args[pos++] = mcgraph()->Int32Constant(
- function->shared()->internal_formal_parameter_count());
- // Receiver.
- if (is_sloppy(function->shared()->language_mode()) &&
- !function->shared()->native()) {
- Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
- native_context, Context::GLOBAL_PROXY_INDEX);
- args[pos++] = global_proxy;
- } else {
- args[pos++] = undefined_node;
- }
-
- call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), ArgumentAdaptorDescriptor{}, 1 + wasm_count,
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- StubCallMode::kCallWasmRuntimeStub);
-
- // Convert wasm numbers to JS values.
- pos = AddArgumentNodes(args, pos, wasm_count, sig_);
- args[pos++] = function_context;
- args[pos++] = Effect();
- args[pos++] = Control();
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- pos, args);
- }
+ // Convert wasm numbers to JS values.
+ pos = AddArgumentNodes(args, pos, wasm_count, sig_);
+
+ args[pos++] = undefined_node; // new target
+ args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = function_context;
+ args[pos++] = Effect();
+ args[pos++] = Control();
+
+ call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
+ args);
+ break;
}
- }
+ // =======================================================================
+ // === JS Functions with arguments adapter ===============================
+ // =======================================================================
+ case WasmImportCallKind::kJSFunctionArityMismatch:
+ sloppy_receiver = false;
+        V8_FALLTHROUGH;  // fall through
+ case WasmImportCallKind::kJSFunctionArityMismatchSloppy: {
+ Node** args = Buffer(wasm_count + 9);
+ int pos = 0;
+ Node* function_context = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::TaggedPointer()),
+ callable_node,
+ mcgraph()->Int32Constant(
+ wasm::ObjectAccess::ContextOffsetInTaggedJSFunction()),
+ Effect(), Control()));
+ args[pos++] =
+ BuildLoadBuiltinFromInstance(Builtins::kArgumentsAdaptorTrampoline);
+ args[pos++] = callable_node; // target callable
+ args[pos++] = undefined_node; // new target
+ args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+
+ // Load shared function info, and then the formal parameter count.
+ Node* shared_function_info = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::TaggedPointer()),
+ callable_node,
+ mcgraph()->Int32Constant(
+ wasm::ObjectAccess::
+ SharedFunctionInfoOffsetInTaggedJSFunction()),
+ Effect(), Control()));
+ Node* formal_param_count = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Uint16()),
+ shared_function_info,
+ mcgraph()->Int32Constant(
+ wasm::ObjectAccess::
+ FormalParameterCountOffsetInSharedFunctionInfo()),
+ Effect(), Control()));
+ args[pos++] = formal_param_count;
+
+ // Receiver.
+ if (sloppy_receiver) {
+ Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+ native_context, Context::GLOBAL_PROXY_INDEX);
+ args[pos++] = global_proxy;
+ } else {
+ args[pos++] = undefined_node;
+ }
- // We cannot call the target directly, we have to use the Call builtin.
- if (!call) {
- int pos = 0;
- args[pos++] = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmCallJavaScript, RelocInfo::WASM_STUB_CALL);
- args[pos++] = callable_node;
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
- args[pos++] = undefined_node; // receiver
-
- call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1,
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- StubCallMode::kCallWasmRuntimeStub);
-
- // Convert wasm numbers to JS values.
- pos = AddArgumentNodes(args, pos, wasm_count, sig_);
-
- // The native_context is sufficient here, because all kind of callables
- // which depend on the context provide their own context. The context here
- // is only needed if the target is a constructor to throw a TypeError, if
- // the target is a native function, or if the target is a callable
- // JSObject, which can only be constructed by the runtime.
- args[pos++] = native_context;
- args[pos++] = Effect();
- args[pos++] = Control();
-
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), ArgumentsAdaptorDescriptor{}, 1 + wasm_count,
+ CallDescriptor::kNoFlags, Operator::kNoProperties);
+
+ // Convert wasm numbers to JS values.
+ pos = AddArgumentNodes(args, pos, wasm_count, sig_);
+ args[pos++] = function_context;
+ args[pos++] = Effect();
+ args[pos++] = Control();
+ call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
+ args);
+ break;
+ }
+ // =======================================================================
+ // === General case of unknown callable ==================================
+ // =======================================================================
+ case WasmImportCallKind::kUseCallBuiltin: {
+ Node** args = Buffer(wasm_count + 9);
+ int pos = 0;
+ args[pos++] = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmCallJavaScript, RelocInfo::WASM_STUB_CALL);
+ args[pos++] = callable_node;
+ args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = undefined_node; // receiver
+
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ StubCallMode::kCallWasmRuntimeStub);
+
+ // Convert wasm numbers to JS values.
+ pos = AddArgumentNodes(args, pos, wasm_count, sig_);
+
+      // The native_context is sufficient here, because all kinds of
+      // callables which depend on the context provide their own context.
+      // The context here is only needed if the target is a constructor to
+      // throw a TypeError, if the target is a native function, or if the
+      // target is a callable JSObject, which can only be constructed by the
+      // runtime.
+ args[pos++] = native_context;
+ args[pos++] = Effect();
+ args[pos++] = Control();
+
+ call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
+ args);
+ break;
+ }
+ default:
+ UNREACHABLE();
}
+ DCHECK_NOT_NULL(call);
SetEffect(call);
SetSourcePosition(call, 0);
@@ -4608,7 +4685,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
? mcgraph()->Int32Constant(0)
: FromJS(call, native_context, sig_->GetReturn());
- BuildModifyThreadInWasmFlag(true);
+ BuildModifyThreadInWasmFlag(true); // reentering WASM upon return.
Return(val);
return true;
@@ -4661,9 +4738,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// We are passing the raw arg_buffer here. To the GC and other parts, it
// looks like a Smi (lowest bit not set). In the runtime function however,
// don't call Smi::value on it, but just cast it to a byte pointer.
- Node* parameters[] = {
- jsgraph()->SmiConstant(func_index), arg_buffer,
- };
+ Node* parameters[] = {jsgraph()->SmiConstant(func_index), arg_buffer};
BuildCallToRuntime(Runtime::kWasmRunInterpreter, parameters,
arraysize(parameters));
@@ -4689,12 +4764,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 5)));
// Create parameter nodes (offset by 1 for the receiver parameter).
- Node* foreign_code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
- MachineOperatorBuilder* machine = mcgraph()->machine();
- Node* code_obj = graph()->NewNode(
- machine->Load(MachineType::Pointer()), foreign_code_obj,
- Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag),
- Effect(), Control());
+ Node* code_entry = Param(CWasmEntryParameters::kCodeEntry + 1);
Node* instance_node = Param(CWasmEntryParameters::kWasmInstance + 1);
Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
@@ -4703,7 +4773,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node** args = Buffer(arg_count);
int pos = 0;
- args[pos++] = code_obj;
+ args[pos++] = code_entry;
args[pos++] = instance_node;
int offset = 0;
@@ -4759,16 +4829,30 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
StubCallMode stub_mode_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
};
+
+void AppendSignature(char* buffer, size_t max_name_len,
+ wasm::FunctionSig* sig) {
+ size_t name_len = strlen(buffer);
+ auto append_name_char = [&](char c) {
+ if (name_len + 1 < max_name_len) buffer[name_len++] = c;
+ };
+ for (wasm::ValueType t : sig->parameters()) {
+ append_name_char(wasm::ValueTypes::ShortNameOf(t));
+ }
+ append_name_char(':');
+ for (wasm::ValueType t : sig->returns()) {
+ append_name_char(wasm::ValueTypes::ShortNameOf(t));
+ }
+ buffer[name_len] = '\0';
+}
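
Editor's note: the helper above factors out the signature-encoding loop that
previously lived inline in CompileCWasmEntry (deleted in a later hunk of this
patch). As a rough standalone sketch of the resulting names — the one-character
type names below are assumptions standing in for wasm::ValueTypes::ShortNameOf:

    #include <string>

    // Hypothetical short type names ('i' = i32, 'l' = i64, 'f' = f32,
    // 'd' = f64); the real mapping lives in wasm::ValueTypes.
    std::string DebugName(const std::string& prefix, const std::string& params,
                          const std::string& returns) {
      std::string name = prefix;        // e.g. "js_to_wasm:" or "c-wasm-entry:"
      for (char t : params) name += t;  // one character per parameter type
      name += ':';
      for (char t : returns) name += t;  // one character per return type
      return name;  // an (i32, i32) -> i32 signature yields "js_to_wasm:ii:i"
    }
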
+
} // namespace
-MaybeHandle<Code> CompileJSToWasmWrapper(
- Isolate* isolate, const wasm::NativeModule* native_module,
- wasm::FunctionSig* sig, bool is_import,
- wasm::UseTrapHandler use_trap_handler) {
+MaybeHandle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+ wasm::FunctionSig* sig,
+ bool is_import) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"CompileJSToWasmWrapper");
- const wasm::WasmModule* module = native_module->module();
-
//----------------------------------------------------------------------------
// Create the Graph.
//----------------------------------------------------------------------------
@@ -4784,7 +4868,8 @@ MaybeHandle<Code> CompileJSToWasmWrapper(
Node* control = nullptr;
Node* effect = nullptr;
- wasm::ModuleEnv env(module, use_trap_handler, wasm::kRuntimeExceptionSupport);
+ wasm::ModuleEnv env(nullptr, wasm::kNoTrapHandler,
+ wasm::kRuntimeExceptionSupport);
WasmWrapperGraphBuilder builder(&zone, &env, &jsgraph, sig, nullptr,
StubCallMode::kCallOnHeapBuiltin);
builder.set_control_ptr(&control);
@@ -4794,28 +4879,18 @@ MaybeHandle<Code> CompileJSToWasmWrapper(
//----------------------------------------------------------------------------
// Run the compilation pipeline.
//----------------------------------------------------------------------------
-#ifdef DEBUG
- EmbeddedVector<char, 32> func_name;
- static unsigned id = 0;
- func_name.Truncate(SNPrintF(func_name, "js-to-wasm#%d", id++));
-#else
- Vector<const char> func_name = CStrVector("js-to-wasm");
-#endif
-
- OptimizedCompilationInfo info(func_name, &zone, Code::JS_TO_WASM_FUNCTION);
-
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- StdoutStream{} << "-- Graph after change lowering -- " << std::endl
- << AsRPO(graph);
- }
+ static constexpr size_t kMaxNameLen = 128;
+ char debug_name[kMaxNameLen] = "js_to_wasm:";
+ AppendSignature(debug_name, kMaxNameLen, sig);
// Schedule and compile to machine code.
int params = static_cast<int>(sig->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
&zone, false, params + 1, CallDescriptor::kNoFlags);
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForTesting(
- &info, isolate, incoming, &graph, WasmAssemblerOptions());
+ MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmStub(
+ isolate, incoming, &graph, Code::JS_TO_WASM_FUNCTION, debug_name,
+ WasmAssemblerOptions());
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
return maybe_code;
@@ -4824,24 +4899,67 @@ MaybeHandle<Code> CompileJSToWasmWrapper(
if (FLAG_print_opt_code) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
OFStream os(tracing_scope.file());
- code->Disassemble(func_name.start(), os);
+ code->Disassemble(debug_name, os);
}
#endif
if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
- "%.*s", func_name.length(), func_name.start());
+ RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code, "%s",
+ debug_name);
}
return code;
}
-MaybeHandle<Code> CompileWasmToJSWrapper(
- Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
+WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
+ wasm::FunctionSig* expected_sig) {
+ if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
+ auto imported_function = WasmExportedFunction::cast(*target);
+ wasm::FunctionSig* imported_sig =
+ imported_function->instance()
+ ->module()
+ ->functions[imported_function->function_index()]
+ .sig;
+ if (*imported_sig != *expected_sig) {
+ return WasmImportCallKind::kLinkError;
+ }
+ return WasmImportCallKind::kWasmToWasm;
+ }
+  // Assuming we are calling into JS, check whether this would be a runtime
+  // error.
+ if (!wasm::IsJSCompatibleSignature(expected_sig)) {
+ return WasmImportCallKind::kRuntimeTypeError;
+ }
+ // For JavaScript calls, determine whether the target has an arity match
+ // and whether it has a sloppy receiver.
+ if (target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(target);
+ if (IsClassConstructor(function->shared()->kind())) {
+      // A class constructor will throw anyway.
+ return WasmImportCallKind::kUseCallBuiltin;
+ }
+ bool sloppy = is_sloppy(function->shared()->language_mode()) &&
+ !function->shared()->native();
+ if (function->shared()->internal_formal_parameter_count() ==
+ expected_sig->parameter_count()) {
+ return sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
+ : WasmImportCallKind::kJSFunctionArityMatch;
+ }
+ return sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy
+ : WasmImportCallKind::kJSFunctionArityMismatch;
+ }
+ // Unknown case. Use the call builtin.
+ return WasmImportCallKind::kUseCallBuiltin;
+}
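
Editor's note: for orientation, here is a sketch of how an instantiation path
might combine this classifier with the wrapper compiler declared below. The
surrounding error handling is an assumption for illustration, not code from
this patch:

    // Sketch only: kLinkError/kWasmToWasm are handled by the caller; every
    // other kind is passed through to CompileWasmImportCallWrapper.
    MaybeHandle<Code> CompileImport(Isolate* isolate,
                                    Handle<JSReceiver> callable,
                                    wasm::FunctionSig* sig, uint32_t index,
                                    wasm::ModuleOrigin origin,
                                    wasm::UseTrapHandler trap_handler) {
      WasmImportCallKind kind = GetWasmImportCallKind(callable, sig);
      switch (kind) {
        case WasmImportCallKind::kLinkError:
          return {};  // caller reports a LinkError; no wrapper is compiled
        case WasmImportCallKind::kWasmToWasm:
          return {};  // direct wasm->wasm call; no wrapper needed either
        default:
          return CompileWasmImportCallWrapper(isolate, kind, sig, index,
                                              origin, trap_handler);
      }
    }
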
+
+MaybeHandle<Code> CompileWasmImportCallWrapper(
+ Isolate* isolate, WasmImportCallKind kind, wasm::FunctionSig* sig,
uint32_t index, wasm::ModuleOrigin origin,
wasm::UseTrapHandler use_trap_handler) {
+ DCHECK_NE(WasmImportCallKind::kLinkError, kind);
+ DCHECK_NE(WasmImportCallKind::kWasmToWasm, kind);
+
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "CompileWasmToJSWrapper");
+ "CompileWasmImportCallWrapper");
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -4869,31 +4987,19 @@ MaybeHandle<Code> CompileWasmToJSWrapper(
StubCallMode::kCallWasmRuntimeStub);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildWasmToJSWrapper(target, index);
+ builder.BuildWasmImportCallWrapper(kind, index);
-#ifdef DEBUG
EmbeddedVector<char, 32> func_name;
- static unsigned id = 0;
- func_name.Truncate(SNPrintF(func_name, "wasm-to-js#%d", id++));
-#else
- Vector<const char> func_name = CStrVector("wasm-to-js");
-#endif
-
- OptimizedCompilationInfo info(func_name, &zone, Code::WASM_TO_JS_FUNCTION);
-
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- StdoutStream{} << "-- Graph after change lowering -- " << std::endl
- << AsRPO(graph);
- }
+ func_name.Truncate(SNPrintF(func_name, "wasm-to-js#%d", index));
// Schedule and compile to machine code.
CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
if (machine.Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForTesting(
- &info, isolate, incoming, &graph, AssemblerOptions::Default(isolate),
- nullptr, source_position_table);
+ MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmStub(
+ isolate, incoming, &graph, Code::WASM_TO_JS_FUNCTION, func_name.start(),
+ AssemblerOptions::Default(isolate), source_position_table);
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
return maybe_code;
@@ -4943,24 +5049,14 @@ MaybeHandle<Code> CompileWasmInterpreterEntry(Isolate* isolate,
if (machine.Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
-#ifdef DEBUG
+
EmbeddedVector<char, 32> func_name;
func_name.Truncate(
SNPrintF(func_name, "wasm-interpreter-entry#%d", func_index));
-#else
- Vector<const char> func_name = CStrVector("wasm-interpreter-entry");
-#endif
-
- OptimizedCompilationInfo info(func_name, &zone, Code::WASM_INTERPRETER_ENTRY);
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- StdoutStream{} << "-- Wasm interpreter entry graph -- " << std::endl
- << AsRPO(graph);
- }
-
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForTesting(
- &info, isolate, incoming, &graph, AssemblerOptions::Default(isolate),
- nullptr);
+ MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmStub(
+ isolate, incoming, &graph, Code::WASM_INTERPRETER_ENTRY,
+ func_name.start(), AssemblerOptions::Default(isolate));
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
return maybe_code;
@@ -5008,28 +5104,11 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
// Build a name in the form "c-wasm-entry:<params>:<returns>".
static constexpr size_t kMaxNameLen = 128;
char debug_name[kMaxNameLen] = "c-wasm-entry:";
- size_t name_len = strlen(debug_name);
- auto append_name_char = [&](char c) {
- if (name_len + 1 < kMaxNameLen) debug_name[name_len++] = c;
- };
- for (wasm::ValueType t : sig->parameters()) {
- append_name_char(wasm::ValueTypes::ShortNameOf(t));
- }
- append_name_char(':');
- for (wasm::ValueType t : sig->returns()) {
- append_name_char(wasm::ValueTypes::ShortNameOf(t));
- }
- debug_name[name_len] = '\0';
- Vector<const char> debug_name_vec(debug_name, name_len);
-
- OptimizedCompilationInfo info(debug_name_vec, &zone, Code::C_WASM_ENTRY);
-
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- StdoutStream{} << "-- C Wasm entry graph -- " << std::endl << AsRPO(graph);
- }
+ AppendSignature(debug_name, kMaxNameLen, sig);
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForTesting(
- &info, isolate, incoming, &graph, AssemblerOptions::Default(isolate));
+ MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmStub(
+ isolate, incoming, &graph, Code::C_WASM_ENTRY, debug_name,
+ AssemblerOptions::Default(isolate));
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
return maybe_code;
@@ -5100,23 +5179,17 @@ SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
}
namespace {
-Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
- if (!name.is_empty()) {
- return name;
- }
-#ifdef DEBUG
- constexpr int kBufferLength = 15;
+Vector<const char> GetDebugName(Zone* zone, int index) {
+ // TODO(herhut): Use name from module if available.
+ constexpr int kBufferLength = 24;
EmbeddedVector<char, kBufferLength> name_vector;
- int name_len = SNPrintF(name_vector, "wasm#%d", index);
+ int name_len = SNPrintF(name_vector, "wasm-function#%d", index);
DCHECK(name_len > 0 && name_len < name_vector.length());
char* index_name = zone->NewArray<char>(name_len);
memcpy(index_name, name_vector.start(), name_len);
return Vector<const char>(index_name, name_len);
-#else
- return {};
-#endif
}
} // namespace
@@ -5142,13 +5215,17 @@ void TurbofanWasmCompilationUnit::ExecuteCompilation(
Zone compilation_zone(wasm_unit_->wasm_engine_->allocator(), ZONE_NAME);
OptimizedCompilationInfo info(
- GetDebugName(&compilation_zone, wasm_unit_->func_name_,
- wasm_unit_->func_index_),
+ GetDebugName(&compilation_zone, wasm_unit_->func_index_),
&compilation_zone, Code::WASM_FUNCTION);
if (wasm_unit_->env_->runtime_exception_support) {
info.SetWasmRuntimeExceptionSupport();
}
+ if (info.trace_turbo_json_enabled()) {
+ TurboCfgFile tcf;
+ tcf << AsC1VCompilation(&info);
+ }
+
NodeOriginTable* node_origins = info.trace_turbo_json_enabled()
? new (&graph_zone)
NodeOriginTable(mcgraph->graph())
@@ -5209,16 +5286,22 @@ wasm::WasmCode* TurbofanWasmCompilationUnit::FinishCompilation(
wasm::ErrorThrower* thrower) {
if (!ok_) {
if (graph_construction_result_.failed()) {
- // Add the function as another context for the exception.
+ // Add the function as another context for the exception. This is
+ // user-visible, so use official format.
EmbeddedVector<char, 128> message;
- if (wasm_unit_->func_name_.start() == nullptr) {
- SNPrintF(message, "Compiling wasm function #%d failed",
- wasm_unit_->func_index_);
+ wasm::ModuleWireBytes wire_bytes(
+ wasm_unit_->native_module()->wire_bytes());
+ wasm::WireBytesRef name_ref =
+ wasm_unit_->native_module()->module()->LookupFunctionName(
+ wire_bytes, wasm_unit_->func_index_);
+ if (name_ref.is_set()) {
+ wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
+ SNPrintF(message, "Compiling wasm function \"%.*s\" failed",
+ name.length(), name.start());
} else {
- wasm::TruncatedUserString<> trunc_name(wasm_unit_->func_name_);
- SNPrintF(message, "Compiling wasm function #%d:%.*s failed",
- wasm_unit_->func_index_, trunc_name.length(),
- trunc_name.start());
+ SNPrintF(message,
+ "Compiling wasm function \"wasm-function[%d]\" failed",
+ wasm_unit_->func_index_);
}
thrower->CompileFailed(message.start(), graph_construction_result_);
}
@@ -5416,8 +5499,10 @@ AssemblerOptions WasmAssemblerOptions() {
#undef WASM_64
#undef FATAL_UNSUPPORTED_OPCODE
+#undef WASM_INSTANCE_OBJECT_SIZE
#undef WASM_INSTANCE_OBJECT_OFFSET
#undef LOAD_INSTANCE_FIELD
+#undef LOAD_TAGGED_POINTER
#undef LOAD_FIXED_ARRAY_SLOT
} // namespace compiler
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 775c817242..f1f341c9af 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -68,31 +68,43 @@ class TurbofanWasmCompilationUnit {
DISALLOW_COPY_AND_ASSIGN(TurbofanWasmCompilationUnit);
};
-// Wraps a JS function, producing a code object that can be called from wasm.
-MaybeHandle<Code> CompileWasmToJSWrapper(Isolate*, Handle<JSReceiver> target,
- wasm::FunctionSig*, uint32_t index,
- wasm::ModuleOrigin,
- wasm::UseTrapHandler);
+// Calls to WASM imports are handled in several different ways, depending
+// on the type of the target function/callable and whether the target's
+// arity matches the signature.
+enum class WasmImportCallKind {
+ kLinkError, // static WASM->WASM type error
+ kRuntimeTypeError, // runtime WASM->JS type error
+ kWasmToWasm, // fast WASM->WASM call
+ kJSFunctionArityMatch, // fast WASM->JS call
+ kJSFunctionArityMatchSloppy, // fast WASM->JS call, sloppy receiver
+ kJSFunctionArityMismatch, // WASM->JS, needs adapter frame
+ kJSFunctionArityMismatchSloppy, // WASM->JS, needs adapter frame, sloppy
+ kUseCallBuiltin // everything else
+};
+
+WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> callable,
+ wasm::FunctionSig* sig);
+
+// Compiles an import call wrapper, which allows WASM to call imports.
+MaybeHandle<Code> CompileWasmImportCallWrapper(Isolate*, WasmImportCallKind,
+ wasm::FunctionSig*,
+ uint32_t index,
+ wasm::ModuleOrigin,
+ wasm::UseTrapHandler);
// Creates a code object calling a wasm function with the given signature,
// callable from JS.
-// TODO(clemensh): Remove the {UseTrapHandler} parameter to make js-to-wasm
-// wrappers sharable across instances.
-V8_EXPORT_PRIVATE MaybeHandle<Code> CompileJSToWasmWrapper(
- Isolate*, const wasm::NativeModule*, wasm::FunctionSig*, bool is_import,
- wasm::UseTrapHandler);
+V8_EXPORT_PRIVATE MaybeHandle<Code> CompileJSToWasmWrapper(Isolate*,
+ wasm::FunctionSig*,
+ bool is_import);
// Compiles a stub that redirects a call to a wasm function to the wasm
// interpreter. It's ABI compatible with the compiled wasm function.
MaybeHandle<Code> CompileWasmInterpreterEntry(Isolate*, uint32_t func_index,
wasm::FunctionSig*);
-// Helper function to get the offset into a fixed array for a given {index}.
-// TODO(titzer): access-builder.h is not accessible outside compiler. Move?
-int FixedArrayOffsetMinusTag(uint32_t index);
-
enum CWasmEntryParameters {
- kCodeObject,
+ kCodeEntry,
kWasmInstance,
kArgumentsBuffer,
// marker:
@@ -164,12 +176,14 @@ class WasmGraphBuilder {
Node* Unop(wasm::WasmOpcode opcode, Node* input,
wasm::WasmCodePosition position = wasm::kNoCodePosition);
Node* GrowMemory(Node* input);
- Node* Throw(uint32_t tag, const wasm::WasmException* exception,
+ Node* Throw(uint32_t exception_index, const wasm::WasmException* exception,
const Vector<Node*> values);
- Node* Rethrow();
- Node* ConvertExceptionTagToRuntimeId(uint32_t tag);
- Node* GetExceptionRuntimeId();
- Node** GetExceptionValues(const wasm::WasmException* except_decl);
+ Node* Rethrow(Node* except_obj);
+ Node* ExceptionTagEqual(Node* caught_tag, Node* expected_tag);
+ Node* LoadExceptionTagFromTable(uint32_t exception_index);
+ Node* GetExceptionTag(Node* except_obj);
+ Node** GetExceptionValues(Node* except_obj,
+ const wasm::WasmException* except_decl);
bool IsPhiWithMerge(Node* phi, Node* merge);
bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
void AppendToMerge(Node* merge, Node* from);
@@ -370,10 +384,10 @@ class WasmGraphBuilder {
Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
wasm::WasmCodePosition position, Node* instance_node,
UseRetpoline use_retpoline);
- Node* BuildImportWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position, int func_index);
- Node* BuildImportWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position, Node* func_index);
+ Node* BuildImportCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
+ wasm::WasmCodePosition position, int func_index);
+ Node* BuildImportCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
+ wasm::WasmCodePosition position, Node* func_index);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
@@ -436,8 +450,6 @@ class WasmGraphBuilder {
Node* BuildSmiShiftBitsConstant();
Node* BuildChangeSmiToInt32(Node* value);
- Node* BuildLoadInstanceFromExportedFunction(Node* closure);
-
// Asm.js specific functionality.
Node* BuildI32AsmjsSConvertF32(Node* input);
Node* BuildI32AsmjsSConvertF64(Node* input);
@@ -451,7 +463,8 @@ class WasmGraphBuilder {
Node* BuildAsmjsStoreMem(MachineType type, Node* index, Node* val);
uint32_t GetExceptionEncodedSize(const wasm::WasmException* exception) const;
- void BuildEncodeException32BitValue(uint32_t* index, Node* value);
+ void BuildEncodeException32BitValue(Node* except_obj, uint32_t* index,
+ Node* value);
Node* BuildDecodeException32BitValue(Node* const* values, uint32_t* index);
Node** Realloc(Node* const* buffer, size_t old_count, size_t new_count) {
@@ -460,7 +473,7 @@ class WasmGraphBuilder {
return buf;
}
- void SetNeedsStackCheck() { needs_stack_check_ = true; }
+ Node* BuildLoadBuiltinFromInstance(int builtin_index);
//-----------------------------------------------------------------------
// Operations involving the CEntry, a dependency we want to remove
@@ -471,10 +484,6 @@ class WasmGraphBuilder {
Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, Node* js_context,
Node** parameters, int parameter_count);
- Node* BuildCallToRuntimeWithContextFromJS(Runtime::FunctionId f,
- Node* js_context,
- Node* const* parameters,
- int parameter_count);
TrapId GetTrapIdForTrap(wasm::TrapReason reason);
};
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 2ccb56907d..178d2b33b9 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -353,7 +353,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} // namespace
-
#define ASSEMBLE_UNOP(asm_instr) \
do { \
if (instr->Output()->IsRegister()) { \
@@ -361,7 +360,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} else { \
__ asm_instr(i.OutputOperand()); \
} \
- } while (0)
+ } while (false)
#define ASSEMBLE_BINOP(asm_instr) \
do { \
@@ -384,7 +383,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} \
} \
- } while (0)
+ } while (false)
#define ASSEMBLE_COMPARE(asm_instr) \
do { \
@@ -411,7 +410,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} \
} \
- } while (0)
+ } while (false)
#define ASSEMBLE_MULT(asm_instr) \
do { \
@@ -430,8 +429,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ asm_instr(i.OutputRegister(), i.InputOperand(1)); \
} \
} \
- } while (0)
-
+ } while (false)
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
@@ -448,8 +446,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ asm_instr##_cl(i.OutputOperand()); \
} \
} \
- } while (0)
-
+ } while (false)
#define ASSEMBLE_MOVX(asm_instr) \
do { \
@@ -460,7 +457,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} else { \
__ asm_instr(i.OutputRegister(), i.InputOperand(0)); \
} \
- } while (0)
+ } while (false)
#define ASSEMBLE_SSE_BINOP(asm_instr) \
do { \
@@ -469,7 +466,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} else { \
__ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \
} \
- } while (0)
+ } while (false)
#define ASSEMBLE_SSE_UNOP(asm_instr) \
do { \
@@ -478,7 +475,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} else { \
__ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0)); \
} \
- } while (0)
+ } while (false)
#define ASSEMBLE_AVX_BINOP(asm_instr) \
do { \
@@ -490,7 +487,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
i.InputOperand(1)); \
} \
- } while (0)
+ } while (false)
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
@@ -2140,6 +2137,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ insertps(i.OutputSimd128Register(), i.InputDoubleRegister(2), select);
break;
}
+ case kX64F32x4SConvertI32x4: {
+ __ cvtdq2ps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F32x4UConvertI32x4: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ DCHECK_NE(i.OutputSimd128Register(), kScratchDoubleReg);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg); // zeros
+ __ pblendw(kScratchDoubleReg, dst, 0x55); // get lo 16 bits
+ __ psubd(dst, kScratchDoubleReg); // get hi 16 bits
+ __ cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // convert lo exactly
+ __ psrld(dst, 1); // divide by 2 to get in unsigned range
+ __ cvtdq2ps(dst, dst); // convert hi exactly
+ __ addps(dst, dst); // double hi, exactly
+ __ addps(dst, kScratchDoubleReg); // add hi and lo, may round.
+ break;
+ }
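
Editor's note: the uint32 -> float32 sequence above works because each dword is
split into a low 16-bit half, which converts exactly, and a high half whose low
bit is clear, so halving it before the signed conversion loses nothing. A
scalar model of one lane (an illustration, not V8 code):

    #include <cstdint>

    float UintToFloat(uint32_t x) {
      uint32_t lo = x & 0xFFFFu;                 // pblendw: low 16 bits, exact
      uint32_t hi = x - lo;                      // psubd: high bits, bit 0 clear
      float f_hi = static_cast<float>(hi >> 1);  // psrld + cvtdq2ps: still exact
      return f_hi + f_hi + static_cast<float>(lo);  // only this add may round
    }
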
case kX64F32x4Abs: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
@@ -2248,6 +2264,36 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I32x4SConvertF32x4: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister dst = i.OutputSimd128Register();
+    // NaN -> 0
+ __ movaps(kScratchDoubleReg, dst);
+ __ cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
+ __ pand(dst, kScratchDoubleReg);
+ // Set top bit if >= 0 (but not -0.0!)
+ __ pxor(kScratchDoubleReg, dst);
+ // Convert
+ __ cvttps2dq(dst, dst);
+ // Set top bit if >=0 is now < 0
+ __ pand(kScratchDoubleReg, dst);
+ __ psrad(kScratchDoubleReg, 31);
+ // Set positive overflow lanes to 0x7FFFFFFF
+ __ pxor(dst, kScratchDoubleReg);
+ break;
+ }
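
Editor's note: cvttps2dq returns 0x80000000 for NaN and for lanes outside the
int32 range, so the fixups above only need to zero the NaN lanes and flip
positive-overflow lanes to 0x7FFFFFFF. A scalar model of the intended lane
semantics (illustrative, not the emitted code):

    #include <cstdint>

    int32_t SatF32ToI32(float f) {
      if (f != f) return 0;                       // NaN -> 0
      if (f >= 2147483648.0f) return 0x7FFFFFFF;  // positive overflow saturates
      if (f < -2147483648.0f)
        return -0x7FFFFFFF - 1;  // cvttps2dq already yields INT32_MIN here
      return static_cast<int32_t>(f);
    }
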
+ case kX64I32x4SConvertI16x8Low: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pmovsxwd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64I32x4SConvertI16x8High: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ palignr(dst, i.InputSimd128Register(0), 8);
+ __ pmovsxwd(dst, dst);
+ break;
+ }
case kX64I32x4Neg: {
CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
@@ -2319,6 +2365,46 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pcmpeqd(dst, src);
break;
}
+ case kX64I32x4UConvertF32x4: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+    // NaN -> 0, negative -> 0
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ __ maxps(dst, kScratchDoubleReg);
+ // scratch: float representation of max_signed
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrld(kScratchDoubleReg, 1); // 0x7fffffff
+ __ cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // 0x4f000000
+ // tmp: convert (src-max_signed).
+ // Positive overflow lanes -> 0x7FFFFFFF
+ // Negative lanes -> 0
+ __ movaps(tmp, dst);
+ __ subps(tmp, kScratchDoubleReg);
+ __ cmpleps(kScratchDoubleReg, tmp);
+ __ cvttps2dq(tmp, tmp);
+ __ pxor(tmp, kScratchDoubleReg);
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ __ pmaxsd(tmp, kScratchDoubleReg);
+ // convert. Overflow lanes above max_signed will be 0x80000000
+ __ cvttps2dq(dst, dst);
+ // Add (src-max_signed) for overflow lanes.
+ __ paddd(dst, tmp);
+ break;
+ }
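
Editor's note: lanes at or above 2^31 exceed cvttps2dq's signed range, so the
code converts the excess over max_signed separately (in tmp) and adds it back
with paddd; positive overflow then lands on 0xFFFFFFFF. A scalar model of one
lane (illustrative only):

    #include <cstdint>

    uint32_t SatF32ToU32(float f) {
      if (!(f > 0.0f)) return 0;                  // maxps: NaN, negatives -> 0
      if (f >= 4294967296.0f) return 0xFFFFFFFFu;  // 0x80000000 + 0x7FFFFFFF
      if (f >= 2147483648.0f)                      // beyond the signed range:
        return 0x80000000u + static_cast<uint32_t>(f - 2147483648.0f);
      return static_cast<uint32_t>(f);             // plain cvttps2dq path
    }
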
+ case kX64I32x4UConvertI16x8Low: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pmovzxwd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64I32x4UConvertI16x8High: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ palignr(dst, i.InputSimd128Register(0), 8);
+ __ pmovzxwd(dst, dst);
+ break;
+ }
case kX64I32x4ShrU: {
__ psrld(i.OutputSimd128Register(), i.InputInt8(1));
break;
@@ -2380,6 +2466,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I16x8SConvertI8x16Low: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pmovsxbw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64I16x8SConvertI8x16High: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ palignr(dst, i.InputSimd128Register(0), 8);
+ __ pmovsxbw(dst, dst);
+ break;
+ }
case kX64I16x8Neg: {
CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
@@ -2401,6 +2499,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ psraw(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
+ case kX64I16x8SConvertI32x4: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64I16x8Add: {
__ paddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2459,10 +2562,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pcmpeqw(dst, src);
break;
}
+ case kX64I16x8UConvertI8x16Low: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pmovzxbw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64I16x8UConvertI8x16High: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ palignr(dst, i.InputSimd128Register(0), 8);
+ __ pmovzxbw(dst, dst);
+ break;
+ }
case kX64I16x8ShrU: {
__ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
+ case kX64I16x8UConvertI32x4: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ // Change negative lanes to 0x7FFFFFFF
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrld(kScratchDoubleReg, 1);
+ __ pminud(dst, kScratchDoubleReg);
+ __ pminud(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ packusdw(dst, kScratchDoubleReg);
+ break;
+ }
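
Editor's note: packusdw interprets its inputs as signed int32, so lanes with
the top bit set (large unsigned values) would otherwise saturate to 0 instead
of 0xFFFF. Clamping with an unsigned min to 0x7FFFFFFF first keeps the signed
view non-negative while preserving the unsigned saturation point. One lane,
modeled in scalar form (illustrative):

    #include <algorithm>
    #include <cstdint>

    uint16_t NarrowU32ToU16(uint32_t v) {
      v = std::min(v, 0x7FFFFFFFu);  // pminud: avoid packusdw's signed view
      return v > 0xFFFFu ? 0xFFFFu
                         : static_cast<uint16_t>(v);  // unsigned saturate
    }
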
case kX64I16x8AddSaturateU: {
__ paddusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2524,6 +2651,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I8x16SConvertI16x8: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ packsswb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64I8x16Neg: {
CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
@@ -2585,6 +2717,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pcmpeqb(dst, src);
break;
}
+ case kX64I8x16UConvertI16x8: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ // Change negative lanes to 0x7FFF
+ __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlw(kScratchDoubleReg, 1);
+ __ pminuw(dst, kScratchDoubleReg);
+ __ pminuw(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ packuswb(dst, kScratchDoubleReg);
+ break;
+ }
case kX64I8x16AddSaturateU: {
__ paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2656,8 +2800,44 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xorps(dst, i.InputSimd128Register(2));
break;
}
+ case kX64S1x4AnyTrue:
+ case kX64S1x8AnyTrue:
+ case kX64S1x16AnyTrue: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ Register dst = i.OutputRegister();
+ XMMRegister src = i.InputSimd128Register(0);
+ Register tmp = i.TempRegister(0);
+ __ xorq(tmp, tmp);
+ __ movq(dst, Immediate(-1));
+ __ ptest(src, src);
+ __ cmovq(zero, dst, tmp);
+ break;
+ }
+ case kX64S1x4AllTrue:
+ case kX64S1x8AllTrue:
+ case kX64S1x16AllTrue: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ Register dst = i.OutputRegister();
+ XMMRegister src = i.InputSimd128Register(0);
+ Register tmp = i.TempRegister(0);
+ __ movq(tmp, Immediate(-1));
+ __ xorq(dst, dst);
+ // Compare all src lanes to false.
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ if (arch_opcode == kX64S1x4AllTrue) {
+ __ pcmpeqd(kScratchDoubleReg, src);
+ } else if (arch_opcode == kX64S1x8AllTrue) {
+ __ pcmpeqw(kScratchDoubleReg, src);
+ } else {
+ __ pcmpeqb(kScratchDoubleReg, src);
+ }
+      // If kScratchDoubleReg is all zero, none of the src lanes are false.
+ __ ptest(kScratchDoubleReg, kScratchDoubleReg);
+ __ cmovq(zero, dst, tmp);
+ break;
+ }
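
Editor's note: both reductions avoid branches — ptest sets ZF when its operand
is all zero, and cmovq then picks between the preloaded 0 and -1. In scalar
terms the two cases compute (illustrative model, not the emitted code):

    #include <cstdint>

    bool AnyTrue4(const uint32_t lanes[4]) {
      for (int i = 0; i < 4; ++i) {
        if (lanes[i] != 0) return true;  // ptest src,src clears ZF
      }
      return false;  // all lanes zero -> ZF set -> cmovq selects 0
    }

    bool AllTrue4(const uint32_t lanes[4]) {
      for (int i = 0; i < 4; ++i) {
        if (lanes[i] == 0) return false;  // pcmpeqd vs. zero marks false lanes
      }
      return true;  // no false lane -> scratch stays zero -> cmovq selects -1
    }
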
case kX64StackCheck:
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ CompareRoot(rsp, RootIndex::kStackLimit);
break;
case kWord32AtomicExchangeInt8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
@@ -3273,7 +3453,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
+ RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
@@ -3281,6 +3461,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kDelayedStringConstant: {
+ const StringConstantBase* src_constant = src.ToDelayedStringConstant();
+ __ MoveStringConstant(dst, src_constant);
+ break;
+ }
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): load of labels on x64.
break;
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 6a9e313f4e..c2a194e94a 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -151,6 +151,8 @@ namespace compiler {
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
+ V(X64F32x4SConvertI32x4) \
+ V(X64F32x4UConvertI32x4) \
V(X64F32x4Abs) \
V(X64F32x4Neg) \
V(X64F32x4RecipApprox) \
@@ -168,6 +170,9 @@ namespace compiler {
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4ReplaceLane) \
+ V(X64I32x4SConvertF32x4) \
+ V(X64I32x4SConvertI16x8Low) \
+ V(X64I32x4SConvertI16x8High) \
V(X64I32x4Neg) \
V(X64I32x4Shl) \
V(X64I32x4ShrS) \
@@ -181,6 +186,9 @@ namespace compiler {
V(X64I32x4Ne) \
V(X64I32x4GtS) \
V(X64I32x4GeS) \
+ V(X64I32x4UConvertF32x4) \
+ V(X64I32x4UConvertI16x8Low) \
+ V(X64I32x4UConvertI16x8High) \
V(X64I32x4ShrU) \
V(X64I32x4MinU) \
V(X64I32x4MaxU) \
@@ -189,9 +197,12 @@ namespace compiler {
V(X64I16x8Splat) \
V(X64I16x8ExtractLane) \
V(X64I16x8ReplaceLane) \
+ V(X64I16x8SConvertI8x16Low) \
+ V(X64I16x8SConvertI8x16High) \
V(X64I16x8Neg) \
V(X64I16x8Shl) \
V(X64I16x8ShrS) \
+ V(X64I16x8SConvertI32x4) \
V(X64I16x8Add) \
V(X64I16x8AddSaturateS) \
V(X64I16x8AddHoriz) \
@@ -204,7 +215,10 @@ namespace compiler {
V(X64I16x8Ne) \
V(X64I16x8GtS) \
V(X64I16x8GeS) \
+ V(X64I16x8UConvertI8x16Low) \
+ V(X64I16x8UConvertI8x16High) \
V(X64I16x8ShrU) \
+ V(X64I16x8UConvertI32x4) \
V(X64I16x8AddSaturateU) \
V(X64I16x8SubSaturateU) \
V(X64I16x8MinU) \
@@ -214,6 +228,7 @@ namespace compiler {
V(X64I8x16Splat) \
V(X64I8x16ExtractLane) \
V(X64I8x16ReplaceLane) \
+ V(X64I8x16SConvertI16x8) \
V(X64I8x16Neg) \
V(X64I8x16Add) \
V(X64I8x16AddSaturateS) \
@@ -225,6 +240,7 @@ namespace compiler {
V(X64I8x16Ne) \
V(X64I8x16GtS) \
V(X64I8x16GeS) \
+ V(X64I8x16UConvertI16x8) \
V(X64I8x16AddSaturateU) \
V(X64I8x16SubSaturateU) \
V(X64I8x16MinU) \
@@ -237,6 +253,12 @@ namespace compiler {
V(X64S128Not) \
V(X64S128Select) \
V(X64S128Zero) \
+ V(X64S1x4AnyTrue) \
+ V(X64S1x4AllTrue) \
+ V(X64S1x8AnyTrue) \
+ V(X64S1x8AllTrue) \
+ V(X64S1x16AnyTrue) \
+ V(X64S1x16AllTrue) \
V(X64Word64AtomicLoadUint8) \
V(X64Word64AtomicLoadUint16) \
V(X64Word64AtomicLoadUint32) \
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index b1f380badf..e5523fd49d 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -128,6 +128,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
+ case kX64F32x4SConvertI32x4:
+ case kX64F32x4UConvertI32x4:
case kX64F32x4RecipApprox:
case kX64F32x4RecipSqrtApprox:
case kX64F32x4Abs:
@@ -145,6 +147,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
case kX64I32x4ReplaceLane:
+ case kX64I32x4SConvertF32x4:
+ case kX64I32x4SConvertI16x8Low:
+ case kX64I32x4SConvertI16x8High:
case kX64I32x4Neg:
case kX64I32x4Shl:
case kX64I32x4ShrS:
@@ -158,6 +163,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4Ne:
case kX64I32x4GtS:
case kX64I32x4GeS:
+ case kX64I32x4UConvertF32x4:
+ case kX64I32x4UConvertI16x8Low:
+ case kX64I32x4UConvertI16x8High:
case kX64I32x4ShrU:
case kX64I32x4MinU:
case kX64I32x4MaxU:
@@ -166,9 +174,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8Splat:
case kX64I16x8ExtractLane:
case kX64I16x8ReplaceLane:
+ case kX64I16x8SConvertI8x16Low:
+ case kX64I16x8SConvertI8x16High:
case kX64I16x8Neg:
case kX64I16x8Shl:
case kX64I16x8ShrS:
+ case kX64I16x8SConvertI32x4:
case kX64I16x8Add:
case kX64I16x8AddSaturateS:
case kX64I16x8AddHoriz:
@@ -181,6 +192,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8Ne:
case kX64I16x8GtS:
case kX64I16x8GeS:
+ case kX64I16x8UConvertI8x16Low:
+ case kX64I16x8UConvertI8x16High:
+ case kX64I16x8UConvertI32x4:
case kX64I16x8ShrU:
case kX64I16x8AddSaturateU:
case kX64I16x8SubSaturateU:
@@ -191,6 +205,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16Splat:
case kX64I8x16ExtractLane:
case kX64I8x16ReplaceLane:
+ case kX64I8x16SConvertI16x8:
case kX64I8x16Neg:
case kX64I8x16Add:
case kX64I8x16AddSaturateS:
@@ -202,6 +217,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16Ne:
case kX64I8x16GtS:
case kX64I8x16GeS:
+ case kX64I8x16UConvertI16x8:
case kX64I8x16AddSaturateU:
case kX64I8x16SubSaturateU:
case kX64I8x16MinU:
@@ -214,6 +230,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Not:
case kX64S128Select:
case kX64S128Zero:
+ case kX64S1x4AnyTrue:
+ case kX64S1x4AllTrue:
+ case kX64S1x8AnyTrue:
+ case kX64S1x8AllTrue:
+ case kX64S1x16AnyTrue:
+ case kX64S1x16AllTrue:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index b5d7fa6d55..211794ace8 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -1273,6 +1273,7 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
V(Float64Sqrt, kSSEFloat64Sqrt) \
V(Float32Sqrt, kSSEFloat32Sqrt) \
V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
+ V(ChangeFloat64ToInt64, kSSEFloat64ToInt64) \
V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1)) \
V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
V(ChangeFloat64ToUint64, kSSEFloat64ToUint64) \
@@ -1281,6 +1282,7 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
+ V(ChangeInt64ToFloat64, kSSEInt64ToFloat64) \
V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
@@ -1665,6 +1667,17 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
+  // The 32-bit comparisons automatically truncate Word64 values to Word32
+  // range, so there is no need to do that explicitly.
+ if (opcode == kX64Cmp32 || opcode == kX64Test32) {
+ while (left->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ left = left->InputAt(0);
+ }
+ while (right->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ right = right->InputAt(0);
+ }
+ }
+
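
Editor's note: this peephole is sound because a 32-bit cmp or test reads only
the low 32 bits of its register operands, which is exactly the value that
TruncateInt64ToInt32 would have produced. In scalar terms (a hypothetical
illustration):

    #include <cstdint>

    // cmp/test with 32-bit operand size ignores the upper halves, so
    // Word32Equal(Truncate(a), Truncate(b)) can consume a and b directly.
    bool Equal32(uint64_t a, uint64_t b) {
      return static_cast<uint32_t>(a) == static_cast<uint32_t>(b);
    }
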
opcode = TryNarrowOpcodeSize(opcode, left, right, cont);
// If one of the two inputs is an immediate, make sure it's on the right, or
@@ -1708,7 +1721,7 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
X64OperandGenerator g(selector);
if (selector->CanUseRootsRegister()) {
Heap* const heap = selector->isolate()->heap();
- Heap::RootListIndex root_index;
+ RootIndex root_index;
HeapObjectBinopMatcher m(node);
if (m.right().HasValue() &&
heap->IsRootHandle(m.right().Value(), &root_index)) {
@@ -2483,6 +2496,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4MaxU) \
V(I32x4GtU) \
V(I32x4GeU) \
+ V(I16x8SConvertI32x4) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
V(I16x8AddHoriz) \
@@ -2501,6 +2515,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8MaxU) \
V(I16x8GtU) \
V(I16x8GeU) \
+ V(I8x16SConvertI16x8) \
V(I8x16Add) \
V(I8x16AddSaturateS) \
V(I8x16Sub) \
@@ -2521,14 +2536,23 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Or) \
V(S128Xor)
-#define SIMD_UNOP_LIST(V) \
- V(F32x4Abs) \
- V(F32x4Neg) \
- V(F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox) \
- V(I32x4Neg) \
- V(I16x8Neg) \
- V(I8x16Neg) \
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
+ V(I32x4Neg) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
+ V(I16x8Neg) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
+ V(I8x16Neg) \
V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
@@ -2539,6 +2563,16 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8ShrS) \
V(I16x8ShrU)
+#define SIMD_ANYTRUE_LIST(V) \
+ V(S1x4AnyTrue) \
+ V(S1x8AnyTrue) \
+ V(S1x16AnyTrue)
+
+#define SIMD_ALLTRUE_LIST(V) \
+ V(S1x4AllTrue) \
+ V(S1x8AllTrue) \
+ V(S1x16AllTrue)
+
void InstructionSelector::VisitS128Zero(Node* node) {
X64OperandGenerator g(this);
Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
@@ -2583,6 +2617,7 @@ SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
+#undef SIMD_SHIFT_OPCODES
#define VISIT_SIMD_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -2592,6 +2627,7 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
}
SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
#undef VISIT_SIMD_UNOP
+#undef SIMD_UNOP_LIST
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -2601,10 +2637,30 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
-#undef SIMD_TYPES
#undef SIMD_BINOP_LIST
-#undef SIMD_UNOP_LIST
-#undef SIMD_SHIFT_OPCODES
+
+#define VISIT_SIMD_ANYTRUE(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempRegister()}; \
+ Emit(kX64##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
+ }
+SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
+#undef VISIT_SIMD_ANYTRUE
+#undef SIMD_ANYTRUE_LIST
+
+#define VISIT_SIMD_ALLTRUE(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempRegister()}; \
+ Emit(kX64##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
+ }
+SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
+#undef VISIT_SIMD_ALLTRUE
+#undef SIMD_ALLTRUE_LIST
+#undef SIMD_TYPES
void InstructionSelector::VisitS128Select(Node* node) {
X64OperandGenerator g(this);
@@ -2613,6 +2669,36 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
+void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64I32x4UConvertF32x4, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64I16x8UConvertI32x4, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64I8x16UConvertI16x8, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 3470ff99d7..2194c6bf9b 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -99,6 +99,10 @@ bool Context::IsDebugEvaluateContext() const {
return map()->instance_type() == DEBUG_EVALUATE_CONTEXT_TYPE;
}
+bool Context::IsAwaitContext() const {
+ return map()->instance_type() == AWAIT_CONTEXT_TYPE;
+}
+
bool Context::IsBlockContext() const {
return map()->instance_type() == BLOCK_CONTEXT_TYPE;
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 709ae6164a..f33c8b16bc 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -10,6 +10,8 @@
namespace v8 {
namespace internal {
+class JSGlobalObject;
+class JSGlobalProxy;
class NativeContext;
class RegExpMatchInfo;
@@ -71,47 +73,45 @@ enum ContextLookupFlags {
V(ASYNC_GENERATOR_AWAIT_CAUGHT, JSFunction, async_generator_await_caught) \
V(ASYNC_GENERATOR_AWAIT_UNCAUGHT, JSFunction, async_generator_await_uncaught)
-#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
- V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
- V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
- V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
- V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
- V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
- V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(CANONICALIZE_LOCALE_LIST_FUNCTION_INDEX, JSFunction, \
- canonicalize_locale_list) \
- V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
- V(ERROR_TO_STRING, JSFunction, error_to_string) \
- V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
- V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
- V(MAP_DELETE_INDEX, JSFunction, map_delete) \
- V(MAP_GET_INDEX, JSFunction, map_get) \
- V(MAP_HAS_INDEX, JSFunction, map_has) \
- V(MAP_SET_INDEX, JSFunction, map_set) \
- V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
- V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
- V(OBJECT_TO_STRING, JSFunction, object_to_string) \
- V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
- V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
- V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
- V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
- V(CACHED_OR_NEW_SERVICE_LOCALE_FUNCTION_INDEX, JSFunction, \
- cached_or_new_service) \
- V(RESOLVE_LOCALE_FUNCTION_INDEX, JSFunction, resolve_locale) \
- V(SET_ADD_INDEX, JSFunction, set_add) \
- V(SET_DELETE_INDEX, JSFunction, set_delete) \
- V(SET_HAS_INDEX, JSFunction, set_has) \
- V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
- V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
- V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
- V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
- wasm_compile_error_function) \
- V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
- V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \
- wasm_runtime_error_function) \
- V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
+#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
+ V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
+ V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
+ V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
+ V(CANONICALIZE_LOCALE_LIST_FUNCTION_INDEX, JSFunction, \
+ canonicalize_locale_list) \
+ V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
+ V(ERROR_TO_STRING, JSFunction, error_to_string) \
+ V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
+ V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
+ V(MAP_DELETE_INDEX, JSFunction, map_delete) \
+ V(MAP_GET_INDEX, JSFunction, map_get) \
+ V(MAP_HAS_INDEX, JSFunction, map_has) \
+ V(MAP_SET_INDEX, JSFunction, map_set) \
+ V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
+ V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
+ V(OBJECT_TO_STRING, JSFunction, object_to_string) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
+ V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
+ V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
+ V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(CACHED_OR_NEW_SERVICE_LOCALE_FUNCTION_INDEX, JSFunction, \
+ cached_or_new_service) \
+ V(RESOLVE_LOCALE_FUNCTION_INDEX, JSFunction, resolve_locale) \
+ V(SET_ADD_INDEX, JSFunction, set_add) \
+ V(SET_DELETE_INDEX, JSFunction, set_delete) \
+ V(SET_HAS_INDEX, JSFunction, set_has) \
+ V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
+ V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
+ V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
+ V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_compile_error_function) \
+ V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
+ V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_runtime_error_function) \
+ V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
+ V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add)
#define NATIVE_CONTEXT_FIELDS(V) \
@@ -194,6 +194,9 @@ enum ContextLookupFlags {
V(INITIAL_MAP_PROTOTYPE_MAP_INDEX, Map, initial_map_prototype_map) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
V(INITIAL_SET_PROTOTYPE_MAP_INDEX, Map, initial_set_prototype_map) \
+ V(INITIAL_STRING_ITERATOR_MAP_INDEX, Map, initial_string_iterator_map) \
+ V(INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX, JSObject, \
+ initial_string_iterator_prototype) \
V(INITIAL_STRING_PROTOTYPE_INDEX, JSObject, initial_string_prototype) \
V(INITIAL_WEAKMAP_PROTOTYPE_MAP_INDEX, Map, initial_weakmap_prototype_map) \
V(INITIAL_WEAKSET_PROTOTYPE_MAP_INDEX, Map, initial_weakset_prototype_map) \
@@ -204,21 +207,13 @@ enum ContextLookupFlags {
V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
V(INTL_DATE_TIME_FORMAT_FUNCTION_INDEX, JSFunction, \
intl_date_time_format_function) \
- V(INTL_DATE_FORMAT_INTERNAL_FORMAT_SHARED_FUN, SharedFunctionInfo, \
- date_format_internal_format_shared_fun) \
V(INTL_NUMBER_FORMAT_FUNCTION_INDEX, JSFunction, \
intl_number_format_function) \
- V(INTL_NUMBER_FORMAT_INTERNAL_FORMAT_NUMBER_SHARED_FUN, SharedFunctionInfo, \
- number_format_internal_format_number_shared_fun) \
V(INTL_LOCALE_FUNCTION_INDEX, JSFunction, intl_locale_function) \
V(INTL_COLLATOR_FUNCTION_INDEX, JSFunction, intl_collator_function) \
- V(INTL_COLLATOR_INTERNAL_COMPARE_SHARED_FUN, SharedFunctionInfo, \
- collator_internal_compare_shared_fun) \
V(INTL_PLURAL_RULES_FUNCTION_INDEX, JSFunction, intl_plural_rules_function) \
V(INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX, JSFunction, \
intl_v8_break_iterator_function) \
- V(INTL_V8_BREAK_ITERATOR_INTERNAL_ADOPT_TEXT_SHARED_FUN, SharedFunctionInfo, \
- break_iterator_internal_adopt_text_shared_fun) \
V(JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX, Map, \
js_array_packed_smi_elements_map) \
V(JS_ARRAY_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map, \
@@ -241,7 +236,8 @@ enum ContextLookupFlags {
V(MAP_KEY_VALUE_ITERATOR_MAP_INDEX, Map, map_key_value_iterator_map) \
V(MAP_VALUE_ITERATOR_MAP_INDEX, Map, map_value_iterator_map) \
V(MATH_RANDOM_INDEX_INDEX, Smi, math_random_index) \
- V(MATH_RANDOM_CACHE_INDEX, Object, math_random_cache) \
+ V(MATH_RANDOM_STATE_INDEX, ByteArray, math_random_state) \
+ V(MATH_RANDOM_CACHE_INDEX, FixedDoubleArray, math_random_cache) \
V(MESSAGE_LISTENERS_INDEX, TemplateList, message_listeners) \
V(NATIVES_UTILS_OBJECT_INDEX, Object, natives_utils_object) \
V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
@@ -337,9 +333,9 @@ enum ContextLookupFlags {
V(CLASS_FUNCTION_MAP_INDEX, Map, class_function_map) \
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
- V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(NATIVE_FUNCTION_MAP_INDEX, Map, native_function_map) \
+ V(WASM_EXCEPTION_CONSTRUCTOR_INDEX, JSFunction, wasm_exception_constructor) \
V(WASM_GLOBAL_CONSTRUCTOR_INDEX, JSFunction, wasm_global_constructor) \
V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
@@ -446,11 +442,6 @@ class ScriptContextTable : public FixedArray {
class Context : public FixedArray, public NeverReadOnlySpaceObject {
public:
- // Use the mixin methods over the HeapObject methods.
- // TODO(v8:7786) Remove once the HeapObject methods are gone.
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
// Conversions.
static inline Context* cast(Object* context);
@@ -462,7 +453,8 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
// The extension slot is used for either the global object (in native
// contexts), eval extension object (function contexts), subject of with
// (with contexts), or the variable name (catch contexts), the serialized
- // scope info (block contexts), or the module instance (module contexts).
+ // scope info (block contexts), the module instance (module contexts), or
+ // the generator object (await contexts).
EXTENSION_INDEX,
NATIVE_CONTEXT_INDEX,
@@ -549,6 +541,7 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
inline bool IsCatchContext() const;
inline bool IsWithContext() const;
inline bool IsDebugEvaluateContext() const;
+ inline bool IsAwaitContext() const;
inline bool IsBlockContext() const;
inline bool IsModuleContext() const;
inline bool IsEvalContext() const;
@@ -624,12 +617,7 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
static const int kNotFound = -1;
- // GC support.
- typedef FixedBodyDescriptor<kHeaderSize, kSize, kSize> BodyDescriptor;
-
- typedef FixedBodyDescriptor<
- kHeaderSize, kHeaderSize + FIRST_WEAK_SLOT * kPointerSize, kSize>
- BodyDescriptorWeak;
+ class BodyDescriptor;
private:
#ifdef DEBUG
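Each V(INDEX, Type, name) row in the lists above is consumed by an X-macro that declares typed slot accessors on Context, so the Math.random split into state and cache surfaces as two typed getters. A sketch of the shape of the generated declarations, assumed from the usual accessor pattern rather than quoted from this patch:

    // Hypothetical expansion sketch, not literal generated source:
    //   inline ByteArray* math_random_state();          // MATH_RANDOM_STATE_INDEX
    //   inline void set_math_random_state(ByteArray*);
    //   inline FixedDoubleArray* math_random_cache();   // MATH_RANDOM_CACHE_INDEX
    //   inline void set_math_random_cache(FixedDoubleArray*);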
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index baf8b3a6d5..ee40201544 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -30,8 +30,6 @@
namespace v8 {
namespace internal {
-namespace {
-
inline double JunkStringValue() {
return bit_cast<double, uint64_t>(kQuietNaNMask);
}
@@ -194,7 +192,7 @@ class StringToIntHelper {
// buffer of one-byte digits, along with an optional radix prefix.
StringToIntHelper(Isolate* isolate, const uint8_t* subject, int length)
: isolate_(isolate), raw_one_byte_subject_(subject), length_(length) {}
- virtual ~StringToIntHelper() {}
+ virtual ~StringToIntHelper() = default;
protected:
// Subclasses must implement these:
@@ -462,13 +460,13 @@ class NumberParseIntHelper : public StringToIntHelper {
}
protected:
- virtual void AllocateResult() {}
- virtual void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) {
+ void AllocateResult() override {}
+ void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) override {
result_ = result_ * multiplier + part;
}
private:
- virtual void HandleSpecialCases() {
+ void HandleSpecialCases() override {
bool is_power_of_two = base::bits::IsPowerOfTwo(radix());
if (!is_power_of_two && radix() != 10) return;
DisallowHeapAllocation no_gc;
@@ -812,8 +810,6 @@ parsing_done:
return (sign == NEGATIVE) ? -converted : converted;
}
-} // namespace
-
double StringToDouble(UnicodeCache* unicode_cache,
const char* str, int flags, double empty_string_val) {
// We cast to const uint8_t* here to avoid instantiating the
@@ -911,7 +907,7 @@ class StringToBigIntHelper : public StringToIntHelper {
}
protected:
- virtual void AllocateResult() {
+ void AllocateResult() override {
// We have to allocate a BigInt that's big enough to fit the result.
// Conservatively assume that all remaining digits are significant.
// Optimization opportunity: Would it make sense to scan for trailing
@@ -928,7 +924,7 @@ class StringToBigIntHelper : public StringToIntHelper {
}
}
- virtual void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) {
+ void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) override {
BigInt::InplaceMultiplyAdd(result_, static_cast<uintptr_t>(multiplier),
static_cast<uintptr_t>(part));
}
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index bcea9e0f42..a4b08127cd 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -118,7 +118,8 @@ Counters::Counters(Isolate* isolate)
STATS_COUNTER_TS_LIST(SC)
#undef SC
// clang-format on
- runtime_call_stats_() {
+ runtime_call_stats_(),
+ worker_thread_runtime_call_stats_() {
static const struct {
Histogram Counters::*member;
const char* caption;
@@ -529,5 +530,64 @@ void RuntimeCallStats::Dump(v8::tracing::TracedValue* value) {
in_use_ = false;
}
+WorkerThreadRuntimeCallStats::WorkerThreadRuntimeCallStats()
+ : tls_key_(base::Thread::CreateThreadLocalKey()) {}
+
+WorkerThreadRuntimeCallStats::~WorkerThreadRuntimeCallStats() {
+ base::Thread::DeleteThreadLocalKey(tls_key_);
+}
+
+RuntimeCallStats* WorkerThreadRuntimeCallStats::NewTable() {
+ DCHECK(FLAG_runtime_stats);
+ std::unique_ptr<RuntimeCallStats> new_table =
+ base::make_unique<RuntimeCallStats>();
+ RuntimeCallStats* result = new_table.get();
+
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ tables_.push_back(std::move(new_table));
+ return result;
+}
+
+void WorkerThreadRuntimeCallStats::AddToMainTable(
+ RuntimeCallStats* main_call_stats) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ for (auto& worker_stats : tables_) {
+ DCHECK_NE(main_call_stats, worker_stats.get());
+ main_call_stats->Add(worker_stats.get());
+ worker_stats->Reset();
+ }
+}
+
+WorkerThreadRuntimeCallStatsScope::WorkerThreadRuntimeCallStatsScope(
+ WorkerThreadRuntimeCallStats* worker_stats)
+ : table_(nullptr) {
+ if (V8_LIKELY(!FLAG_runtime_stats)) return;
+
+ table_ = reinterpret_cast<RuntimeCallStats*>(
+ base::Thread::GetThreadLocal(worker_stats->GetKey()));
+ if (table_ == nullptr) {
+ table_ = worker_stats->NewTable();
+ base::Thread::SetThreadLocal(worker_stats->GetKey(), table_);
+ }
+
+ if (FLAG_runtime_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ table_->Reset();
+ }
+}
+
+WorkerThreadRuntimeCallStatsScope::~WorkerThreadRuntimeCallStatsScope() {
+ if (V8_LIKELY(table_ == nullptr)) return;
+
+ if ((FLAG_runtime_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ auto value = v8::tracing::TracedValue::Create();
+ table_->Dump(value.get());
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"),
+ "V8.RuntimeStats", TRACE_EVENT_SCOPE_THREAD,
+ "runtime-call-stats", std::move(value));
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index fed7edb44a..719bcc55e0 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -98,7 +98,7 @@ class StatsCounterBase {
const char* name_;
int* ptr_;
- StatsCounterBase() {}
+ StatsCounterBase() = default;
StatsCounterBase(Counters* counters, const char* name)
: counters_(counters), name_(name), ptr_(nullptr) {}
@@ -161,7 +161,7 @@ class StatsCounter : public StatsCounterBase {
private:
friend class Counters;
- StatsCounter() {}
+ StatsCounter() = default;
StatsCounter(Counters* counters, const char* name)
: StatsCounterBase(counters, name), lookup_done_(false) {}
@@ -227,7 +227,7 @@ class Histogram {
}
protected:
- Histogram() {}
+ Histogram() = default;
Histogram(const char* name, int min, int max, int num_buckets,
Counters* counters)
: name_(name),
@@ -277,7 +277,7 @@ class TimedHistogram : public Histogram {
friend class Counters;
HistogramTimerResolution resolution_;
- TimedHistogram() {}
+ TimedHistogram() = default;
TimedHistogram(const char* name, int min, int max,
HistogramTimerResolution resolution, int num_buckets,
Counters* counters)
@@ -393,7 +393,7 @@ class HistogramTimer : public TimedHistogram {
base::ElapsedTimer timer_;
- HistogramTimer() {}
+ HistogramTimer() = default;
};
// Helper class for scoping a HistogramTimer.
@@ -401,7 +401,7 @@ class HistogramTimer : public TimedHistogram {
// Parser is currently reentrant (when it throws an error, we call back
// into JavaScript and all bets are off), but ElapsedTimer is not
// reentry-safe. Fix this properly and remove |allow_nesting|.
-class HistogramTimerScope BASE_EMBEDDED {
+class HistogramTimerScope {
public:
explicit HistogramTimerScope(HistogramTimer* timer,
bool allow_nesting = false)
@@ -439,7 +439,7 @@ enum class OptionalHistogramTimerScopeMode { TAKE_TIME, DONT_TAKE_TIME };
// Helper class for scoping a HistogramTimer.
// It will not take time if take_time is set to false.
-class OptionalHistogramTimerScope BASE_EMBEDDED {
+class OptionalHistogramTimerScope {
public:
OptionalHistogramTimerScope(HistogramTimer* timer,
OptionalHistogramTimerScopeMode mode)
@@ -487,7 +487,7 @@ class AggregatableHistogramTimer : public Histogram {
private:
friend class Counters;
- AggregatableHistogramTimer() {}
+ AggregatableHistogramTimer() = default;
AggregatableHistogramTimer(const char* name, int min, int max,
int num_buckets, Counters* counters)
: Histogram(name, min, max, num_buckets, counters) {}
@@ -750,6 +750,9 @@ class RuntimeCallTimer final {
V(Map_Has) \
V(Map_New) \
V(Map_Set) \
+ V(WeakMap_Get) \
+ V(WeakMap_Set) \
+ V(WeakMap_New) \
V(Message_GetEndColumn) \
V(Message_GetLineNumber) \
V(Message_GetSourceLine) \
@@ -867,7 +870,9 @@ class RuntimeCallTimer final {
V(BoundFunctionNameGetter) \
V(BoundFunctionLengthGetter) \
V(CompileBackgroundAnalyse) \
+ V(CompileBackgroundCompileTask) \
V(CompileBackgroundEval) \
+ V(CompileBackgroundFunction) \
V(CompileBackgroundIgnition) \
V(CompileBackgroundScript) \
V(CompileBackgroundRewriteReturnResult) \
@@ -875,6 +880,9 @@ class RuntimeCallTimer final {
V(CompileDeserialize) \
V(CompileEval) \
V(CompileAnalyse) \
+ V(CompileEnqueueOnDispatcher) \
+ V(CompileFinalizeBackgroundCompileTask) \
+ V(CompileFinishNowOnDispatcher) \
V(CompileFunction) \
V(CompileGetFromOptimizedCodeMap) \
V(CompileIgnition) \
@@ -1023,7 +1031,7 @@ enum RuntimeCallCounterId {
kNumberOfCounters
};
-class RuntimeCallStats final : public ZoneObject {
+class RuntimeCallStats final {
public:
V8_EXPORT_PRIVATE RuntimeCallStats();
@@ -1075,6 +1083,42 @@ class RuntimeCallStats final : public ZoneObject {
RuntimeCallCounter counters_[kNumberOfCounters];
};
+class WorkerThreadRuntimeCallStats final {
+ public:
+ WorkerThreadRuntimeCallStats();
+ ~WorkerThreadRuntimeCallStats();
+
+ // Returns the TLS key associated with this WorkerThreadRuntimeCallStats.
+ base::Thread::LocalStorageKey GetKey() const { return tls_key_; }
+
+ // Returns a new worker thread runtime call stats table managed by this
+ // WorkerThreadRuntimeCallStats.
+ RuntimeCallStats* NewTable();
+
+ // Adds the counters from the worker thread tables to |main_call_stats|.
+ void AddToMainTable(RuntimeCallStats* main_call_stats);
+
+ private:
+ base::Mutex mutex_;
+ std::vector<std::unique_ptr<RuntimeCallStats>> tables_;
+ base::Thread::LocalStorageKey tls_key_;
+};
+
+// Creating a WorkerThreadRuntimeCallStatsScope will provide a thread-local
+// runtime call stats table, and will dump the table to an immediate trace event
+// when it is destroyed.
+class WorkerThreadRuntimeCallStatsScope final {
+ public:
+ WorkerThreadRuntimeCallStatsScope(
+ WorkerThreadRuntimeCallStats* off_thread_stats);
+ ~WorkerThreadRuntimeCallStatsScope();
+
+ RuntimeCallStats* Get() const { return table_; }
+
+ private:
+ RuntimeCallStats* table_;
+};
+
#define CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats, counter_id) \
do { \
if (V8_UNLIKELY(FLAG_runtime_stats) && runtime_call_stats) { \
@@ -1136,6 +1180,8 @@ class RuntimeCallTimerScope {
HR(gc_finalize_mark, V8.GCFinalizeMC.Mark, 0, 10000, 101) \
HR(gc_finalize_prologue, V8.GCFinalizeMC.Prologue, 0, 10000, 101) \
HR(gc_finalize_sweep, V8.GCFinalizeMC.Sweep, 0, 10000, 101) \
+ HR(gc_scavenger_scavenge_main, V8.GCScavenger.ScavengeMain, 0, 10000, 101) \
+ HR(gc_scavenger_scavenge_roots, V8.GCScavenger.ScavengeRoots, 0, 10000, 101) \
HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22) \
HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
/* Asm/Wasm. */ \
@@ -1176,7 +1222,7 @@ class RuntimeCallTimerScope {
HR(wasm_memory_allocation_result, V8.WasmMemoryAllocationResult, 0, 3, 4) \
HR(wasm_address_space_usage_mb, V8.WasmAddressSpaceUsageMiB, 0, 1 << 20, \
128) \
- HR(wasm_module_code_size_mb, V8.WasmModuleCodeSizeMiB, 0, 256, 64)
+ HR(wasm_module_code_size_mb, V8.WasmModuleCodeSizeMiB, 0, 1024, 64)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Garbage collection timers. */ \
@@ -1267,6 +1313,8 @@ class RuntimeCallTimerScope {
V8.CompileScriptMicroSeconds.NoCache.CacheTooCold, 1000000, MICROSECOND) \
HT(compile_script_on_background, \
V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
+ HT(compile_function_on_background, \
+ V8.CompileFunctionMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
HT(gc_parallel_task_latency, V8.GC.ParallelTaskLatencyMicroSeconds, 1000000, \
MICROSECOND)
@@ -1518,6 +1566,10 @@ class Counters : public std::enable_shared_from_this<Counters> {
RuntimeCallStats* runtime_call_stats() { return &runtime_call_stats_; }
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats() {
+ return &worker_thread_runtime_call_stats_;
+ }
+
private:
friend class StatsTable;
friend class StatsCounterBase;
@@ -1597,6 +1649,7 @@ class Counters : public std::enable_shared_from_this<Counters> {
#undef SC
RuntimeCallStats runtime_call_stats_;
+ WorkerThreadRuntimeCallStats worker_thread_runtime_call_stats_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
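The scope class added above is meant to bracket a background task. A minimal usage sketch, assuming it runs inside v8::internal on a worker thread; RunBackgroundTask is hypothetical, and the counter id follows the k-prefixed spelling the counter X-macros generate:

    // Sketch: bind this thread's stats table for the task's lifetime.
    void RunBackgroundTask(Isolate* isolate) {
      WorkerThreadRuntimeCallStatsScope stats_scope(
          isolate->counters()->worker_thread_runtime_call_stats());
      // Attribute the task's time to a background-compile counter.
      RuntimeCallTimerScope timer(stats_scope.Get(),
                                  RuntimeCallCounterId::kCompileBackgroundScript);
      // ... do the work; on scope exit the table is dumped to a trace
      // event when runtime stats are enabled via tracing.
    }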
diff --git a/deps/v8/src/d8-console.cc b/deps/v8/src/d8-console.cc
index cb7334af78..f08aa3bfae 100644
--- a/deps/v8/src/d8-console.cc
+++ b/deps/v8/src/d8-console.cc
@@ -39,16 +39,9 @@ D8Console::D8Console(Isolate* isolate) : isolate_(isolate) {
void D8Console::Assert(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
- Local<Boolean> arg;
- if (args.Length() > 0) {
- if (!args[0]->ToBoolean(isolate_->GetCurrentContext()).ToLocal(&arg)) {
- return;
- }
- } else {
- // No arguments given, the "first" argument is undefined which is false-ish.
- arg = v8::False(isolate_);
- }
- if (arg->IsTrue()) return;
+  // If no arguments are given, the "first" argument is undefined, which is
+  // false-ish.
+ if (args.Length() > 0 && args[0]->BooleanValue(isolate_)) return;
WriteToFile("console.assert", stdout, isolate_, args);
isolate_->ThrowException(v8::Exception::Error(
v8::String::NewFromUtf8(isolate_, "console.assert failed",
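For contrast, a minimal sketch of the conversion this hunk removes next to the one it adopts; value, context, and isolate are hypothetical locals, and the point is that ToBoolean cannot throw, so the Maybe plumbing buys nothing:

    // Old pattern: Maybe-based, with a bail-out path that can never trigger.
    v8::Local<v8::Boolean> as_boolean;
    if (!value->ToBoolean(context).ToLocal(&as_boolean)) return;
    bool was_true = as_boolean->IsTrue();

    // New pattern: direct, infallible conversion.
    bool is_true = value->BooleanValue(isolate);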
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index a1fc3b5782..57a8a0d5a5 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -204,7 +204,7 @@ class ExecArgs {
return;
}
delete [] exec_args_[i];
- exec_args_[i] = 0;
+ exec_args_[i] = nullptr;
}
}
static const unsigned kMaxArgs = 1000;
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 349021d38c..5295f3957c 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -62,7 +62,7 @@ namespace {
const int kMB = 1024 * 1024;
-const int kMaxWorkers = 50;
+const int kMaxWorkers = 100;
const int kMaxSerializerMemoryUsage =
1 * kMB; // Arbitrary maximum for testing.
@@ -118,23 +118,26 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
// store their lengths as a SMI internally.
if (length >= kTwoGB) return nullptr;
- size_t page_size = i::AllocatePageSize();
+ v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
+ size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
// Rounding up could go over the limit.
if (allocated >= kTwoGB) return nullptr;
- return i::AllocatePages(nullptr, allocated, page_size,
+ return i::AllocatePages(page_allocator, nullptr, allocated, page_size,
PageAllocator::kReadWrite);
}
void FreeVM(void* data, size_t length) {
- size_t page_size = i::AllocatePageSize();
+ v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
+ size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
- CHECK(i::FreePages(data, allocated));
+ CHECK(i::FreePages(page_allocator, data, allocated));
}
};
// ArrayBuffer allocator that never allocates over 10MB.
class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
+ protected:
void* Allocate(size_t length) override {
return ArrayBufferAllocatorBase::Allocate(Adjust(length));
}
@@ -154,6 +157,39 @@ class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
};
+// ArrayBuffer allocator that can be equipped with a limit to simulate system
+// OOM.
+class MockArrayBufferAllocatiorWithLimit : public MockArrayBufferAllocator {
+ public:
+ explicit MockArrayBufferAllocatiorWithLimit(size_t allocation_limit)
+ : space_left_(allocation_limit) {}
+
+ protected:
+ void* Allocate(size_t length) override {
+ if (length > space_left_) {
+ return nullptr;
+ }
+ space_left_ -= length;
+ return MockArrayBufferAllocator::Allocate(length);
+ }
+
+ void* AllocateUninitialized(size_t length) override {
+ if (length > space_left_) {
+ return nullptr;
+ }
+ space_left_ -= length;
+ return MockArrayBufferAllocator::AllocateUninitialized(length);
+ }
+
+ void Free(void* data, size_t length) override {
+ space_left_ += length;
+ return MockArrayBufferAllocator::Free(data, length);
+ }
+
+ private:
+ std::atomic<size_t> space_left_;
+};
+
// Predictable v8::Platform implementation. Worker threads are disabled, idle
// tasks are disallowed, and the time reported by {MonotonicallyIncreasingTime}
// is deterministic.
@@ -195,12 +231,14 @@ class PredictablePlatform : public Platform {
}
void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
- platform_->CallOnForegroundThread(isolate, task);
+ // This is a deprecated function and should not be called anymore.
+ UNREACHABLE();
}
void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
double delay_in_seconds) override {
- platform_->CallDelayedOnForegroundThread(isolate, task, delay_in_seconds);
+ // This is a deprecated function and should not be called anymore.
+ UNREACHABLE();
}
void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {
@@ -244,6 +282,14 @@ static Local<Value> Throw(Isolate* isolate, const char* message) {
.ToLocalChecked());
}
+static Local<Value> GetValue(v8::Isolate* isolate, Local<Context> context,
+ Local<v8::Object> object, const char* property) {
+ Local<String> v8_str =
+ String::NewFromUtf8(isolate, property, NewStringType::kNormal)
+ .ToLocalChecked();
+ return object->Get(context, v8_str).ToLocalChecked();
+}
+
Worker* GetWorkerFromInternalField(Isolate* isolate, Local<Object> object) {
if (object->InternalFieldCount() != 1) {
Throw(isolate, "this is not a Worker");
@@ -319,8 +365,7 @@ class TraceConfigParser {
Local<v8::Object> object, const char* property) {
Local<Value> value = GetValue(isolate, context, object, property);
if (value->IsNumber()) {
- Local<Boolean> v8_boolean = value->ToBoolean(context).ToLocalChecked();
- return v8_boolean->Value();
+ return value->BooleanValue(isolate);
}
return false;
}
@@ -361,14 +406,6 @@ class TraceConfigParser {
}
return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
}
-
- static Local<Value> GetValue(v8::Isolate* isolate, Local<Context> context,
- Local<v8::Object> object, const char* property) {
- Local<String> v8_str =
- String::NewFromUtf8(isolate, property, NewStringType::kNormal)
- .ToLocalChecked();
- return object->Get(context, v8_str).ToLocalChecked();
- }
};
} // namespace
@@ -433,7 +470,7 @@ class DummySourceStream : public v8::ScriptCompiler::ExternalSourceStream {
source_length_);
}
- virtual size_t GetMoreData(const uint8_t** src) {
+ size_t GetMoreData(const uint8_t** src) override {
if (done_) {
return 0;
}
@@ -1405,6 +1442,39 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
+  // d8 honors `options={type: string}`, which means the first argument is
+  // not a filename but a string of script to be run.
+ bool load_from_file = true;
+ if (args.Length() > 1 && args[1]->IsObject()) {
+ Local<Object> object = args[1].As<Object>();
+ Local<Context> context = isolate->GetCurrentContext();
+ Local<Value> value = GetValue(args.GetIsolate(), context, object, "type");
+ if (value->IsString()) {
+ Local<String> worker_type = value->ToString(context).ToLocalChecked();
+ String::Utf8Value str(isolate, worker_type);
+ if (strcmp("string", *str) == 0) {
+ load_from_file = false;
+ } else if (strcmp("classic", *str) == 0) {
+ load_from_file = true;
+ } else {
+ Throw(args.GetIsolate(), "Unsupported worker type");
+ return;
+ }
+ }
+ }
+
+ Local<Value> source;
+ if (load_from_file) {
+ String::Utf8Value filename(args.GetIsolate(), args[0]);
+ source = ReadFile(args.GetIsolate(), *filename);
+ if (source.IsEmpty()) {
+ Throw(args.GetIsolate(), "Error loading worker script");
+ return;
+ }
+ } else {
+ source = args[0];
+ }
+
if (!args.IsConstructCall()) {
Throw(args.GetIsolate(), "Worker must be constructed with new");
return;
@@ -1428,7 +1498,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.Holder()->SetAlignedPointerInInternalField(0, worker);
workers_.push_back(worker);
- String::Utf8Value script(args.GetIsolate(), args[0]);
+ String::Utf8Value script(args.GetIsolate(), source);
if (!*script) {
Throw(args.GetIsolate(), "Can't get worker script");
return;
@@ -2016,8 +2086,9 @@ void WriteLcovDataForRange(std::vector<uint32_t>& lines, int start_line,
}
void WriteLcovDataForNamedRange(std::ostream& sink,
- std::vector<uint32_t>& lines, std::string name,
- int start_line, int end_line, uint32_t count) {
+ std::vector<uint32_t>& lines,
+ const std::string& name, int start_line,
+ int end_line, uint32_t count) {
WriteLcovDataForRange(lines, start_line, end_line, count);
sink << "FN:" << start_line + 1 << "," << name << std::endl;
sink << "FNDA:" << count << "," << name << std::endl;
@@ -2295,7 +2366,7 @@ class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
isolate_ = context->GetIsolate();
context_.Reset(isolate_, context);
}
- virtual ~InspectorFrontend() = default;
+ ~InspectorFrontend() override = default;
private:
void sendResponse(
@@ -2904,6 +2975,8 @@ bool Shell::SetOptions(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
options.mock_arraybuffer_allocator = i::FLAG_mock_arraybuffer_allocator;
+ options.mock_arraybuffer_allocator_limit =
+ i::FLAG_mock_arraybuffer_allocator_limit;
// Set up isolated source groups.
options.isolate_sources = new SourceGroup[options.num_isolates];
@@ -3001,10 +3074,15 @@ void Shell::SetWaitUntilDone(Isolate* isolate, bool value) {
}
namespace {
-bool ProcessMessages(Isolate* isolate,
- std::function<platform::MessageLoopBehavior()> behavior) {
+bool ProcessMessages(
+ Isolate* isolate,
+ const std::function<platform::MessageLoopBehavior()>& behavior) {
Platform* platform = GetDefaultPlatform();
while (true) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::SaveContext saved_context(i_isolate);
+ i_isolate->set_context(nullptr);
+ SealHandleScope shs(isolate);
while (v8::platform::PumpMessageLoop(platform, isolate, behavior())) {
isolate->RunMicrotasks();
}
@@ -3156,6 +3234,14 @@ class Serializer : public ValueSerializer::Delegate {
}
Local<ArrayBuffer> array_buffer = Local<ArrayBuffer>::Cast(element);
+
+ if (std::find(array_buffers_.begin(), array_buffers_.end(),
+ array_buffer) != array_buffers_.end()) {
+ Throw(isolate_,
+ "ArrayBuffer occurs in the transfer array more than once");
+ return Nothing<bool>();
+ }
+
serializer_.TransferArrayBuffer(
static_cast<uint32_t>(array_buffers_.size()), array_buffer);
array_buffers_.emplace_back(isolate_, array_buffer);
@@ -3344,6 +3430,12 @@ int Shell::Main(int argc, char* argv[]) {
g_platform.reset(new PredictablePlatform(std::move(g_platform)));
}
+ if (i::FLAG_trace_turbo_cfg_file == nullptr) {
+ SetFlagsFromString("--trace-turbo-cfg-file=turbo.cfg");
+ }
+ if (i::FLAG_redirect_code_traces_to == nullptr) {
+ SetFlagsFromString("--redirect-code-traces-to=code.asm");
+ }
v8::V8::InitializePlatform(g_platform.get());
v8::V8::Initialize();
if (options.natives_blob || options.snapshot_blob) {
@@ -3352,18 +3444,22 @@ int Shell::Main(int argc, char* argv[]) {
} else {
v8::V8::InitializeExternalStartupData(argv[0]);
}
- if (i::FLAG_trace_turbo_cfg_file == nullptr) {
- SetFlagsFromString("--trace-turbo-cfg-file=turbo.cfg");
- }
- if (i::FLAG_redirect_code_traces_to == nullptr) {
- SetFlagsFromString("--redirect-code-traces-to=code.asm");
- }
int result = 0;
Isolate::CreateParams create_params;
ShellArrayBufferAllocator shell_array_buffer_allocator;
MockArrayBufferAllocator mock_arraybuffer_allocator;
+ const size_t memory_limit =
+ options.mock_arraybuffer_allocator_limit * options.num_isolates;
+ MockArrayBufferAllocatiorWithLimit mock_arraybuffer_allocator_with_limit(
+ memory_limit >= options.mock_arraybuffer_allocator_limit
+ ? memory_limit
+ : std::numeric_limits<size_t>::max());
if (options.mock_arraybuffer_allocator) {
- Shell::array_buffer_allocator = &mock_arraybuffer_allocator;
+ if (memory_limit) {
+ Shell::array_buffer_allocator = &mock_arraybuffer_allocator_with_limit;
+ } else {
+ Shell::array_buffer_allocator = &mock_arraybuffer_allocator;
+ }
} else {
Shell::array_buffer_allocator = &shell_array_buffer_allocator;
}
@@ -3454,6 +3550,7 @@ int Shell::Main(int argc, char* argv[]) {
Shell::HostInitializeImportMetaObject);
{
D8Console console(isolate2);
+ Initialize(isolate2);
debug::SetConsoleDelegate(isolate2, &console);
PerIsolateData data(isolate2);
Isolate::Scope isolate_scope(isolate2);
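Among the d8.cc changes above, ProcessMessages now clears the entered context and seals handles around every pump iteration. A hedged sketch of the loop shape this enforces, using only public API; platform and isolate are assumed to exist:

    // Sketch: drain pending foreground tasks without blocking, with no
    // handles allowed to escape an iteration.
    v8::SealHandleScope seal(isolate);
    while (v8::platform::PumpMessageLoop(
        platform, isolate, v8::platform::MessageLoopBehavior::kDoNotWait)) {
      isolate->RunMicrotasks();  // tasks may have enqueued microtasks
    }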
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 2d60cb8327..29f693bcb0 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -105,9 +105,7 @@ class SourceGroup {
public:
explicit IsolateThread(SourceGroup* group);
- virtual void Run() {
- group_->ExecuteInThread();
- }
+ void Run() override { group_->ExecuteInThread(); }
private:
SourceGroup* group_;
@@ -257,7 +255,7 @@ class Worker {
: base::Thread(base::Thread::Options("WorkerThread")),
worker_(worker) {}
- virtual void Run() { worker_->ExecuteInThread(); }
+ void Run() override { worker_->ExecuteInThread(); }
private:
Worker* worker_;
@@ -369,6 +367,7 @@ class ShellOptions {
bool test_shell;
bool expected_to_throw;
bool mock_arraybuffer_allocator;
+ size_t mock_arraybuffer_allocator_limit = 0;
bool enable_inspector;
int num_isolates;
v8::ScriptCompiler::CompileOptions compile_options;
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index a5486ccd1e..9dfb966902 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -15,7 +15,7 @@ function JSProxyGetTarget(proxy) { };
function JSProxyGetHandler(proxy) { };
try {
- isProxy = Function(['object'], 'return %_IsJSProxy(object)');
+ isProxy = Function(['object'], 'return %IsJSProxy(object)');
JSProxyGetTarget = Function(['proxy'],
'return %JSProxyGetTarget(proxy)');
JSProxyGetHandler = Function(['proxy'],
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index d98d4f6f87..88a056b367 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -27,7 +27,7 @@ static const char kDaysInMonths[] =
{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
DateCache::DateCache()
- : stamp_(0),
+ : stamp_(nullptr),
tz_cache_(
#ifdef V8_INTL_SUPPORT
FLAG_icu_timezone_data ? new ICUTimezoneCache()
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 709c1cbaf2..e26b8688f1 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -47,7 +47,7 @@ class DateParser : public AllStatic {
// InputReader provides basic string parsing and character classification.
template <typename Char>
- class InputReader BASE_EMBEDDED {
+ class InputReader {
public:
InputReader(UnicodeCache* unicode_cache, Vector<Char> s)
: index_(0),
@@ -268,7 +268,7 @@ class DateParser : public AllStatic {
static const int8_t array[][kEntrySize];
};
- class TimeZoneComposer BASE_EMBEDDED {
+ class TimeZoneComposer {
public:
TimeZoneComposer() : sign_(kNone), hour_(kNone), minute_(kNone) {}
void Set(int offset_in_hours) {
@@ -291,7 +291,7 @@ class DateParser : public AllStatic {
int minute_;
};
- class TimeComposer BASE_EMBEDDED {
+ class TimeComposer {
public:
TimeComposer() : index_(0), hour_offset_(kNone) {}
bool IsEmpty() const { return index_ == 0; }
@@ -325,7 +325,7 @@ class DateParser : public AllStatic {
int hour_offset_;
};
- class DayComposer BASE_EMBEDDED {
+ class DayComposer {
public:
DayComposer() : index_(0), named_month_(kNone), is_iso_date_(false) {}
bool IsEmpty() const { return index_ == 0; }
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index f8b716f7c9..57c5f31079 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -81,6 +81,11 @@ std::vector<CoverageBlock> GetSortedBlockData(SharedFunctionInfo* shared) {
std::vector<CoverageBlock> result;
if (coverage_info->SlotCount() == 0) return result;
+ if (FLAG_trace_block_coverage) {
+ PrintF("Collecting coverage data\n");
+ coverage_info->Print(shared->DebugName()->ToCString());
+ }
+
for (int i = 0; i < coverage_info->SlotCount(); i++) {
const int start_pos = coverage_info->StartSourcePosition(i);
const int until_pos = coverage_info->EndSourcePosition(i);
@@ -171,12 +176,6 @@ class CoverageBlockIterator final {
return function_->blocks[read_index_ + 1];
}
- CoverageBlock& GetPreviousBlock() {
- DCHECK(IsActive());
- DCHECK_GT(read_index_, 0);
- return function_->blocks[read_index_ - 1];
- }
-
CoverageBlock& GetParent() {
DCHECK(IsActive());
return nesting_stack_.back();
@@ -331,30 +330,6 @@ void MergeNestedRanges(CoverageFunction* function) {
}
}
-void FilterAliasedSingletons(CoverageFunction* function) {
- CoverageBlockIterator iter(function);
-
- iter.Next(); // Advance once since we reference the previous block later.
-
- while (iter.Next()) {
- CoverageBlock& previous_block = iter.GetPreviousBlock();
- CoverageBlock& block = iter.GetBlock();
-
- bool is_singleton = block.end == kNoSourcePosition;
- bool aliases_start = block.start == previous_block.start;
-
- if (is_singleton && aliases_start) {
- // The previous block must have a full range since duplicate singletons
- // have already been merged.
- DCHECK_NE(previous_block.end, kNoSourcePosition);
- // Likewise, the next block must have another start position since
- // singletons are sorted to the end.
- DCHECK_IMPLIES(iter.HasNext(), iter.GetNextBlock().start != block.start);
- iter.DeleteBlock();
- }
- }
-}
-
void FilterUncoveredRanges(CoverageFunction* function) {
CoverageBlockIterator iter(function);
@@ -427,15 +402,6 @@ void CollectBlockCoverage(CoverageFunction* function, SharedFunctionInfo* info,
// Remove duplicate singleton ranges, keeping the max count.
MergeDuplicateSingletons(function);
- // Remove singleton ranges with the same start position as a full range and
- // throw away their counts.
- // Singleton ranges are only intended to split existing full ranges and should
- // never expand into a full range. Consider 'if (cond) { ... } else { ... }'
- // as a problematic example; if the then-block produces a continuation
- // singleton, it would incorrectly expand into the else range.
- // For more context, see https://crbug.com/v8/8237.
- FilterAliasedSingletons(function);
-
// Rewrite all singletons (created e.g. by continuations and unconditional
// control flow) to ranges.
RewritePositionSingletonsToRanges(function);
diff --git a/deps/v8/src/debug/debug-coverage.h b/deps/v8/src/debug/debug-coverage.h
index 359a813375..13816670f7 100644
--- a/deps/v8/src/debug/debug-coverage.h
+++ b/deps/v8/src/debug/debug-coverage.h
@@ -63,7 +63,7 @@ class Coverage : public std::vector<CoverageScript> {
static std::unique_ptr<Coverage> Collect(
Isolate* isolate, v8::debug::Coverage::Mode collectionMode);
- Coverage() {}
+ Coverage() = default;
};
} // namespace internal
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 583b41f1b2..98e4c58fb9 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -16,7 +16,6 @@
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecodes.h"
#include "src/isolate-inl.h"
-#include "src/objects/api-callbacks.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -140,25 +139,13 @@ MaybeHandle<Object> DebugEvaluate::Evaluate(
Object);
Handle<Object> result;
- bool sucess = false;
+ bool success = false;
if (throw_on_side_effect) isolate->debug()->StartSideEffectCheckMode();
- sucess = Execution::Call(isolate, eval_fun, receiver, 0, nullptr)
- .ToHandle(&result);
+ success = Execution::Call(isolate, eval_fun, receiver, 0, nullptr)
+ .ToHandle(&result);
if (throw_on_side_effect) isolate->debug()->StopSideEffectCheckMode();
- if (!sucess) {
- DCHECK(isolate->has_pending_exception());
- return MaybeHandle<Object>();
- }
-
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (result->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, Handle<JSGlobalProxy>::cast(result));
- // TODO(verwaest): This will crash when the global proxy is detached.
- result = PrototypeIterator::GetCurrent<JSObject>(iter);
- }
-
- return result;
+ if (!success) DCHECK(isolate->has_pending_exception());
+ return success ? result : MaybeHandle<Object>();
}
Handle<SharedFunctionInfo> DebugEvaluate::ContextBuilder::outer_info() const {
@@ -254,19 +241,17 @@ void DebugEvaluate::ContextBuilder::UpdateValues() {
namespace {
bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
-// Use macro to include both inlined and non-inlined version of an intrinsic.
+// Use macro to include only the non-inlined version of an intrinsic.
#define INTRINSIC_WHITELIST(V) \
/* Conversions */ \
V(NumberToString) \
V(ToBigInt) \
- V(ToInteger) \
V(ToLength) \
V(ToNumber) \
V(ToObject) \
V(ToString) \
/* Type checks */ \
V(IsArray) \
- V(IsDate) \
V(IsFunction) \
V(IsJSProxy) \
V(IsJSReceiver) \
@@ -292,6 +277,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ReThrow) \
V(ThrowCalledNonCallable) \
V(ThrowInvalidStringLength) \
+ V(ThrowIteratorError) \
V(ThrowIteratorResultNotAnObject) \
V(ThrowReferenceError) \
V(ThrowSymbolIteratorInvalid) \
@@ -322,7 +308,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ArrayIndexOf) \
V(ArrayIsArray) \
V(ClassOf) \
- V(GenerateRandomNumbers) \
V(GetFunctionName) \
V(GetOwnPropertyDescriptor) \
V(GlobalPrint) \
@@ -363,12 +348,16 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(OptimizeOsr) \
V(UnblockConcurrentRecompilation)
-#define CASE(Name) \
- case Runtime::k##Name: \
- case Runtime::kInline##Name:
+// Intrinsics with inline versions have to be whitelisted here a second time.
+#define INLINE_INTRINSIC_WHITELIST(V) \
+ V(Call) \
+ V(IsJSReceiver)
+#define CASE(Name) case Runtime::k##Name:
+#define INLINE_CASE(Name) case Runtime::kInline##Name:
switch (id) {
INTRINSIC_WHITELIST(CASE)
+ INLINE_INTRINSIC_WHITELIST(INLINE_CASE)
return true;
default:
if (FLAG_trace_side_effect_free_debug_evaluate) {
@@ -379,7 +368,9 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
}
#undef CASE
+#undef INLINE_CASE
#undef INTRINSIC_WHITELIST
+#undef INLINE_INTRINSIC_WHITELIST
}
#ifdef DEBUG
@@ -389,24 +380,23 @@ bool BuiltinToIntrinsicHasNoSideEffect(Builtins::Name builtin_id,
if (IntrinsicHasNoSideEffect(intrinsic_id)) return true;
// Whitelist intrinsics called from specific builtins.
-#define BUILTIN_INTRINSIC_WHITELIST(V, W) \
- /* Arrays */ \
- V(Builtins::kArrayFilter, W(CreateDataProperty)) \
- V(Builtins::kArrayMap, W(CreateDataProperty)) \
- V(Builtins::kArrayPrototypeSlice, W(CreateDataProperty) W(SetProperty)) \
- /* TypedArrays */ \
- V(Builtins::kTypedArrayConstructor, \
- W(TypedArrayCopyElements) W(ThrowInvalidTypedArrayAlignment)) \
- V(Builtins::kTypedArrayPrototypeFilter, W(TypedArrayCopyElements)) \
- V(Builtins::kTypedArrayPrototypeMap, W(SetProperty))
+#define BUILTIN_INTRINSIC_WHITELIST(V, W) \
+ /* Arrays */ \
+ V(Builtins::kArrayFilter, W(CreateDataProperty)) \
+ V(Builtins::kArrayMap, W(CreateDataProperty)) \
+ V(Builtins::kArrayPrototypeSlice, \
+ W(CreateDataProperty) W(SetKeyedProperty) W(SetNamedProperty)) \
+ /* TypedArrays */ \
+ V(Builtins::kTypedArrayConstructor, \
+ W(TypedArrayCopyElements) W(ThrowInvalidTypedArrayAlignment)) \
+ V(Builtins::kTypedArrayPrototypeFilter, W(TypedArrayCopyElements)) \
+ V(Builtins::kTypedArrayPrototypeMap, W(SetKeyedProperty) W(SetNamedProperty))
#define CASE(Builtin, ...) \
case Builtin: \
return (__VA_ARGS__ false);
-#define MATCH(Intrinsic) \
- intrinsic_id == Runtime::k##Intrinsic || \
- intrinsic_id == Runtime::kInline##Intrinsic ||
+#define MATCH(Intrinsic) intrinsic_id == Runtime::k##Intrinsic ||
switch (builtin_id) {
BUILTIN_INTRINSIC_WHITELIST(CASE, MATCH)
@@ -433,6 +423,7 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
case Bytecode::kLdaLookupSlot:
case Bytecode::kLdaGlobal:
case Bytecode::kLdaNamedProperty:
+ case Bytecode::kLdaNamedPropertyNoFeedback:
case Bytecode::kLdaKeyedProperty:
case Bytecode::kLdaGlobalInsideTypeof:
case Bytecode::kLdaLookupSlotInsideTypeof:
@@ -477,6 +468,7 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
// Literals.
case Bytecode::kCreateArrayLiteral:
case Bytecode::kCreateEmptyArrayLiteral:
+ case Bytecode::kCreateArrayFromIterable:
case Bytecode::kCreateObjectLiteral:
case Bytecode::kCreateEmptyObjectLiteral:
case Bytecode::kCreateRegExpLiteral:
@@ -561,6 +553,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kArrayPrototypeFlat:
case Builtins::kArrayPrototypeFlatMap:
case Builtins::kArrayPrototypeKeys:
+ case Builtins::kArrayPrototypeLastIndexOf:
case Builtins::kArrayPrototypeSlice:
case Builtins::kArrayPrototypeSort:
case Builtins::kArrayForEach:
@@ -807,7 +800,10 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kMakeURIError:
// RegExp builtins.
case Builtins::kRegExpConstructor:
+ // Internal.
+ case Builtins::kStrictPoisonPillThrower:
return DebugInfo::kHasNoSideEffect;
+
// Set builtins.
case Builtins::kSetIteratorPrototypeNext:
case Builtins::kSetPrototypeAdd:
@@ -819,6 +815,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kArrayPrototypePush:
case Builtins::kArrayPrototypeReverse:
case Builtins::kArrayPrototypeShift:
+ case Builtins::kArrayPrototypeUnshift:
case Builtins::kArraySplice:
case Builtins::kArrayUnshift:
// Map builtins.
@@ -851,6 +848,7 @@ bool BytecodeRequiresRuntimeCheck(interpreter::Bytecode bytecode) {
typedef interpreter::Bytecode Bytecode;
switch (bytecode) {
case Bytecode::kStaNamedProperty:
+ case Bytecode::kStaNamedPropertyNoFeedback:
case Bytecode::kStaNamedOwnProperty:
case Bytecode::kStaKeyedProperty:
case Bytecode::kStaInArrayLiteral:
@@ -962,34 +960,6 @@ DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
}
// static
-bool DebugEvaluate::CallbackHasNoSideEffect(Object* callback_info) {
- DisallowHeapAllocation no_gc;
- if (callback_info->IsAccessorInfo()) {
- // List of whitelisted internal accessors can be found in accessors.h.
- AccessorInfo* info = AccessorInfo::cast(callback_info);
- if (info->has_no_side_effect()) return true;
- if (FLAG_trace_side_effect_free_debug_evaluate) {
- PrintF("[debug-evaluate] API Callback '");
- info->name()->ShortPrint();
- PrintF("' may cause side effect.\n");
- }
- } else if (callback_info->IsInterceptorInfo()) {
- InterceptorInfo* info = InterceptorInfo::cast(callback_info);
- if (info->has_no_side_effect()) return true;
- if (FLAG_trace_side_effect_free_debug_evaluate) {
- PrintF("[debug-evaluate] API Interceptor may cause side effect.\n");
- }
- } else if (callback_info->IsCallHandlerInfo()) {
- CallHandlerInfo* info = CallHandlerInfo::cast(callback_info);
- if (info->IsSideEffectFreeCallHandlerInfo()) return true;
- if (FLAG_trace_side_effect_free_debug_evaluate) {
- PrintF("[debug-evaluate] API CallHandlerInfo may cause side effect.\n");
- }
- }
- return false;
-}
-
-// static
void DebugEvaluate::ApplySideEffectChecks(
Handle<BytecodeArray> bytecode_array) {
for (interpreter::BytecodeArrayIterator it(bytecode_array); !it.done();
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 420c6c208b..470a4900a7 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -41,7 +41,6 @@ class DebugEvaluate : public AllStatic {
static DebugInfo::SideEffectState FunctionGetSideEffectState(
Isolate* isolate, Handle<SharedFunctionInfo> info);
- static bool CallbackHasNoSideEffect(Object* callback_info);
static void ApplySideEffectChecks(Handle<BytecodeArray> bytecode_array);
private:
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index e474056107..a67ca5bc6b 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -51,6 +51,7 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
}
}
+// NOLINTNEXTLINE
FrameInspector::~FrameInspector() {
// Destructor needs to be defined in the .cc file, because it instantiates
// std::unique_ptr destructors but the types are not known in the header.
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 6a613dbae9..34f3226890 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -19,7 +19,7 @@ class FrameInspector {
FrameInspector(StandardFrame* frame, int inlined_frame_index,
Isolate* isolate);
- ~FrameInspector();
+ ~FrameInspector(); // NOLINT (modernize-use-equals-default)
int GetParametersCount();
Handle<JSFunction> GetFunction() const { return function_; }
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index ac8073e02c..14ccf2c20a 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -154,9 +154,11 @@ void GetLoadedScripts(Isolate* isolate, PersistentValueVector<Script>& scripts);
MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* isolate,
Local<String> source);
+enum ExceptionType { kException, kPromiseRejection };
+
class DebugDelegate {
public:
- virtual ~DebugDelegate() {}
+ virtual ~DebugDelegate() = default;
virtual void ScriptCompiled(v8::Local<Script> script, bool is_live_edited,
bool has_compile_error) {}
// |inspector_break_points_hit| contains id of breakpoints installed with
@@ -166,8 +168,8 @@ class DebugDelegate {
const std::vector<debug::BreakpointId>& inspector_break_points_hit) {}
virtual void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Value> exception,
- v8::Local<v8::Value> promise, bool is_uncaught) {
- }
+ v8::Local<v8::Value> promise, bool is_uncaught,
+ ExceptionType exception_type) {}
virtual bool IsFunctionBlackboxed(v8::Local<debug::Script> script,
const debug::Location& start,
const debug::Location& end) {
@@ -179,7 +181,7 @@ void SetDebugDelegate(Isolate* isolate, DebugDelegate* listener);
class AsyncEventDelegate {
public:
- virtual ~AsyncEventDelegate() {}
+ virtual ~AsyncEventDelegate() = default;
virtual void AsyncEventOccurred(debug::DebugAsyncActionType type, int id,
bool is_blackboxed) = 0;
};
@@ -502,6 +504,20 @@ class PostponeInterruptsScope {
std::unique_ptr<i::PostponeInterruptsScope> scope_;
};
+class WeakMap : public v8::Object {
+ public:
+ V8_WARN_UNUSED_RESULT v8::MaybeLocal<v8::Value> Get(
+ v8::Local<v8::Context> context, v8::Local<v8::Value> key);
+ V8_WARN_UNUSED_RESULT v8::MaybeLocal<WeakMap> Set(
+ v8::Local<v8::Context> context, v8::Local<v8::Value> key,
+ v8::Local<v8::Value> value);
+
+ static Local<WeakMap> New(v8::Isolate* isolate);
+ V8_INLINE static WeakMap* Cast(Value* obj);
+
+ private:
+ WeakMap();
+};
} // namespace debug
} // namespace v8
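The debug::WeakMap surface declared above is deliberately small: New, Get, Set. A minimal usage sketch, assuming an entered context and a hypothetical JS object key:

    // Sketch: an inspector-side ephemeron table keyed by a JS object.
    v8::Local<v8::debug::WeakMap> table = v8::debug::WeakMap::New(isolate);
    if (table->Set(context, key, v8::Integer::New(isolate, 42)).IsEmpty()) {
      return;  // exception pending
    }
    v8::Local<v8::Value> stored;
    if (table->Get(context, key).ToLocal(&stored) && stored->IsInt32()) {
      int32_t n = stored.As<v8::Int32>()->Value();  // 42
      (void)n;
    }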
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 14d2850b69..5f2d657194 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -35,7 +35,7 @@ DebugStackTraceIterator::DebugStackTraceIterator(Isolate* isolate, int index)
for (; !Done() && index > 0; --index) Advance();
}
-DebugStackTraceIterator::~DebugStackTraceIterator() {}
+DebugStackTraceIterator::~DebugStackTraceIterator() = default;
bool DebugStackTraceIterator::Done() const { return iterator_.done(); }
@@ -117,7 +117,9 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
v8::Local<v8::Value> DebugStackTraceIterator::GetReturnValue() const {
DCHECK(!Done());
- if (frame_inspector_->IsWasm()) return v8::Local<v8::Value>();
+ if (frame_inspector_ && frame_inspector_->IsWasm()) {
+ return v8::Local<v8::Value>();
+ }
bool is_optimized = iterator_.frame()->is_optimized();
if (is_optimized || !is_top_frame_ ||
!isolate_->debug()->IsBreakAtReturn(iterator_.javascript_frame())) {
diff --git a/deps/v8/src/debug/debug-type-profile.h b/deps/v8/src/debug/debug-type-profile.h
index de18951381..0d2e88a7d5 100644
--- a/deps/v8/src/debug/debug-type-profile.h
+++ b/deps/v8/src/debug/debug-type-profile.h
@@ -36,7 +36,7 @@ class TypeProfile : public std::vector<TypeProfileScript> {
static void SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode);
private:
- TypeProfile() {}
+ TypeProfile() = default;
};
} // namespace internal
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 3a3a48b699..8a5a9b6eb0 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -29,6 +29,7 @@
#include "src/isolate-inl.h"
#include "src/log.h"
#include "src/messages.h"
+#include "src/objects/api-callbacks-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
@@ -42,7 +43,7 @@ namespace internal {
class Debug::TemporaryObjectsTracker : public HeapObjectAllocationTracker {
public:
TemporaryObjectsTracker() = default;
- ~TemporaryObjectsTracker() = default;
+ ~TemporaryObjectsTracker() override = default;
void AllocationEvent(Address addr, int) override { objects_.insert(addr); }
@@ -417,9 +418,6 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
// Enter the debugger.
DebugScope debug_scope(this);
-
- // Postpone interrupt during breakpoint processing.
- PostponeInterruptsScope postpone(isolate_);
DisableBreak no_recursive_break(this);
// Return if we fail to retrieve debug info.
@@ -1142,7 +1140,7 @@ class RedirectActiveFunctions : public ThreadVisitor {
DCHECK(shared->HasBytecodeArray());
}
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
JSFunction* function = frame->function();
@@ -1165,9 +1163,9 @@ void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
// inlining.
isolate_->AbortConcurrentOptimization(BlockingBehavior::kBlock);
- // Make sure we abort incremental marking.
- isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- GarbageCollectionReason::kDebugger);
+ // TODO(mlippautz): Try to remove this call.
+ isolate_->heap()->PreciseCollectAllGarbage(
+ Heap::kNoGCFlags, GarbageCollectionReason::kDebugger);
bool found_something = false;
Code::OptimizedCodeIterator iterator(isolate_);
@@ -1542,7 +1540,7 @@ void Debug::FindDebugInfo(Handle<DebugInfo> debug_info,
UNREACHABLE();
}
-void Debug::ClearAllDebugInfos(DebugInfoClearFunction clear_function) {
+void Debug::ClearAllDebugInfos(const DebugInfoClearFunction& clear_function) {
DebugInfoListNode* prev = nullptr;
DebugInfoListNode* current = debug_info_list_;
while (current != nullptr) {
@@ -1625,7 +1623,7 @@ void Debug::ScheduleFrameRestart(StackFrame* frame) {
}
Handle<FixedArray> Debug::GetLoadedScripts() {
- isolate_->heap()->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
+ isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kDebugger);
Factory* factory = isolate_->factory();
if (!factory->script_list()->IsWeakArrayList()) {
@@ -1655,7 +1653,10 @@ void Debug::OnThrow(Handle<Object> exception) {
scheduled_exception = handle(isolate_->scheduled_exception(), isolate_);
isolate_->clear_scheduled_exception();
}
- OnException(exception, isolate_->GetPromiseOnStackOnThrow());
+ Handle<Object> maybe_promise = isolate_->GetPromiseOnStackOnThrow();
+ OnException(exception, maybe_promise,
+ maybe_promise->IsJSPromise() ? v8::debug::kPromiseRejection
+ : v8::debug::kException);
if (!scheduled_exception.is_null()) {
isolate_->thread_local_top()->scheduled_exception_ = *scheduled_exception;
}
@@ -1670,7 +1671,7 @@ void Debug::OnPromiseReject(Handle<Object> promise, Handle<Object> value) {
if (!promise->IsJSObject() ||
JSReceiver::GetDataProperty(Handle<JSObject>::cast(promise), key)
->IsUndefined(isolate_)) {
- OnException(value, promise);
+ OnException(value, promise, v8::debug::kPromiseRejection);
}
}
@@ -1695,7 +1696,8 @@ bool Debug::IsFrameBlackboxed(JavaScriptFrame* frame) {
return true;
}
-void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
+void Debug::OnException(Handle<Object> exception, Handle<Object> promise,
+ v8::debug::ExceptionType exception_type) {
// TODO(kozyatinskiy): regress-662674.js test fails on arm without this.
if (!AllowJavascriptExecution::IsAllowed(isolate_)) return;
@@ -1741,9 +1743,9 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
DisableBreak no_recursive_break(this);
Handle<Context> native_context(isolate_->native_context());
- debug_delegate_->ExceptionThrown(v8::Utils::ToLocal(native_context),
- v8::Utils::ToLocal(exception),
- v8::Utils::ToLocal(promise), uncaught);
+ debug_delegate_->ExceptionThrown(
+ v8::Utils::ToLocal(native_context), v8::Utils::ToLocal(exception),
+ v8::Utils::ToLocal(promise), uncaught, exception_type);
}
void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit) {
@@ -1758,8 +1760,8 @@ void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit) {
#endif // DEBUG
if (!debug_delegate_) return;
+ DCHECK(in_debug_scope());
HandleScope scope(isolate_);
- PostponeInterruptsScope no_interrupts(isolate_);
DisableBreak no_recursive_break(this);
std::vector<int> inspector_break_points_hit;
@@ -2182,16 +2184,55 @@ Handle<Object> Debug::return_value_handle() {
return handle(thread_local_.return_value_, isolate_);
}
-bool Debug::PerformSideEffectCheckForCallback(Handle<Object> callback_info) {
+bool Debug::PerformSideEffectCheckForCallback(
+ Handle<Object> callback_info, Handle<Object> receiver,
+ Debug::AccessorKind accessor_kind) {
+ DCHECK_EQ(!receiver.is_null(), callback_info->IsAccessorInfo());
DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
if (!callback_info.is_null() && callback_info->IsCallHandlerInfo() &&
i::CallHandlerInfo::cast(*callback_info)->NextCallHasNoSideEffect()) {
return true;
}
// TODO(7515): always pass a valid callback info object.
- if (!callback_info.is_null() &&
- DebugEvaluate::CallbackHasNoSideEffect(*callback_info)) {
- return true;
+ if (!callback_info.is_null()) {
+ if (callback_info->IsAccessorInfo()) {
+ // List of whitelisted internal accessors can be found in accessors.h.
+ AccessorInfo* info = AccessorInfo::cast(*callback_info);
+ DCHECK_NE(kNotAccessor, accessor_kind);
+ switch (accessor_kind == kSetter ? info->setter_side_effect_type()
+ : info->getter_side_effect_type()) {
+ case SideEffectType::kHasNoSideEffect:
+ // We do not support setter accessors with no side effects, since
+          // calls to set accessors go through a store bytecode. Store bytecodes
+ // are considered to cause side effects (to non-temporary objects).
+ DCHECK_NE(kSetter, accessor_kind);
+ return true;
+ case SideEffectType::kHasSideEffectToReceiver:
+ DCHECK(!receiver.is_null());
+ if (PerformSideEffectCheckForObject(receiver)) return true;
+ isolate_->OptionalRescheduleException(false);
+ return false;
+ case SideEffectType::kHasSideEffect:
+ break;
+ }
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] API Callback '");
+ info->name()->ShortPrint();
+ PrintF("' may cause side effect.\n");
+ }
+ } else if (callback_info->IsInterceptorInfo()) {
+ InterceptorInfo* info = InterceptorInfo::cast(*callback_info);
+ if (info->has_no_side_effect()) return true;
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] API Interceptor may cause side effect.\n");
+ }
+ } else if (callback_info->IsCallHandlerInfo()) {
+ CallHandlerInfo* info = CallHandlerInfo::cast(*callback_info);
+ if (info->IsSideEffectFreeCallHandlerInfo()) return true;
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] API CallHandlerInfo may cause side effect.\n");
+ }
+ }
}
side_effect_check_failed_ = true;
// Throw an uncatchable termination exception.
@@ -2228,11 +2269,14 @@ bool Debug::PerformSideEffectCheckAtBytecode(InterpretedFrame* frame) {
bool Debug::PerformSideEffectCheckForObject(Handle<Object> object) {
DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
- if (object->IsHeapObject()) {
- if (temporary_objects_->HasObject(Handle<HeapObject>::cast(object))) {
- return true;
- }
+ // We expect no side-effects for primitives.
+ if (object->IsNumber()) return true;
+ if (object->IsName()) return true;
+
+ if (temporary_objects_->HasObject(Handle<HeapObject>::cast(object))) {
+ return true;
}
+
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] failed runtime side effect check.\n");
}
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index a6ad7bd4da..3b6748851b 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -326,7 +326,11 @@ class Debug {
bool PerformSideEffectCheck(Handle<JSFunction> function,
Handle<Object> receiver);
- bool PerformSideEffectCheckForCallback(Handle<Object> callback_info);
+
+ enum AccessorKind { kNotAccessor, kGetter, kSetter };
+ bool PerformSideEffectCheckForCallback(Handle<Object> callback_info,
+ Handle<Object> receiver,
+ AccessorKind accessor_kind);
bool PerformSideEffectCheckAtBytecode(InterpretedFrame* frame);
bool PerformSideEffectCheckForObject(Handle<Object> object);
@@ -406,7 +410,8 @@ class Debug {
bool IsExceptionBlackboxed(bool uncaught);
- void OnException(Handle<Object> exception, Handle<Object> promise);
+ void OnException(Handle<Object> exception, Handle<Object> promise,
+ v8::debug::ExceptionType exception_type);
void ProcessCompileEvent(bool has_compile_error, Handle<Script> script);
@@ -447,7 +452,7 @@ class Debug {
// Wraps logic for clearing and maybe freeing all debug infos.
typedef std::function<void(Handle<DebugInfo>)> DebugInfoClearFunction;
- void ClearAllDebugInfos(DebugInfoClearFunction clear_function);
+ void ClearAllDebugInfos(const DebugInfoClearFunction& clear_function);
void FindDebugInfo(Handle<DebugInfo> debug_info, DebugInfoListNode** prev,
DebugInfoListNode** curr);
@@ -551,7 +556,7 @@ class Debug {
// This scope is used to load and enter the debug context and create a new
// break state. Leaving the scope will restore the previous state.
-class DebugScope BASE_EMBEDDED {
+class DebugScope {
public:
explicit DebugScope(Debug* debug);
~DebugScope();
@@ -580,7 +585,7 @@ class ReturnValueScope {
};
// Stack allocated class for disabling break.
-class DisableBreak BASE_EMBEDDED {
+class DisableBreak {
public:
explicit DisableBreak(Debug* debug, bool disable = true)
: debug_(debug), previous_break_disabled_(debug->break_disabled_) {
@@ -596,8 +601,7 @@ class DisableBreak BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(DisableBreak);
};
-
-class SuppressDebug BASE_EMBEDDED {
+class SuppressDebug {
public:
explicit SuppressDebug(Debug* debug)
: debug_(debug), old_state_(debug->is_suppressed_) {
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index 03a60d269e..1e3ab38966 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -16,6 +16,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
@@ -27,24 +29,28 @@ void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
}
void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// Frame is being dropped:
- // - Drop to the target frame specified by ebx.
+ // - Drop to the target frame specified by eax.
// - Look up current function on the frame.
// - Leave the frame.
// - Restart the frame by calling the function.
- __ mov(ebp, ebx);
+ __ mov(ebp, eax);
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ leave();
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ movzx_w(
- ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
+ eax, FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
- ParameterCount dummy(ebx);
- __ InvokeFunction(edi, dummy, dummy, JUMP_FUNCTION);
+ // The expected and actual argument counts don't matter as long as they match
+ // and we don't enter the ArgumentsAdaptorTrampoline.
+ ParameterCount dummy(0);
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ __ InvokeFunctionCode(edi, no_reg, dummy, dummy, JUMP_FUNCTION);
}
-
const bool LiveEdit::kFrameDropperSupported = true;
#undef __
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index f0b47e6238..2d38120da5 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -60,7 +60,7 @@ struct WasmDisassemblyOffsetTableEntry {
struct WasmDisassembly {
using OffsetTable = std::vector<WasmDisassemblyOffsetTableEntry>;
- WasmDisassembly() {}
+ WasmDisassembly() = default;
WasmDisassembly(std::string disassembly, OffsetTable offset_table)
: disassembly(std::move(disassembly)),
offset_table(std::move(offset_table)) {}
@@ -161,6 +161,8 @@ class ConsoleDelegate {
const ConsoleContext& context) {}
virtual void Time(const ConsoleCallArguments& args,
const ConsoleContext& context) {}
+ virtual void TimeLog(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
virtual void TimeEnd(const ConsoleCallArguments& args,
const ConsoleContext& context) {}
virtual void TimeStamp(const ConsoleCallArguments& args,
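The new TimeLog hook lands between Time and TimeEnd and corresponds to console.timeLog(). An embedder delegate overriding it could look like this (embedder-side sketch, not part of the patch; d8's D8Console derives from this class in the same way):

    class MyConsoleDelegate : public v8::debug::ConsoleDelegate {
     public:
      void TimeLog(const v8::debug::ConsoleCallArguments& args,
                   const v8::debug::ConsoleContext& context) override {
        // Report the intermediate reading of the named timer here.
      }
    };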
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index a2e59caf15..abda40bf51 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -253,7 +253,7 @@ class ActivationsFinder : public ThreadVisitor {
// Find the frames with activations of codes marked for deoptimization, search
// for the trampoline to the deoptimizer call respective to each code, and use
// it to replace the current pc on the stack.
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code* code = it.frame()->LookupCode();
@@ -1173,7 +1173,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
BailoutId bailout_id = translated_frame->node_id();
unsigned height = translated_frame->height();
- unsigned height_in_bytes = height * kPointerSize;
+ unsigned parameter_count = height - 1; // Exclude the context.
+ unsigned height_in_bytes = parameter_count * kPointerSize;
// If the construct frame appears to be topmost we should ensure that the
// value of result register is preserved during continuation execution.
@@ -1185,7 +1186,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
if (PadTopOfStackRegister()) height_in_bytes += kPointerSize;
}
- int parameter_count = height;
if (ShouldPadArguments(parameter_count)) height_in_bytes += kPointerSize;
TranslatedFrame::iterator function_iterator = value_iterator++;
@@ -1227,7 +1227,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
TranslatedFrame::iterator receiver_iterator = value_iterator;
// Compute the incoming parameter translation.
- for (int i = 0; i < parameter_count; ++i, ++value_iterator) {
+ for (unsigned i = 0; i < parameter_count; ++i, ++value_iterator) {
frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
}
@@ -1259,13 +1259,10 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
intptr_t marker = StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
frame_writer.PushRawValue(marker, "context (construct stub sentinel)\n");
- // The context can be gotten from the previous frame.
- Object* context =
- reinterpret_cast<Object*>(output_[frame_index - 1]->GetContext());
- frame_writer.PushRawObject(context, "context\n");
+ frame_writer.PushTranslatedValue(value_iterator++, "context\n");
// Number of incoming arguments.
- frame_writer.PushRawObject(Smi::FromInt(height - 1), "argc\n");
+ frame_writer.PushRawObject(Smi::FromInt(parameter_count - 1), "argc\n");
// The constructor function was mentioned explicitly in the
// CONSTRUCT_STUB_FRAME.
@@ -1476,7 +1473,13 @@ void Deoptimizer::DoComputeBuiltinContinuation(
const bool must_handle_result =
!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy;
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+ // TODO(v8:6666): Fold into Default config once root is fully supported.
+ const RegisterConfiguration* config(
+ RegisterConfiguration::PreserveRootIA32());
+#else
const RegisterConfiguration* config(RegisterConfiguration::Default());
+#endif
const int allocatable_register_count =
config->num_allocatable_general_registers();
const int padding_slot_count =
@@ -1739,7 +1742,7 @@ void Deoptimizer::MaterializeHeapObjects() {
translated_state_.Prepare(static_cast<Address>(stack_fp_));
if (FLAG_deopt_every_n_times > 0) {
// Doing a GC here will find problems with the deoptimized frames.
- isolate_->heap()->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
+ isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kTesting);
}
@@ -1998,6 +2001,10 @@ void Translation::StoreInt32Register(Register reg) {
buffer_->Add(reg.code());
}
+void Translation::StoreInt64Register(Register reg) {
+ buffer_->Add(INT64_REGISTER);
+ buffer_->Add(reg.code());
+}
void Translation::StoreUint32Register(Register reg) {
buffer_->Add(UINT32_REGISTER);
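Each Store* helper appends an opcode followed by its single operand to the translation byte stream; a self-contained model of the encoding used by the new int64 entries (an illustration, not V8's buffer type):

    #include <cstdint>
    #include <vector>

    enum Opcode : uint8_t { INT32_REGISTER, INT64_REGISTER, UINT32_REGISTER };

    struct TranslationSketch {
      std::vector<int> buffer;
      void StoreInt64Register(int reg_code) {
        buffer.push_back(INT64_REGISTER);  // Opcode first...
        buffer.push_back(reg_code);        // ...then the register code operand.
      }
    };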
@@ -2032,6 +2039,10 @@ void Translation::StoreInt32StackSlot(int index) {
buffer_->Add(index);
}
+void Translation::StoreInt64StackSlot(int index) {
+ buffer_->Add(INT64_STACK_SLOT);
+ buffer_->Add(index);
+}
void Translation::StoreUint32StackSlot(int index) {
buffer_->Add(UINT32_STACK_SLOT);
@@ -2080,12 +2091,14 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case CAPTURED_OBJECT:
case REGISTER:
case INT32_REGISTER:
+ case INT64_REGISTER:
case UINT32_REGISTER:
case BOOL_REGISTER:
case FLOAT_REGISTER:
case DOUBLE_REGISTER:
case STACK_SLOT:
case INT32_STACK_SLOT:
+ case INT64_STACK_SLOT:
case UINT32_STACK_SLOT:
case BOOL_STACK_SLOT:
case FLOAT_STACK_SLOT:
@@ -2350,6 +2363,13 @@ TranslatedValue TranslatedValue::NewInt32(TranslatedState* container,
return slot;
}
+// static
+TranslatedValue TranslatedValue::NewInt64(TranslatedState* container,
+ int64_t value) {
+ TranslatedValue slot(container, kInt64);
+ slot.int64_value_ = value;
+ return slot;
+}
// static
TranslatedValue TranslatedValue::NewUInt32(TranslatedState* container,
@@ -2398,6 +2418,10 @@ int32_t TranslatedValue::int32_value() const {
return int32_value_;
}
+int64_t TranslatedValue::int64_value() const {
+ DCHECK_EQ(kInt64, kind());
+ return int64_value_;
+}
uint32_t TranslatedValue::uint32_value() const {
DCHECK(kind() == kUInt32 || kind() == kBoolBit);
@@ -2446,6 +2470,15 @@ Object* TranslatedValue::GetRawValue() const {
break;
}
+ case kInt64: {
+ bool is_smi = (int64_value() >= static_cast<int64_t>(Smi::kMinValue) &&
+ int64_value() <= static_cast<int64_t>(Smi::kMaxValue));
+ if (is_smi) {
+ return Smi::FromIntptr(static_cast<intptr_t>(int64_value()));
+ }
+ break;
+ }
+
case kUInt32: {
bool is_smi = (uint32_value() <= static_cast<uintptr_t>(Smi::kMaxValue));
if (is_smi) {
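The new kInt64 case only folds the value into a Smi when it is in range; the same test in isolation (the bounds below are placeholders — the real Smi::kMinValue/kMaxValue depend on the pointer size and Smi encoding):

    #include <cstdint>

    constexpr int64_t kSmiMin = -(int64_t{1} << 30);     // Placeholder bound.
    constexpr int64_t kSmiMax = (int64_t{1} << 30) - 1;  // Placeholder bound.

    bool FitsInSmi(int64_t value) {
      // Out-of-range values fall through to HeapNumber materialization.
      return value >= kSmiMin && value <= kSmiMax;
    }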
@@ -2486,6 +2519,7 @@ Handle<Object> TranslatedValue::GetValue() {
switch (kind()) {
case TranslatedValue::kTagged:
case TranslatedValue::kInt32:
+ case TranslatedValue::kInt64:
case TranslatedValue::kUInt32:
case TranslatedValue::kBoolBit:
case TranslatedValue::kFloat:
@@ -2539,6 +2573,11 @@ void TranslatedValue::MaterializeSimple() {
Handle<Object>(isolate()->factory()->NewNumber(int32_value())));
return;
+ case kInt64:
+ set_initialized_storage(Handle<Object>(
+ isolate()->factory()->NewNumber(static_cast<double>(int64_value()))));
+ return;
+
case kUInt32:
set_initialized_storage(
Handle<Object>(isolate()->factory()->NewNumber(uint32_value())));
@@ -2592,6 +2631,9 @@ int TranslatedValue::GetChildrenCount() const {
}
}
+uint64_t TranslatedState::GetUInt64Slot(Address fp, int slot_offset) {
+ return Memory<uint64_t>(fp + slot_offset);
+}
uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) {
Address address = fp + slot_offset;
@@ -2611,7 +2653,7 @@ Float32 TranslatedState::GetFloatSlot(Address fp, int slot_offset) {
}
Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
- return Float64::FromBits(Memory<uint64_t>(fp + slot_offset));
+ return Float64::FromBits(GetUInt64Slot(fp, slot_offset));
}
void TranslatedValue::Handlify() {
@@ -2814,12 +2856,14 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::CAPTURED_OBJECT:
case Translation::REGISTER:
case Translation::INT32_REGISTER:
+ case Translation::INT64_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::BOOL_REGISTER:
case Translation::FLOAT_REGISTER:
case Translation::DOUBLE_REGISTER:
case Translation::STACK_SLOT:
case Translation::INT32_STACK_SLOT:
+ case Translation::INT64_STACK_SLOT:
case Translation::UINT32_STACK_SLOT:
case Translation::BOOL_STACK_SLOT:
case Translation::FLOAT_STACK_SLOT:
@@ -3029,7 +3073,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
intptr_t value = registers->GetRegister(input_reg);
if (trace_file != nullptr) {
- PrintF(trace_file, "%" V8PRIdPTR " ; %s ", value,
+ PrintF(trace_file, "%" V8PRIdPTR " ; %s (int32)", value,
converter.NameOfCPURegister(input_reg));
}
TranslatedValue translated_value =
@@ -3038,6 +3082,24 @@ int TranslatedState::CreateNextTranslatedValue(
return translated_value.GetChildrenCount();
}
+ case Translation::INT64_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) {
+ TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+ intptr_t value = registers->GetRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%" V8PRIdPTR " ; %s (int64)", value,
+ converter.NameOfCPURegister(input_reg));
+ }
+ TranslatedValue translated_value =
+ TranslatedValue::NewInt64(this, static_cast<int64_t>(value));
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
case Translation::UINT32_REGISTER: {
int input_reg = iterator->Next();
if (registers == nullptr) {
@@ -3047,7 +3109,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
intptr_t value = registers->GetRegister(input_reg);
if (trace_file != nullptr) {
- PrintF(trace_file, "%" V8PRIuPTR " ; %s (uint)", value,
+ PrintF(trace_file, "%" V8PRIuPTR " ; %s (uint32)", value,
converter.NameOfCPURegister(input_reg));
}
TranslatedValue translated_value =
@@ -3131,7 +3193,7 @@ int TranslatedState::CreateNextTranslatedValue(
OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
uint32_t value = GetUInt32Slot(fp, slot_offset);
if (trace_file != nullptr) {
- PrintF(trace_file, "%d ; (int) [fp %c %3d] ",
+ PrintF(trace_file, "%d ; (int32) [fp %c %3d] ",
static_cast<int32_t>(value), slot_offset < 0 ? '-' : '+',
std::abs(slot_offset));
}
@@ -3140,12 +3202,26 @@ int TranslatedState::CreateNextTranslatedValue(
return translated_value.GetChildrenCount();
}
+ case Translation::INT64_STACK_SLOT: {
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ uint64_t value = GetUInt64Slot(fp, slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%" V8PRIdPTR " ; (int64) [fp %c %3d] ",
+ static_cast<intptr_t>(value), slot_offset < 0 ? '-' : '+',
+ std::abs(slot_offset));
+ }
+ TranslatedValue translated_value = TranslatedValue::NewInt64(this, value);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
case Translation::UINT32_STACK_SLOT: {
int slot_offset =
OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
uint32_t value = GetUInt32Slot(fp, slot_offset);
if (trace_file != nullptr) {
- PrintF(trace_file, "%u ; (uint) [fp %c %3d] ", value,
+ PrintF(trace_file, "%u ; (uint32) [fp %c %3d] ", value,
slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
}
TranslatedValue translated_value =
@@ -3389,6 +3465,7 @@ void TranslatedState::InitializeCapturedObjectAt(
return;
case FIXED_ARRAY_TYPE:
+ case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -3532,6 +3609,7 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
case FIXED_ARRAY_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
+ case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index d981a86253..0c5254e773 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -53,6 +53,7 @@ class TranslatedValue {
kInvalid,
kTagged,
kInt32,
+ kInt64,
kUInt32,
kBoolBit,
kFloat,
@@ -88,6 +89,7 @@ class TranslatedValue {
static TranslatedValue NewFloat(TranslatedState* container, Float32 value);
static TranslatedValue NewDouble(TranslatedState* container, Float64 value);
static TranslatedValue NewInt32(TranslatedState* container, int32_t value);
+ static TranslatedValue NewInt64(TranslatedState* container, int64_t value);
static TranslatedValue NewUInt32(TranslatedState* container, uint32_t value);
static TranslatedValue NewBool(TranslatedState* container, uint32_t value);
static TranslatedValue NewTagged(TranslatedState* container, Object* literal);
@@ -128,6 +130,8 @@ class TranslatedValue {
uint32_t uint32_value_;
// kind is kInt32.
int32_t int32_value_;
+ // kind is kInt64.
+ int64_t int64_value_;
// kind is kFloat
Float32 float_value_;
// kind is kDouble
@@ -139,6 +143,7 @@ class TranslatedValue {
// Checked accessors for the union members.
Object* raw_literal() const;
int32_t int32_value() const;
+ int64_t int64_value() const;
uint32_t uint32_value() const;
Float32 float_value() const;
Float64 double_value() const;
@@ -284,7 +289,7 @@ class TranslatedFrame {
class TranslatedState {
public:
- TranslatedState() {}
+ TranslatedState() = default;
explicit TranslatedState(const JavaScriptFrame* frame);
void Prepare(Address stack_frame_pointer);
@@ -368,6 +373,7 @@ class TranslatedState {
Handle<Object> GetValueAndAdvance(TranslatedFrame* frame, int* value_index);
static uint32_t GetUInt32Slot(Address fp, int slot_index);
+ static uint64_t GetUInt64Slot(Address fp, int slot_index);
static Float32 GetFloatSlot(Address fp, int slot_index);
static Float64 GetDoubleSlot(Address fp, int slot_index);
@@ -386,10 +392,9 @@ class TranslatedState {
FeedbackSlot feedback_slot_;
};
-
-class OptimizedFunctionVisitor BASE_EMBEDDED {
+class OptimizedFunctionVisitor {
public:
- virtual ~OptimizedFunctionVisitor() {}
+ virtual ~OptimizedFunctionVisitor() = default;
virtual void VisitFunction(JSFunction* function) = 0;
};
@@ -501,7 +506,7 @@ class Deoptimizer : public Malloced {
static const int kNotDeoptimizationEntry = -1;
// Generators for the deoptimization entry code.
- class TableEntryGenerator BASE_EMBEDDED {
+ class TableEntryGenerator {
public:
TableEntryGenerator(MacroAssembler* masm, DeoptimizeKind kind, int count)
: masm_(masm), deopt_kind_(kind), count_(count) {}
@@ -864,8 +869,7 @@ class DeoptimizerData {
DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
};
-
-class TranslationBuffer BASE_EMBEDDED {
+class TranslationBuffer {
public:
explicit TranslationBuffer(Zone* zone) : contents_(zone) {}
@@ -878,8 +882,7 @@ class TranslationBuffer BASE_EMBEDDED {
ZoneChunkList<uint8_t> contents_;
};
-
-class TranslationIterator BASE_EMBEDDED {
+class TranslationIterator {
public:
TranslationIterator(ByteArray* buffer, int index);
@@ -910,12 +913,14 @@ class TranslationIterator BASE_EMBEDDED {
V(CAPTURED_OBJECT) \
V(REGISTER) \
V(INT32_REGISTER) \
+ V(INT64_REGISTER) \
V(UINT32_REGISTER) \
V(BOOL_REGISTER) \
V(FLOAT_REGISTER) \
V(DOUBLE_REGISTER) \
V(STACK_SLOT) \
V(INT32_STACK_SLOT) \
+ V(INT64_STACK_SLOT) \
V(UINT32_STACK_SLOT) \
V(BOOL_STACK_SLOT) \
V(FLOAT_STACK_SLOT) \
@@ -923,7 +928,7 @@ class TranslationIterator BASE_EMBEDDED {
V(LITERAL) \
V(UPDATE_FEEDBACK)
-class Translation BASE_EMBEDDED {
+class Translation {
public:
#define DECLARE_TRANSLATION_OPCODE_ENUM(item) item,
enum Opcode {
@@ -963,12 +968,14 @@ class Translation BASE_EMBEDDED {
void DuplicateObject(int object_index);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
+ void StoreInt64Register(Register reg);
void StoreUint32Register(Register reg);
void StoreBoolRegister(Register reg);
void StoreFloatRegister(FloatRegister reg);
void StoreDoubleRegister(DoubleRegister reg);
void StoreStackSlot(int index);
void StoreInt32StackSlot(int index);
+ void StoreInt64StackSlot(int index);
void StoreUint32StackSlot(int index);
void StoreBoolStackSlot(int index);
void StoreFloatStackSlot(int index);
diff --git a/deps/v8/src/disasm.h b/deps/v8/src/disasm.h
index f8ef304d2c..81a0055cc5 100644
--- a/deps/v8/src/disasm.h
+++ b/deps/v8/src/disasm.h
@@ -16,7 +16,7 @@ typedef unsigned char byte;
// specific.
class NameConverter {
public:
- virtual ~NameConverter() {}
+ virtual ~NameConverter() = default;
virtual const char* NameOfCPURegister(int reg) const;
virtual const char* NameOfByteCPURegister(int reg) const;
virtual const char* NameOfXMMRegister(int reg) const;
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 4ccddc289c..0bb59ec0fd 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -5,6 +5,7 @@
#include "src/disassembler.h"
#include <memory>
+#include <unordered_map>
#include <vector>
#include "src/assembler-inl.h"
@@ -38,12 +39,39 @@ class V8NameConverter: public disasm::NameConverter {
const CodeReference& code() const { return code_; }
private:
+ void InitExternalRefsCache() const;
+
Isolate* isolate_;
CodeReference code_;
EmbeddedVector<char, 128> v8_buffer_;
+
+ // Maps a root-register-relative offset of an external reference value to
+ // the external reference name (stored in the external reference table).
+ // This cache is used to recognize [root_reg + offs] patterns as direct
+ // accesses to a particular external reference's value.
+ mutable std::unordered_map<int, const char*> directly_accessed_external_refs_;
};
+void V8NameConverter::InitExternalRefsCache() const {
+ ExternalReferenceTable* external_reference_table =
+ isolate_->heap()->external_reference_table();
+ if (!external_reference_table->is_initialized()) return;
+
+ base::AddressRegion addressable_region =
+ isolate_->root_register_addressable_region();
+ Address roots_start =
+ reinterpret_cast<Address>(isolate_->heap()->roots_array_start());
+
+ for (uint32_t i = 0; i < external_reference_table->size(); i++) {
+ Address address = external_reference_table->address(i);
+ if (addressable_region.contains(address)) {
+ int offset = static_cast<int>(address - roots_start);
+ const char* name = external_reference_table->name(i);
+ directly_accessed_external_refs_.insert({offset, name});
+ }
+ }
+}
const char* V8NameConverter::NameOfAddress(byte* pc) const {
if (!code_.is_null()) {
@@ -90,8 +118,11 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
const int kRootsStart = 0;
const int kRootsEnd = Heap::roots_to_external_reference_table_offset();
- const int kExtRefsStart = Heap::roots_to_external_reference_table_offset();
+ const int kExtRefsStart = kRootsEnd;
const int kExtRefsEnd = Heap::roots_to_builtins_offset();
+ const int kBuiltinsStart = kExtRefsEnd;
+ const int kBuiltinsEnd =
+ kBuiltinsStart + Builtins::builtin_count * kPointerSize;
if (kRootsStart <= offset && offset < kRootsEnd) {
uint32_t offset_in_roots_table = offset - kRootsStart;
@@ -99,8 +130,8 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
// Fail safe in the unlikely case of an arbitrary root-relative offset.
if (offset_in_roots_table % kPointerSize != 0) return nullptr;
- Heap::RootListIndex root_index =
- static_cast<Heap::RootListIndex>(offset_in_roots_table / kPointerSize);
+ RootIndex root_index =
+ static_cast<RootIndex>(offset_in_roots_table / kPointerSize);
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
@@ -109,6 +140,7 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
SNPrintF(v8_buffer_, "root (%s)", obj_name.get());
return v8_buffer_.start();
+
} else if (kExtRefsStart <= offset && offset < kExtRefsEnd) {
uint32_t offset_in_extref_table = offset - kExtRefsStart;
@@ -126,8 +158,29 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
isolate_->heap()->external_reference_table()->NameFromOffset(
offset_in_extref_table));
return v8_buffer_.start();
+
+ } else if (kBuiltinsStart <= offset && offset < kBuiltinsEnd) {
+ uint32_t offset_in_builtins_table = (offset - kBuiltinsStart);
+
+ Builtins::Name builtin_id =
+ static_cast<Builtins::Name>(offset_in_builtins_table / kPointerSize);
+
+ const char* name = Builtins::name(builtin_id);
+ SNPrintF(v8_buffer_, "builtin (%s)", name);
+ return v8_buffer_.start();
+
} else {
- return nullptr;
+ // It must be a direct access to one of the external values.
+ if (directly_accessed_external_refs_.empty()) {
+ InitExternalRefsCache();
+ }
+
+ auto iter = directly_accessed_external_refs_.find(offset);
+ if (iter != directly_accessed_external_refs_.end()) {
+ SNPrintF(v8_buffer_, "external value (%s)", iter->second);
+ return v8_buffer_.start();
+ }
+ return "WAAT??? What are we accessing here???";
}
}
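With the builtins range and the external-value fallback added, the root-relative offset space decodes as consecutive ranges; schematically (bounds as defined above, output strings as shown where the hunk includes them):

    // [kRootsStart,    kRootsEnd)     -> "root (<root name>)"
    // [kExtRefsStart,  kExtRefsEnd)   -> external reference table entry (via NameFromOffset)
    // [kBuiltinsStart, kBuiltinsEnd)  -> "builtin (<builtin name>)"
    // anything else                   -> directly_accessed_external_refs_ lookup,
    //                                    yielding "external value (<name>)"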
@@ -142,8 +195,8 @@ static const int kRelocInfoPosition = 57;
static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
const ExternalReferenceEncoder* ref_encoder,
- std::ostream* os, RelocInfo* relocinfo,
- bool first_reloc_info = true) {
+ std::ostream* os, CodeReference host,
+ RelocInfo* relocinfo, bool first_reloc_info = true) {
// Indent the printing of the reloc info.
if (first_reloc_info) {
// The first reloc info is printed after the disassembled instruction.
@@ -199,6 +252,11 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
} else {
out->AddFormatted(" %s", Code::Kind2String(kind));
}
+ } else if (RelocInfo::IsWasmStubCall(rmode) && !isolate) {
+ // Host is isolate-independent, try wasm native module instead.
+ wasm::WasmCode* code = host.as_wasm_code()->native_module()->Lookup(
+ relocinfo->wasm_stub_call_address());
+ out->AddFormatted(" ;; wasm stub: %s", code->GetRuntimeStubName());
} else if (RelocInfo::IsRuntimeEntry(rmode) && isolate &&
isolate->deoptimizer_data() != nullptr) {
// A runtime entry relocinfo might be a deoptimization bailout.
@@ -313,7 +371,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], nullptr, constant_pool);
bool first_reloc_info = (i == 0);
- PrintRelocInfo(&out, isolate, ref_encoder, os, &relocinfo,
+ PrintRelocInfo(&out, isolate, ref_encoder, os, code, &relocinfo,
first_reloc_info);
}
@@ -331,7 +389,8 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
if (reloc_it.rinfo()->IsInConstantPool() &&
(reloc_it.rinfo()->constant_pool_entry_address() ==
constant_pool_entry_address)) {
- PrintRelocInfo(&out, isolate, ref_encoder, os, reloc_it.rinfo());
+ PrintRelocInfo(&out, isolate, ref_encoder, os, code,
+ reloc_it.rinfo());
break;
}
reloc_it.next();
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index e5d55c246a..473c4ebd85 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -25,7 +25,7 @@ namespace internal {
V(BigUint64, biguint64, BIGUINT64, uint64_t) \
V(BigInt64, bigint64, BIGINT64, int64_t)
-enum ElementsKind {
+enum ElementsKind : uint8_t {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
PACKED_SMI_ELEMENTS,
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 6c4222385c..5fad30711d 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -10,6 +10,7 @@
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/isolate-inl.h"
+#include "src/keys.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
@@ -529,11 +530,11 @@ class InternalElementsAccessor : public ElementsAccessor {
explicit InternalElementsAccessor(const char* name)
: ElementsAccessor(name) {}
- virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
- FixedArrayBase* backing_store,
- uint32_t index) = 0;
+ uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
+ FixedArrayBase* backing_store,
+ uint32_t index) override = 0;
- virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
+ PropertyDetails GetDetails(JSObject* holder, uint32_t entry) override = 0;
};
// Base class for element handler implementations. Contains the
@@ -724,18 +725,6 @@ class ElementsAccessorBase : public InternalElementsAccessor {
UNREACHABLE();
}
- Handle<JSArray> Splice(Handle<JSArray> receiver, uint32_t start,
- uint32_t delete_count, Arguments* args,
- uint32_t add_count) final {
- return Subclass::SpliceImpl(receiver, start, delete_count, args, add_count);
- }
-
- static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
- uint32_t start, uint32_t delete_count,
- Arguments* args, uint32_t add_count) {
- UNREACHABLE();
- }
-
Handle<Object> Pop(Handle<JSArray> receiver) final {
return Subclass::PopImpl(receiver);
}
@@ -1026,14 +1015,14 @@ class ElementsAccessorBase : public InternalElementsAccessor {
void CopyElements(Isolate* isolate, Handle<FixedArrayBase> source,
ElementsKind source_kind,
- Handle<FixedArrayBase> destination, int size) {
+ Handle<FixedArrayBase> destination, int size) override {
Subclass::CopyElementsImpl(isolate, *source, 0, *destination, source_kind,
0, kPackedSizeNotKnown, size);
}
void CopyTypedArrayElementsSlice(JSTypedArray* source,
JSTypedArray* destination, size_t start,
- size_t end) {
+ size_t end) override {
Subclass::CopyTypedArrayElementsSliceImpl(source, destination, start, end);
}
@@ -1068,7 +1057,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
Maybe<bool> CollectValuesOrEntries(Isolate* isolate, Handle<JSObject> object,
Handle<FixedArray> values_or_entries,
bool get_entries, int* nof_items,
- PropertyFilter filter) {
+ PropertyFilter filter) override {
return Subclass::CollectValuesOrEntriesImpl(
isolate, object, values_or_entries, get_entries, nof_items, filter);
}
@@ -1298,7 +1287,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
Object* Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
- uint32_t start, uint32_t end) {
+ uint32_t start, uint32_t end) override {
return Subclass::FillImpl(receiver, obj_value, start, end);
}
@@ -2227,58 +2216,6 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
return result_array;
}
- static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
- uint32_t start, uint32_t delete_count,
- Arguments* args, uint32_t add_count) {
- Isolate* isolate = receiver->GetIsolate();
- Heap* heap = isolate->heap();
- uint32_t length = Smi::ToInt(receiver->length());
- uint32_t new_length = length - delete_count + add_count;
-
- ElementsKind kind = KindTraits::Kind;
- if (new_length <= static_cast<uint32_t>(receiver->elements()->length()) &&
- IsSmiOrObjectElementsKind(kind)) {
- HandleScope scope(isolate);
- JSObject::EnsureWritableFastElements(receiver);
- }
-
- Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
-
- if (new_length == 0) {
- receiver->set_elements(ReadOnlyRoots(heap).empty_fixed_array());
- receiver->set_length(Smi::kZero);
- return isolate->factory()->NewJSArrayWithElements(
- backing_store, KindTraits::Kind, delete_count);
- }
-
- // Construct the result array which holds the deleted elements.
- Handle<JSArray> deleted_elements = isolate->factory()->NewJSArray(
- KindTraits::Kind, delete_count, delete_count);
- if (delete_count > 0) {
- DisallowHeapAllocation no_gc;
- Subclass::CopyElementsImpl(isolate, *backing_store, start,
- deleted_elements->elements(), KindTraits::Kind,
- 0, kPackedSizeNotKnown, delete_count);
- }
-
- // Delete and move elements to make space for add_count new elements.
- if (add_count < delete_count) {
- Subclass::SpliceShrinkStep(isolate, receiver, backing_store, start,
- delete_count, add_count, length, new_length);
- } else if (add_count > delete_count) {
- backing_store =
- Subclass::SpliceGrowStep(isolate, receiver, backing_store, start,
- delete_count, add_count, length, new_length);
- }
-
- // Copy over the arguments.
- Subclass::CopyArguments(args, backing_store, add_count, 3, start);
-
- receiver->set_length(Smi::FromInt(new_length));
- Subclass::TryTransitionResultArrayToPacked(deleted_elements);
- return deleted_elements;
- }
-
static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store, int dst_index,
int src_index, int len, int hole_start,
@@ -2503,50 +2440,6 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
return result;
}
- private:
- // SpliceShrinkStep might modify the backing_store.
- static void SpliceShrinkStep(Isolate* isolate, Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
- uint32_t start, uint32_t delete_count,
- uint32_t add_count, uint32_t len,
- uint32_t new_length) {
- const int move_left_count = len - delete_count - start;
- const int move_left_dst_index = start + add_count;
- Subclass::MoveElements(isolate, receiver, backing_store,
- move_left_dst_index, start + delete_count,
- move_left_count, new_length, len);
- }
-
- // SpliceGrowStep might modify the backing_store.
- static Handle<FixedArrayBase> SpliceGrowStep(
- Isolate* isolate, Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, uint32_t start,
- uint32_t delete_count, uint32_t add_count, uint32_t length,
- uint32_t new_length) {
- // Check we do not overflow the new_length.
- DCHECK((add_count - delete_count) <= (Smi::kMaxValue - length));
- // Check if backing_store is big enough.
- if (new_length <= static_cast<uint32_t>(backing_store->length())) {
- Subclass::MoveElements(isolate, receiver, backing_store,
- start + add_count, start + delete_count,
- (length - delete_count - start), 0, 0);
- // MoveElements updates the backing_store in-place.
- return backing_store;
- }
- // New backing storage is needed.
- int capacity = JSObject::NewElementsCapacity(new_length);
- // Partially copy all elements up to start.
- Handle<FixedArrayBase> new_elms = Subclass::ConvertElementsWithCapacity(
- receiver, backing_store, KindTraits::Kind, capacity, start);
- // Copy the trailing elements after start + delete_count
- Subclass::CopyElementsImpl(isolate, *backing_store, start + delete_count,
- *new_elms, KindTraits::Kind, start + add_count,
- kPackedSizeNotKnown,
- ElementsAccessor::kCopyToEndAndInitializeToHole);
- receiver->set_elements(*new_elms);
- return new_elms;
- }
-
static Handle<Object> RemoveElement(Handle<JSArray> receiver,
Where remove_position) {
Isolate* isolate = receiver->GetIsolate();
@@ -3285,8 +3178,8 @@ class TypedElementsAccessor
size_t start, size_t end) {
DisallowHeapAllocation no_gc;
DCHECK_EQ(destination->GetElementsKind(), AccessorClass::kind());
- DCHECK(!source->WasNeutered());
- DCHECK(!destination->WasNeutered());
+ CHECK(!source->WasNeutered());
+ CHECK(!destination->WasNeutered());
DCHECK_LE(start, end);
DCHECK_LE(end, source->length_value());
@@ -3356,6 +3249,9 @@ class TypedElementsAccessor
// side-effects, as the source elements will always be a number.
DisallowHeapAllocation no_gc;
+ CHECK(!source->WasNeutered());
+ CHECK(!destination->WasNeutered());
+
FixedTypedArrayBase* source_elements =
FixedTypedArrayBase::cast(source->elements());
BackingStore* destination_elements =
@@ -3377,8 +3273,8 @@ class TypedElementsAccessor
uint8_t* source_data = static_cast<uint8_t*>(source_elements->DataPtr());
uint8_t* dest_data = static_cast<uint8_t*>(destination_elements->DataPtr());
- size_t source_byte_length = NumberToSize(source->byte_length());
- size_t dest_byte_length = NumberToSize(destination->byte_length());
+ size_t source_byte_length = source->byte_length();
+ size_t dest_byte_length = destination->byte_length();
// We can simply copy the backing store if the types are the same, or if
// we are converting e.g. Uint8 <-> Int8, as the binary representation
@@ -3446,6 +3342,8 @@ class TypedElementsAccessor
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate);
+ CHECK(!destination->WasNeutered());
+
size_t current_length;
DCHECK(source->length()->IsNumber() &&
TryNumberToSize(source->length(), &current_length) &&
@@ -3566,6 +3464,7 @@ class TypedElementsAccessor
Handle<JSTypedArray> destination_ta =
Handle<JSTypedArray>::cast(destination);
DCHECK_LE(offset + length, destination_ta->length_value());
+ CHECK(!destination_ta->WasNeutered());
if (length == 0) return *isolate->factory()->undefined_value();
@@ -3593,7 +3492,6 @@ class TypedElementsAccessor
// If we have to copy more elements than we have in the source, we need to
// do special handling and conversion; that happens in the slow case.
if (length + offset <= source_ta->length_value()) {
- DCHECK(length == 0 || !source_ta->WasNeutered());
CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset);
return *isolate->factory()->undefined_value();
}
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index b0aa911f32..8cdbf331ef 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -19,7 +19,7 @@ class JSTypedArray;
class ElementsAccessor {
public:
explicit ElementsAccessor(const char* name) : name_(name) { }
- virtual ~ElementsAccessor() { }
+ virtual ~ElementsAccessor() = default;
const char* name() const { return name_; }
@@ -135,10 +135,6 @@ class ElementsAccessor {
virtual Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
uint32_t end) = 0;
- virtual Handle<JSArray> Splice(Handle<JSArray> receiver,
- uint32_t start, uint32_t delete_count,
- Arguments* args, uint32_t add_count) = 0;
-
virtual Handle<Object> Pop(Handle<JSArray> receiver) = 0;
virtual Handle<Object> Shift(Handle<JSArray> receiver) = 0;
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index de1530ba27..b19128a941 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -20,11 +20,11 @@ class SimpleStringResource : public Base {
: data_(data),
length_(length) {}
- virtual ~SimpleStringResource() { delete[] data_; }
+ ~SimpleStringResource() override { delete[] data_; }
- virtual const Char* data() const { return data_; }
+ const Char* data() const override { return data_; }
- virtual size_t length() const { return length_; }
+ size_t length() const override { return length_; }
private:
Char* const data_;
@@ -70,10 +70,7 @@ void ExternalizeStringExtension::Externalize(
bool force_two_byte = false;
if (args.Length() >= 2) {
if (args[1]->IsBoolean()) {
- force_two_byte =
- args[1]
- ->BooleanValue(args.GetIsolate()->GetCurrentContext())
- .FromJust();
+ force_two_byte = args[1]->BooleanValue(args.GetIsolate());
} else {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(
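This hunk, like the gc- and statistics-extension ones below, tracks an API simplification visible throughout the patch: Value::BooleanValue cannot fail, so the context-taking, Maybe-returning overload gives way to an infallible isolate-taking one. Side by side, as in the hunks here:

    // Before: args[1]->BooleanValue(args.GetIsolate()->GetCurrentContext()).FromJust()
    // After:  args[1]->BooleanValue(args.GetIsolate())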
diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h
index c8907b42ed..8d08a7474a 100644
--- a/deps/v8/src/extensions/externalize-string-extension.h
+++ b/deps/v8/src/extensions/externalize-string-extension.h
@@ -13,8 +13,8 @@ namespace internal {
class ExternalizeStringExtension : public v8::Extension {
public:
ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name);
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
static void Externalize(const v8::FunctionCallbackInfo<v8::Value>& args);
static void IsOneByte(const v8::FunctionCallbackInfo<v8::Value>& args);
diff --git a/deps/v8/src/extensions/free-buffer-extension.h b/deps/v8/src/extensions/free-buffer-extension.h
index 6bc5e57cbc..51c620d171 100644
--- a/deps/v8/src/extensions/free-buffer-extension.h
+++ b/deps/v8/src/extensions/free-buffer-extension.h
@@ -14,8 +14,8 @@ class FreeBufferExtension : public v8::Extension {
public:
FreeBufferExtension()
: v8::Extension("v8/free-buffer", "native function freeBuffer();") {}
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name);
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
static void FreeBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
};
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 9eb453b986..4f446627fd 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -18,9 +18,7 @@ v8::Local<v8::FunctionTemplate> GCExtension::GetNativeFunctionTemplate(
void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetIsolate()->RequestGarbageCollectionForTesting(
- args[0]
- ->BooleanValue(args.GetIsolate()->GetCurrentContext())
- .FromMaybe(false)
+ args[0]->BooleanValue(args.GetIsolate())
? v8::Isolate::kMinorGarbageCollection
: v8::Isolate::kFullGarbageCollection);
}
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index 9be0d4b701..4fd35d4233 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -16,8 +16,8 @@ class GCExtension : public v8::Extension {
explicit GCExtension(const char* fun_name)
: v8::Extension("v8/gc",
BuildSource(buffer_, sizeof(buffer_), fun_name)) {}
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name);
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
static void GC(const v8::FunctionCallbackInfo<v8::Value>& args);
private:
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index aa9d5c4364..25081b69e0 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -63,10 +63,7 @@ void StatisticsExtension::GetCounters(
Heap* heap = isolate->heap();
if (args.Length() > 0) { // GC if first argument evaluates to true.
- if (args[0]->IsBoolean() &&
- args[0]
- ->BooleanValue(args.GetIsolate()->GetCurrentContext())
- .FromMaybe(false)) {
+ if (args[0]->IsBoolean() && args[0]->BooleanValue(args.GetIsolate())) {
heap->CollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kCountersExtension);
}
diff --git a/deps/v8/src/extensions/statistics-extension.h b/deps/v8/src/extensions/statistics-extension.h
index 714f86aeba..4c53cbfdea 100644
--- a/deps/v8/src/extensions/statistics-extension.h
+++ b/deps/v8/src/extensions/statistics-extension.h
@@ -13,8 +13,8 @@ namespace internal {
class StatisticsExtension : public v8::Extension {
public:
StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name);
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
static void GetCounters(const v8::FunctionCallbackInfo<v8::Value>& args);
private:
diff --git a/deps/v8/src/extensions/trigger-failure-extension.h b/deps/v8/src/extensions/trigger-failure-extension.h
index 7c7ecf882c..e2cfac1eb3 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.h
+++ b/deps/v8/src/extensions/trigger-failure-extension.h
@@ -13,8 +13,8 @@ namespace internal {
class TriggerFailureExtension : public v8::Extension {
public:
TriggerFailureExtension() : v8::Extension("v8/trigger-failure", kSource) {}
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name);
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
static void TriggerCheckFalse(
const v8::FunctionCallbackInfo<v8::Value>& args);
static void TriggerAssertFalse(
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 4d555e1829..47bc1b9ee4 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -159,10 +159,10 @@ void ExternalReferenceTable::AddAccessors(int* index) {
};
static const AccessorRefTable getters[] = {
-#define ACCESSOR_INFO_DECLARATION(accessor_name, AccessorName) \
- {FUNCTION_ADDR(&Accessors::AccessorName##Getter), \
+#define ACCESSOR_INFO_DECLARATION(_, accessor_name, AccessorName, ...) \
+ {FUNCTION_ADDR(&Accessors::AccessorName##Getter), \
"Accessors::" #AccessorName "Getter"}, /* NOLINT(whitespace/indent) */
- ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
+ ACCESSOR_INFO_LIST_GENERATOR(ACCESSOR_INFO_DECLARATION, /* not used */)
#undef ACCESSOR_INFO_DECLARATION
};
static const AccessorRefTable setters[] = {
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
index 38acac2b7a..4fa1088ab7 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/external-reference-table.h
@@ -30,7 +30,8 @@ class ExternalReferenceTable {
BUILTIN_LIST_C(COUNT_C_BUILTIN);
#undef COUNT_C_BUILTIN
static constexpr int kRuntimeReferenceCount =
- Runtime::kNumFunctions / 2; // Don't count dupe kInline... functions.
+ Runtime::kNumFunctions -
+ Runtime::kNumInlineFunctions; // Don't count dupe kInline... functions.
static constexpr int kIsolateAddressReferenceCount = kIsolateAddressCount;
static constexpr int kAccessorReferenceCount =
Accessors::kAccessorInfoCount + Accessors::kAccessorSetterCount;
@@ -73,7 +74,7 @@ class ExternalReferenceTable {
return OffsetOfEntry(size()) + 2 * kUInt32Size;
}
- ExternalReferenceTable() {}
+ ExternalReferenceTable() = default;
void Init(Isolate* isolate);
private:
diff --git a/deps/v8/src/external-reference.cc b/deps/v8/src/external-reference.cc
index 0fbde95a1c..806fbb8af5 100644
--- a/deps/v8/src/external-reference.cc
+++ b/deps/v8/src/external-reference.cc
@@ -16,6 +16,7 @@
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
+#include "src/math-random.h"
#include "src/objects-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/simulator-base.h"
@@ -134,11 +135,6 @@ ExternalReference ExternalReference::handle_scope_implementer_address(
return ExternalReference(isolate->handle_scope_implementer_address());
}
-ExternalReference ExternalReference::pending_microtask_count_address(
- Isolate* isolate) {
- return ExternalReference(isolate->pending_microtask_count_address());
-}
-
ExternalReference ExternalReference::interpreter_dispatch_table_address(
Isolate* isolate) {
return ExternalReference(isolate->interpreter()->dispatch_table_address());
@@ -463,6 +459,11 @@ ExternalReference ExternalReference::abort_with_reason() {
return ExternalReference(Redirect(FUNCTION_ADDR(i::abort_with_reason)));
}
+ExternalReference
+ExternalReference::address_of_harmony_await_optimization_flag() {
+ return ExternalReference(&FLAG_harmony_await_optimization);
+}
+
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<Address>(&double_min_int_constant));
}
@@ -724,6 +725,10 @@ ExternalReference ExternalReference::printf_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(std::printf)));
}
+ExternalReference ExternalReference::refill_math_random() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(MathRandom::RefillCache)));
+}
+
template <typename SubjectChar, typename PatternChar>
ExternalReference ExternalReference::search_string_raw() {
auto f = SearchStringRaw<SubjectChar, PatternChar>;
@@ -751,14 +756,13 @@ ExternalReference ExternalReference::orderedhashmap_gethash_raw() {
return ExternalReference(Redirect(FUNCTION_ADDR(f)));
}
-ExternalReference ExternalReference::get_or_create_hash_raw(Isolate* isolate) {
+ExternalReference ExternalReference::get_or_create_hash_raw() {
typedef Smi* (*GetOrCreateHash)(Isolate * isolate, Object * key);
GetOrCreateHash f = Object::GetOrCreateHash;
return ExternalReference(Redirect(FUNCTION_ADDR(f)));
}
-ExternalReference ExternalReference::jsreceiver_create_identity_hash(
- Isolate* isolate) {
+ExternalReference ExternalReference::jsreceiver_create_identity_hash() {
typedef Smi* (*CreateIdentityHash)(Isolate * isolate, JSReceiver * key);
CreateIdentityHash f = JSReceiver::CreateIdentityHash;
return ExternalReference(Redirect(FUNCTION_ADDR(f)));
@@ -795,6 +799,10 @@ ExternalReference ExternalReference::try_internalize_string_function() {
Redirect(FUNCTION_ADDR(StringTable::LookupStringIfExists_NoAllocate)));
}
+ExternalReference ExternalReference::smi_lexicographic_compare_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(Smi::LexicographicCompare)));
+}
+
ExternalReference ExternalReference::check_object_type() {
return ExternalReference(Redirect(FUNCTION_ADDR(CheckObjectType)));
}
@@ -825,8 +833,8 @@ ExternalReference ExternalReference::page_flags(Page* page) {
MemoryChunk::kFlagsOffset);
}
-ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
- return ExternalReference(entry);
+ExternalReference ExternalReference::FromRawAddress(Address address) {
+ return ExternalReference(address);
}
ExternalReference ExternalReference::cpu_features() {
@@ -850,6 +858,11 @@ ExternalReference::promise_hook_or_async_event_delegate_address(
isolate->promise_hook_or_async_event_delegate_address());
}
+ExternalReference ExternalReference::debug_execution_mode_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug_execution_mode_address());
+}
+
ExternalReference ExternalReference::debug_is_active_address(Isolate* isolate) {
return ExternalReference(isolate->debug()->is_active_address());
}
@@ -870,21 +883,19 @@ ExternalReference ExternalReference::invalidate_prototype_chains_function() {
Redirect(FUNCTION_ADDR(JSObject::InvalidatePrototypeChains)));
}
-double power_helper(Isolate* isolate, double x, double y) {
+double power_helper(double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
return power_double_int(x, y_int); // Returns 1 if exponent is 0.
}
if (y == 0.5) {
- lazily_initialize_fast_sqrt(isolate);
+ lazily_initialize_fast_sqrt();
return (std::isinf(x)) ? V8_INFINITY
- : fast_sqrt(x + 0.0, isolate); // Convert -0 to +0.
+ : fast_sqrt(x + 0.0); // Convert -0 to +0.
}
if (y == -0.5) {
- lazily_initialize_fast_sqrt(isolate);
- return (std::isinf(x)) ? 0
- : 1.0 / fast_sqrt(x + 0.0,
- isolate); // Convert -0 to +0.
+ lazily_initialize_fast_sqrt();
+ return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
}
return power_double_double(x, y);
}
@@ -949,6 +960,25 @@ ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
}
+static uint64_t atomic_pair_compare_exchange(intptr_t address,
+ int old_value_low,
+ int old_value_high,
+ int new_value_low,
+ int new_value_high) {
+ uint64_t old_value = static_cast<uint64_t>(old_value_high) << 32 |
+ (old_value_low & 0xFFFFFFFF);
+ uint64_t new_value = static_cast<uint64_t>(new_value_high) << 32 |
+ (new_value_low & 0xFFFFFFFF);
+ std::atomic_compare_exchange_strong(
+ reinterpret_cast<std::atomic<uint64_t>*>(address), &old_value, new_value);
+ return old_value;
+}
+
+ExternalReference ExternalReference::atomic_pair_compare_exchange_function() {
+ return ExternalReference(
+ Redirect(FUNCTION_ADDR(atomic_pair_compare_exchange)));
+}
+
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
}
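The helper above reassembles a 64-bit word from the two 32-bit halves the generated code passes in; the packing scheme and the compare-exchange contract, shown standalone (a sketch, not the runtime's calling convention):

    #include <atomic>
    #include <cstdint>

    uint64_t Pack(int high, int low) {
      return static_cast<uint64_t>(high) << 32 | static_cast<uint32_t>(low);
    }

    void Demo() {
      std::atomic<uint64_t> cell{Pack(1, 2)};
      uint64_t expected = Pack(1, 2);
      // Succeeds and stores Pack(3, 4); on failure 'expected' is updated to the
      // current value, matching the helper's return-the-old-value contract.
      cell.compare_exchange_strong(expected, Pack(3, 4));
    }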
diff --git a/deps/v8/src/external-reference.h b/deps/v8/src/external-reference.h
index fb3ec07c8b..eb4b235cb0 100644
--- a/deps/v8/src/external-reference.h
+++ b/deps/v8/src/external-reference.h
@@ -27,8 +27,6 @@ class StatsCounter;
V(builtins_address, "builtins") \
V(handle_scope_implementer_address, \
"Isolate::handle_scope_implementer_address") \
- V(pending_microtask_count_address, \
- "Isolate::pending_microtask_count_address()") \
V(interpreter_dispatch_counters, "Interpreter::dispatch_counters") \
V(interpreter_dispatch_table_address, "Interpreter::dispatch_table_address") \
V(date_cache_stamp, "date_cache_stamp") \
@@ -51,12 +49,11 @@ class StatsCounter;
V(handle_scope_limit_address, "HandleScope::limit") \
V(scheduled_exception_address, "Isolate::scheduled_exception") \
V(address_of_pending_message_obj, "address_of_pending_message_obj") \
- V(get_or_create_hash_raw, "get_or_create_hash_raw") \
- V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
V(promise_hook_address, "Isolate::promise_hook_address()") \
V(async_event_delegate_address, "Isolate::async_event_delegate_address()") \
V(promise_hook_or_async_event_delegate_address, \
"Isolate::promise_hook_or_async_event_delegate_address()") \
+ V(debug_execution_mode_address, "Isolate::debug_execution_mode_address()") \
V(debug_is_active_address, "Debug::is_active_address()") \
V(debug_hook_on_function_call_address, \
"Debug::hook_on_function_call_address()") \
@@ -76,6 +73,8 @@ class StatsCounter;
V(address_of_double_neg_constant, "double_negate_constant") \
V(address_of_float_abs_constant, "float_absolute_constant") \
V(address_of_float_neg_constant, "float_negate_constant") \
+ V(address_of_harmony_await_optimization_flag, \
+ "FLAG_harmony_await_optimization") \
V(address_of_min_int, "LDoubleConstant::min_int") \
V(address_of_one_half, "LDoubleConstant::one_half") \
V(address_of_runtime_stats_flag, "FLAG_runtime_stats") \
@@ -97,6 +96,7 @@ class StatsCounter;
V(f64_mod_wrapper_function, "f64_mod_wrapper") \
V(fixed_typed_array_base_data_offset, "fixed_typed_array_base_data_offset") \
V(get_date_field_function, "JSDate::GetField") \
+ V(get_or_create_hash_raw, "get_or_create_hash_raw") \
V(ieee754_acos_function, "base::ieee754::acos") \
V(ieee754_acosh_function, "base::ieee754::acosh") \
V(ieee754_asin_function, "base::ieee754::asin") \
@@ -123,6 +123,7 @@ class StatsCounter;
"JSObject::InvalidatePrototypeChains()") \
V(invoke_accessor_getter_callback, "InvokeAccessorGetterCallback") \
V(invoke_function_callback, "InvokeFunctionCallback") \
+ V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
V(libc_memchr_function, "libc_memchr") \
V(libc_memcpy_function, "libc_memcpy") \
V(libc_memmove_function, "libc_memmove") \
@@ -134,12 +135,14 @@ class StatsCounter;
V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \
V(power_double_double_function, "power_double_double_function") \
V(printf_function, "printf") \
+ V(refill_math_random, "MathRandom::RefillCache") \
V(store_buffer_overflow_function, "StoreBuffer::StoreBufferOverflow") \
V(search_string_raw_one_one, "search_string_raw_one_one") \
V(search_string_raw_one_two, "search_string_raw_one_two") \
V(search_string_raw_two_one, "search_string_raw_two_one") \
V(search_string_raw_two_two, "search_string_raw_two_two") \
V(try_internalize_string_function, "try_internalize_string_function") \
+ V(smi_lexicographic_compare_function, "smi_lexicographic_compare_function") \
V(wasm_call_trap_callback_for_testing, \
"wasm::call_trap_callback_for_testing") \
V(wasm_f32_ceil, "wasm::f32_ceil_wrapper") \
@@ -169,6 +172,8 @@ class StatsCounter;
V(wasm_word32_ror, "wasm::word32_ror") \
V(wasm_word64_ctz, "wasm::word64_ctz") \
V(wasm_word64_popcnt, "wasm::word64_popcnt") \
+ V(atomic_pair_compare_exchange_function, \
+ "atomic_pair_compare_exchange_function") \
EXTERNAL_REFERENCE_LIST_INTL(V)
#ifndef V8_INTERPRETED_REGEXP
@@ -200,7 +205,7 @@ class StatsCounter;
// in an ExternalReference instance. This is done in order to track the
// origin of all external references in the code so that they can be bound
// to the correct addresses when deserializing a heap.
-class ExternalReference BASE_EMBEDDED {
+class ExternalReference {
public:
// Used in the simulator to support different native api calls.
enum Type {
@@ -268,7 +273,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference page_flags(Page* page);
- static ExternalReference ForDeoptEntry(Address entry);
+ static ExternalReference FromRawAddress(Address address);
#define DECL_EXTERNAL_REFERENCE(name, desc) static ExternalReference name();
EXTERNAL_REFERENCE_LIST(DECL_EXTERNAL_REFERENCE)
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index d539eef57b..c9cdb0a157 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -116,11 +116,9 @@ void FeedbackVector::increment_deopt_count() {
Code* FeedbackVector::optimized_code() const {
MaybeObject* slot = optimized_code_weak_or_smi();
- DCHECK(slot->IsSmi() || slot->IsClearedWeakHeapObject() ||
- slot->IsWeakHeapObject());
+ DCHECK(slot->IsSmi() || slot->IsWeakOrCleared());
HeapObject* heap_object;
- return slot->ToStrongOrWeakHeapObject(&heap_object) ? Code::cast(heap_object)
- : nullptr;
+ return slot->GetHeapObject(&heap_object) ? Code::cast(heap_object) : nullptr;
}
OptimizationMarker FeedbackVector::optimization_marker() const {
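The feedback-vector changes in this and the following hunks are mechanical renames of the MaybeObject accessors; the mapping used throughout:

    ToStrongOrWeakHeapObject(&obj)  ->  GetHeapObject(&obj)
    ToStrongHeapObject(&obj)        ->  GetHeapObjectIfStrong(&obj)
    IsClearedWeakHeapObject()       ->  IsCleared()
    IsWeakOrClearedHeapObject()     ->  IsWeakOrCleared()
    ToSmi()                         ->  cast<Smi>()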
@@ -279,8 +277,8 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kTypeProfile: {
HeapObject* heap_object;
- if (obj->IsWeakOrClearedHeapObject() ||
- (obj->ToStrongHeapObject(&heap_object) &&
+ if (obj->IsWeakOrCleared() ||
+ (obj->GetHeapObjectIfStrong(&heap_object) &&
(heap_object->IsWeakFixedArray() || heap_object->IsString()))) {
with++;
} else if (obj == megamorphic_sentinel) {
@@ -291,7 +289,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
break;
}
case FeedbackSlotKind::kBinaryOp: {
- int const feedback = Smi::ToInt(obj->ToSmi());
+ int const feedback = Smi::ToInt(obj->cast<Smi>());
BinaryOperationHint hint = BinaryOperationHintFromFeedback(feedback);
if (hint == BinaryOperationHint::kAny) {
gen++;
@@ -303,7 +301,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
break;
}
case FeedbackSlotKind::kCompareOp: {
- int const feedback = Smi::ToInt(obj->ToSmi());
+ int const feedback = Smi::ToInt(obj->cast<Smi>());
CompareOperationHint hint = CompareOperationHintFromFeedback(feedback);
if (hint == CompareOperationHint::kAny) {
gen++;
@@ -315,7 +313,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
break;
}
case FeedbackSlotKind::kForIn: {
- int const feedback = Smi::ToInt(obj->ToSmi());
+ int const feedback = Smi::ToInt(obj->cast<Smi>());
ForInHint hint = ForInHintFromFeedback(feedback);
if (hint == ForInHint::kAny) {
gen++;
@@ -327,7 +325,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
break;
}
case FeedbackSlotKind::kInstanceOf: {
- if (obj->IsWeakOrClearedHeapObject()) {
+ if (obj->IsWeakOrCleared()) {
with++;
} else if (obj == megamorphic_sentinel) {
gen++;
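
The edits in this file are one systematic rename of the MaybeObject accessors. Collected in one place for reference, old name left, new name right, all pairs taken verbatim from the hunks in this patch:

    // MaybeObject accessor renames applied throughout this patch:
    //   IsClearedWeakHeapObject()      -> IsCleared()
    //   IsWeakOrClearedHeapObject()    -> IsWeakOrCleared()
    //   IsStrongOrWeakHeapObject()     -> IsStrongOrWeak()
    //   ToStrongHeapObject(&obj)       -> GetHeapObjectIfStrong(&obj)
    //   ToWeakHeapObject(&obj)         -> GetHeapObjectIfWeak(&obj)
    //   ToStrongOrWeakHeapObject(&obj) -> GetHeapObject(&obj)
    //   ToStrongHeapObject()           -> GetHeapObjectAssumeStrong()
    //   ToSmi(), ToObject()            -> cast<Smi>(), cast<Object>()
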
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index 90ae08b0ba..eaea7a978c 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -42,7 +42,7 @@ bool FeedbackVectorSpec::HasTypeProfileSlot() const {
static bool IsPropertyNameFeedback(MaybeObject* feedback) {
HeapObject* heap_object;
- if (!feedback->ToStrongHeapObject(&heap_object)) return false;
+ if (!feedback->GetHeapObjectIfStrong(&heap_object)) return false;
if (heap_object->IsString()) return true;
if (!heap_object->IsSymbol()) return false;
Symbol* symbol = Symbol::cast(heap_object);
@@ -331,7 +331,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
return;
}
- if (slot->IsClearedWeakHeapObject()) {
+ if (slot->IsCleared()) {
ClearOptimizationMarker();
return;
}
@@ -373,7 +373,7 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
void FeedbackVector::AssertNoLegacyTypes(MaybeObject* object) {
#ifdef DEBUG
HeapObject* heap_object;
- if (object->ToStrongOrWeakHeapObject(&heap_object)) {
+ if (object->GetHeapObject(&heap_object)) {
// Instead of FixedArray, the Feedback and the Extra should contain
// WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
DCHECK_IMPLIES(heap_object->IsFixedArray(), heap_object->IsHashTable());
@@ -384,7 +384,7 @@ void FeedbackVector::AssertNoLegacyTypes(MaybeObject* object) {
Handle<WeakFixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
HeapObject* heap_object;
- if (GetFeedback()->ToStrongHeapObject(&heap_object) &&
+ if (GetFeedback()->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsWeakFixedArray() &&
WeakFixedArray::cast(heap_object)->length() == length) {
return handle(WeakFixedArray::cast(heap_object), isolate);
@@ -397,7 +397,7 @@ Handle<WeakFixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
Handle<WeakFixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
HeapObject* heap_object;
- if (GetFeedbackExtra()->ToStrongHeapObject(&heap_object) &&
+ if (GetFeedbackExtra()->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsWeakFixedArray() &&
WeakFixedArray::cast(heap_object)->length() == length) {
return handle(WeakFixedArray::cast(heap_object), isolate);
@@ -546,10 +546,11 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
switch (kind()) {
case FeedbackSlotKind::kCreateClosure:
+ return MONOMORPHIC;
+
case FeedbackSlotKind::kLiteral:
- // CreateClosure and literal slots don't have a notion of state.
- UNREACHABLE();
- break;
+ if (feedback->IsSmi()) return UNINITIALIZED;
+ return MONOMORPHIC;
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
@@ -557,9 +558,9 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
if (feedback->IsSmi()) return MONOMORPHIC;
- DCHECK(feedback->IsWeakOrClearedHeapObject());
+ DCHECK(feedback->IsWeakOrCleared());
MaybeObject* extra = GetFeedbackExtra();
- if (!feedback->IsClearedWeakHeapObject() ||
+ if (!feedback->IsCleared() ||
extra != MaybeObject::FromObject(
*FeedbackVector::UninitializedSentinel(isolate))) {
return MONOMORPHIC;
@@ -587,12 +588,12 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
*FeedbackVector::PremonomorphicSentinel(isolate))) {
return PREMONOMORPHIC;
}
- if (feedback->IsWeakOrClearedHeapObject()) {
+ if (feedback->IsWeakOrCleared()) {
// Don't check if the map is cleared.
return MONOMORPHIC;
}
HeapObject* heap_object;
- if (feedback->ToStrongHeapObject(&heap_object)) {
+ if (feedback->GetHeapObjectIfStrong(&heap_object)) {
if (heap_object->IsWeakFixedArray()) {
// Determine state purely by our structure, don't check if the maps
// are cleared.
@@ -600,7 +601,7 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
}
if (heap_object->IsName()) {
DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()));
- Object* extra = GetFeedbackExtra()->ToStrongHeapObject();
+ Object* extra = GetFeedbackExtra()->GetHeapObjectAssumeStrong();
WeakFixedArray* extra_array = WeakFixedArray::cast(extra);
return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
}
@@ -612,8 +613,8 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
if (feedback == MaybeObject::FromObject(
*FeedbackVector::MegamorphicSentinel(isolate))) {
return GENERIC;
- } else if (feedback->IsWeakOrClearedHeapObject() ||
- (feedback->ToStrongHeapObject(&heap_object) &&
+ } else if (feedback->IsWeakOrCleared() ||
+ (feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsAllocationSite())) {
return MONOMORPHIC;
}
@@ -666,7 +667,7 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
if (feedback == MaybeObject::FromObject(
*FeedbackVector::UninitializedSentinel(isolate))) {
return UNINITIALIZED;
- } else if (feedback->IsWeakOrClearedHeapObject()) {
+ } else if (feedback->IsWeakOrCleared()) {
// Don't check if the map is cleared.
return MONOMORPHIC;
}
@@ -690,11 +691,11 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
*FeedbackVector::MegamorphicSentinel(isolate))) {
return MEGAMORPHIC;
}
- if (feedback->IsWeakOrClearedHeapObject()) {
+ if (feedback->IsWeakOrCleared()) {
return MONOMORPHIC;
}
- DCHECK(feedback->ToStrongHeapObject()->IsWeakFixedArray());
+ DCHECK(feedback->GetHeapObjectAssumeStrong()->IsWeakFixedArray());
return POLYMORPHIC;
}
@@ -744,7 +745,7 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
Handle<Map> result_map) {
Isolate* isolate = GetIsolate();
MaybeObject* maybe_feedback = GetFeedback();
- Handle<HeapObject> feedback(maybe_feedback->IsStrongOrWeakHeapObject()
+ Handle<HeapObject> feedback(maybe_feedback->IsStrongOrWeak()
? maybe_feedback->GetHeapObject()
: nullptr,
isolate);
@@ -755,8 +756,7 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
SetFeedbackExtra(*result_map);
break;
case MONOMORPHIC:
- if (maybe_feedback->IsClearedWeakHeapObject() ||
- feedback.is_identical_to(source_map) ||
+ if (maybe_feedback->IsCleared() || feedback.is_identical_to(source_map) ||
Map::cast(*feedback)->is_deprecated()) {
// Remain in MONOMORPHIC state if previous feedback has been collected.
SetFeedback(HeapObjectReference::Weak(*source_map));
@@ -779,7 +779,7 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
int i = 0;
for (; i < array->length(); i += kCloneObjectPolymorphicEntrySize) {
MaybeObject* feedback = array->Get(i);
- if (feedback->IsClearedWeakHeapObject()) break;
+ if (feedback->IsCleared()) break;
Handle<Map> cached_map(Map::cast(feedback->GetHeapObject()), isolate);
if (cached_map.is_identical_to(source_map) ||
cached_map->is_deprecated())
@@ -818,7 +818,7 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
int FeedbackNexus::GetCallCount() {
DCHECK(IsCallICKind(kind()));
- Object* call_count = GetFeedbackExtra()->ToObject();
+ Object* call_count = GetFeedbackExtra()->cast<Object>();
CHECK(call_count->IsSmi());
uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
return CallCountField::decode(value);
@@ -827,7 +827,7 @@ int FeedbackNexus::GetCallCount() {
void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
DCHECK(IsCallICKind(kind()));
- Object* call_count = GetFeedbackExtra()->ToObject();
+ Object* call_count = GetFeedbackExtra()->cast<Object>();
CHECK(call_count->IsSmi());
uint32_t count = static_cast<uint32_t>(Smi::ToInt(call_count));
uint32_t value = CallCountField::encode(CallCountField::decode(count));
@@ -838,7 +838,7 @@ void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
SpeculationMode FeedbackNexus::GetSpeculationMode() {
DCHECK(IsCallICKind(kind()));
- Object* call_count = GetFeedbackExtra()->ToObject();
+ Object* call_count = GetFeedbackExtra()->cast<Object>();
CHECK(call_count->IsSmi());
uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
return SpeculationModeField::decode(value);
@@ -910,35 +910,36 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
MaybeObject* feedback = GetFeedback();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject* heap_object;
- if ((feedback->ToStrongHeapObject(&heap_object) &&
+ if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsWeakFixedArray()) ||
is_named_feedback) {
int found = 0;
WeakFixedArray* array;
if (is_named_feedback) {
- array = WeakFixedArray::cast(GetFeedbackExtra()->ToStrongHeapObject());
+ array =
+ WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong());
} else {
array = WeakFixedArray::cast(heap_object);
}
const int increment = 2;
HeapObject* heap_object;
for (int i = 0; i < array->length(); i += increment) {
- DCHECK(array->Get(i)->IsWeakOrClearedHeapObject());
- if (array->Get(i)->ToWeakHeapObject(&heap_object)) {
+ DCHECK(array->Get(i)->IsWeakOrCleared());
+ if (array->Get(i)->GetHeapObjectIfWeak(&heap_object)) {
Map* map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
found++;
}
}
return found;
- } else if (feedback->ToWeakHeapObject(&heap_object)) {
+ } else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
Map* map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
return 1;
- } else if (feedback->ToStrongHeapObject(&heap_object) &&
+ } else if (feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object ==
heap_object->GetReadOnlyRoots().premonomorphic_symbol()) {
- if (GetFeedbackExtra()->ToWeakHeapObject(&heap_object)) {
+ if (GetFeedbackExtra()->GetHeapObjectIfWeak(&heap_object)) {
Map* map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
return 1;
@@ -957,32 +958,32 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
Isolate* isolate = GetIsolate();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject* heap_object;
- if ((feedback->ToStrongHeapObject(&heap_object) &&
+ if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsWeakFixedArray()) ||
is_named_feedback) {
WeakFixedArray* array;
if (is_named_feedback) {
- array = WeakFixedArray::cast(GetFeedbackExtra()->ToStrongHeapObject());
+ array =
+ WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong());
} else {
array = WeakFixedArray::cast(heap_object);
}
const int increment = 2;
HeapObject* heap_object;
for (int i = 0; i < array->length(); i += increment) {
- DCHECK(array->Get(i)->IsWeakOrClearedHeapObject());
- if (array->Get(i)->ToWeakHeapObject(&heap_object)) {
+ DCHECK(array->Get(i)->IsWeakOrCleared());
+ if (array->Get(i)->GetHeapObjectIfWeak(&heap_object)) {
Map* array_map = Map::cast(heap_object);
- if (array_map == *map &&
- !array->Get(i + increment - 1)->IsClearedWeakHeapObject()) {
+ if (array_map == *map && !array->Get(i + increment - 1)->IsCleared()) {
MaybeObject* handler = array->Get(i + increment - 1);
DCHECK(IC::IsHandler(handler));
return handle(handler, isolate);
}
}
}
- } else if (feedback->ToWeakHeapObject(&heap_object)) {
+ } else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
Map* cell_map = Map::cast(heap_object);
- if (cell_map == *map && !GetFeedbackExtra()->IsClearedWeakHeapObject()) {
+ if (cell_map == *map && !GetFeedbackExtra()->IsCleared()) {
MaybeObject* handler = GetFeedbackExtra();
DCHECK(IC::IsHandler(handler));
return handle(handler, isolate);
@@ -1004,12 +1005,13 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
int count = 0;
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject* heap_object;
- if ((feedback->ToStrongHeapObject(&heap_object) &&
+ if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsWeakFixedArray()) ||
is_named_feedback) {
WeakFixedArray* array;
if (is_named_feedback) {
- array = WeakFixedArray::cast(GetFeedbackExtra()->ToStrongHeapObject());
+ array =
+ WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong());
} else {
array = WeakFixedArray::cast(heap_object);
}
@@ -1017,18 +1019,18 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
HeapObject* heap_object;
for (int i = 0; i < array->length(); i += increment) {
// Be sure to skip handlers whose maps have been cleared.
- DCHECK(array->Get(i)->IsWeakOrClearedHeapObject());
- if (array->Get(i)->ToWeakHeapObject(&heap_object) &&
- !array->Get(i + increment - 1)->IsClearedWeakHeapObject()) {
+ DCHECK(array->Get(i)->IsWeakOrCleared());
+ if (array->Get(i)->GetHeapObjectIfWeak(&heap_object) &&
+ !array->Get(i + increment - 1)->IsCleared()) {
MaybeObject* handler = array->Get(i + increment - 1);
DCHECK(IC::IsHandler(handler));
code_list->push_back(handle(handler, isolate));
count++;
}
}
- } else if (feedback->ToWeakHeapObject(&heap_object)) {
+ } else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
MaybeObject* extra = GetFeedbackExtra();
- if (!extra->IsClearedWeakHeapObject()) {
+ if (!extra->IsCleared()) {
DCHECK(IC::IsHandler(extra));
code_list->push_back(handle(extra, isolate));
count++;
@@ -1041,7 +1043,7 @@ Name* FeedbackNexus::FindFirstName() const {
if (IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind())) {
MaybeObject* feedback = GetFeedback();
if (IsPropertyNameFeedback(feedback)) {
- return Name::cast(feedback->ToStrongHeapObject());
+ return Name::cast(feedback->GetHeapObjectAssumeStrong());
}
}
return nullptr;
@@ -1115,32 +1117,33 @@ IcCheckType FeedbackNexus::GetKeyType() const {
MaybeObject* feedback = GetFeedback();
if (feedback == MaybeObject::FromObject(
*FeedbackVector::MegamorphicSentinel(GetIsolate()))) {
- return static_cast<IcCheckType>(Smi::ToInt(GetFeedbackExtra()->ToObject()));
+ return static_cast<IcCheckType>(
+ Smi::ToInt(GetFeedbackExtra()->cast<Object>()));
}
return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
}
BinaryOperationHint FeedbackNexus::GetBinaryOperationFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kBinaryOp);
- int feedback = Smi::ToInt(GetFeedback()->ToSmi());
+ int feedback = Smi::ToInt(GetFeedback()->cast<Smi>());
return BinaryOperationHintFromFeedback(feedback);
}
CompareOperationHint FeedbackNexus::GetCompareOperationFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kCompareOp);
- int feedback = Smi::ToInt(GetFeedback()->ToSmi());
+ int feedback = Smi::ToInt(GetFeedback()->cast<Smi>());
return CompareOperationHintFromFeedback(feedback);
}
ForInHint FeedbackNexus::GetForInFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kForIn);
- int feedback = Smi::ToInt(GetFeedback()->ToSmi());
+ int feedback = Smi::ToInt(GetFeedback()->cast<Smi>());
return ForInHintFromFeedback(feedback);
}
Handle<FeedbackCell> FeedbackNexus::GetFeedbackCell() const {
DCHECK_EQ(FeedbackSlotKind::kCreateClosure, kind());
- return handle(FeedbackCell::cast(GetFeedback()->ToObject()),
+ return handle(FeedbackCell::cast(GetFeedback()->cast<Object>()),
vector()->GetIsolate());
}
@@ -1149,7 +1152,7 @@ MaybeHandle<JSObject> FeedbackNexus::GetConstructorFeedback() const {
Isolate* isolate = GetIsolate();
MaybeObject* feedback = GetFeedback();
HeapObject* heap_object;
- if (feedback->ToWeakHeapObject(&heap_object)) {
+ if (feedback->GetHeapObjectIfWeak(&heap_object)) {
return handle(JSObject::cast(heap_object), isolate);
}
return MaybeHandle<JSObject>();
@@ -1182,8 +1185,9 @@ void FeedbackNexus::Collect(Handle<String> type, int position) {
*FeedbackVector::UninitializedSentinel(isolate))) {
types = SimpleNumberDictionary::New(isolate, 1);
} else {
- types = handle(SimpleNumberDictionary::cast(feedback->ToStrongHeapObject()),
- isolate);
+ types = handle(
+ SimpleNumberDictionary::cast(feedback->GetHeapObjectAssumeStrong()),
+ isolate);
}
Handle<ArrayList> position_specific_types;
@@ -1220,7 +1224,8 @@ std::vector<int> FeedbackNexus::GetSourcePositions() const {
}
Handle<SimpleNumberDictionary> types(
- SimpleNumberDictionary::cast(feedback->ToStrongHeapObject()), isolate);
+ SimpleNumberDictionary::cast(feedback->GetHeapObjectAssumeStrong()),
+ isolate);
for (int index = SimpleNumberDictionary::kElementsStartIndex;
index < types->length(); index += SimpleNumberDictionary::kEntrySize) {
@@ -1247,7 +1252,8 @@ std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
}
Handle<SimpleNumberDictionary> types(
- SimpleNumberDictionary::cast(feedback->ToStrongHeapObject()), isolate);
+ SimpleNumberDictionary::cast(feedback->GetHeapObjectAssumeStrong()),
+ isolate);
int entry = types->FindEntry(isolate, position);
if (entry == SimpleNumberDictionary::kNotFound) {
@@ -1305,10 +1311,10 @@ JSObject* FeedbackNexus::GetTypeProfile() const {
return *isolate->factory()->NewJSObject(isolate->object_function());
}
- return *ConvertToJSObject(
- isolate,
- handle(SimpleNumberDictionary::cast(feedback->ToStrongHeapObject()),
- isolate));
+ return *ConvertToJSObject(isolate,
+ handle(SimpleNumberDictionary::cast(
+ feedback->GetHeapObjectAssumeStrong()),
+ isolate));
}
void FeedbackNexus::ResetTypeProfile() {
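
Beyond the renames, this file changes one piece of behavior: StateFromFeedback no longer treats closure and literal slots as unreachable. Summarized from the hunk above:

    // kCreateClosure           -> MONOMORPHIC (unconditionally)
    // kLiteral, Smi feedback   -> UNINITIALIZED
    // kLiteral, anything else  -> MONOMORPHIC
    // (both kinds previously hit UNREACHABLE())
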
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index 3721ffec9a..71d84534b6 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -147,11 +147,6 @@ class FeedbackMetadata;
// metadata.
class FeedbackVector : public HeapObject, public NeverReadOnlySpaceObject {
public:
- // Use the mixin methods over the HeapObject methods.
- // TODO(v8:7786) Remove once the HeapObject methods are gone.
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
// Casting.
static inline FeedbackVector* cast(Object* obj);
@@ -294,12 +289,10 @@ class FeedbackVector : public HeapObject, public NeverReadOnlySpaceObject {
#undef FEEDBACK_VECTOR_FIELDS
static const int kHeaderSize =
- RoundUp<kPointerAlignment>(kUnalignedHeaderSize);
+ RoundUp<kPointerAlignment>(int{kUnalignedHeaderSize});
static const int kFeedbackSlotsOffset = kHeaderSize;
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
// Garbage collection support.
static constexpr int SizeFor(int length) {
@@ -352,11 +345,14 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
return AddSlot(FeedbackSlotKind::kLoadKeyed);
}
- FeedbackSlot AddStoreICSlot(LanguageMode language_mode) {
+ FeedbackSlotKind GetStoreICSlot(LanguageMode language_mode) {
STATIC_ASSERT(LanguageModeSize == 2);
- return AddSlot(is_strict(language_mode)
- ? FeedbackSlotKind::kStoreNamedStrict
- : FeedbackSlotKind::kStoreNamedSloppy);
+ return is_strict(language_mode) ? FeedbackSlotKind::kStoreNamedStrict
+ : FeedbackSlotKind::kStoreNamedSloppy;
+ }
+
+ FeedbackSlot AddStoreICSlot(LanguageMode language_mode) {
+ return AddSlot(GetStoreICSlot(language_mode));
}
FeedbackSlot AddStoreOwnICSlot() {
@@ -370,11 +366,14 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
: FeedbackSlotKind::kStoreGlobalSloppy);
}
- FeedbackSlot AddKeyedStoreICSlot(LanguageMode language_mode) {
+ FeedbackSlotKind GetKeyedStoreICSlotKind(LanguageMode language_mode) {
STATIC_ASSERT(LanguageModeSize == 2);
- return AddSlot(is_strict(language_mode)
- ? FeedbackSlotKind::kStoreKeyedStrict
- : FeedbackSlotKind::kStoreKeyedSloppy);
+ return is_strict(language_mode) ? FeedbackSlotKind::kStoreKeyedStrict
+ : FeedbackSlotKind::kStoreKeyedSloppy;
+ }
+
+ FeedbackSlot AddKeyedStoreICSlot(LanguageMode language_mode) {
+ return AddSlot(GetKeyedStoreICSlotKind(language_mode));
}
FeedbackSlot AddStoreInArrayLiteralICSlot() {
@@ -422,6 +421,26 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
}
ZoneVector<unsigned char> slot_kinds_;
+
+ friend class SharedFeedbackSlot;
+};
+
+// Helper class that creates a feedback slot on-demand.
+class SharedFeedbackSlot {
+ public:
+ // FeedbackSlot default constructor constructs an invalid slot.
+ SharedFeedbackSlot(FeedbackVectorSpec* spec, FeedbackSlotKind kind)
+ : kind_(kind), spec_(spec) {}
+
+ FeedbackSlot Get() {
+ if (slot_.IsInvalid()) slot_ = spec_->AddSlot(kind_);
+ return slot_;
+ }
+
+ private:
+ FeedbackSlotKind kind_;
+ FeedbackSlot slot_;
+ FeedbackVectorSpec* spec_;
};
// FeedbackMetadata is an array-like object with a slot count (indicating how
@@ -471,8 +490,6 @@ class FeedbackMetadata : public HeapObject {
static const int kHeaderSize = kSlotCountOffset + kInt32Size;
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
friend class AccessorAssembler;
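
SharedFeedbackSlot, added above, allocates its slot lazily on the first Get() and hands back the same slot thereafter. A hedged usage sketch; the call site, the Zone* setup, and the choice of kBinaryOp are hypothetical:

    // Hypothetical call site: no slot is reserved unless Get() ever runs,
    // and repeated calls share the one slot.
    FeedbackVectorSpec spec(zone);  // zone: a Zone* from the surrounding pass
    SharedFeedbackSlot shared(&spec, FeedbackSlotKind::kBinaryOp);
    FeedbackSlot first = shared.Get();  // allocates via spec->AddSlot(kind_)
    FeedbackSlot again = shared.Get();  // same slot, no new allocation
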
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 69ec7472bb..170a777c72 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -209,18 +209,18 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
// Update bootstrapper.cc whenever adding a new feature flag.
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS_BASE(V) \
- V(harmony_do_expressions, "harmony do-expressions") \
- V(harmony_class_fields, "harmony fields in class literals") \
- V(harmony_static_fields, "harmony static fields in class literals") \
- V(harmony_await_optimization, "harmony await taking 1 tick")
+#define HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_do_expressions, "harmony do-expressions") \
+ V(harmony_class_fields, "harmony fields in class literals") \
+ V(harmony_await_optimization, "harmony await taking 1 tick") \
+ V(harmony_regexp_sequence, "RegExp Unicode sequence properties")
#ifdef V8_INTL_SUPPORT
#define HARMONY_INPROGRESS(V) \
HARMONY_INPROGRESS_BASE(V) \
V(harmony_locale, "Intl.Locale") \
V(harmony_intl_list_format, "Intl.ListFormat") \
- V(harmony_intl_relative_time_format, "Intl.RelativeTimeFormat")
+ V(harmony_intl_segmenter, "Intl.Segmenter")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#endif
@@ -231,19 +231,26 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
V(harmony_private_fields, "harmony private fields in class literals") \
V(harmony_numeric_separator, "harmony numeric separator between digits") \
V(harmony_string_matchall, "harmony String.prototype.matchAll") \
- V(harmony_global, "harmony global")
+ V(harmony_static_fields, "harmony static fields in class literals") \
+ V(harmony_json_stringify, "Well-formed JSON.stringify")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_string_trimming, "harmony String.prototype.trim{Start,End}") \
+#define HARMONY_SHIPPING_BASE(V) \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_function_tostring, "harmony Function.prototype.toString") \
V(harmony_import_meta, "harmony import.meta property") \
- V(harmony_bigint, "harmony arbitrary precision integers") \
V(harmony_dynamic_import, "harmony dynamic import") \
V(harmony_array_prototype_values, "harmony Array.prototype.values") \
V(harmony_array_flat, "harmony Array.prototype.{flat,flatMap}") \
- V(harmony_symbol_description, "harmony Symbol.prototype.description")
+ V(harmony_symbol_description, "harmony Symbol.prototype.description") \
+ V(harmony_global, "harmony global")
+
+#ifdef V8_INTL_SUPPORT
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_intl_relative_time_format, "Intl.RelativeTimeFormat")
+#else
+#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
+#endif
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -518,9 +525,6 @@ DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
"Enable mitigations for executing untrusted code")
#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
-DEFINE_BOOL(branch_load_poisoning, false, "Mask loads with branch conditions.")
-DEFINE_IMPLICATION(future, branch_load_poisoning)
-
// Flags to help platform porters
DEFINE_BOOL(minimal, false,
"simplifies execution model to make porting "
@@ -536,11 +540,11 @@ DEFINE_BOOL(wasm_disable_structured_cloning, false,
"disable wasm structured cloning")
DEFINE_INT(wasm_num_compilation_tasks, 10,
"number of parallel compilation tasks for wasm")
-DEFINE_DEBUG_BOOL(wasm_trace_native_heap, false,
+DEFINE_DEBUG_BOOL(trace_wasm_native_heap, false,
"trace wasm native heap events")
DEFINE_BOOL(wasm_write_protect_code_memory, false,
"write protect code memory on the wasm native heap")
-DEFINE_BOOL(wasm_trace_serialization, false,
+DEFINE_BOOL(trace_wasm_serialization, false,
"trace serialization/deserialization")
DEFINE_BOOL(wasm_async_compilation, true,
"enable actual asynchronous compilation for WebAssembly.compile")
@@ -580,7 +584,7 @@ DEFINE_DEBUG_BOOL(trace_liftoff, false,
"trace Liftoff, the baseline compiler for WebAssembly")
DEFINE_DEBUG_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
-DEFINE_BOOL(wasm_trace_memory, false,
+DEFINE_BOOL(trace_wasm_memory, false,
"print all memory updates performed in wasm code")
// Fuzzers use {wasm_tier_mask_for_testing} together with {liftoff} and
// {no_wasm_tier_up} to force some functions to be compiled with Turbofan.
@@ -874,8 +878,6 @@ DEFINE_BOOL(expose_trigger_failure, false, "expose trigger-failure extension")
DEFINE_INT(stack_trace_limit, 10, "number of stack frames to capture")
DEFINE_BOOL(builtins_in_stack_traces, false,
"show built-in functions in stack traces")
-DEFINE_BOOL(enable_experimental_builtins, false,
- "enable new csa-based experimental builtins")
DEFINE_BOOL(disallow_code_generation_from_strings, false,
"disallow eval and friends")
DEFINE_BOOL(expose_async_hooks, false, "expose async_hooks object")
@@ -922,11 +924,6 @@ DEFINE_BOOL(compiler_dispatcher, false, "enable compiler dispatcher")
DEFINE_BOOL(trace_compiler_dispatcher, false,
"trace compiler dispatcher activity")
-// compiler-dispatcher-job.cc
-DEFINE_BOOL(
- trace_compiler_dispatcher_jobs, false,
- "trace progress of individual jobs managed by the compiler dispatcher")
-
// cpu-profiler.cc
DEFINE_INT(cpu_profiler_sampling_interval, 1000,
"CPU profiler sampling interval in microseconds")
@@ -1043,6 +1040,9 @@ DEFINE_BOOL(trace_sim_messages, false,
"Trace simulator debug messages. Implied by --trace-sim.")
// isolate.cc
+DEFINE_BOOL(async_stack_traces, false,
+ "include async stack traces in Error.stack")
+DEFINE_IMPLICATION(async_stack_traces, harmony_await_optimization)
DEFINE_BOOL(stack_trace_on_illegal, false,
"print stack trace when an illegal exception is thrown")
DEFINE_BOOL(abort_on_uncaught_exception, false,
@@ -1093,10 +1093,6 @@ DEFINE_BOOL(print_embedded_builtin_candidates, false,
"Prints builtins that are not yet embedded but could be.")
DEFINE_BOOL(lazy_deserialization, true,
"Deserialize code lazily from the snapshot.")
-DEFINE_BOOL(lazy_handler_deserialization, true,
- "Deserialize bytecode handlers lazily from the snapshot.")
-DEFINE_IMPLICATION(lazy_handler_deserialization, lazy_deserialization)
-DEFINE_IMPLICATION(future, lazy_handler_deserialization)
DEFINE_BOOL(trace_lazy_deserialization, false, "Trace lazy deserialization.")
DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
@@ -1154,6 +1150,9 @@ DEFINE_ARGS(js_arguments,
"Pass all remaining arguments to the script. Alias for \"--\".")
DEFINE_BOOL(mock_arraybuffer_allocator, false,
"Use a mock ArrayBuffer allocator for testing.")
+DEFINE_SIZE_T(mock_arraybuffer_allocator_limit, 0,
+ "Memory limit for mock ArrayBuffer allocator used to simulate "
+ "OOM for testing.")
//
// GDB JIT integration flags.
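
On the flag changes in this file, assuming V8's usual convention that underscores in flag names become dashes on the command line: the new async_stack_traces flag carries a DEFINE_IMPLICATION, so enabling it also enables harmony_await_optimization, and the wasm tracing flags now lead with the trace_ prefix. Hypothetical d8 invocations:

    d8 --async-stack-traces script.js   # also turns on --harmony-await-optimization
    d8 --trace-wasm-memory module.js    # renamed from --wasm-trace-memory
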
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 9dee01698e..a4d2561cd3 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -29,7 +29,7 @@ ReturnAddressLocationResolver StackFrame::return_address_location_resolver_ =
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
-class StackHandlerIterator BASE_EMBEDDED {
+class StackHandlerIterator {
public:
StackHandlerIterator(const StackFrame* frame, StackHandler* handler)
: limit_(frame->fp()), handler_(handler) {
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 40fce95e7f..a8c0989036 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -42,9 +42,7 @@ class InnerPointerToCodeCache {
Flush();
}
- void Flush() {
- memset(&cache_[0], 0, sizeof(cache_));
- }
+ void Flush() { memset(static_cast<void*>(&cache_[0]), 0, sizeof(cache_)); }
InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
@@ -69,8 +67,7 @@ class StackHandlerConstants : public AllStatic {
static const int kSlotCount = kSize >> kPointerSizeLog2;
};
-
-class StackHandler BASE_EMBEDDED {
+class StackHandler {
public:
// Get the address of this stack handler.
inline Address address() const;
@@ -110,7 +107,7 @@ class StackHandler BASE_EMBEDDED {
V(NATIVE, NativeFrame)
// Abstract base class for all stack frames.
-class StackFrame BASE_EMBEDDED {
+class StackFrame {
public:
#define DECLARE_TYPE(type, ignore) type,
enum Type {
@@ -262,7 +259,7 @@ class StackFrame BASE_EMBEDDED {
}
// Get the id of this stack frame.
- Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
+ Id id() const { return static_cast<Id>(caller_sp()); }
// Get the top handler from the current stack iterator.
inline StackHandler* top_handler() const;
@@ -301,7 +298,7 @@ class StackFrame BASE_EMBEDDED {
protected:
inline explicit StackFrame(StackFrameIteratorBase* iterator);
- virtual ~StackFrame() { }
+ virtual ~StackFrame() = default;
// Compute the stack pointer for the calling frame.
virtual Address GetCallerStackPointer() const = 0;
@@ -476,7 +473,7 @@ class BuiltinExitFrame : public ExitFrame {
class StandardFrame;
-class FrameSummary BASE_EMBEDDED {
+class FrameSummary {
public:
// Subclasses for the different summary kinds:
#define FRAME_SUMMARY_VARIANTS(F) \
@@ -1202,7 +1199,7 @@ class JavaScriptBuiltinContinuationWithCatchFrame
friend class StackFrameIteratorBase;
};
-class StackFrameIteratorBase BASE_EMBEDDED {
+class StackFrameIteratorBase {
public:
Isolate* isolate() const { return isolate_; }
@@ -1257,7 +1254,7 @@ class StackFrameIterator: public StackFrameIteratorBase {
};
// Iterator that supports iterating through all JavaScript frames.
-class JavaScriptFrameIterator BASE_EMBEDDED {
+class JavaScriptFrameIterator {
public:
inline explicit JavaScriptFrameIterator(Isolate* isolate);
inline JavaScriptFrameIterator(Isolate* isolate, ThreadLocalTop* top);
@@ -1275,7 +1272,7 @@ class JavaScriptFrameIterator BASE_EMBEDDED {
// NOTE: The stack trace frame iterator is an iterator that only traverse proper
// JavaScript frames that have proper JavaScript functions and WebAssembly
// frames.
-class StackTraceFrameIterator BASE_EMBEDDED {
+class StackTraceFrameIterator {
public:
explicit StackTraceFrameIterator(Isolate* isolate);
// Skip frames until the frame with the given id is reached.
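
The BASE_EMBEDDED removals in this file and its neighbors are purely mechanical: as the globals.h hunk later in this patch shows, the macro was defined to expand to nothing, so deleting it changes no generated code.

    // globals.h (deleted later in this patch):
    //   #define BASE_EMBEDDED
    // Hence these two declarations were always token-for-token identical:
    //   class StackHandler BASE_EMBEDDED { ... };  // before
    //   class StackHandler { ... };                // after
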
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index c0704ee9a2..c1dd523984 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -89,7 +89,7 @@ void AtomicsWaitWakeHandle::Wake() {
Object* FutexEmulation::Wait(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int32_t value, double rel_timeout_ms) {
- DCHECK(addr < NumberToSize(array_buffer->byte_length()));
+ DCHECK_LT(addr, array_buffer->byte_length());
void* backing_store = array_buffer->backing_store();
int32_t* p =
@@ -223,7 +223,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
}
wait_list_.Pointer()->RemoveNode(node);
- } while (0);
+ } while (false);
isolate->RunAtomicsWaitCallback(callback_result, array_buffer, addr, value,
rel_timeout_ms, nullptr);
@@ -238,7 +238,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
Object* FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
uint32_t num_waiters_to_wake) {
- DCHECK(addr < NumberToSize(array_buffer->byte_length()));
+ DCHECK_LT(addr, array_buffer->byte_length());
int waiters_woken = 0;
void* backing_store = array_buffer->backing_store();
@@ -263,7 +263,7 @@ Object* FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
Object* FutexEmulation::NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
size_t addr) {
- DCHECK(addr < NumberToSize(array_buffer->byte_length()));
+ DCHECK_LT(addr, array_buffer->byte_length());
void* backing_store = array_buffer->backing_store();
base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
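
The DCHECK(a < b) to DCHECK_LT(a, b) conversions in this file do two things: they drop NumberToSize, since byte_length() now yields a value that compares directly, and they move to the two-operand macro, which reports both operand values when the check fails. Schematically:

    DCHECK(addr < array_buffer->byte_length());    // failure shows only the expression
    DCHECK_LT(addr, array_buffer->byte_length());  // failure shows both values
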
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 4fa7ce0699..712408ab7e 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -41,7 +41,7 @@ typedef ELF DebugObject;
typedef ELFSection DebugSection;
#endif
-class Writer BASE_EMBEDDED {
+class Writer {
public:
explicit Writer(DebugObject* debug_object)
: debug_object_(debug_object),
@@ -177,7 +177,7 @@ class ELFStringTable;
template<typename THeader>
class DebugSectionBase : public ZoneObject {
public:
- virtual ~DebugSectionBase() { }
+ virtual ~DebugSectionBase() = default;
virtual void WriteBody(Writer::Slot<THeader> header, Writer* writer) {
uintptr_t start = writer->position();
@@ -238,7 +238,7 @@ class MachOSection : public DebugSectionBase<MachOSectionHeader> {
}
}
- virtual ~MachOSection() { }
+ ~MachOSection() override = default;
virtual void PopulateHeader(Writer::Slot<Header> header) {
header->addr = 0;
@@ -314,11 +314,11 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
ELFSection(const char* name, Type type, uintptr_t align)
: name_(name), type_(type), align_(align) { }
- virtual ~ELFSection() { }
+ ~ELFSection() override = default;
void PopulateHeader(Writer::Slot<Header> header, ELFStringTable* strtab);
- virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
+ void WriteBody(Writer::Slot<Header> header, Writer* w) override {
uintptr_t start = w->position();
if (WriteBodyInternal(w)) {
uintptr_t end = w->position();
@@ -327,9 +327,7 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
}
}
- virtual bool WriteBodyInternal(Writer* w) {
- return false;
- }
+ bool WriteBodyInternal(Writer* w) override { return false; }
uint16_t index() const { return index_; }
void set_index(uint16_t index) { index_ = index; }
@@ -396,7 +394,7 @@ class FullHeaderELFSection : public ELFSection {
flags_(flags) { }
protected:
- virtual void PopulateHeader(Writer::Slot<Header> header) {
+ void PopulateHeader(Writer::Slot<Header> header) override {
ELFSection::PopulateHeader(header);
header->address = addr_;
header->offset = offset_;
@@ -438,7 +436,7 @@ class ELFStringTable : public ELFSection {
void DetachWriter() { writer_ = nullptr; }
- virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
+ void WriteBody(Writer::Slot<Header> header, Writer* w) override {
DCHECK_NULL(writer_);
header->offset = offset_;
header->size = size_;
@@ -472,7 +470,7 @@ void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
#if defined(__MACH_O)
-class MachO BASE_EMBEDDED {
+class MachO {
public:
explicit MachO(Zone* zone) : sections_(zone) {}
@@ -604,7 +602,7 @@ class MachO BASE_EMBEDDED {
#if defined(__ELF)
-class ELF BASE_EMBEDDED {
+class ELF {
public:
explicit ELF(Zone* zone) : sections_(zone) {
sections_.push_back(new (zone) ELFSection("", ELFSection::TYPE_NULL, 0));
@@ -746,8 +744,7 @@ class ELF BASE_EMBEDDED {
ZoneChunkList<ELFSection*> sections_;
};
-
-class ELFSymbol BASE_EMBEDDED {
+class ELFSymbol {
public:
enum Type {
TYPE_NOTYPE = 0,
@@ -862,7 +859,7 @@ class ELFSymbolTable : public ELFSection {
locals_(zone),
globals_(zone) {}
- virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
+ void WriteBody(Writer::Slot<Header> header, Writer* w) override {
w->Align(header->alignment);
size_t total_symbols = locals_.size() + globals_.size() + 1;
header->offset = w->position();
@@ -899,7 +896,7 @@ class ELFSymbolTable : public ELFSection {
}
protected:
- virtual void PopulateHeader(Writer::Slot<Header> header) {
+ void PopulateHeader(Writer::Slot<Header> header) override {
ELFSection::PopulateHeader(header);
// We are assuming that string table will follow symbol table.
header->link = index() + 1;
@@ -946,8 +943,7 @@ class LineInfo : public Malloced {
std::vector<PCInfo> pc_info_;
};
-
-class CodeDescription BASE_EMBEDDED {
+class CodeDescription {
public:
#if V8_TARGET_ARCH_X64
enum StackState {
@@ -1115,7 +1111,7 @@ class DebugInfoSection : public DebugSection {
DW_ATE_SIGNED = 0x5
};
- bool WriteBodyInternal(Writer* w) {
+ bool WriteBodyInternal(Writer* w) override {
uintptr_t cu_start = w->position();
Writer::Slot<uint32_t> size = w->CreateSlotHere<uint32_t>();
uintptr_t start = w->position();
@@ -1318,7 +1314,7 @@ class DebugAbbrevSection : public DebugSection {
w->WriteULEB128(0);
}
- bool WriteBodyInternal(Writer* w) {
+ bool WriteBodyInternal(Writer* w) override {
int current_abbreviation = 1;
bool extra_info = desc_->has_scope_info();
DCHECK(desc_->IsLineInfoAvailable());
@@ -1435,7 +1431,7 @@ class DebugLineSection : public DebugSection {
DW_LNE_DEFINE_FILE = 3
};
- bool WriteBodyInternal(Writer* w) {
+ bool WriteBodyInternal(Writer* w) override {
// Write prologue.
Writer::Slot<uint32_t> total_length = w->CreateSlotHere<uint32_t>();
uintptr_t start = w->position();
@@ -1571,7 +1567,7 @@ class DebugLineSection : public DebugSection {
class UnwindInfoSection : public DebugSection {
public:
explicit UnwindInfoSection(CodeDescription* desc);
- virtual bool WriteBodyInternal(Writer* w);
+ bool WriteBodyInternal(Writer* w) override;
int WriteCIE(Writer* w);
void WriteFDE(Writer* w, int);
@@ -1840,7 +1836,7 @@ extern "C" {
// GDB will inspect contents of this descriptor.
// Static initialization is necessary to prevent GDB from seeing
// uninitialized descriptor.
- JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
+ JITDescriptor __jit_debug_descriptor = {1, 0, nullptr, nullptr};
#ifdef OBJECT_PRINT
void __gdb_print_v8_object(Object* object) {
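
The gdb-jit.cc churn is the same modernization seen elsewhere in the patch: overriding methods gain 'override', empty virtual destructors become '= default', and aggregate initializers use nullptr. What 'override' buys, sketched with hypothetical types:

    struct Section {
      virtual bool WriteBodyInternal() { return false; }
      virtual ~Section() = default;
    };
    struct DemoSection : Section {
      bool WriteBodyInternal() override { return true; }  // checked against the base
      // bool writeBodyInternal() override;  // a typo like this no longer compiles
    };
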
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index a3c146cc70..e8f72c7177 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -500,22 +500,6 @@ class GlobalHandles::NodeIterator {
DISALLOW_COPY_AND_ASSIGN(NodeIterator);
};
-class GlobalHandles::PendingPhantomCallbacksSecondPassTask
- : public v8::internal::CancelableTask {
- public:
- PendingPhantomCallbacksSecondPassTask(GlobalHandles* global_handles,
- Isolate* isolate)
- : CancelableTask(isolate), global_handles_(global_handles) {}
-
- void RunInternal() override {
- global_handles_->InvokeSecondPassPhantomCallbacksFromTask();
- }
-
- private:
- GlobalHandles* global_handles_;
- DISALLOW_COPY_AND_ASSIGN(PendingPhantomCallbacksSecondPassTask);
-};
-
GlobalHandles::GlobalHandles(Isolate* isolate)
: isolate_(isolate),
number_of_global_handles_(0),
@@ -871,9 +855,10 @@ int GlobalHandles::DispatchPendingPhantomCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
} else if (!second_pass_callbacks_task_posted_) {
second_pass_callbacks_task_posted_ = true;
- auto task = new PendingPhantomCallbacksSecondPassTask(this, isolate());
- V8::GetCurrentPlatform()->CallOnForegroundThread(
- reinterpret_cast<v8::Isolate*>(isolate()), task);
+ auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
+ reinterpret_cast<v8::Isolate*>(isolate()));
+ taskrunner->PostTask(MakeCancelableLambdaTask(
+ isolate(), [this] { InvokeSecondPassPhantomCallbacksFromTask(); }));
}
}
return freed_nodes;
@@ -913,6 +898,7 @@ int GlobalHandles::PostGarbageCollectionProcessing(
const int initial_post_gc_processing_count = ++post_gc_processing_count_;
int freed_nodes = 0;
bool synchronous_second_pass =
+ isolate_->heap()->IsTearingDown() ||
(gc_callback_flags &
(kGCCallbackFlagForced | kGCCallbackFlagCollectAllAvailableGarbage |
kGCCallbackFlagSynchronousPhantomCallbackProcessing)) != 0;
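
The global-handles.cc change above also removes the bespoke PendingPhantomCallbacksSecondPassTask class: a cancelable lambda posted to the isolate's foreground task runner now does the same job. The pattern, condensed from the hunk:

    // Post deferred work without declaring a one-off CancelableTask subclass.
    auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
        reinterpret_cast<v8::Isolate*>(isolate()));
    taskrunner->PostTask(MakeCancelableLambdaTask(
        isolate(), [this] { InvokeSecondPassPhantomCallbacksFromTask(); }));
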
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 246dc0c469..d5e5628c3d 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -184,7 +184,6 @@ class GlobalHandles {
class NodeBlock;
class NodeIterator;
class PendingPhantomCallback;
- class PendingPhantomCallbacksSecondPassTask;
explicit GlobalHandles(Isolate* isolate);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index e9142276e0..c20e6086ee 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -11,7 +11,7 @@
#include <limits>
#include <ostream>
-#include "include/v8.h"
+#include "include/v8-internal.h"
#include "src/base/build_config.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
@@ -55,9 +55,9 @@ namespace internal {
// Determine whether the architecture uses an embedded constant pool
// (contiguous constant pool embedded in code object).
#if V8_TARGET_ARCH_PPC
-#define V8_EMBEDDED_CONSTANT_POOL 1
+#define V8_EMBEDDED_CONSTANT_POOL true
#else
-#define V8_EMBEDDED_CONSTANT_POOL 0
+#define V8_EMBEDDED_CONSTANT_POOL false
#endif
#ifdef V8_TARGET_ARCH_ARM
@@ -76,14 +76,14 @@ constexpr int kStackSpaceRequiredForCompilation = 40;
// Determine whether double field unboxing feature is enabled.
#if V8_TARGET_ARCH_64_BIT
-#define V8_DOUBLE_FIELDS_UNBOXING 1
+#define V8_DOUBLE_FIELDS_UNBOXING true
#else
-#define V8_DOUBLE_FIELDS_UNBOXING 0
+#define V8_DOUBLE_FIELDS_UNBOXING false
#endif
// Some types of tracing require the SFI to store a unique ID.
#if defined(V8_TRACE_MAPS) || defined(V8_TRACE_IGNITION)
-#define V8_SFI_HAS_UNIQUE_ID 1
+#define V8_SFI_HAS_UNIQUE_ID true
#endif
// Superclass for classes only using static method functions.
@@ -95,10 +95,6 @@ class AllStatic {
#endif
};
-// DEPRECATED
-// TODO(leszeks): Delete this during a quiet period
-#define BASE_EMBEDDED
-
typedef uint8_t byte;
typedef uintptr_t Address;
static const Address kNullAddress = 0;
@@ -166,13 +162,7 @@ constexpr intptr_t kIntptrSignBit =
static_cast<intptr_t>(uintptr_t{0x8000000000000000});
constexpr uintptr_t kUintptrAllBitsSet = uintptr_t{0xFFFFFFFFFFFFFFFF};
constexpr bool kRequiresCodeRange = true;
-#if V8_TARGET_ARCH_MIPS64
-// To use pseudo-relative jumps such as j/jal instructions which have 28-bit
-// encoded immediate, the addresses have to be in range of 256MB aligned
-// region. Used only for large object space.
-constexpr size_t kMaximalCodeRangeSize = 256 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 256 * MB;
-#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
+#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
constexpr size_t kMaximalCodeRangeSize = 512 * MB;
constexpr size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
#elif V8_TARGET_ARCH_ARM64
@@ -213,8 +203,8 @@ constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
constexpr size_t kReservedCodeRangePages = 0;
#endif
-// Trigger an incremental GCs once the external memory reaches this limit.
-constexpr int kExternalAllocationSoftLimit = 64 * MB;
+constexpr int kExternalAllocationSoftLimit =
+ internal::Internals::kExternalAllocationSoftLimit;
// Maximum object size that gets allocated into regular pages. Objects larger
// than that size are allocated in large object space and are never moved in
@@ -339,6 +329,10 @@ inline LanguageMode stricter_language_mode(LanguageMode mode1,
static_cast<int>(mode2));
}
+// A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
+// a keyed store is of the form a[expression] = foo.
+enum class StoreOrigin { kMaybeKeyed, kNamed };
+
enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// Enums used by CEntry.
@@ -629,11 +623,13 @@ enum Movability { kMovable, kImmovable };
enum VisitMode {
VISIT_ALL,
+ VISIT_ALL_BUT_READ_ONLY,
VISIT_ALL_IN_MINOR_MC_MARK,
VISIT_ALL_IN_MINOR_MC_UPDATE,
VISIT_ALL_IN_SCAVENGE,
VISIT_ALL_IN_SWEEP_NEWSPACE,
VISIT_ONLY_STRONG,
+ VISIT_ONLY_STRONG_FOR_SERIALIZATION,
VISIT_FOR_SERIALIZATION,
};
@@ -709,6 +705,27 @@ enum InlineCacheState {
GENERIC,
};
+// Printing support.
+inline const char* InlineCacheState2String(InlineCacheState state) {
+ switch (state) {
+ case UNINITIALIZED:
+ return "UNINITIALIZED";
+ case PREMONOMORPHIC:
+ return "PREMONOMORPHIC";
+ case MONOMORPHIC:
+ return "MONOMORPHIC";
+ case RECOMPUTE_HANDLER:
+ return "RECOMPUTE_HANDLER";
+ case POLYMORPHIC:
+ return "POLYMORPHIC";
+ case MEGAMORPHIC:
+ return "MEGAMORPHIC";
+ case GENERIC:
+ return "GENERIC";
+ }
+ UNREACHABLE();
+}
+
enum WhereToStart { kStartAtReceiver, kStartAtPrototype };
enum ResultSentinel { kNotFound = -1, kUnsupported = -2 };
@@ -930,6 +947,8 @@ enum AllocationSiteMode {
LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE
};
+enum class AllocationSiteUpdateMode { kUpdate, kCheckOnly };
+
// The mips architecture prior to revision 5 has inverted encoding for sNaN.
#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
@@ -1578,6 +1597,65 @@ enum class LoadSensitivity {
V(TrapFuncInvalid) \
V(TrapFuncSigMismatch)
+enum KeyedAccessLoadMode {
+ STANDARD_LOAD,
+ LOAD_IGNORE_OUT_OF_BOUNDS,
+};
+
+enum KeyedAccessStoreMode {
+ STANDARD_STORE,
+ STORE_TRANSITION_TO_OBJECT,
+ STORE_TRANSITION_TO_DOUBLE,
+ STORE_AND_GROW_NO_TRANSITION_HANDLE_COW,
+ STORE_AND_GROW_TRANSITION_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_TO_DOUBLE,
+ STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
+ STORE_NO_TRANSITION_HANDLE_COW
+};
+
+enum MutableMode { MUTABLE, IMMUTABLE };
+
+static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) {
+ return store_mode == STORE_TRANSITION_TO_OBJECT ||
+ store_mode == STORE_TRANSITION_TO_DOUBLE ||
+ store_mode == STORE_AND_GROW_TRANSITION_TO_OBJECT ||
+ store_mode == STORE_AND_GROW_TRANSITION_TO_DOUBLE;
+}
+
+static inline bool IsCOWHandlingStoreMode(KeyedAccessStoreMode store_mode) {
+ return store_mode == STORE_NO_TRANSITION_HANDLE_COW ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
+}
+
+static inline KeyedAccessStoreMode GetNonTransitioningStoreMode(
+ KeyedAccessStoreMode store_mode, bool receiver_was_cow) {
+ switch (store_mode) {
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ case STORE_AND_GROW_TRANSITION_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_TO_DOUBLE:
+ store_mode = STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
+ break;
+ case STANDARD_STORE:
+ case STORE_TRANSITION_TO_OBJECT:
+ case STORE_TRANSITION_TO_DOUBLE:
+ store_mode =
+ receiver_was_cow ? STORE_NO_TRANSITION_HANDLE_COW : STANDARD_STORE;
+ break;
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ break;
+ }
+ DCHECK(!IsTransitionStoreMode(store_mode));
+ DCHECK_IMPLIES(receiver_was_cow, IsCOWHandlingStoreMode(store_mode));
+ return store_mode;
+}
+
+static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
+ return store_mode >= STORE_AND_GROW_NO_TRANSITION_HANDLE_COW &&
+ store_mode <= STORE_AND_GROW_TRANSITION_TO_DOUBLE;
+}
+
+enum IcCheckType { ELEMENT, PROPERTY };
} // namespace internal
} // namespace v8
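
GetNonTransitioningStoreMode, added to globals.h above, maps every transitioning store mode to a non-transitioning counterpart while preserving grow and copy-on-write handling. Two worked cases, read straight off the switch:

    // All grow modes collapse to the grow-with-COW-handling mode:
    GetNonTransitioningStoreMode(STORE_AND_GROW_TRANSITION_TO_DOUBLE,
                                 /*receiver_was_cow=*/false)
        == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW
    // Plain transitions fall back to STANDARD_STORE, unless the receiver
    // was copy-on-write:
    GetNonTransitioningStoreMode(STORE_TRANSITION_TO_OBJECT,
                                 /*receiver_was_cow=*/true)
        == STORE_NO_TRANSITION_HANDLE_COW
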
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index b0ffe6a13e..a9f4a0f0b4 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -30,11 +30,9 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
Isolate* isolate;
if (!Isolate::FromWritableHeapObject(heap_object, &isolate)) return true;
Heap* heap = isolate->heap();
- Object** roots_array_start = heap->roots_array_start();
- if (roots_array_start <= location_ &&
- location_ < roots_array_start + Heap::kStrongRootListLength &&
- heap->RootCanBeTreatedAsConstant(
- static_cast<Heap::RootListIndex>(location_ - roots_array_start))) {
+ RootIndex root_index;
+ if (heap->IsRootHandleLocation(location_, &root_index) &&
+ heap->RootCanBeTreatedAsConstant(root_index)) {
return true;
}
if (!AllowHandleDereference::IsAllowed()) return false;
@@ -157,11 +155,9 @@ Object** CanonicalHandleScope::Lookup(Object* object) {
return HandleScope::CreateHandle(isolate_, object);
}
if (object->IsHeapObject()) {
- int index = root_index_map_->Lookup(HeapObject::cast(object));
- if (index != RootIndexMap::kInvalidRootIndex) {
- return isolate_->heap()
- ->root_handle(static_cast<Heap::RootListIndex>(index))
- .location();
+ RootIndex root_index;
+ if (root_index_map_->Lookup(HeapObject::cast(object), &root_index)) {
+ return isolate_->heap()->root_handle(root_index).location();
}
}
Object*** entry = identity_map_->Get(object);
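
The handles.cc changes track the heap root refactor: root lookups now report success through a bool return and a typed RootIndex out-parameter instead of a sentinel integer. Before/after, condensed from the hunk above:

    // Before: compare against a sentinel value.
    int index = root_index_map_->Lookup(HeapObject::cast(object));
    if (index != RootIndexMap::kInvalidRootIndex) { /* use index */ }

    // After: bool success, typed out-parameter.
    RootIndex root_index;
    if (root_index_map_->Lookup(HeapObject::cast(object), &root_index)) {
      /* use root_index */
    }
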
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index c0a7ac9420..f37162f70e 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -319,7 +319,7 @@ class SealHandleScope final {
public:
#ifndef DEBUG
explicit SealHandleScope(Isolate* isolate) {}
- ~SealHandleScope() {}
+ ~SealHandleScope() = default;
#else
explicit inline SealHandleScope(Isolate* isolate);
inline ~SealHandleScope();
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index 9f13253219..152c894796 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -5,298 +5,334 @@
#ifndef V8_HEAP_SYMBOLS_H_
#define V8_HEAP_SYMBOLS_H_
-#define INTERNALIZED_STRING_LIST(V) \
- V(add_string, "add") \
- V(always_string, "always") \
- V(anonymous_function_string, "(anonymous function)") \
- V(anonymous_string, "anonymous") \
- V(apply_string, "apply") \
- V(Arguments_string, "Arguments") \
- V(arguments_string, "arguments") \
- V(arguments_to_string, "[object Arguments]") \
- V(Array_string, "Array") \
- V(array_to_string, "[object Array]") \
- V(ArrayBuffer_string, "ArrayBuffer") \
- V(ArrayIterator_string, "Array Iterator") \
- V(assign_string, "assign") \
- V(async_string, "async") \
- V(auto_string, "auto") \
- V(await_string, "await") \
- V(BigInt_string, "BigInt") \
- V(bigint_string, "bigint") \
- V(BigInt64Array_string, "BigInt64Array") \
- V(BigUint64Array_string, "BigUint64Array") \
- V(bind_string, "bind") \
- V(Boolean_string, "Boolean") \
- V(boolean_string, "boolean") \
- V(boolean_to_string, "[object Boolean]") \
- V(bound__string, "bound ") \
- V(buffer_string, "buffer") \
- V(byte_length_string, "byteLength") \
- V(byte_offset_string, "byteOffset") \
- V(call_string, "call") \
- V(callee_string, "callee") \
- V(caller_string, "caller") \
- V(caseFirst_string, "caseFirst") \
- V(cell_value_string, "%cell_value") \
- V(char_at_string, "CharAt") \
- V(closure_string, "(closure)") \
- V(collation_string, "collation") \
- V(column_string, "column") \
- V(CompileError_string, "CompileError") \
- V(configurable_string, "configurable") \
- V(construct_string, "construct") \
- V(constructor_string, "constructor") \
- V(conjunction_string, "conjunction") \
- V(create_string, "create") \
- V(currency_string, "currency") \
- V(Date_string, "Date") \
- V(date_to_string, "[object Date]") \
- V(day_string, "day") \
- V(dayperiod_string, "dayperiod") \
- V(decimal_string, "decimal") \
- V(default_string, "default") \
- V(defineProperty_string, "defineProperty") \
- V(deleteProperty_string, "deleteProperty") \
- V(did_handle_string, "didHandle") \
- V(disjunction_string, "disjunction") \
- V(display_name_string, "displayName") \
- V(done_string, "done") \
- V(dot_catch_string, ".catch") \
- V(dot_for_string, ".for") \
- V(dot_generator_object_string, ".generator_object") \
- V(dot_iterator_string, ".iterator") \
- V(dot_result_string, ".result") \
- V(dot_string, ".") \
- V(dot_switch_tag_string, ".switch_tag") \
- V(dotAll_string, "dotAll") \
- V(enqueue_string, "enqueue") \
- V(entries_string, "entries") \
- V(enumerable_string, "enumerable") \
- V(element_string, "element") \
- V(era_string, "era") \
- V(Error_string, "Error") \
- V(error_to_string, "[object Error]") \
- V(eval_string, "eval") \
- V(EvalError_string, "EvalError") \
- V(exec_string, "exec") \
- V(false_string, "false") \
- V(flags_string, "flags") \
- V(Float32Array_string, "Float32Array") \
- V(Float64Array_string, "Float64Array") \
- V(fraction_string, "fraction") \
- V(Function_string, "Function") \
- V(function_native_code_string, "function () { [native code] }") \
- V(function_string, "function") \
- V(function_to_string, "[object Function]") \
- V(Generator_string, "Generator") \
- V(get_space_string, "get ") \
- V(get_string, "get") \
- V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
- V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
- V(getPrototypeOf_string, "getPrototypeOf") \
- V(global_string, "global") \
- V(globalThis_string, "globalThis") \
- V(group_string, "group") \
- V(groups_string, "groups") \
- V(has_string, "has") \
- V(hour_string, "hour") \
- V(ignoreCase_string, "ignoreCase") \
- V(ignorePunctuation_string, "ignorePunctuation") \
- V(illegal_access_string, "illegal access") \
- V(illegal_argument_string, "illegal argument") \
- V(index_string, "index") \
- V(Infinity_string, "Infinity") \
- V(infinity_string, "infinity") \
- V(input_string, "input") \
- V(Int16Array_string, "Int16Array") \
- V(Int32Array_string, "Int32Array") \
- V(Int8Array_string, "Int8Array") \
- V(integer_string, "integer") \
- V(isExtensible_string, "isExtensible") \
- V(isView_string, "isView") \
- V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
- V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(keys_string, "keys") \
- V(lastIndex_string, "lastIndex") \
- V(length_string, "length") \
- V(let_string, "let") \
- V(line_string, "line") \
- V(LinkError_string, "LinkError") \
- V(literal_string, "literal") \
- V(locale_string, "locale") \
- V(long_string, "long") \
- V(Map_string, "Map") \
- V(MapIterator_string, "Map Iterator") \
- V(message_string, "message") \
- V(minus_Infinity_string, "-Infinity") \
- V(minus_zero_string, "-0") \
- V(minusSign_string, "minusSign") \
- V(minute_string, "minute") \
- V(Module_string, "Module") \
- V(month_string, "month") \
- V(multiline_string, "multiline") \
- V(name_string, "name") \
- V(NaN_string, "NaN") \
- V(nan_string, "nan") \
- V(narrow_string, "narrow") \
- V(native_string, "native") \
- V(new_target_string, ".new.target") \
- V(next_string, "next") \
- V(NFC_string, "NFC") \
- V(NFD_string, "NFD") \
- V(NFKC_string, "NFKC") \
- V(NFKD_string, "NFKD") \
- V(not_equal, "not-equal") \
- V(null_string, "null") \
- V(null_to_string, "[object Null]") \
- V(Number_string, "Number") \
- V(number_string, "number") \
- V(number_to_string, "[object Number]") \
- V(numeric_string, "numeric") \
- V(Object_string, "Object") \
- V(object_string, "object") \
- V(object_to_string, "[object Object]") \
- V(ok, "ok") \
- V(one_string, "1") \
- V(ownKeys_string, "ownKeys") \
- V(percentSign_string, "percentSign") \
- V(plusSign_string, "plusSign") \
- V(position_string, "position") \
- V(preventExtensions_string, "preventExtensions") \
- V(Promise_string, "Promise") \
- V(promise_string, "promise") \
- V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
- V(proto_string, "__proto__") \
- V(prototype_string, "prototype") \
- V(proxy_string, "proxy") \
- V(Proxy_string, "Proxy") \
- V(query_colon_string, "(?:)") \
- V(RangeError_string, "RangeError") \
- V(raw_string, "raw") \
- V(ReconfigureToDataProperty_string, "ReconfigureToDataProperty") \
- V(ReferenceError_string, "ReferenceError") \
- V(RegExp_string, "RegExp") \
- V(regexp_to_string, "[object RegExp]") \
- V(reject_string, "reject") \
- V(resolve_string, "resolve") \
- V(return_string, "return") \
- V(revoke_string, "revoke") \
- V(RuntimeError_string, "RuntimeError") \
- V(Script_string, "Script") \
- V(script_string, "script") \
- V(short_string, "short") \
- V(second_string, "second") \
- V(Set_string, "Set") \
- V(sensitivity_string, "sensitivity") \
- V(set_space_string, "set ") \
- V(set_string, "set") \
- V(SetIterator_string, "Set Iterator") \
- V(setPrototypeOf_string, "setPrototypeOf") \
- V(SharedArrayBuffer_string, "SharedArrayBuffer") \
- V(source_string, "source") \
- V(sourceText_string, "sourceText") \
- V(stack_string, "stack") \
- V(stackTraceLimit_string, "stackTraceLimit") \
- V(star_default_star_string, "*default*") \
- V(sticky_string, "sticky") \
- V(String_string, "String") \
- V(string_string, "string") \
- V(string_to_string, "[object String]") \
- V(style_string, "style") \
- V(symbol_species_string, "[Symbol.species]") \
- V(Symbol_string, "Symbol") \
- V(symbol_string, "symbol") \
- V(SyntaxError_string, "SyntaxError") \
- V(then_string, "then") \
- V(this_function_string, ".this_function") \
- V(this_string, "this") \
- V(throw_string, "throw") \
- V(timed_out, "timed-out") \
- V(timeZoneName_string, "timeZoneName") \
- V(toJSON_string, "toJSON") \
- V(toString_string, "toString") \
- V(true_string, "true") \
- V(TypeError_string, "TypeError") \
- V(type_string, "type") \
- V(Uint16Array_string, "Uint16Array") \
- V(Uint32Array_string, "Uint32Array") \
- V(Uint8Array_string, "Uint8Array") \
- V(Uint8ClampedArray_string, "Uint8ClampedArray") \
- V(undefined_string, "undefined") \
- V(undefined_to_string, "[object Undefined]") \
- V(unicode_string, "unicode") \
- V(unit_string, "unit") \
- V(URIError_string, "URIError") \
- V(usage_string, "usage") \
- V(use_asm_string, "use asm") \
- V(use_strict_string, "use strict") \
- V(value_string, "value") \
- V(valueOf_string, "valueOf") \
- V(values_string, "values") \
- V(WeakMap_string, "WeakMap") \
- V(WeakSet_string, "WeakSet") \
- V(weekday_string, "weekday") \
- V(will_handle_string, "willHandle") \
- V(writable_string, "writable") \
- V(year_string, "year") \
- V(zero_string, "0")
+#ifdef V8_INTL_SUPPORT
+#define INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
+ V(_, caseFirst_string, "caseFirst") \
+ V(_, day_string, "day") \
+ V(_, dayPeriod_string, "dayPeriod") \
+ V(_, decimal_string, "decimal") \
+ V(_, era_string, "era") \
+ V(_, fraction_string, "fraction") \
+ V(_, group_string, "group") \
+ V(_, h11_string, "h11") \
+ V(_, h12_string, "h12") \
+ V(_, h23_string, "h23") \
+ V(_, h24_string, "h24") \
+ V(_, hour_string, "hour") \
+ V(_, collation_string, "collation") \
+ V(_, currency_string, "currency") \
+ V(_, currencyDisplay_string, "currencyDisplay") \
+ V(_, ignorePunctuation_string, "ignorePunctuation") \
+ V(_, integer_string, "integer") \
+ V(_, literal_string, "literal") \
+ V(_, locale_string, "locale") \
+ V(_, lower_string, "lower") \
+ V(_, maximumFractionDigits_string, "maximumFractionDigits") \
+ V(_, maximumSignificantDigits_string, "maximumSignificantDigits") \
+ V(_, nan_string, "nan") \
+ V(_, minimumFractionDigits_string, "minimumFractionDigits") \
+ V(_, minimumIntegerDigits_string, "minimumIntegerDigits") \
+ V(_, minimumSignificantDigits_string, "minimumSignificantDigits") \
+ V(_, minusSign_string, "minusSign") \
+ V(_, minute_string, "minute") \
+ V(_, month_string, "month") \
+ V(_, numberingSystem_string, "numberingSystem") \
+ V(_, numeric_string, "numeric") \
+ V(_, percentSign_string, "percentSign") \
+ V(_, plusSign_string, "plusSign") \
+ V(_, quarter_string, "quarter") \
+ V(_, second_string, "second") \
+ V(_, sensitivity_string, "sensitivity") \
+ V(_, style_string, "style") \
+ V(_, timeZoneName_string, "timeZoneName") \
+ V(_, type_string, "type") \
+ V(_, upper_string, "upper") \
+ V(_, usage_string, "usage") \
+ V(_, useGrouping_string, "useGrouping") \
+ V(_, unit_string, "unit") \
+ V(_, weekday_string, "weekday") \
+ V(_, year_string, "year")
+#else // V8_INTL_SUPPORT
+#define INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _)
+#endif // V8_INTL_SUPPORT
-#define PRIVATE_SYMBOL_LIST(V) \
- V(call_site_frame_array_symbol) \
- V(call_site_frame_index_symbol) \
- V(console_context_id_symbol) \
- V(console_context_name_symbol) \
- V(class_fields_symbol) \
- V(class_positions_symbol) \
- V(detailed_stack_trace_symbol) \
- V(elements_transition_symbol) \
- V(error_end_pos_symbol) \
- V(error_script_symbol) \
- V(error_start_pos_symbol) \
- V(frozen_symbol) \
- V(generic_symbol) \
- V(home_object_symbol) \
- V(intl_initialized_marker_symbol) \
- V(intl_pattern_symbol) \
- V(intl_resolved_symbol) \
- V(interpreter_trampoline_symbol) \
- V(megamorphic_symbol) \
- V(native_context_index_symbol) \
- V(nonextensible_symbol) \
- V(not_mapped_symbol) \
- V(premonomorphic_symbol) \
- V(promise_async_stack_id_symbol) \
- V(promise_debug_marker_symbol) \
- V(promise_forwarding_handler_symbol) \
- V(promise_handled_by_symbol) \
- V(promise_async_id_symbol) \
- V(sealed_symbol) \
- V(stack_trace_symbol) \
- V(strict_function_transition_symbol) \
- V(uninitialized_symbol)
+#define INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
+ INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
+ V(_, add_string, "add") \
+ V(_, always_string, "always") \
+ V(_, anonymous_function_string, "(anonymous function)") \
+ V(_, anonymous_string, "anonymous") \
+ V(_, apply_string, "apply") \
+ V(_, Arguments_string, "Arguments") \
+ V(_, arguments_string, "arguments") \
+ V(_, arguments_to_string, "[object Arguments]") \
+ V(_, Array_string, "Array") \
+ V(_, array_to_string, "[object Array]") \
+ V(_, ArrayBuffer_string, "ArrayBuffer") \
+ V(_, ArrayIterator_string, "Array Iterator") \
+ V(_, assign_string, "assign") \
+ V(_, async_string, "async") \
+ V(_, auto_string, "auto") \
+ V(_, await_string, "await") \
+ V(_, BigInt_string, "BigInt") \
+ V(_, bigint_string, "bigint") \
+ V(_, BigInt64Array_string, "BigInt64Array") \
+ V(_, BigUint64Array_string, "BigUint64Array") \
+ V(_, bind_string, "bind") \
+ V(_, Boolean_string, "Boolean") \
+ V(_, boolean_string, "boolean") \
+ V(_, boolean_to_string, "[object Boolean]") \
+ V(_, bound__string, "bound ") \
+ V(_, buffer_string, "buffer") \
+ V(_, byte_length_string, "byteLength") \
+ V(_, byte_offset_string, "byteOffset") \
+ V(_, call_string, "call") \
+ V(_, callee_string, "callee") \
+ V(_, caller_string, "caller") \
+ V(_, cell_value_string, "%cell_value") \
+ V(_, char_at_string, "CharAt") \
+ V(_, character_string, "character") \
+ V(_, closure_string, "(closure)") \
+ V(_, code_string, "code") \
+ V(_, column_string, "column") \
+ V(_, CompileError_string, "CompileError") \
+ V(_, configurable_string, "configurable") \
+ V(_, construct_string, "construct") \
+ V(_, constructor_string, "constructor") \
+ V(_, conjunction_string, "conjunction") \
+ V(_, create_string, "create") \
+ V(_, Date_string, "Date") \
+ V(_, date_to_string, "[object Date]") \
+ V(_, default_string, "default") \
+ V(_, defineProperty_string, "defineProperty") \
+ V(_, deleteProperty_string, "deleteProperty") \
+ V(_, did_handle_string, "didHandle") \
+ V(_, disjunction_string, "disjunction") \
+ V(_, display_name_string, "displayName") \
+ V(_, done_string, "done") \
+ V(_, dot_catch_string, ".catch") \
+ V(_, dot_for_string, ".for") \
+ V(_, dot_generator_object_string, ".generator_object") \
+ V(_, dot_iterator_string, ".iterator") \
+ V(_, dot_promise_string, ".promise") \
+ V(_, dot_result_string, ".result") \
+ V(_, dot_string, ".") \
+ V(_, dot_switch_tag_string, ".switch_tag") \
+ V(_, dotAll_string, "dotAll") \
+ V(_, enqueue_string, "enqueue") \
+ V(_, entries_string, "entries") \
+ V(_, enumerable_string, "enumerable") \
+ V(_, element_string, "element") \
+ V(_, Error_string, "Error") \
+ V(_, error_to_string, "[object Error]") \
+ V(_, eval_string, "eval") \
+ V(_, EvalError_string, "EvalError") \
+ V(_, exec_string, "exec") \
+ V(_, false_string, "false") \
+ V(_, flags_string, "flags") \
+ V(_, Float32Array_string, "Float32Array") \
+ V(_, Float64Array_string, "Float64Array") \
+ V(_, Function_string, "Function") \
+ V(_, function_native_code_string, "function () { [native code] }") \
+ V(_, function_string, "function") \
+ V(_, function_to_string, "[object Function]") \
+ V(_, Generator_string, "Generator") \
+ V(_, get_space_string, "get ") \
+ V(_, get_string, "get") \
+ V(_, getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(_, getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
+ V(_, getPrototypeOf_string, "getPrototypeOf") \
+ V(_, global_string, "global") \
+ V(_, globalThis_string, "globalThis") \
+ V(_, granularity_string, "granularity") \
+ V(_, grapheme_string, "grapheme") \
+ V(_, groups_string, "groups") \
+ V(_, has_string, "has") \
+ V(_, ignoreCase_string, "ignoreCase") \
+ V(_, illegal_access_string, "illegal access") \
+ V(_, illegal_argument_string, "illegal argument") \
+ V(_, index_string, "index") \
+ V(_, Infinity_string, "Infinity") \
+ V(_, infinity_string, "infinity") \
+ V(_, input_string, "input") \
+ V(_, Int16Array_string, "Int16Array") \
+ V(_, Int32Array_string, "Int32Array") \
+ V(_, Int8Array_string, "Int8Array") \
+ V(_, isExtensible_string, "isExtensible") \
+ V(_, isView_string, "isView") \
+ V(_, KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
+ V(_, KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
+ V(_, keys_string, "keys") \
+ V(_, lastIndex_string, "lastIndex") \
+ V(_, length_string, "length") \
+ V(_, let_string, "let") \
+ V(_, lineBreakStyle_string, "lineBreakStyle") \
+ V(_, line_string, "line") \
+ V(_, LinkError_string, "LinkError") \
+ V(_, long_string, "long") \
+ V(_, loose_string, "loose") \
+ V(_, Map_string, "Map") \
+ V(_, MapIterator_string, "Map Iterator") \
+ V(_, message_string, "message") \
+ V(_, minus_Infinity_string, "-Infinity") \
+ V(_, minus_zero_string, "-0") \
+ V(_, Module_string, "Module") \
+ V(_, multiline_string, "multiline") \
+ V(_, name_string, "name") \
+ V(_, NaN_string, "NaN") \
+ V(_, narrow_string, "narrow") \
+ V(_, native_string, "native") \
+ V(_, new_target_string, ".new.target") \
+ V(_, next_string, "next") \
+ V(_, NFC_string, "NFC") \
+ V(_, NFD_string, "NFD") \
+ V(_, NFKC_string, "NFKC") \
+ V(_, NFKD_string, "NFKD") \
+ V(_, not_equal, "not-equal") \
+ V(_, normal_string, "normal") \
+ V(_, null_string, "null") \
+ V(_, null_to_string, "[object Null]") \
+ V(_, Number_string, "Number") \
+ V(_, number_string, "number") \
+ V(_, number_to_string, "[object Number]") \
+ V(_, Object_string, "Object") \
+ V(_, object_string, "object") \
+ V(_, object_to_string, "[object Object]") \
+ V(_, ok, "ok") \
+ V(_, one_string, "1") \
+ V(_, ownKeys_string, "ownKeys") \
+ V(_, percent_string, "percent") \
+ V(_, position_string, "position") \
+ V(_, preventExtensions_string, "preventExtensions") \
+ V(_, Promise_string, "Promise") \
+ V(_, promise_string, "promise") \
+ V(_, PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
+ V(_, proto_string, "__proto__") \
+ V(_, prototype_string, "prototype") \
+ V(_, proxy_string, "proxy") \
+ V(_, Proxy_string, "Proxy") \
+ V(_, query_colon_string, "(?:)") \
+ V(_, RangeError_string, "RangeError") \
+ V(_, raw_string, "raw") \
+ V(_, ReconfigureToDataProperty_string, "ReconfigureToDataProperty") \
+ V(_, ReferenceError_string, "ReferenceError") \
+ V(_, RegExp_string, "RegExp") \
+ V(_, regexp_to_string, "[object RegExp]") \
+ V(_, reject_string, "reject") \
+ V(_, resolve_string, "resolve") \
+ V(_, return_string, "return") \
+ V(_, revoke_string, "revoke") \
+ V(_, RuntimeError_string, "RuntimeError") \
+ V(_, Script_string, "Script") \
+ V(_, script_string, "script") \
+ V(_, short_string, "short") \
+ V(_, Set_string, "Set") \
+ V(_, sentence_string, "sentence") \
+ V(_, set_space_string, "set ") \
+ V(_, set_string, "set") \
+ V(_, SetIterator_string, "Set Iterator") \
+ V(_, setPrototypeOf_string, "setPrototypeOf") \
+ V(_, SharedArrayBuffer_string, "SharedArrayBuffer") \
+ V(_, source_string, "source") \
+ V(_, sourceText_string, "sourceText") \
+ V(_, stack_string, "stack") \
+ V(_, stackTraceLimit_string, "stackTraceLimit") \
+ V(_, star_default_star_string, "*default*") \
+ V(_, sticky_string, "sticky") \
+ V(_, strict_string, "strict") \
+ V(_, String_string, "String") \
+ V(_, string_string, "string") \
+ V(_, string_to_string, "[object String]") \
+ V(_, symbol_species_string, "[Symbol.species]") \
+ V(_, Symbol_string, "Symbol") \
+ V(_, symbol_string, "symbol") \
+ V(_, SyntaxError_string, "SyntaxError") \
+ V(_, then_string, "then") \
+ V(_, this_function_string, ".this_function") \
+ V(_, this_string, "this") \
+ V(_, throw_string, "throw") \
+ V(_, timed_out, "timed-out") \
+ V(_, toJSON_string, "toJSON") \
+ V(_, toString_string, "toString") \
+ V(_, true_string, "true") \
+ V(_, TypeError_string, "TypeError") \
+ V(_, Uint16Array_string, "Uint16Array") \
+ V(_, Uint32Array_string, "Uint32Array") \
+ V(_, Uint8Array_string, "Uint8Array") \
+ V(_, Uint8ClampedArray_string, "Uint8ClampedArray") \
+ V(_, undefined_string, "undefined") \
+ V(_, undefined_to_string, "[object Undefined]") \
+ V(_, unicode_string, "unicode") \
+ V(_, URIError_string, "URIError") \
+ V(_, use_asm_string, "use asm") \
+ V(_, use_strict_string, "use strict") \
+ V(_, value_string, "value") \
+ V(_, valueOf_string, "valueOf") \
+ V(_, values_string, "values") \
+ V(_, WeakMap_string, "WeakMap") \
+ V(_, WeakSet_string, "WeakSet") \
+ V(_, week_string, "week") \
+ V(_, will_handle_string, "willHandle") \
+ V(_, word_string, "word") \
+ V(_, writable_string, "writable") \
+ V(_, zero_string, "0")
-#define PUBLIC_SYMBOL_LIST(V) \
- V(async_iterator_symbol, Symbol.asyncIterator) \
- V(iterator_symbol, Symbol.iterator) \
- V(intl_fallback_symbol, IntlFallback) \
- V(match_all_symbol, Symbol.matchAll) \
- V(match_symbol, Symbol.match) \
- V(replace_symbol, Symbol.replace) \
- V(search_symbol, Symbol.search) \
- V(species_symbol, Symbol.species) \
- V(split_symbol, Symbol.split) \
- V(to_primitive_symbol, Symbol.toPrimitive) \
- V(unscopables_symbol, Symbol.unscopables)
+#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
+ V(_, call_site_frame_array_symbol) \
+ V(_, call_site_frame_index_symbol) \
+ V(_, console_context_id_symbol) \
+ V(_, console_context_name_symbol) \
+ V(_, class_fields_symbol) \
+ V(_, class_positions_symbol) \
+ V(_, detailed_stack_trace_symbol) \
+ V(_, elements_transition_symbol) \
+ V(_, error_end_pos_symbol) \
+ V(_, error_script_symbol) \
+ V(_, error_start_pos_symbol) \
+ V(_, frozen_symbol) \
+ V(_, generic_symbol) \
+ V(_, home_object_symbol) \
+ V(_, intl_initialized_marker_symbol) \
+ V(_, intl_resolved_symbol) \
+ V(_, interpreter_trampoline_symbol) \
+ V(_, megamorphic_symbol) \
+ V(_, native_context_index_symbol) \
+ V(_, nonextensible_symbol) \
+ V(_, not_mapped_symbol) \
+ V(_, premonomorphic_symbol) \
+ V(_, promise_async_stack_id_symbol) \
+ V(_, promise_debug_marker_symbol) \
+ V(_, promise_forwarding_handler_symbol) \
+ V(_, promise_handled_by_symbol) \
+ V(_, promise_async_id_symbol) \
+ V(_, sealed_symbol) \
+ V(_, stack_trace_symbol) \
+ V(_, strict_function_transition_symbol) \
+ V(_, wasm_exception_tag_symbol) \
+ V(_, wasm_exception_values_symbol) \
+ V(_, uninitialized_symbol)
+
+#define PUBLIC_SYMBOL_LIST_GENERATOR(V, _) \
+ V(_, async_iterator_symbol, Symbol.asyncIterator) \
+ V(_, iterator_symbol, Symbol.iterator) \
+ V(_, intl_fallback_symbol, IntlFallback) \
+ V(_, match_all_symbol, Symbol.matchAll) \
+ V(_, match_symbol, Symbol.match) \
+ V(_, replace_symbol, Symbol.replace) \
+ V(_, search_symbol, Symbol.search) \
+ V(_, species_symbol, Symbol.species) \
+ V(_, split_symbol, Symbol.split) \
+ V(_, to_primitive_symbol, Symbol.toPrimitive) \
+ V(_, unscopables_symbol, Symbol.unscopables)
// Well-Known Symbols are "Public" symbols, which have a bit set which causes
// them to produce an undefined value when a load results in a failed access
// check. Because this behaviour is not specified properly as of yet, it only
// applies to a subset of spec-defined Well-Known Symbols.
-#define WELL_KNOWN_SYMBOL_LIST(V) \
- V(has_instance_symbol, Symbol.hasInstance) \
- V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
- V(to_string_tag_symbol, Symbol.toStringTag)
+#define WELL_KNOWN_SYMBOL_LIST_GENERATOR(V, _) \
+ V(_, has_instance_symbol, Symbol.hasInstance) \
+ V(_, is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
+ V(_, to_string_tag_symbol, Symbol.toStringTag)
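The (V, _) generator shape adopted above threads an extra argument through every list entry, so a single list can drive several different expansions (enum constants, accessors, lookup tables) without per-list macro variants, and the INTL list can collapse to an empty macro when V8_INTL_SUPPORT is off. A minimal standalone sketch of the pattern; the COLOR_LIST_GENERATOR list and all names here are illustrative, not V8's:

#include <cstdio>

#define COLOR_LIST_GENERATOR(V, _) \
  V(_, red_string, "red")          \
  V(_, green_string, "green")

// Expansion 1: one enum constant per entry; the threaded argument is unused.
#define DECLARE_ENUM(_, name, str) k_##name,
enum ColorIndex { COLOR_LIST_GENERATOR(DECLARE_ENUM, 0) kCount };
#undef DECLARE_ENUM

// Expansion 2: one table entry per item, threading a prefix through '_'.
#define EMIT_ENTRY(prefix, name, str) prefix str,
static const char* kColorNames[] = {
    COLOR_LIST_GENERATOR(EMIT_ENTRY, "color: ")};
#undef EMIT_ENTRY

int main() {
  for (const char* name : kColorNames) std::printf("%s\n", name);
  return 0;  // prints "color: red" then "color: green"
}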
#define INCREMENTAL_SCOPES(F) \
/* MC_INCREMENTAL is the top-level incremental marking scope. */ \
@@ -394,7 +430,8 @@
F(SCAVENGER_SCAVENGE_PARALLEL) \
F(SCAVENGER_SCAVENGE_ROOTS) \
F(SCAVENGER_SCAVENGE_UPDATE_REFS) \
- F(SCAVENGER_SCAVENGE_WEAK)
+ F(SCAVENGER_SCAVENGE_WEAK) \
+ F(SCAVENGER_SCAVENGE_FINALIZE)
#define TRACER_BACKGROUND_SCOPES(F) \
F(BACKGROUND_ARRAY_BUFFER_FREE) \
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index 2c28f46a85..0cf4ae945d 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -12,50 +12,54 @@
namespace v8 {
namespace internal {
-void ArrayBufferCollector::AddGarbageAllocations(
+namespace {
+
+void FreeAllocationsHelper(
+ Heap* heap, const std::vector<JSArrayBuffer::Allocation>& allocations) {
+ for (JSArrayBuffer::Allocation alloc : allocations) {
+ JSArrayBuffer::FreeBackingStore(heap->isolate(), alloc);
+ }
+}
+
+} // namespace
+
+void ArrayBufferCollector::QueueOrFreeGarbageAllocations(
std::vector<JSArrayBuffer::Allocation> allocations) {
- base::LockGuard<base::Mutex> guard(&allocations_mutex_);
- allocations_.push_back(std::move(allocations));
+ if (heap_->ShouldReduceMemory()) {
+ FreeAllocationsHelper(heap_, allocations);
+ } else {
+ base::LockGuard<base::Mutex> guard(&allocations_mutex_);
+ allocations_.push_back(std::move(allocations));
+ }
}
-void ArrayBufferCollector::FreeAllocations() {
+void ArrayBufferCollector::PerformFreeAllocations() {
base::LockGuard<base::Mutex> guard(&allocations_mutex_);
for (const std::vector<JSArrayBuffer::Allocation>& allocations :
allocations_) {
- for (JSArrayBuffer::Allocation alloc : allocations) {
- JSArrayBuffer::FreeBackingStore(heap_->isolate(), alloc);
- }
+ FreeAllocationsHelper(heap_, allocations);
}
allocations_.clear();
}
-class ArrayBufferCollector::FreeingTask final : public CancelableTask {
- public:
- explicit FreeingTask(Heap* heap)
- : CancelableTask(heap->isolate()), heap_(heap) {}
-
- virtual ~FreeingTask() {}
-
- private:
- void RunInternal() final {
- TRACE_BACKGROUND_GC(
- heap_->tracer(),
- GCTracer::BackgroundScope::BACKGROUND_ARRAY_BUFFER_FREE);
- heap_->array_buffer_collector()->FreeAllocations();
- }
-
- Heap* heap_;
-};
-
-void ArrayBufferCollector::FreeAllocationsOnBackgroundThread() {
+void ArrayBufferCollector::FreeAllocations() {
// TODO(wez): Remove backing-store from external memory accounting.
heap_->account_external_memory_concurrently_freed();
- if (!heap_->IsTearingDown() && FLAG_concurrent_array_buffer_freeing) {
+ if (!heap_->IsTearingDown() && !heap_->ShouldReduceMemory() &&
+ FLAG_concurrent_array_buffer_freeing) {
V8::GetCurrentPlatform()->CallOnWorkerThread(
- base::make_unique<FreeingTask>(heap_));
+ MakeCancelableLambdaTask(heap_->isolate(), [this] {
+ TRACE_BACKGROUND_GC(
+ heap_->tracer(),
+ GCTracer::BackgroundScope::BACKGROUND_ARRAY_BUFFER_FREE);
+ PerformFreeAllocations();
+ }));
} else {
- // Fallback for when concurrency is disabled/restricted.
- FreeAllocations();
+    // Fallback for when concurrency is disabled/restricted. This is the
+    // case, e.g., when the GC should reduce memory. For such GCs the
+    // QueueOrFreeGarbageAllocations() call would already have freed the
+    // allocations immediately, and this call frees any previously queued ones.
+ PerformFreeAllocations();
}
}
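QueueOrFreeGarbageAllocations() above frees eagerly when the GC wants to reduce memory and queues otherwise; PerformFreeAllocations() later drains the queue under the lock, either from a worker task or synchronously. A standalone sketch of that queue-or-free shape, with std::mutex standing in for base::Mutex and the Allocation type and memory-pressure flag as stand-ins:

#include <mutex>
#include <vector>

struct Allocation { void* data; };

void FreeBackingStore(const Allocation& allocation) {
  (void)allocation;  // a real collector would release allocation.data here
}

class CollectorLike {
 public:
  explicit CollectorLike(bool should_reduce_memory)
      : should_reduce_memory_(should_reduce_memory) {}

  void QueueOrFree(std::vector<Allocation> allocations) {
    if (should_reduce_memory_) {
      for (const Allocation& a : allocations) FreeBackingStore(a);
    } else {
      std::lock_guard<std::mutex> guard(mutex_);
      queued_.push_back(std::move(allocations));
    }
  }

  // Drains everything queued so far; callable from a background task.
  void PerformFree() {
    std::lock_guard<std::mutex> guard(mutex_);
    for (const auto& batch : queued_)
      for (const Allocation& a : batch) FreeBackingStore(a);
    queued_.clear();
  }

 private:
  const bool should_reduce_memory_;
  std::mutex mutex_;
  std::vector<std::vector<Allocation>> queued_;
};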
diff --git a/deps/v8/src/heap/array-buffer-collector.h b/deps/v8/src/heap/array-buffer-collector.h
index 74a28c3d06..784092e936 100644
--- a/deps/v8/src/heap/array-buffer-collector.h
+++ b/deps/v8/src/heap/array-buffer-collector.h
@@ -23,24 +23,27 @@ class ArrayBufferCollector {
public:
explicit ArrayBufferCollector(Heap* heap) : heap_(heap) {}
- ~ArrayBufferCollector() { FreeAllocations(); }
-
- // These allocations will begin to be freed once FreeAllocations() is called,
- // or on TearDown.
- void AddGarbageAllocations(
+ ~ArrayBufferCollector() { PerformFreeAllocations(); }
+
+ // These allocations will be either
+ // - freed immediately when under memory pressure, or
+ // - queued for freeing in FreeAllocations() or during tear down.
+ //
+ // FreeAllocations() potentially triggers a background task for processing.
+ void QueueOrFreeGarbageAllocations(
std::vector<JSArrayBuffer::Allocation> allocations);
// Calls FreeAllocations() on a background thread.
- void FreeAllocationsOnBackgroundThread();
+ void FreeAllocations();
private:
class FreeingTask;
- // Begin freeing the allocations added through AddGarbageAllocations. Also
- // called by TearDown.
- void FreeAllocations();
+ // Begin freeing the allocations added through QueueOrFreeGarbageAllocations.
+ // Also called by TearDown.
+ void PerformFreeAllocations();
- Heap* heap_;
+ Heap* const heap_;
base::Mutex allocations_mutex_;
std::vector<std::vector<JSArrayBuffer::Allocation>> allocations_;
};
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index e0d862aed7..814cfce63a 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -18,7 +18,7 @@ namespace internal {
void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
if (buffer->backing_store() == nullptr) return;
- const size_t length = NumberToSize(buffer->byte_length());
+ const size_t length = buffer->byte_length();
Page* page = Page::FromAddress(buffer->address());
{
base::LockGuard<base::Mutex> guard(page->mutex());
@@ -42,7 +42,7 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
if (buffer->backing_store() == nullptr) return;
Page* page = Page::FromAddress(buffer->address());
- const size_t length = NumberToSize(buffer->byte_length());
+ const size_t length = buffer->byte_length();
{
base::LockGuard<base::Mutex> guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
@@ -100,6 +100,11 @@ void LocalArrayBufferTracker::Add(JSArrayBuffer* buffer, size_t length) {
page_->IncrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, length);
+ AddInternal(buffer, length);
+}
+
+void LocalArrayBufferTracker::AddInternal(JSArrayBuffer* buffer,
+ size_t length) {
auto ret = array_buffers_.insert(
{buffer,
{buffer->backing_store(), length, buffer->backing_store(),
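The new AddInternal() records the buffer without touching counters, so the public Add() and the move path in Process() each handle accounting exactly once. A minimal stand-in for that split, not V8's tracker (AddInternal is private in the real class; it is public here only to keep the sketch short):

#include <cstddef>
#include <unordered_map>

class TrackerLike {
 public:
  void Add(void* buffer, size_t length) {
    external_bytes_ += length;  // counter update only on the public path
    AddInternal(buffer, length);
  }

  // Move path: the caller transfers counters between pages itself, then
  // registers the buffer here without touching them again.
  void AddInternal(void* buffer, size_t length) { lengths_[buffer] = length; }

 private:
  size_t external_bytes_ = 0;
  std::unordered_map<void*, size_t> lengths_;
};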
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 0a158e3543..f35f2b3754 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -26,11 +26,10 @@ void LocalArrayBufferTracker::Process(Callback callback) {
JSArrayBuffer* new_buffer = nullptr;
JSArrayBuffer* old_buffer = nullptr;
size_t freed_memory = 0;
- size_t moved_memory = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end(); ++it) {
old_buffer = it->first;
- Page* old_page = Page::FromAddress(old_buffer->address());
+ DCHECK_EQ(page_, Page::FromAddress(old_buffer->address()));
const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
kept_array_buffers.insert(*it);
@@ -49,26 +48,25 @@ void LocalArrayBufferTracker::Process(Callback callback) {
// We should decrement before adding to avoid potential overflows in
// the external memory counters.
DCHECK_EQ(it->first->is_wasm_memory(), it->second.is_wasm_memory);
- old_page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, length);
- tracker->Add(new_buffer, length);
+ tracker->AddInternal(new_buffer, length);
+ MemoryChunk::MoveExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer,
+ static_cast<MemoryChunk*>(page_),
+ static_cast<MemoryChunk*>(target_page), length);
}
- moved_memory += it->second.length;
-
} else if (result == kRemoveEntry) {
- const size_t length = it->second.length;
- freed_memory += length;
+ freed_memory += it->second.length;
// We pass backing_store() and stored length to the collector for freeing
// the backing store. Wasm allocations will go through their own tracker
// based on the backing store.
backing_stores_to_free.push_back(it->second);
- old_page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, length);
} else {
UNREACHABLE();
}
}
- if (moved_memory || freed_memory) {
+ if (freed_memory) {
+ page_->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, freed_memory);
// TODO(wez): Remove backing-store from external memory accounting.
page_->heap()->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
@@ -76,9 +74,9 @@ void LocalArrayBufferTracker::Process(Callback callback) {
array_buffers_.swap(kept_array_buffers);
- // Pass the backing stores that need to be freed to the main thread for later
- // distribution.
- page_->heap()->array_buffer_collector()->AddGarbageAllocations(
+ // Pass the backing stores that need to be freed to the main thread for
+ // potential later distribution.
+ page_->heap()->array_buffer_collector()->QueueOrFreeGarbageAllocations(
std::move(backing_stores_to_free));
}
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index e60fe6c6c0..3c00c2c486 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -113,6 +113,10 @@ class LocalArrayBufferTracker {
typedef std::unordered_map<JSArrayBuffer*, JSArrayBuffer::Allocation, Hasher>
TrackingData;
+ // Internal version of add that does not update counters. Requires separate
+ // logic for updating external memory counters.
+ inline void AddInternal(JSArrayBuffer* buffer, size_t length);
+
inline Space* space();
Page* page_;
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index f6eabbb021..5e147ca9a5 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -74,15 +74,19 @@ class ConcurrentMarkingVisitor final
public:
using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
- explicit ConcurrentMarkingVisitor(ConcurrentMarking::MarkingWorklist* shared,
- ConcurrentMarking::MarkingWorklist* bailout,
- LiveBytesMap* live_bytes,
- WeakObjects* weak_objects, int task_id)
+ explicit ConcurrentMarkingVisitor(
+ ConcurrentMarking::MarkingWorklist* shared,
+ ConcurrentMarking::MarkingWorklist* bailout, LiveBytesMap* live_bytes,
+ WeakObjects* weak_objects,
+ ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
+ bool embedder_tracing_enabled)
: shared_(shared, task_id),
bailout_(bailout, task_id),
weak_objects_(weak_objects),
+ embedder_objects_(embedder_objects, task_id),
marking_state_(live_bytes),
- task_id_(task_id) {}
+ task_id_(task_id),
+ embedder_tracing_enabled_(embedder_tracing_enabled) {}
template <typename T>
static V8_INLINE T* Cast(HeapObject* object) {
@@ -138,19 +142,24 @@ class ConcurrentMarkingVisitor final
for (MaybeObject** slot = start; slot < end; slot++) {
MaybeObject* object = base::AsAtomicPointer::Relaxed_Load(slot);
HeapObject* heap_object;
- if (object->ToStrongHeapObject(&heap_object)) {
+ if (object->GetHeapObjectIfStrong(&heap_object)) {
// If the reference changes concurrently from strong to weak, the write
// barrier will treat the weak reference as strong, so we won't miss the
// weak reference.
ProcessStrongHeapObject(host, reinterpret_cast<Object**>(slot),
heap_object);
- } else if (object->ToWeakHeapObject(&heap_object)) {
+ } else if (object->GetHeapObjectIfWeak(&heap_object)) {
ProcessWeakHeapObject(
host, reinterpret_cast<HeapObjectReference**>(slot), heap_object);
}
}
}
+ // Weak list pointers should be ignored during marking. The lists are
+ // reconstructed after GC.
+ void VisitCustomWeakPointers(HeapObject* host, Object** start,
+ Object** end) override {}
+
void VisitPointersInSnapshot(HeapObject* host, const SlotSnapshot& snapshot) {
for (int i = 0; i < snapshot.number_of_slots(); i++) {
Object** slot = snapshot.slot(i);
@@ -175,31 +184,27 @@ class ConcurrentMarkingVisitor final
return VisitJSObjectSubclass(map, object);
}
- int VisitJSArrayBuffer(Map* map, JSArrayBuffer* object) {
- return VisitJSObjectSubclass(map, object);
- }
-
int VisitWasmInstanceObject(Map* map, WasmInstanceObject* object) {
return VisitJSObjectSubclass(map, object);
}
+ // Some JS objects can carry back links to embedders that contain information
+ // relevant to the garbage collectors.
+
int VisitJSApiObject(Map* map, JSObject* object) {
- if (marking_state_.IsGrey(object)) {
- // The main thread will do wrapper tracing in Blink.
- bailout_.Push(object);
- }
- return 0;
+ return VisitEmbedderTracingSubclass(map, object);
}
- int VisitJSFunction(Map* map, JSFunction* object) {
- int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
- int used_size = map->UsedInstanceSize();
- DCHECK_LE(used_size, size);
- DCHECK_GE(used_size, JSObject::kHeaderSize);
- const SlotSnapshot& snapshot = MakeSlotSnapshotWeak(map, object, used_size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ int VisitJSArrayBuffer(Map* map, JSArrayBuffer* object) {
+ return VisitEmbedderTracingSubclass(map, object);
+ }
+
+ int VisitJSDataView(Map* map, JSDataView* object) {
+ return VisitEmbedderTracingSubclass(map, object);
+ }
+
+ int VisitJSTypedArray(Map* map, JSTypedArray* object) {
+ return VisitEmbedderTracingSubclass(map, object);
}
// ===========================================================================
@@ -270,34 +275,18 @@ class ConcurrentMarkingVisitor final
}
// ===========================================================================
- // Objects with weak fields and/or side-effectiful visitation.
+ // Side-effectful visitation.
// ===========================================================================
int VisitBytecodeArray(Map* map, BytecodeArray* object) {
if (!ShouldVisit(object)) return 0;
- int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
+ int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
- BytecodeArray::BodyDescriptorWeak::IterateBody(map, object, size, this);
+ BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
object->MakeOlder();
return size;
}
- int VisitAllocationSite(Map* map, AllocationSite* object) {
- if (!ShouldVisit(object)) return 0;
- int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- AllocationSite::BodyDescriptorWeak::IterateBody(map, object, size, this);
- return size;
- }
-
- int VisitCodeDataContainer(Map* map, CodeDataContainer* object) {
- if (!ShouldVisit(object)) return 0;
- int size = CodeDataContainer::BodyDescriptorWeak::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- CodeDataContainer::BodyDescriptorWeak::IterateBody(map, object, size, this);
- return size;
- }
-
int VisitMap(Map* meta_map, Map* map) {
if (marking_state_.IsGrey(map)) {
// Maps have ad-hoc weakness for descriptor arrays. They also clear the
@@ -315,14 +304,6 @@ class ConcurrentMarkingVisitor final
return 0;
}
- int VisitNativeContext(Map* map, Context* object) {
- if (!ShouldVisit(object)) return 0;
- int size = Context::BodyDescriptorWeak::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- Context::BodyDescriptorWeak::IterateBody(map, object, size, this);
- return size;
- }
-
int VisitTransitionArray(Map* map, TransitionArray* array) {
if (!ShouldVisit(array)) return 0;
VisitMapPointer(array, array->map_slot());
@@ -443,6 +424,18 @@ class ConcurrentMarkingVisitor final
}
template <typename T>
+ int VisitEmbedderTracingSubclass(Map* map, T* object) {
+ DCHECK(object->IsApiWrapper());
+ int size = VisitJSObjectSubclass(map, object);
+ if (size && embedder_tracing_enabled_) {
+ // Success: The object needs to be processed for embedder references on
+ // the main thread.
+ embedder_objects_.Push(object);
+ }
+ return size;
+ }
+
+ template <typename T>
int VisitLeftTrimmableArray(Map* map, T* object) {
// The synchronized_length() function checks that the length is a Smi.
// This is not necessarily the case if the array is being left-trimmed.
@@ -466,20 +459,14 @@ class ConcurrentMarkingVisitor final
return slot_snapshot_;
}
- template <typename T>
- const SlotSnapshot& MakeSlotSnapshotWeak(Map* map, T* object, int size) {
- SlotSnapshottingVisitor visitor(&slot_snapshot_);
- visitor.VisitPointer(object,
- reinterpret_cast<Object**>(object->map_slot()));
- T::BodyDescriptorWeak::IterateBody(map, object, size, &visitor);
- return slot_snapshot_;
- }
ConcurrentMarking::MarkingWorklist::View shared_;
ConcurrentMarking::MarkingWorklist::View bailout_;
WeakObjects* weak_objects_;
+ ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
ConcurrentMarkingState marking_state_;
int task_id_;
SlotSnapshot slot_snapshot_;
+ bool embedder_tracing_enabled_;
};
// Strings can change maps due to conversion to thin or external strings.
@@ -524,7 +511,7 @@ class ConcurrentMarking::Task : public CancelableTask {
task_state_(task_state),
task_id_(task_id) {}
- virtual ~Task() {}
+ ~Task() override = default;
private:
// v8::internal::CancelableTask overrides.
@@ -541,12 +528,14 @@ class ConcurrentMarking::Task : public CancelableTask {
ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout,
MarkingWorklist* on_hold,
- WeakObjects* weak_objects)
+ WeakObjects* weak_objects,
+ EmbedderTracingWorklist* embedder_objects)
: heap_(heap),
shared_(shared),
bailout_(bailout),
on_hold_(on_hold),
- weak_objects_(weak_objects) {
+ weak_objects_(weak_objects),
+ embedder_objects_(embedder_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking);
@@ -558,8 +547,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
- ConcurrentMarkingVisitor visitor(shared_, bailout_, &task_state->live_bytes,
- weak_objects_, task_id);
+ ConcurrentMarkingVisitor visitor(
+ shared_, bailout_, &task_state->live_bytes, weak_objects_,
+ embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse());
double time_ms;
size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) {
@@ -626,6 +616,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
shared_->FlushToGlobal(task_id);
bailout_->FlushToGlobal(task_id);
on_hold_->FlushToGlobal(task_id);
+ embedder_objects_->FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
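Each concurrent marker now pushes wrapper objects onto a dedicated embedder worklist through a task-local view and flushes it to the global list once at the end of marking, instead of bailing the objects out to the main thread. A simplified standalone sketch of the view/flush pattern; this is not V8's segmented Worklist template, just its locking shape:

#include <mutex>
#include <vector>

class WorklistLike {
 public:
  class View {
   public:
    explicit View(WorklistLike* list) : list_(list) {}
    // Push is lock-free: each marking task owns its local segment.
    void Push(void* object) { local_.push_back(object); }
    // Called once per task, mirroring FlushToGlobal(task_id) above.
    void FlushToGlobal() {
      std::lock_guard<std::mutex> guard(list_->mutex_);
      list_->global_.insert(list_->global_.end(), local_.begin(),
                            local_.end());
      local_.clear();
    }

   private:
    WorklistLike* list_;
    std::vector<void*> local_;
  };

 private:
  std::mutex mutex_;
  std::vector<void*> global_;
};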
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 0b8ffd9336..34de02fea1 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -58,10 +58,12 @@ class ConcurrentMarking {
// task 0, reserved for the main thread).
static constexpr int kMaxTasks = 7;
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
+ using EmbedderTracingWorklist = Worklist<HeapObject*, 16 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout, MarkingWorklist* on_hold,
- WeakObjects* weak_objects);
+ WeakObjects* weak_objects,
+ EmbedderTracingWorklist* embedder_objects);
// Schedules asynchronous tasks to perform concurrent marking. Objects in the
// heap should not be moved while these are active (can be stopped safely via
@@ -108,6 +110,7 @@ class ConcurrentMarking {
MarkingWorklist* const bailout_;
MarkingWorklist* const on_hold_;
WeakObjects* const weak_objects_;
+ EmbedderTracingWorklist* const embedder_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
std::atomic<bool> ephemeron_marked_{false};
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index bf6d5f3b90..198cdd4b1a 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -24,13 +24,6 @@ void LocalEmbedderHeapTracer::TraceEpilogue() {
remote_tracer_->TraceEpilogue();
}
-void LocalEmbedderHeapTracer::AbortTracing() {
- if (!InUse()) return;
-
- cached_wrappers_to_trace_.clear();
- remote_tracer_->AbortTracing();
-}
-
void LocalEmbedderHeapTracer::EnterFinalPause() {
if (!InUse()) return;
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index ab8a46bb53..2588200db9 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -24,6 +24,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
}
+ EmbedderHeapTracer* remote_tracer() const { return remote_tracer_; }
+
void SetRemoteTracer(EmbedderHeapTracer* tracer) {
if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
@@ -36,7 +38,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
void TracePrologue();
void TraceEpilogue();
- void AbortTracing();
void EnterFinalPause();
bool Trace(double deadline);
bool IsRemoteTracingDone();
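LocalEmbedderHeapTracer keeps the same delegation guard after the AbortTracing() removal: every operation no-ops unless a remote tracer is attached, and the new remote_tracer() accessor exposes the delegate. A stand-in sketch of that guard with illustrative class names, not V8's declarations:

class RemoteTracerLike {
 public:
  virtual ~RemoteTracerLike() = default;
  virtual void TraceEpilogue() = 0;
};

class LocalTracerLike {
 public:
  RemoteTracerLike* remote_tracer() const { return remote_tracer_; }
  bool InUse() const { return remote_tracer_ != nullptr; }

  void SetRemoteTracer(RemoteTracerLike* tracer) { remote_tracer_ = tracer; }

  void TraceEpilogue() {
    if (!InUse()) return;  // GC proceeds without embedder participation
    remote_tracer_->TraceEpilogue();
  }

 private:
  RemoteTracerLike* remote_tracer_ = nullptr;
};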
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 614c6ec174..eb1661aaee 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -16,73 +16,14 @@
namespace v8 {
namespace internal {
-#define ROOT_ACCESSOR(type, name, camel_name) \
- Handle<type> Factory::name() { \
- return Handle<type>(bit_cast<type**>( \
- &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
+#define ROOT_ACCESSOR(type, name, CamelName) \
+ Handle<type> Factory::name() { \
+ return Handle<type>(bit_cast<type**>( \
+ &isolate()->heap()->roots_[RootIndex::k##CamelName])); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- Handle<Map> Factory::name##_map() { \
- return Handle<Map>(bit_cast<Map**>( \
- &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
- }
-STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
-#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
- Handle<Map> Factory::name##_map() { \
- return Handle<Map>(bit_cast<Map**>( \
- &isolate()->heap()->roots_[Heap::k##Name##Size##MapRootIndex])); \
- }
-ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
-#undef ALLOCATION_SITE_MAP_ACCESSOR
-
-#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
- Handle<Map> Factory::name##_map() { \
- return Handle<Map>(bit_cast<Map**>( \
- &isolate()->heap()->roots_[Heap::k##Name##Size##MapRootIndex])); \
- }
-DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
-#undef DATA_HANDLER_MAP_ACCESSOR
-
-#define STRING_ACCESSOR(name, str) \
- Handle<String> Factory::name() { \
- return Handle<String>(bit_cast<String**>( \
- &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
- }
-INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name) \
- Handle<Symbol> Factory::name() { \
- return Handle<Symbol>(bit_cast<Symbol**>( \
- &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
- }
-PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, description) \
- Handle<Symbol> Factory::name() { \
- return Handle<Symbol>(bit_cast<Symbol**>( \
- &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
- }
-PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
-WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
- Handle<AccessorInfo> Factory::accessor_name##_accessor() { \
- return Handle<AccessorInfo>(bit_cast<AccessorInfo**>( \
- &isolate() \
- ->heap() \
- ->roots_[Heap::k##AccessorName##AccessorRootIndex])); \
- }
-ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
-#undef ACCESSOR_INFO_ACCESSOR
-
Handle<String> Factory::InternalizeString(Handle<String> string) {
if (string->IsInternalizedString()) return string;
return StringTable::LookupString(isolate(), string);
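The per-list accessor macros deleted above collapse into the single ROOT_ACCESSOR because all roots are now addressed through one typed RootIndex enum instead of per-list Heap::k...RootIndex constants. A standalone sketch of enum-class-indexed roots with illustrative entries, not V8's actual root table:

#include <cstddef>

enum class RootIndex : size_t { kFixedArrayMap, kUndefinedValue, kCount };

class RootsLike {
 public:
  void* root(RootIndex index) const {
    return roots_[static_cast<size_t>(index)];
  }

 private:
  void* roots_[static_cast<size_t>(RootIndex::kCount)] = {};
};

// One macro shape serves every list: each entry supplies (type, name,
// CamelName) and the accessor maps CamelName to RootIndex::kCamelName.
#define ROOT_ACCESSOR(type, name, CamelName)  \
  type* name(const RootsLike& roots) {        \
    return static_cast<type*>(roots.root(RootIndex::k##CamelName)); \
  }
ROOT_ACCESSOR(void, fixed_array_map, FixedArrayMap)
ROOT_ACCESSOR(void, undefined_value, UndefinedValue)
#undef ROOT_ACCESSOR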
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index c8528f9fdb..9535eb4b88 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -27,9 +27,11 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
+#include "src/objects/microtask-queue-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/scope-info.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/unicode-cache.h"
#include "src/unicode-decoder.h"
@@ -63,9 +65,9 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
bool is_turbofanned, int stack_slots,
int safepoint_table_offset, int handler_table_offset) {
DCHECK(IsAligned(code->address(), kCodeAlignment));
- DCHECK(!heap->memory_allocator()->code_range()->valid() ||
- heap->memory_allocator()->code_range()->contains(code->address()) ||
- object_size <= heap->code_space()->AreaSize());
+ DCHECK_IMPLIES(
+ !heap->memory_allocator()->code_range().is_empty(),
+ heap->memory_allocator()->code_range().contains(code->address()));
bool has_unwinding_info = desc.unwinding_info != nullptr;
@@ -287,9 +289,9 @@ Handle<PropertyArray> Factory::NewPropertyArray(int length,
return array;
}
-Handle<FixedArray> Factory::NewFixedArrayWithFiller(
- Heap::RootListIndex map_root_index, int length, Object* filler,
- PretenureFlag pretenure) {
+Handle<FixedArray> Factory::NewFixedArrayWithFiller(RootIndex map_root_index,
+ int length, Object* filler,
+ PretenureFlag pretenure) {
HeapObject* result = AllocateRawFixedArray(length, pretenure);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
Map* map = Map::cast(isolate()->heap()->root(map_root_index));
@@ -301,8 +303,8 @@ Handle<FixedArray> Factory::NewFixedArrayWithFiller(
}
template <typename T>
-Handle<T> Factory::NewFixedArrayWithMap(Heap::RootListIndex map_root_index,
- int length, PretenureFlag pretenure) {
+Handle<T> Factory::NewFixedArrayWithMap(RootIndex map_root_index, int length,
+ PretenureFlag pretenure) {
static_assert(std::is_base_of<FixedArray, T>::value,
"T must be a descendant of FixedArray");
// Zero-length case must be handled outside, where the knowledge about
@@ -313,7 +315,7 @@ Handle<T> Factory::NewFixedArrayWithMap(Heap::RootListIndex map_root_index,
}
template <typename T>
-Handle<T> Factory::NewWeakFixedArrayWithMap(Heap::RootListIndex map_root_index,
+Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
int length,
PretenureFlag pretenure) {
static_assert(std::is_base_of<WeakFixedArray, T>::value,
@@ -336,16 +338,16 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(Heap::RootListIndex map_root_index,
}
template Handle<FixedArray> Factory::NewFixedArrayWithMap<FixedArray>(
- Heap::RootListIndex, int, PretenureFlag);
+ RootIndex, int, PretenureFlag);
template Handle<DescriptorArray>
-Factory::NewWeakFixedArrayWithMap<DescriptorArray>(Heap::RootListIndex, int,
+Factory::NewWeakFixedArrayWithMap<DescriptorArray>(RootIndex, int,
PretenureFlag);
Handle<FixedArray> Factory::NewFixedArray(int length, PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
- return NewFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
+ return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
*undefined_value(), pretenure);
}
@@ -355,7 +357,7 @@ Handle<WeakFixedArray> Factory::NewWeakFixedArray(int length,
if (length == 0) return empty_weak_fixed_array();
HeapObject* result =
AllocateRawArray(WeakFixedArray::SizeFor(length), pretenure);
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kWeakFixedArrayMapRootIndex));
+ DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kWeakFixedArrayMap));
result->set_map_after_allocation(*weak_fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
array->set_length(length);
@@ -391,7 +393,7 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length,
PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
- return NewFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
+ return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
*the_hole_value(), pretenure);
}
@@ -403,7 +405,7 @@ Handle<FixedArray> Factory::NewUninitializedFixedArray(
// TODO(ulan): As an experiment this temporarily returns an initialized fixed
// array. After getting canary/performance coverage, either remove the
  // function or revert to returning an uninitialized array.
- return NewFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
+ return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
*undefined_value(), pretenure);
}
@@ -452,7 +454,7 @@ Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
Handle<ObjectBoilerplateDescription> description =
Handle<ObjectBoilerplateDescription>::cast(NewFixedArrayWithMap(
- Heap::kObjectBoilerplateDescriptionMapRootIndex, size, TENURED));
+ RootIndex::kObjectBoilerplateDescriptionMap, size, TENURED));
if (has_different_size_backing_store) {
DCHECK_IMPLIES((boilerplate == (all_properties - index_keys)),
@@ -773,7 +775,7 @@ Handle<SeqOneByteString> Factory::AllocateRawOneByteInternalizedString(
// The canonical empty_string is the only zero-length string we allow.
DCHECK_IMPLIES(
length == 0,
- isolate()->heap()->roots_[Heap::kempty_stringRootIndex] == nullptr);
+ isolate()->heap()->roots_[RootIndex::kempty_string] == nullptr);
Map* map = *one_byte_internalized_string_map();
int size = SeqOneByteString::SizeFor(length);
@@ -900,12 +902,12 @@ MaybeHandle<Map> GetInternalizedStringMap(Factory* f, Handle<String> string) {
return f->external_one_byte_internalized_string_map();
case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
return f->external_internalized_string_with_one_byte_data_map();
- case SHORT_EXTERNAL_STRING_TYPE:
- return f->short_external_internalized_string_map();
- case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
- return f->short_external_one_byte_internalized_string_map();
- case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return f->short_external_internalized_string_with_one_byte_data_map();
+ case UNCACHED_EXTERNAL_STRING_TYPE:
+ return f->uncached_external_internalized_string_map();
+ case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ return f->uncached_external_one_byte_internalized_string_map();
+ case UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return f->uncached_external_internalized_string_with_one_byte_data_map();
default:
return MaybeHandle<Map>(); // No match found.
}
@@ -1083,7 +1085,7 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
bool is_one_byte_data_in_two_byte_string = false;
if (!is_one_byte) {
// At least one of the strings uses two-byte representation so we
- // can't use the fast case code for short one-byte strings below, but
+ // can't use the fast case code for uncached one-byte strings below, but
// we can try to save memory if all chars actually fit in one-byte.
is_one_byte_data_in_two_byte_string =
left->HasOnlyOneByteChars() && right->HasOnlyOneByteChars();
@@ -1243,9 +1245,8 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
if (length == 0) return empty_string();
Handle<Map> map;
- if (resource->IsCompressible()) {
- // TODO(hajimehoshi): Rename this to 'uncached_external_one_byte_string_map'
- map = short_external_one_byte_string_map();
+ if (!resource->IsCacheable()) {
+ map = uncached_external_one_byte_string_map();
} else {
map = external_one_byte_string_map();
}
@@ -1274,10 +1275,9 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
length <= kOneByteCheckLengthLimit &&
String::IsOneByte(resource->data(), static_cast<int>(length));
Handle<Map> map;
- if (resource->IsCompressible()) {
- // TODO(hajimehoshi): Rename these to 'uncached_external_string_...'.
- map = is_one_byte ? short_external_string_with_one_byte_data_map()
- : short_external_string_map();
+ if (!resource->IsCacheable()) {
+ map = is_one_byte ? uncached_external_string_with_one_byte_data_map()
+ : uncached_external_string_map();
} else {
map = is_one_byte ? external_string_with_one_byte_data_map()
: external_string_map();
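The map choice above keys off IsCacheable(): resources that cannot cache their data get the uncached (formerly "short") external-string map variant. A trivial stand-in of the selection logic with illustrative types, not V8's:

struct ResourceLike {
  virtual ~ResourceLike() = default;
  virtual bool IsCacheable() const = 0;
};

enum class StringMapKind { kExternal, kUncachedExternal };

StringMapKind ChooseMap(const ResourceLike& resource) {
  return resource.IsCacheable() ? StringMapKind::kExternal
                                : StringMapKind::kUncachedExternal;
}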
@@ -1309,7 +1309,7 @@ Handle<ExternalOneByteString> Factory::NewNativeSourceString(
}
Handle<JSStringIterator> Factory::NewJSStringIterator(Handle<String> string) {
- Handle<Map> map(isolate()->native_context()->string_iterator_map(),
+ Handle<Map> map(isolate()->native_context()->initial_string_iterator_map(),
isolate());
Handle<String> flat_string = String::Flatten(isolate(), string);
Handle<JSStringIterator> iterator =
@@ -1355,7 +1355,7 @@ Handle<Symbol> Factory::NewPrivateFieldSymbol() {
Handle<NativeContext> Factory::NewNativeContext() {
Handle<NativeContext> context = NewFixedArrayWithMap<NativeContext>(
- Heap::kNativeContextMapRootIndex, Context::NATIVE_CONTEXT_SLOTS, TENURED);
+ RootIndex::kNativeContextMap, Context::NATIVE_CONTEXT_SLOTS, TENURED);
context->set_native_context(*context);
context->set_errors_thrown(Smi::kZero);
context->set_math_random_index(Smi::kZero);
@@ -1367,7 +1367,7 @@ Handle<Context> Factory::NewScriptContext(Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
Handle<Context> context = NewFixedArrayWithMap<Context>(
- Heap::kScriptContextMapRootIndex, scope_info->ContextLength(), TENURED);
+ RootIndex::kScriptContextMap, scope_info->ContextLength(), TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*the_hole_value());
@@ -1379,8 +1379,7 @@ Handle<Context> Factory::NewScriptContext(Handle<NativeContext> outer,
Handle<ScriptContextTable> Factory::NewScriptContextTable() {
Handle<ScriptContextTable> context_table =
NewFixedArrayWithMap<ScriptContextTable>(
- Heap::kScriptContextTableMapRootIndex,
- ScriptContextTable::kMinLength);
+ RootIndex::kScriptContextTableMap, ScriptContextTable::kMinLength);
context_table->set_used(0);
return context_table;
}
@@ -1390,7 +1389,7 @@ Handle<Context> Factory::NewModuleContext(Handle<Module> module,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
Handle<Context> context = NewFixedArrayWithMap<Context>(
- Heap::kModuleContextMapRootIndex, scope_info->ContextLength(), TENURED);
+ RootIndex::kModuleContextMap, scope_info->ContextLength(), TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*module);
@@ -1403,13 +1402,13 @@ Handle<Context> Factory::NewFunctionContext(Handle<Context> outer,
Handle<ScopeInfo> scope_info) {
int length = scope_info->ContextLength();
DCHECK_LE(Context::MIN_CONTEXT_SLOTS, length);
- Heap::RootListIndex mapRootIndex;
+ RootIndex mapRootIndex;
switch (scope_info->scope_type()) {
case EVAL_SCOPE:
- mapRootIndex = Heap::kEvalContextMapRootIndex;
+ mapRootIndex = RootIndex::kEvalContextMap;
break;
case FUNCTION_SCOPE:
- mapRootIndex = Heap::kFunctionContextMapRootIndex;
+ mapRootIndex = RootIndex::kFunctionContextMap;
break;
default:
UNREACHABLE();
@@ -1427,7 +1426,7 @@ Handle<Context> Factory::NewCatchContext(Handle<Context> previous,
Handle<Object> thrown_object) {
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
Handle<Context> context = NewFixedArrayWithMap<Context>(
- Heap::kCatchContextMapRootIndex, Context::MIN_CONTEXT_SLOTS + 1);
+ RootIndex::kCatchContextMap, Context::MIN_CONTEXT_SLOTS + 1);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*the_hole_value());
@@ -1447,7 +1446,7 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
? Handle<HeapObject>::cast(the_hole_value())
: Handle<HeapObject>::cast(extension);
Handle<Context> c = NewFixedArrayWithMap<Context>(
- Heap::kDebugEvaluateContextMapRootIndex, Context::MIN_CONTEXT_SLOTS + 2);
+ RootIndex::kDebugEvaluateContextMap, Context::MIN_CONTEXT_SLOTS + 2);
c->set_scope_info(*scope_info);
c->set_previous(*previous);
c->set_native_context(previous->native_context());
@@ -1461,7 +1460,7 @@ Handle<Context> Factory::NewWithContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension) {
Handle<Context> context = NewFixedArrayWithMap<Context>(
- Heap::kWithContextMapRootIndex, Context::MIN_CONTEXT_SLOTS);
+ RootIndex::kWithContextMap, Context::MIN_CONTEXT_SLOTS);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*extension);
@@ -1473,7 +1472,7 @@ Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), BLOCK_SCOPE);
Handle<Context> context = NewFixedArrayWithMap<Context>(
- Heap::kBlockContextMapRootIndex, scope_info->ContextLength());
+ RootIndex::kBlockContextMap, scope_info->ContextLength());
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*the_hole_value());
@@ -1485,7 +1484,7 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
int length) {
DCHECK_GE(length, Context::MIN_CONTEXT_SLOTS);
Handle<Context> context =
- NewFixedArrayWithMap<Context>(Heap::kFunctionContextMapRootIndex, length);
+ NewFixedArrayWithMap<Context>(RootIndex::kFunctionContextMap, length);
context->set_scope_info(ReadOnlyRoots(isolate()).empty_scope_info());
context->set_extension(*the_hole_value());
context->set_native_context(*native_context);
@@ -1495,8 +1494,8 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
Map* map;
switch (type) {
-#define MAKE_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
+#define MAKE_CASE(TYPE, Name, name) \
+ case TYPE: \
map = *name##_map(); \
break;
STRUCT_LIST(MAKE_CASE)
@@ -1623,6 +1622,16 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
return microtask;
}
+Handle<MicrotaskQueue> Factory::NewMicrotaskQueue() {
+  // MicrotaskQueue should be TENURED, as it outlives its Context and is
+  // typically as long-lived as the Context itself.
+ Handle<MicrotaskQueue> microtask_queue =
+ Handle<MicrotaskQueue>::cast(NewStruct(MICROTASK_QUEUE_TYPE, TENURED));
+ microtask_queue->set_queue(*empty_fixed_array());
+ microtask_queue->set_pending_microtask_count(0);
+ return microtask_queue;
+}
+
Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
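NewMicrotaskQueue() above follows the usual factory shape: allocate in long-lived (tenured) space, then initialize every field to a safe default before the handle escapes. A rough stand-in using ordinary C++ ownership, purely illustrative of that shape:

#include <memory>
#include <vector>

// Stand-in for the heap struct; field names mirror the setters above.
struct MicrotaskQueueLike {
  std::vector<void*> queue;         // starts empty, like empty_fixed_array()
  int pending_microtask_count = 0;  // explicit zero before first use
};

std::unique_ptr<MicrotaskQueueLike> NewMicrotaskQueueLike() {
  // Long-lived allocation, fully initialized before anyone can observe it.
  return std::make_unique<MicrotaskQueueLike>();
}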
@@ -1687,7 +1696,8 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
DCHECK(0 <= length && length <= Smi::kMaxValue);
int size = FixedTypedArrayBase::kHeaderSize;
HeapObject* result = AllocateRawWithImmortalMap(
- size, pretenure, isolate()->heap()->MapForFixedTypedArray(array_type));
+ size, pretenure,
+ ReadOnlyRoots(isolate()).MapForFixedTypedArray(array_type));
Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(result),
isolate());
elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
@@ -1704,7 +1714,7 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
CHECK(byte_length <= kMaxInt - FixedTypedArrayBase::kDataOffset);
size_t size =
OBJECT_POINTER_ALIGN(byte_length + FixedTypedArrayBase::kDataOffset);
- Map* map = isolate()->heap()->MapForFixedTypedArray(array_type);
+ Map* map = ReadOnlyRoots(isolate()).MapForFixedTypedArray(array_type);
AllocationAlignment alignment =
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned;
HeapObject* object = AllocateRawWithImmortalMap(static_cast<int>(size),
@@ -1778,7 +1788,7 @@ Handle<TransitionArray> Factory::NewTransitionArray(int number_of_transitions,
int slack) {
int capacity = TransitionArray::LengthFor(number_of_transitions + slack);
Handle<TransitionArray> array = NewWeakFixedArrayWithMap<TransitionArray>(
- Heap::kTransitionArrayMapRootIndex, capacity, TENURED);
+ RootIndex::kTransitionArrayMap, capacity, TENURED);
// Transition arrays are tenured. When black allocation is on we have to
// add the transition array to the list of encountered_transition_arrays.
Heap* heap = isolate()->heap();
@@ -1812,7 +1822,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
ElementsKind elements_kind,
int inobject_properties) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- DCHECK_IMPLIES(Map::IsJSObject(type) &&
+ DCHECK_IMPLIES(InstanceTypeChecker::IsJSObject(type) &&
!Map::CanHaveFastTransitionableElementsKind(type),
IsDictionaryElementsKind(elements_kind) ||
IsTerminalElementsKind(elements_kind));
@@ -2480,12 +2490,12 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
}
Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
- return NewFixedArrayWithMap<ScopeInfo>(Heap::kScopeInfoMapRootIndex, length,
+ return NewFixedArrayWithMap<ScopeInfo>(RootIndex::kScopeInfoMap, length,
TENURED);
}
Handle<ModuleInfo> Factory::NewModuleInfo() {
- return NewFixedArrayWithMap<ModuleInfo>(Heap::kModuleInfoMapRootIndex,
+ return NewFixedArrayWithMap<ModuleInfo>(RootIndex::kModuleInfoMap,
ModuleInfo::kLength, TENURED);
}
@@ -2665,9 +2675,9 @@ Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
heap->ZapCodeObject(result->address(), size);
result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
DCHECK(IsAligned(result->address(), kCodeAlignment));
- DCHECK(!heap->memory_allocator()->code_range()->valid() ||
- heap->memory_allocator()->code_range()->contains(result->address()) ||
- static_cast<int>(size) <= heap->code_space()->AreaSize());
+ DCHECK_IMPLIES(
+ !heap->memory_allocator()->code_range().is_empty(),
+ heap->memory_allocator()->code_range().contains(result->address()));
return handle(Code::cast(result), isolate());
}
@@ -2729,10 +2739,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
if (FLAG_verify_heap) new_code->ObjectVerify(isolate());
#endif
DCHECK(IsAligned(new_code->address(), kCodeAlignment));
- DCHECK(
- !heap->memory_allocator()->code_range()->valid() ||
- heap->memory_allocator()->code_range()->contains(new_code->address()) ||
- obj_size <= heap->code_space()->AreaSize());
+ DCHECK_IMPLIES(
+ !heap->memory_allocator()->code_range().is_empty(),
+ heap->memory_allocator()->code_range().contains(new_code->address()));
return new_code;
}
@@ -3095,26 +3104,6 @@ Handle<JSSet> Factory::NewJSSet() {
return js_set;
}
-Handle<JSMapIterator> Factory::NewJSMapIterator(Handle<Map> map,
- Handle<OrderedHashMap> table,
- int index) {
- Handle<JSMapIterator> result =
- Handle<JSMapIterator>::cast(NewJSObjectFromMap(map));
- result->set_table(*table);
- result->set_index(Smi::FromInt(index));
- return result;
-}
-
-Handle<JSSetIterator> Factory::NewJSSetIterator(Handle<Map> map,
- Handle<OrderedHashSet> table,
- int index) {
- Handle<JSSetIterator> result =
- Handle<JSSetIterator>::cast(NewJSObjectFromMap(map));
- result->set_table(*table);
- result->set_index(Smi::FromInt(index));
- return result;
-}
-
void Factory::TypeAndSizeForElementsKind(ElementsKind kind,
ExternalArrayType* array_type,
size_t* element_size) {
@@ -3181,26 +3170,16 @@ JSFunction* GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
void SetupArrayBufferView(i::Isolate* isolate,
i::Handle<i::JSArrayBufferView> obj,
i::Handle<i::JSArrayBuffer> buffer,
- size_t byte_offset, size_t byte_length,
- PretenureFlag pretenure = NOT_TENURED) {
- DCHECK(byte_offset + byte_length <=
- static_cast<size_t>(buffer->byte_length()->Number()));
-
+ size_t byte_offset, size_t byte_length) {
+ DCHECK_LE(byte_offset + byte_length, buffer->byte_length());
DCHECK_EQ(obj->GetEmbedderFieldCount(),
v8::ArrayBufferView::kEmbedderFieldCount);
for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) {
obj->SetEmbedderField(i, Smi::kZero);
}
-
obj->set_buffer(*buffer);
-
- i::Handle<i::Object> byte_offset_object =
- isolate->factory()->NewNumberFromSize(byte_offset, pretenure);
- obj->set_byte_offset(*byte_offset_object);
-
- i::Handle<i::Object> byte_length_object =
- isolate->factory()->NewNumberFromSize(byte_length, pretenure);
- obj->set_byte_length(*byte_length_object);
+ obj->set_byte_offset(byte_offset);
+ obj->set_byte_length(byte_length);
}
} // namespace
@@ -3237,8 +3216,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
// TODO(7881): Smi length check
CHECK(length <= static_cast<size_t>(Smi::kMaxValue));
size_t byte_length = length * element_size;
- SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length,
- pretenure);
+ SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length);
Handle<Object> length_object = NewNumberFromSize(length, pretenure);
obj->set_length(*length_object);
@@ -3271,13 +3249,9 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
CHECK(number_of_elements <= static_cast<size_t>(Smi::kMaxValue));
size_t byte_length = number_of_elements * element_size;
- obj->set_byte_offset(Smi::kZero);
- i::Handle<i::Object> byte_length_object =
- NewNumberFromSize(byte_length, pretenure);
- obj->set_byte_length(*byte_length_object);
- Handle<Object> length_object =
- NewNumberFromSize(number_of_elements, pretenure);
- obj->set_length(*length_object);
+ obj->set_byte_offset(0);
+ obj->set_byte_length(byte_length);
+ obj->set_length(Smi::FromIntptr(static_cast<intptr_t>(number_of_elements)));
Handle<JSArrayBuffer> buffer =
NewJSArrayBuffer(SharedFlag::kNotShared, pretenure);
@@ -3757,7 +3731,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
Handle<WeakFixedArray> cache = Handle<WeakFixedArray>::cast(maybe_cache);
MaybeObject* result = cache->Get(cache_index);
HeapObject* heap_object;
- if (result->ToWeakHeapObject(&heap_object)) {
+ if (result->GetHeapObjectIfWeak(&heap_object)) {
Map* map = Map::cast(heap_object);
DCHECK(!map->is_dictionary_map());
return handle(map, isolate());
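// Illustrative sketch of the renamed MaybeObject accessor used above:
// GetHeapObjectIfWeak() succeeds only for a still-live *weak* reference,
// so one call distinguishes a usable cache hit from a slot the GC has
// cleared (or that holds something other than a weak heap object).
HeapObject* cached;
if (cache->Get(cache_index)->GetHeapObjectIfWeak(&cached)) {
  // Cache hit: the Map is still alive and can be reused.
} else {
  // Miss: cleared by GC or never filled; recompute and re-cache.
}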
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index cd57b5bf87..8c6d32090e 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -44,6 +44,7 @@ class JSGeneratorObject;
class JSMap;
class JSMapIterator;
class JSModuleNamespace;
+class JSPromise;
class JSProxy;
class JSSet;
class JSSetIterator;
@@ -56,6 +57,7 @@ class PreParsedScopeData;
class PromiseResolveThenableJobTask;
class RegExpMatchInfo;
class ScriptContextTable;
+class StackFrameInfo;
class StoreHandler;
class TemplateObjectDescription;
class UncompiledDataWithoutPreParsedScope;
@@ -107,14 +109,13 @@ class V8_EXPORT_PRIVATE Factory {
// Allocates a fixed array-like object with given map and initialized with
// undefined values.
template <typename T = FixedArray>
- Handle<T> NewFixedArrayWithMap(Heap::RootListIndex map_root_index, int length,
+ Handle<T> NewFixedArrayWithMap(RootIndex map_root_index, int length,
PretenureFlag pretenure = NOT_TENURED);
// Allocates a weak fixed array-like object with given map and initialized
// with undefined values.
template <typename T = WeakFixedArray>
- Handle<T> NewWeakFixedArrayWithMap(Heap::RootListIndex map_root_index,
- int length,
+ Handle<T> NewWeakFixedArrayWithMap(RootIndex map_root_index, int length,
PretenureFlag pretenure = NOT_TENURED);
// Allocates a fixed array initialized with undefined values.
@@ -439,6 +440,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
Handle<JSReceiver> thenable, Handle<Context> context);
+ Handle<MicrotaskQueue> NewMicrotaskQueue();
+
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
PretenureFlag pretenure = NOT_TENURED);
@@ -667,13 +670,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSMap> NewJSMap();
Handle<JSSet> NewJSSet();
- Handle<JSMapIterator> NewJSMapIterator(Handle<Map> map,
- Handle<OrderedHashMap> table,
- int index);
- Handle<JSSetIterator> NewJSSetIterator(Handle<Map> map,
- Handle<OrderedHashSet> table,
- int index);
-
// Allocates a bound function.
MaybeHandle<JSBoundFunction> NewJSBoundFunction(
Handle<JSReceiver> target_function, Handle<Object> bound_this,
@@ -828,45 +824,12 @@ class V8_EXPORT_PRIVATE Factory {
Handle<String> NumberToString(Handle<Object> number, bool check_cache = true);
Handle<String> NumberToString(Smi* number, bool check_cache = true);
- inline Handle<String> Uint32ToString(uint32_t value,
- bool check_cache = false);
+ inline Handle<String> Uint32ToString(uint32_t value, bool check_cache = true);
-#define ROOT_ACCESSOR(type, name, camel_name) inline Handle<type> name();
+#define ROOT_ACCESSOR(type, name, CamelName) inline Handle<type> name();
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Handle<Map> name##_map();
- STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
-#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
- inline Handle<Map> name##_map();
- ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
-#undef ALLOCATION_SITE_MAP_ACCESSOR
-
-#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
- inline Handle<Map> name##_map();
- DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
-#undef DATA_HANDLER_MAP_ACCESSOR
-
-#define STRING_ACCESSOR(name, str) inline Handle<String> name();
- INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name) inline Handle<Symbol> name();
- PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, description) inline Handle<Symbol> name();
- PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
- WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
- inline Handle<AccessorInfo> accessor_name##_accessor();
- ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
-#undef ACCESSOR_INFO_ACCESSOR
-
// Allocates a new SharedFunctionInfo object.
Handle<SharedFunctionInfo> NewSharedFunctionInfoForApiFunction(
MaybeHandle<String> maybe_name,
@@ -961,6 +924,7 @@ class V8_EXPORT_PRIVATE Factory {
// Downcast to the privately inherited sub-class using c-style casts to
// avoid undefined behavior (as static_cast cannot cast across private
// bases).
+ // NOLINTNEXTLINE (google-readability-casting)
return (Isolate*)this; // NOLINT(readability/casting)
}
@@ -975,7 +939,7 @@ class V8_EXPORT_PRIVATE Factory {
HeapObject* AllocateRawArray(int size, PretenureFlag pretenure);
HeapObject* AllocateRawFixedArray(int length, PretenureFlag pretenure);
HeapObject* AllocateRawWeakArrayList(int length, PretenureFlag pretenure);
- Handle<FixedArray> NewFixedArrayWithFiller(Heap::RootListIndex map_root_index,
+ Handle<FixedArray> NewFixedArrayWithFiller(RootIndex map_root_index,
int length, Object* filler,
PretenureFlag pretenure);
@@ -1054,7 +1018,7 @@ class NewFunctionArgs final {
Handle<Map> GetMap(Isolate* isolate) const;
private:
- NewFunctionArgs() {} // Use the static factory constructors.
+ NewFunctionArgs() = default; // Use the static factory constructors.
void SetShouldCreateAndSetInitialMap();
void SetShouldSetPrototype();
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 5ee7186c6a..7d33c68ad1 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -1093,7 +1093,7 @@ void GCTracer::AddBackgroundScopeSample(
}
}
-void GCTracer::RecordMarkCompactHistograms(HistogramTimer* gc_timer) {
+void GCTracer::RecordGCPhasesHistograms(HistogramTimer* gc_timer) {
Counters* counters = heap_->isolate()->counters();
if (gc_timer == counters->gc_finalize()) {
DCHECK_EQ(Scope::FIRST_TOP_MC_SCOPE, Scope::MC_CLEAR);
@@ -1112,6 +1112,11 @@ void GCTracer::RecordMarkCompactHistograms(HistogramTimer* gc_timer) {
counters->gc_finalize_sweep()->AddSample(
static_cast<int>(current_.scopes[Scope::MC_SWEEP]));
DCHECK_EQ(Scope::LAST_TOP_MC_SCOPE, Scope::MC_SWEEP);
+ } else if (gc_timer == counters->gc_scavenger()) {
+ counters->gc_scavenger_scavenge_main()->AddSample(
+ static_cast<int>(current_.scopes[Scope::SCAVENGER_SCAVENGE_PARALLEL]));
+ counters->gc_scavenger_scavenge_roots()->AddSample(
+ static_cast<int>(current_.scopes[Scope::SCAVENGER_SCAVENGE_ROOTS]));
}
}
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 62e077be50..bf49586d57 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -321,7 +321,7 @@ class V8_EXPORT_PRIVATE GCTracer {
void AddBackgroundScopeSample(BackgroundScope::ScopeId scope, double duration,
RuntimeCallCounter* runtime_call_counter);
- void RecordMarkCompactHistograms(HistogramTimer* gc_timer);
+ void RecordGCPhasesHistograms(HistogramTimer* gc_timer);
private:
FRIEND_TEST(GCTracer, AverageSpeed);
@@ -339,6 +339,7 @@ class V8_EXPORT_PRIVATE GCTracer {
FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
FRIEND_TEST(GCTracerTest, MutatorUtilization);
FRIEND_TEST(GCTracerTest, RecordMarkCompactHistograms);
+ FRIEND_TEST(GCTracerTest, RecordScavengerHistograms);
struct BackgroundCounter {
double total_duration_ms;
diff --git a/deps/v8/src/heap/heap-controller.cc b/deps/v8/src/heap/heap-controller.cc
index 485b22902a..d515199518 100644
--- a/deps/v8/src/heap/heap-controller.cc
+++ b/deps/v8/src/heap/heap-controller.cc
@@ -10,16 +10,16 @@ namespace internal {
// Given GC speed in bytes per ms, the allocation throughput in bytes per ms
// (mutator speed), this function returns the heap growing factor that will
-// achieve the kTargetMutatorUtilisation if the GC speed and the mutator speed
+// achieve the target_mutator_utilization_ if the GC speed and the mutator speed
// remain the same until the next GC.
//
// For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
// TM / (TM + TG), where TM is the time spent in the mutator and TG is the
// time spent in the garbage collector.
//
-// Let MU be kTargetMutatorUtilisation, the desired mutator utilization for the
-// time-frame from the end of the current GC to the end of the next GC. Based
-// on the MU we can compute the heap growing factor F as
+// Let MU be target_mutator_utilization_, the desired mutator utilization for
+// the time-frame from the end of the current GC to the end of the next GC.
+// Based on the MU we can compute the heap growing factor F as
//
// F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
//
@@ -49,69 +49,44 @@ namespace internal {
// F = R * (1 - MU) / (R * (1 - MU) - MU)
double MemoryController::GrowingFactor(double gc_speed, double mutator_speed,
double max_factor) {
- DCHECK_LE(kMinGrowingFactor, max_factor);
- DCHECK_GE(kMaxGrowingFactor, max_factor);
+ DCHECK_LE(min_growing_factor_, max_factor);
+ DCHECK_GE(max_growing_factor_, max_factor);
if (gc_speed == 0 || mutator_speed == 0) return max_factor;
const double speed_ratio = gc_speed / mutator_speed;
- const double a = speed_ratio * (1 - kTargetMutatorUtilization);
- const double b =
- speed_ratio * (1 - kTargetMutatorUtilization) - kTargetMutatorUtilization;
+ const double a = speed_ratio * (1 - target_mutator_utilization_);
+ const double b = speed_ratio * (1 - target_mutator_utilization_) -
+ target_mutator_utilization_;
// The factor is a / b, but we need to check for small b first.
double factor = (a < b * max_factor) ? a / b : max_factor;
factor = Min(factor, max_factor);
- factor = Max(factor, kMinGrowingFactor);
- return factor;
-}
-
-double MemoryController::MaxGrowingFactor(size_t curr_max_size) {
- const double min_small_factor = 1.3;
- const double max_small_factor = 2.0;
- const double high_factor = 4.0;
-
- size_t max_size_in_mb = curr_max_size / MB;
- max_size_in_mb = Max(max_size_in_mb, kMinSize);
-
- // If we are on a device with lots of memory, we allow a high heap
- // growing factor.
- if (max_size_in_mb >= kMaxSize) {
- return high_factor;
- }
-
- DCHECK_GE(max_size_in_mb, kMinSize);
- DCHECK_LT(max_size_in_mb, kMaxSize);
-
- // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
- double factor = (max_size_in_mb - kMinSize) *
- (max_small_factor - min_small_factor) /
- (kMaxSize - kMinSize) +
- min_small_factor;
+ factor = Max(factor, min_growing_factor_);
return factor;
}
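// Worked example of the formula above (illustrative numbers only), using
// the HeapController defaults MU = 0.97 and max_factor = 4.0:
double ExampleGrowingFactor() {
  const double mu = 0.97;
  const double speed_ratio = 100.0;         // gc_speed / mutator_speed
  const double a = speed_ratio * (1 - mu);  // 3.0
  const double b = a - mu;                  // 2.03
  return a / b;  // ~1.48, i.e. let the heap grow by roughly 48%
}
// A slower collector, speed_ratio = 40.0, gives a = 1.2 and b = 0.23, so
// a / b ~ 5.2, which the clamping above cuts back to max_factor (4.0).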
size_t MemoryController::CalculateAllocationLimit(
- size_t curr_size, size_t max_size, double gc_speed, double mutator_speed,
- size_t new_space_capacity, Heap::HeapGrowingMode growing_mode) {
- double max_factor = MaxGrowingFactor(max_size);
+ size_t curr_size, size_t max_size, double max_factor, double gc_speed,
+ double mutator_speed, size_t new_space_capacity,
+ Heap::HeapGrowingMode growing_mode) {
double factor = GrowingFactor(gc_speed, mutator_speed, max_factor);
if (FLAG_trace_gc_verbose) {
heap_->isolate()->PrintWithTimestamp(
"%s factor %.1f based on mu=%.3f, speed_ratio=%.f "
"(gc=%.f, mutator=%.f)\n",
- ControllerName(), factor, kTargetMutatorUtilization,
+ ControllerName(), factor, target_mutator_utilization_,
gc_speed / mutator_speed, gc_speed, mutator_speed);
}
if (growing_mode == Heap::HeapGrowingMode::kConservative ||
growing_mode == Heap::HeapGrowingMode::kSlow) {
- factor = Min(factor, kConservativeGrowingFactor);
+ factor = Min(factor, conservative_growing_factor_);
}
if (growing_mode == Heap::HeapGrowingMode::kMinimal) {
- factor = kMinGrowingFactor;
+ factor = min_growing_factor_;
}
if (FLAG_heap_growing_percent > 0) {
@@ -147,5 +122,30 @@ size_t MemoryController::MinimumAllocationLimitGrowingStep(
: kRegularAllocationLimitGrowingStep);
}
+double HeapController::MaxGrowingFactor(size_t curr_max_size) {
+ const double min_small_factor = 1.3;
+ const double max_small_factor = 2.0;
+ const double high_factor = 4.0;
+
+ size_t max_size_in_mb = curr_max_size / MB;
+ max_size_in_mb = Max(max_size_in_mb, kMinSize);
+
+ // If we are on a device with lots of memory, we allow a high heap
+ // growing factor.
+ if (max_size_in_mb >= kMaxSize) {
+ return high_factor;
+ }
+
+ DCHECK_GE(max_size_in_mb, kMinSize);
+ DCHECK_LT(max_size_in_mb, kMaxSize);
+
+ // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
+ double factor = (max_size_in_mb - kMinSize) *
+ (max_small_factor - min_small_factor) /
+ (kMaxSize - kMinSize) +
+ min_small_factor;
+ return factor;
+}
+
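// Worked example for the interpolation above (illustrative; assumes a
// 64-bit build where Heap::kPointerMultiplier is 2, i.e. kMinSize =
// 256 MB and kMaxSize = 2048 MB). A 1024 MB max heap yields
//   (1024 - 256) * (2.0 - 1.3) / (2048 - 256) + 1.3
//     = 768 * 0.7 / 1792 + 1.3
//     = 0.3 + 1.3
//     = 1.6,
// so mid-sized devices land between the 1.3 and 2.0 endpoints, while
// devices at or above kMaxSize jump straight to high_factor (4.0).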
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap-controller.h b/deps/v8/src/heap/heap-controller.h
index 8aae46c279..f8625ee963 100644
--- a/deps/v8/src/heap/heap-controller.h
+++ b/deps/v8/src/heap/heap-controller.h
@@ -18,20 +18,18 @@ class V8_EXPORT_PRIVATE MemoryController {
MemoryController(Heap* heap, double min_growing_factor,
double max_growing_factor,
double conservative_growing_factor,
- double target_mutator_utilization, size_t min_size,
- size_t max_size)
+ double target_mutator_utilization)
: heap_(heap),
- kMinGrowingFactor(min_growing_factor),
- kMaxGrowingFactor(max_growing_factor),
- kConservativeGrowingFactor(conservative_growing_factor),
- kTargetMutatorUtilization(target_mutator_utilization),
- kMinSize(min_size),
- kMaxSize(max_size) {}
- virtual ~MemoryController() {}
+ min_growing_factor_(min_growing_factor),
+ max_growing_factor_(max_growing_factor),
+ conservative_growing_factor_(conservative_growing_factor),
+ target_mutator_utilization_(target_mutator_utilization) {}
+ virtual ~MemoryController() = default;
// Computes the allocation limit to trigger the next garbage collection.
size_t CalculateAllocationLimit(size_t curr_size, size_t max_size,
- double gc_speed, double mutator_speed,
+ double max_factor, double gc_speed,
+ double mutator_speed,
size_t new_space_capacity,
Heap::HeapGrowingMode growing_mode);
@@ -41,18 +39,13 @@ class V8_EXPORT_PRIVATE MemoryController {
protected:
double GrowingFactor(double gc_speed, double mutator_speed,
double max_factor);
- double MaxGrowingFactor(size_t curr_max_size);
virtual const char* ControllerName() = 0;
Heap* const heap_;
-
- const double kMinGrowingFactor;
- const double kMaxGrowingFactor;
- const double kConservativeGrowingFactor;
- const double kTargetMutatorUtilization;
- // Sizes are in MB.
- const size_t kMinSize;
- const size_t kMaxSize;
+ const double min_growing_factor_;
+ const double max_growing_factor_;
+ const double conservative_growing_factor_;
+ const double target_mutator_utilization_;
FRIEND_TEST(HeapControllerTest, HeapGrowingFactor);
FRIEND_TEST(HeapControllerTest, MaxHeapGrowingFactor);
@@ -60,18 +53,18 @@ class V8_EXPORT_PRIVATE MemoryController {
FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
};
-class HeapController : public MemoryController {
+class V8_EXPORT_PRIVATE HeapController : public MemoryController {
public:
- explicit HeapController(Heap* heap)
- : MemoryController(heap, 1.1, 4.0, 1.3, 0.97, kMinHeapSize,
- kMaxHeapSize) {}
-
// Sizes are in MB.
- static const size_t kMinHeapSize = 128 * Heap::kPointerMultiplier;
- static const size_t kMaxHeapSize = 1024 * Heap::kPointerMultiplier;
+ static constexpr size_t kMinSize = 128 * Heap::kPointerMultiplier;
+ static constexpr size_t kMaxSize = 1024 * Heap::kPointerMultiplier;
+
+ explicit HeapController(Heap* heap)
+ : MemoryController(heap, 1.1, 4.0, 1.3, 0.97) {}
+ double MaxGrowingFactor(size_t curr_max_size);
protected:
- const char* ControllerName() { return "HeapController"; }
+ const char* ControllerName() override { return "HeapController"; }
};
} // namespace internal
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 62f07ea322..65b791a42f 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -13,6 +13,7 @@
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
+#include "src/base/atomic-utils.h"
#include "src/base/platform/platform.h"
#include "src/counters-inl.h"
#include "src/feedback-vector.h"
@@ -24,9 +25,11 @@
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/literal-objects.h"
+#include "src/objects/microtask-queue-inl.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
#include "src/profiler/heap-profiler.h"
@@ -52,33 +55,20 @@ HeapObject* AllocationResult::ToObjectChecked() {
return HeapObject::cast(object_);
}
-#define ROOT_ACCESSOR(type, name, camel_name) \
- type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
+#define ROOT_ACCESSOR(type, name, CamelName) \
+ type* Heap::name() { return type::cast(roots_[RootIndex::k##CamelName]); }
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
- Map* Heap::name##_map() { \
- return Map::cast(roots_[k##Name##Size##MapRootIndex]); \
- }
-DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
-#undef DATA_HANDLER_MAP_ACCESSOR
-
-#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
- AccessorInfo* Heap::accessor_name##_accessor() { \
- return AccessorInfo::cast(roots_[k##AccessorName##AccessorRootIndex]); \
- }
-ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
-#undef ACCESSOR_INFO_ACCESSOR
-
-#define ROOT_ACCESSOR(type, name, camel_name) \
- void Heap::set_##name(type* value) { \
- /* The deserializer makes use of the fact that these common roots are */ \
- /* never in new space and never on a page that is being compacted. */ \
- DCHECK(!deserialization_complete() || \
- RootCanBeWrittenAfterInitialization(k##camel_name##RootIndex)); \
- DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
- roots_[k##camel_name##RootIndex] = value; \
+#define ROOT_ACCESSOR(type, name, CamelName) \
+ void Heap::set_##name(type* value) { \
+ /* The deserializer makes use of the fact that these common roots are */ \
+ /* never in new space and never on a page that is being compacted. */ \
+ DCHECK(!deserialization_complete() || \
+ RootCanBeWrittenAfterInitialization(RootIndex::k##CamelName)); \
+ DCHECK_IMPLIES(static_cast<int>(RootIndex::k##CamelName) < kOldSpaceRoots, \
+ !InNewSpace(value)); \
+ roots_[RootIndex::k##CamelName] = value; \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
@@ -336,8 +326,7 @@ bool Heap::InNewSpace(Object* object) {
// static
bool Heap::InNewSpace(MaybeObject* object) {
HeapObject* heap_object;
- return object->ToStrongOrWeakHeapObject(&heap_object) &&
- InNewSpace(heap_object);
+ return object->GetHeapObject(&heap_object) && InNewSpace(heap_object);
}
// static
@@ -365,8 +354,7 @@ bool Heap::InFromSpace(Object* object) {
// static
bool Heap::InFromSpace(MaybeObject* object) {
HeapObject* heap_object;
- return object->ToStrongOrWeakHeapObject(&heap_object) &&
- InFromSpace(heap_object);
+ return object->GetHeapObject(&heap_object) && InFromSpace(heap_object);
}
// static
@@ -384,8 +372,7 @@ bool Heap::InToSpace(Object* object) {
// static
bool Heap::InToSpace(MaybeObject* object) {
HeapObject* heap_object;
- return object->ToStrongOrWeakHeapObject(&heap_object) &&
- InToSpace(heap_object);
+ return object->GetHeapObject(&heap_object) && InToSpace(heap_object);
}
// static
@@ -581,6 +568,19 @@ int Heap::MaxNumberToStringCacheSize() const {
// of entries.
return static_cast<int>(number_string_cache_size * 2);
}
+
+void Heap::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount) {
+ base::CheckedIncrement(&backing_store_bytes_, amount);
+ // TODO(mlippautz): Implement interrupt for global memory allocations that can
+ // trigger garbage collections.
+}
+
+void Heap::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount) {
+ base::CheckedDecrement(&backing_store_bytes_, amount);
+}
+
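// Hedged sketch of the helpers used above; the real definitions live in
// src/base/atomic-utils.h (newly included by this patch) and may differ
// in detail. The intent is an overflow-checked relaxed atomic update:
#include <atomic>
#include <cassert>
template <typename T>
void CheckedIncrementSketch(std::atomic<T>* counter, T amount) {
  T old_value = counter->fetch_add(amount, std::memory_order_relaxed);
  assert(old_value + amount >= old_value);  // the counter must not wrap
}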
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
: heap_(isolate->heap()) {
heap_->always_allocate_scope_count_++;
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index 1e4550679c..b20e65d1f1 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -96,7 +96,7 @@ inline void GenerationalBarrier(HeapObject* object, Object** slot,
inline void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
MaybeObject* value) {
HeapObject* value_heap_object;
- if (!value->ToStrongOrWeakHeapObject(&value_heap_object)) return;
+ if (!value->GetHeapObject(&value_heap_object)) return;
heap_internals::GenerationalBarrierInternal(
object, reinterpret_cast<Address>(slot), value_heap_object);
}
@@ -129,7 +129,7 @@ inline void MarkingBarrier(HeapObject* object, Object** slot, Object* value) {
inline void MarkingBarrier(HeapObject* object, MaybeObject** slot,
MaybeObject* value) {
HeapObject* value_heap_object;
- if (!value->ToStrongOrWeakHeapObject(&value_heap_object)) return;
+ if (!value->GetHeapObject(&value_heap_object)) return;
heap_internals::MarkingBarrierInternal(
object, reinterpret_cast<Address>(slot), value_heap_object);
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 2ec30635be..b509d21142 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -33,7 +33,6 @@
#include "src/heap/heap-controller.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking.h"
-#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
@@ -108,12 +107,7 @@ bool Heap::GCCallbackTuple::operator==(
}
Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
- const Heap::GCCallbackTuple& other) {
- callback = other.callback;
- gc_type = other.gc_type;
- data = other.data;
- return *this;
-}
+ const Heap::GCCallbackTuple& other) = default;
struct Heap::StrongRootsList {
Object** start;
@@ -135,112 +129,17 @@ class IdleScavengeObserver : public AllocationObserver {
};
Heap::Heap()
- : external_memory_(0),
- external_memory_limit_(kExternalAllocationSoftLimit),
- external_memory_at_last_mark_compact_(0),
- external_memory_concurrently_freed_(0),
- isolate_(nullptr),
- code_range_size_(0),
- // semispace_size_ should be a power of 2 and old_generation_size_ should
- // be a multiple of Page::kPageSize.
- max_semi_space_size_(8 * (kPointerSize / 4) * MB),
- initial_semispace_size_(kMinSemiSpaceSizeInKB * KB),
- max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
- initial_max_old_generation_size_(max_old_generation_size_),
+ : initial_max_old_generation_size_(max_old_generation_size_),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
- old_generation_size_configured_(false),
- // Variables set based on semispace_size_ and old_generation_size_ in
- // ConfigureHeap.
- // Will be 4 * reserved_semispace_size_ to ensure that young
- // generation can be aligned to its size.
- maximum_committed_(0),
- survived_since_last_expansion_(0),
- survived_last_scavenge_(0),
- always_allocate_scope_count_(0),
memory_pressure_level_(MemoryPressureLevel::kNone),
- contexts_disposed_(0),
- number_of_disposed_maps_(0),
- new_space_(nullptr),
- old_space_(nullptr),
- code_space_(nullptr),
- map_space_(nullptr),
- lo_space_(nullptr),
- new_lo_space_(nullptr),
- read_only_space_(nullptr),
- write_protect_code_memory_(false),
- code_space_memory_modification_scope_depth_(0),
- gc_state_(NOT_IN_GC),
- gc_post_processing_depth_(0),
- allocations_count_(0),
- raw_allocations_hash_(0),
- stress_marking_observer_(nullptr),
- stress_scavenge_observer_(nullptr),
- allocation_step_in_progress_(false),
- max_marking_limit_reached_(0.0),
- ms_count_(0),
- gc_count_(0),
- consecutive_ineffective_mark_compacts_(0),
- mmap_region_base_(0),
- remembered_unmapped_pages_index_(0),
old_generation_allocation_limit_(initial_old_generation_size_),
- inline_allocation_disabled_(false),
- tracer_(nullptr),
- promoted_objects_size_(0),
- promotion_ratio_(0),
- semi_space_copied_object_size_(0),
- previous_semi_space_copied_object_size_(0),
- semi_space_copied_rate_(0),
- nodes_died_in_new_space_(0),
- nodes_copied_in_new_space_(0),
- nodes_promoted_(0),
- maximum_size_scavenges_(0),
- last_idle_notification_time_(0.0),
- last_gc_time_(0.0),
- mark_compact_collector_(nullptr),
- minor_mark_compact_collector_(nullptr),
- array_buffer_collector_(nullptr),
- memory_allocator_(nullptr),
- store_buffer_(nullptr),
- incremental_marking_(nullptr),
- concurrent_marking_(nullptr),
- gc_idle_time_handler_(nullptr),
- memory_reducer_(nullptr),
- live_object_stats_(nullptr),
- dead_object_stats_(nullptr),
- scavenge_job_(nullptr),
- parallel_scavenge_semaphore_(0),
- idle_scavenge_observer_(nullptr),
- new_space_allocation_counter_(0),
- old_generation_allocation_counter_at_last_gc_(0),
- old_generation_size_at_last_gc_(0),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
- is_marking_flag_(false),
- ring_buffer_full_(false),
- ring_buffer_end_(0),
- configured_(false),
- current_gc_flags_(Heap::kNoGCFlags),
current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
- external_string_table_(this),
- gc_callbacks_depth_(0),
- deserialization_complete_(false),
- strong_roots_list_(nullptr),
- heap_iterator_depth_(0),
- local_embedder_heap_tracer_(nullptr),
- fast_promotion_mode_(false),
- force_oom_(false),
- delay_sweeper_tasks_for_testing_(false),
- pending_layout_change_object_(nullptr),
- unprotected_memory_chunks_registry_enabled_(false)
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- ,
- allocation_timeout_(0)
-#endif // V8_ENABLE_ALLOCATION_TIMEOUT
-{
+ external_string_table_(this) {
// Ensure max_old_generation_size_ is a multiple of Page::kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
- memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(nullptr);
set_allocation_sites_list(Smi::kZero);
// Put a dummy entry in the remembered pages so we can find the list the
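// Illustrative only: the C++11 idiom behind this constructor shrink. The
// deleted defaults presumably move to in-class initializers in heap.h, so
// the mem-initializer list keeps only values derived from other state:
#include <cstddef>
class ExampleHeap {
 public:
  ExampleHeap() : limit_(initial_size_) {}  // ordering-sensitive init only
 private:
  static constexpr size_t kExampleMB = 1024 * 1024;
  size_t initial_size_ = 700 * kExampleMB;  // default lives at declaration
  size_t limit_;
};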
@@ -259,8 +158,8 @@ size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
old_space_physical_memory_factor *
kPointerMultiplier);
- return Max(Min(computed_size, HeapController::kMaxHeapSize),
- HeapController::kMinHeapSize);
+ return Max(Min(computed_size, HeapController::kMaxSize),
+ HeapController::kMinSize);
}
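// Worked example (illustrative; assumes old_space_physical_memory_factor
// is 4 and kPointerMultiplier is 2, as on a 64-bit build): 8 GB of
// physical memory computes to
//   8192 MB / 4 * 2 = 4096 MB,
// which the Max/Min clamp above reduces to HeapController::kMaxSize
// (2048 MB); a 256 MB device computes 128 MB and is raised to kMinSize
// (256 MB).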
size_t Heap::Capacity() {
@@ -477,6 +376,8 @@ void Heap::PrintShortHeapStatistics() {
CommittedMemoryOfHeapAndUnmapper() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
external_memory_ / KB);
+ PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
+ backing_store_bytes_ / KB);
PrintIsolate(isolate_, "External memory global %zu KB\n",
external_memory_callback_() / KB);
PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
@@ -531,7 +432,7 @@ bool Heap::IsRetainingPathTarget(HeapObject* object,
MaybeObject* object_to_check = HeapObjectReference::Weak(object);
for (int i = 0; i < length; i++) {
MaybeObject* target = targets->Get(i);
- DCHECK(target->IsWeakOrClearedHeapObject());
+ DCHECK(target->IsWeakOrCleared());
if (target == object_to_check) {
DCHECK(retaining_path_target_option_.count(i));
*option = retaining_path_target_option_[i];
@@ -705,7 +606,7 @@ const char* Heap::GetSpaceName(int idx) {
}
void Heap::SetRootCodeStubs(SimpleNumberDictionary* value) {
- roots_[kCodeStubsRootIndex] = value;
+ roots_[RootIndex::kCodeStubs] = value;
}
void Heap::RepairFreeListsAfterDeserialization() {
@@ -1219,13 +1120,16 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
isolate()->ClearSerializerData();
- set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
+ set_current_gc_flags(kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
+ const v8::GCCallbackFlags callback_flags =
+ gc_reason == GarbageCollectionReason::kLowMemoryNotification
+ ? v8::kGCCallbackFlagForced
+ : v8::kGCCallbackFlagCollectAllAvailableGarbage;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_SPACE, gc_reason,
- v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
+ if (!CollectGarbage(OLD_SPACE, gc_reason, callback_flags) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
}
@@ -1234,7 +1138,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
UncommitFromSpace();
- memory_allocator()->unmapper()->EnsureUnmappingCompleted();
+ EagerlyFreeExternalMemory();
if (FLAG_trace_duplicate_threshold_kb) {
std::map<int, std::vector<HeapObject*>> objects_by_size;
@@ -1259,6 +1163,15 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
}
}
+void Heap::PreciseCollectAllGarbage(int flags,
+ GarbageCollectionReason gc_reason,
+ const GCCallbackFlags gc_callback_flags) {
+ if (!incremental_marking()->IsStopped()) {
+ FinalizeIncrementalMarkingAtomically(gc_reason);
+ }
+ CollectAllGarbage(flags, gc_reason, gc_callback_flags);
+}
+
void Heap::ReportExternalMemoryPressure() {
const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
static_cast<GCCallbackFlags>(
@@ -1267,7 +1180,7 @@ void Heap::ReportExternalMemoryPressure() {
if (external_memory_ >
(external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
CollectAllGarbage(
- kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
+ kReduceMemoryFootprintMask,
GarbageCollectionReason::kExternalMemoryPressure,
static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
kGCCallbackFlagsForExternalMemory));
@@ -1377,8 +1290,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
- if (collector == MARK_COMPACTOR) {
- tracer()->RecordMarkCompactHistograms(gc_type_timer);
+ if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
+ tracer()->RecordGCPhasesHistograms(gc_type_timer);
}
}
@@ -1416,12 +1329,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
- // Start incremental marking for the next cycle. The heap snapshot
- // generator needs incremental marking to stay off after it aborted.
- // We do this only for scavenger to avoid a loop where mark-compact
- // causes another mark-compact.
- if (IsYoungGenerationCollector(collector) &&
- !ShouldAbortIncrementalMarking()) {
+ // Start incremental marking for the next cycle. We do this only for scavenger
+ // to avoid a loop where mark-compact causes another mark-compact.
+ if (IsYoungGenerationCollector(collector)) {
StartIncrementalMarkingIfAllocationLimitIsReached(
GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
@@ -1627,11 +1537,10 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
} else {
if (counter > 1) {
- CollectAllGarbage(
- kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
- GarbageCollectionReason::kDeserializer);
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ GarbageCollectionReason::kDeserializer);
} else {
- CollectAllGarbage(kAbortIncrementalMarkingMask,
+ CollectAllGarbage(kNoGCFlags,
GarbageCollectionReason::kDeserializer);
}
}
@@ -1785,18 +1694,22 @@ bool Heap::PerformGarbageCollection(
external_memory_at_last_mark_compact_ = external_memory_;
external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
+ double max_factor =
+ heap_controller()->MaxGrowingFactor(max_old_generation_size_);
size_t new_limit = heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
- new_space()->Capacity(), CurrentHeapGrowingMode());
+ old_gen_size, max_old_generation_size_, max_factor, gc_speed,
+ mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
old_generation_allocation_limit_ = new_limit;
CheckIneffectiveMarkCompact(
old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
+ double max_factor =
+ heap_controller()->MaxGrowingFactor(max_old_generation_size_);
size_t new_limit = heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
- new_space()->Capacity(), CurrentHeapGrowingMode());
+ old_gen_size, max_old_generation_size_, max_factor, gc_speed,
+ mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
if (new_limit < old_generation_allocation_limit_) {
old_generation_allocation_limit_ = new_limit;
}
@@ -1940,26 +1853,6 @@ void Heap::CheckNewSpaceExpansionCriteria() {
}
}
-static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
- return Heap::InFromSpace(*p) &&
- !HeapObject::cast(*p)->map_word().IsForwardingAddress();
-}
-
-class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
- public:
- virtual Object* RetainAs(Object* object) {
- if (!Heap::InFromSpace(object)) {
- return object;
- }
-
- MapWord map_word = HeapObject::cast(object)->map_word();
- if (map_word.IsForwardingAddress()) {
- return map_word.ToForwardingAddress();
- }
- return nullptr;
- }
-};
-
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
base::LockGuard<base::Mutex> guard(relocation_mutex());
@@ -2003,79 +1896,6 @@ void Heap::EvacuateYoungGeneration() {
SetGCState(NOT_IN_GC);
}
-static bool IsLogging(Isolate* isolate) {
- return FLAG_verify_predictable || isolate->logger()->is_logging() ||
- isolate->is_profiling() ||
- (isolate->heap_profiler() != nullptr &&
- isolate->heap_profiler()->is_tracking_object_moves()) ||
- isolate->heap()->has_heap_object_allocation_tracker();
-}
-
-class PageScavengingItem final : public ItemParallelJob::Item {
- public:
- explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
- virtual ~PageScavengingItem() {}
-
- void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
-
- private:
- MemoryChunk* const chunk_;
-};
-
-class ScavengingTask final : public ItemParallelJob::Task {
- public:
- ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
- : ItemParallelJob::Task(heap->isolate()),
- heap_(heap),
- scavenger_(scavenger),
- barrier_(barrier) {}
-
- void RunInParallel() final {
- TRACE_BACKGROUND_GC(
- heap_->tracer(),
- GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
- double scavenging_time = 0.0;
- {
- barrier_->Start();
- TimedScope scope(&scavenging_time);
- PageScavengingItem* item = nullptr;
- while ((item = GetItem<PageScavengingItem>()) != nullptr) {
- item->Process(scavenger_);
- item->MarkFinished();
- }
- do {
- scavenger_->Process(barrier_);
- } while (!barrier_->Wait());
- scavenger_->Process();
- }
- if (FLAG_trace_parallel_scavenge) {
- PrintIsolate(heap_->isolate(),
- "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
- static_cast<void*>(this), scavenging_time,
- scavenger_->bytes_copied(), scavenger_->bytes_promoted());
- }
- };
-
- private:
- Heap* const heap_;
- Scavenger* const scavenger_;
- OneshotBarrier* const barrier_;
-};
-
-int Heap::NumberOfScavengeTasks() {
- if (!FLAG_parallel_scavenge) return 1;
- const int num_scavenge_tasks =
- static_cast<int>(new_space()->TotalCapacity()) / MB;
- static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
- int tasks =
- Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
- if (!CanExpandOldGeneration(static_cast<size_t>(tasks * Page::kPageSize))) {
- // Optimize for memory usage near the heap limit.
- tasks = 1;
- }
- return tasks;
-}
-
void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::LockGuard<base::Mutex> guard(relocation_mutex());
@@ -2088,7 +1908,6 @@ void Heap::Scavenge() {
// Bump-pointer allocations done during scavenge are not real allocations.
// Pause the inline allocation steps.
PauseAllocationObserversScope pause_observers(this);
-
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
@@ -2097,137 +1916,19 @@ void Heap::Scavenge() {
SetGCState(SCAVENGE);
- // Implements Cheney's copying algorithm
- LOG(isolate_, ResourceEvent("scavenge", "begin"));
-
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
- new_space_->Flip();
- new_space_->ResetLinearAllocationArea();
-
- ItemParallelJob job(isolate()->cancelable_task_manager(),
- &parallel_scavenge_semaphore_);
- const int kMainThreadId = 0;
- Scavenger* scavengers[kMaxScavengerTasks];
- const bool is_logging = IsLogging(isolate());
- const int num_scavenge_tasks = NumberOfScavengeTasks();
- OneshotBarrier barrier;
- Scavenger::CopiedList copied_list(num_scavenge_tasks);
- Scavenger::PromotionList promotion_list(num_scavenge_tasks);
- for (int i = 0; i < num_scavenge_tasks; i++) {
- scavengers[i] =
- new Scavenger(this, is_logging, &copied_list, &promotion_list, i);
- job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
- }
-
- {
- Sweeper* sweeper = mark_compact_collector()->sweeper();
- // Pause the concurrent sweeper.
- Sweeper::PauseOrCompleteScope pause_scope(sweeper);
- // Filter out pages from the sweeper that need to be processed for old to
- // new slots by the Scavenger. After processing, the Scavenger adds back
- // pages that are still unswept. This way the Scavenger has exclusive
- // access to the slots of a page and can completely avoid any locks on
- // the page itself.
- Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
- filter_scope.FilterOldSpaceSweepingPages(
- [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- this, [&job](MemoryChunk* chunk) {
- job.AddItem(new PageScavengingItem(chunk));
- });
-
- RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);
-
- {
- // Identify weak unmodified handles. Requires an unmodified graph.
- TRACE_GC(
- tracer(),
- GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
- isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &JSObject::IsUnmodifiedApiObject);
- }
- {
- // Copy roots.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
- IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
- }
- {
- // Parallel phase scavenging all copied and promoted objects.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- job.Run(isolate()->async_counters());
- DCHECK(copied_list.IsEmpty());
- DCHECK(promotion_list.IsEmpty());
- }
- {
- // Scavenge weak global handles.
- TRACE_GC(tracer(),
- GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
- isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
- &IsUnscavengedHeapObject);
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
- &root_scavenge_visitor);
- scavengers[kMainThreadId]->Process();
-
- DCHECK(copied_list.IsEmpty());
- DCHECK(promotion_list.IsEmpty());
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
- &root_scavenge_visitor, &IsUnscavengedHeapObject);
- }
-
- for (int i = 0; i < num_scavenge_tasks; i++) {
- scavengers[i]->Finalize();
- delete scavengers[i];
- }
- }
-
- {
- // Update references into new space
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
- UpdateNewSpaceReferencesInExternalStringTable(
- &UpdateNewSpaceReferenceInExternalStringTableEntry);
-
- incremental_marking()->UpdateMarkingWorklistAfterScavenge();
- }
-
- if (FLAG_concurrent_marking) {
- // Ensure that concurrent marker does not track pages that are
- // going to be unmapped.
- for (Page* p : PageRange(new_space()->from_space().first_page(), nullptr)) {
- concurrent_marking()->ClearLiveness(p);
- }
- }
-
- ScavengeWeakObjectRetainer weak_object_retainer;
- ProcessYoungWeakReferences(&weak_object_retainer);
-
- // Set age mark.
- new_space_->set_age_mark(new_space_->top());
-
- {
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS);
- ArrayBufferTracker::PrepareToFreeDeadInNewSpace(this);
- }
- array_buffer_collector()->FreeAllocationsOnBackgroundThread();
+ new_space()->Flip();
+ new_space()->ResetLinearAllocationArea();
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(this, [](MemoryChunk* chunk) {
- if (chunk->SweepingDone()) {
- RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- } else {
- RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
- }
- });
+ // We also flip the young generation large object space. All large objects
+ // will be in the from space.
+ new_lo_space()->Flip();
- // Update how much has survived scavenge.
- IncrementYoungSurvivorsCounter(SurvivedNewSpaceObjectSize());
+ // Implements Cheney's copying algorithm
+ LOG(isolate_, ResourceEvent("scavenge", "begin"));
- // Scavenger may find new wrappers by iterating objects promoted onto a black
- // page.
- local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+ scavenger_collector_->CollectGarbage();
LOG(isolate_, ResourceEvent("scavenge", "end"));
@@ -2285,15 +1986,6 @@ bool Heap::ExternalStringTable::Contains(HeapObject* obj) {
return false;
}
-void Heap::ProcessMovedExternalString(Page* old_page, Page* new_page,
- ExternalString* string) {
- size_t size = string->ExternalPayloadSize();
- new_page->IncrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kExternalString, size);
- old_page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kExternalString, size);
-}
-
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
@@ -2312,18 +2004,15 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
// String is still reachable.
String* new_string = String::cast(first_word.ToForwardingAddress());
- String* original_string = reinterpret_cast<String*>(*p);
- // The length of the original string is used to disambiguate the scenario
- // of a ThinString being forwarded to an ExternalString (which already exists
- // in the OLD space), and an ExternalString being forwarded to its promoted
- // copy. See Scavenger::EvacuateThinString.
- if (new_string->IsThinString() || original_string->length() == 0) {
+ if (new_string->IsThinString()) {
// Filtering Thin strings out of the external string table.
return nullptr;
} else if (new_string->IsExternalString()) {
- heap->ProcessMovedExternalString(
+ MemoryChunk::MoveExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString,
Page::FromAddress(reinterpret_cast<Address>(*p)),
- Page::FromHeapObject(new_string), ExternalString::cast(new_string));
+ Page::FromHeapObject(new_string),
+ ExternalString::cast(new_string)->ExternalPayloadSize());
return new_string;
}
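// The centralized call above is equivalent to the two-sided accounting
// that the deleted Heap::ProcessMovedExternalString performed, roughly:
//   old_page->DecrementExternalBackingStoreBytes(type, payload_size);
//   new_page->IncrementExternalBackingStoreBytes(type, payload_size);
// but now owned by MemoryChunk, alongside the counters it maintains.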
@@ -2488,8 +2177,8 @@ void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
}
-void Heap::ForeachAllocationSite(Object* list,
- std::function<void(AllocationSite*)> visitor) {
+void Heap::ForeachAllocationSite(
+ Object* list, const std::function<void(AllocationSite*)>& visitor) {
DisallowHeapAllocation disallow_heap_allocation;
Object* current = list;
while (current->IsAllocationSite()) {
@@ -2555,8 +2244,8 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
explicit ExternalStringTableVisitorAdapter(
Isolate* isolate, v8::ExternalResourceVisitor* visitor)
: isolate_(isolate), visitor_(visitor) {}
- virtual void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) {
+ void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
@@ -2597,10 +2286,9 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
- intptr_t offset = OffsetFrom(address);
- if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
+ if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
return kPointerSize;
- if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
+ if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
return kDoubleSize - kPointerSize; // No fill if double is always aligned.
return 0;
}
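// Illustrative only: the branches above matter on 32-bit builds, where
// kPointerSize = 4, kDoubleSize = 8 and kDoubleAlignmentMask = 7, so a
// pointer-aligned address can still be 8-byte misaligned.
#include <cstdint>
int FillToAlign32BitSketch(uintptr_t address, bool want_double_aligned) {
  if (want_double_aligned && (address & 7) != 0) return 4;   // kPointerSize
  if (!want_double_aligned && (address & 7) == 0) return 4;  // 8 - 4
  return 0;
}
// FillToAlign32BitSketch(0x1004, true) == 4: pad so the double payload
// starts 8-aligned. FillToAlign32BitSketch(0x1008, true) == 0.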
@@ -2693,33 +2381,29 @@ void Heap::CreateFixedStubs() {
Heap::CreateJSRunMicrotasksEntryStub();
}
-bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
+bool Heap::RootCanBeWrittenAfterInitialization(RootIndex root_index) {
switch (root_index) {
- case kNumberStringCacheRootIndex:
- case kCodeStubsRootIndex:
- case kScriptListRootIndex:
- case kMaterializedObjectsRootIndex:
- case kMicrotaskQueueRootIndex:
- case kDetachedContextsRootIndex:
- case kRetainedMapsRootIndex:
- case kRetainingPathTargetsRootIndex:
- case kFeedbackVectorsForProfilingToolsRootIndex:
- case kNoScriptSharedFunctionInfosRootIndex:
- case kSerializedObjectsRootIndex:
- case kSerializedGlobalProxySizesRootIndex:
- case kPublicSymbolTableRootIndex:
- case kApiSymbolTableRootIndex:
- case kApiPrivateSymbolTableRootIndex:
- case kMessageListenersRootIndex:
- case kDeserializeLazyHandlerRootIndex:
- case kDeserializeLazyHandlerWideRootIndex:
- case kDeserializeLazyHandlerExtraWideRootIndex:
+ case RootIndex::kNumberStringCache:
+ case RootIndex::kCodeStubs:
+ case RootIndex::kScriptList:
+ case RootIndex::kMaterializedObjects:
+ case RootIndex::kDetachedContexts:
+ case RootIndex::kRetainedMaps:
+ case RootIndex::kRetainingPathTargets:
+ case RootIndex::kFeedbackVectorsForProfilingTools:
+ case RootIndex::kNoScriptSharedFunctionInfos:
+ case RootIndex::kSerializedObjects:
+ case RootIndex::kSerializedGlobalProxySizes:
+ case RootIndex::kPublicSymbolTable:
+ case RootIndex::kApiSymbolTable:
+ case RootIndex::kApiPrivateSymbolTable:
+ case RootIndex::kMessageListeners:
// Smi values
-#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
+#define SMI_ENTRY(type, name, Name) case RootIndex::k##Name:
SMI_ROOT_LIST(SMI_ENTRY)
#undef SMI_ENTRY
// String table
- case kStringTableRootIndex:
+ case RootIndex::kStringTable:
return true;
default:
@@ -2727,7 +2411,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
}
}
-bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
+bool Heap::RootCanBeTreatedAsConstant(RootIndex root_index) {
bool can_be = !RootCanBeWrittenAfterInitialization(root_index) &&
!InNewSpace(root(root_index));
DCHECK_IMPLIES(can_be, IsImmovable(HeapObject::cast(root(root_index))));
@@ -2743,61 +2427,6 @@ void Heap::FlushNumberStringCache() {
}
}
-namespace {
-
-Heap::RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type) {
- switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
- case kExternal##Type##Array: \
- return Heap::kFixed##Type##ArrayMapRootIndex;
-
- TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
-#undef ARRAY_TYPE_TO_ROOT_INDEX
- }
- UNREACHABLE();
-}
-
-Heap::RootListIndex RootIndexForFixedTypedArray(ElementsKind elements_kind) {
- switch (elements_kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- return Heap::kFixed##Type##ArrayMapRootIndex;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
- default:
- UNREACHABLE();
-#undef TYPED_ARRAY_CASE
- }
-}
-
-Heap::RootListIndex RootIndexForEmptyFixedTypedArray(
- ElementsKind elements_kind) {
- switch (elements_kind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- return Heap::kEmptyFixed##Type##ArrayRootIndex;
-
- TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
-#undef ELEMENT_KIND_TO_ROOT_INDEX
- default:
- UNREACHABLE();
- }
-}
-
-} // namespace
-
-Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
- return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
-}
-
-Map* Heap::MapForFixedTypedArray(ElementsKind elements_kind) {
- return Map::cast(roots_[RootIndexForFixedTypedArray(elements_kind)]);
-}
-
-FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
- return FixedTypedArrayBase::cast(
- roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
-}
-
HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
ClearRecordedSlots clear_slots_mode,
ClearFreedMemoryMode clear_memory_mode) {
@@ -2805,11 +2434,11 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)),
+ reinterpret_cast<Map*>(root(RootIndex::kOnePointerFillerMap)),
SKIP_WRITE_BARRIER);
} else if (size == 2 * kPointerSize) {
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)),
+ reinterpret_cast<Map*>(root(RootIndex::kTwoPointerFillerMap)),
SKIP_WRITE_BARRIER);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
Memory<Address>(addr + kPointerSize) =
@@ -2818,7 +2447,7 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
} else {
DCHECK_GT(size, 2 * kPointerSize);
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
+ reinterpret_cast<Map*>(root(RootIndex::kFreeSpaceMap)),
SKIP_WRITE_BARRIER);
FreeSpace::cast(filler)->relaxed_write_size(size);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
@@ -2865,8 +2494,8 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase* to_check)
: to_check_(to_check) {}
- virtual void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) {
+ void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) override {
for (Object** p = start; p < end; ++p) {
DCHECK_NE(*p, to_check_);
}
@@ -3532,7 +3161,7 @@ class MemoryPressureInterruptTask : public CancelableTask {
explicit MemoryPressureInterruptTask(Heap* heap)
: CancelableTask(heap->isolate()), heap_(heap) {}
- virtual ~MemoryPressureInterruptTask() {}
+ ~MemoryPressureInterruptTask() override = default;
private:
// v8::internal::CancelableTask overrides.
@@ -3575,9 +3204,10 @@ void Heap::CollectGarbageOnMemoryPressure() {
const double kMaxMemoryPressurePauseMs = 100;
double start = MonotonicallyIncreasingTimeInMs();
- CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
+ CollectAllGarbage(kReduceMemoryFootprintMask,
GarbageCollectionReason::kMemoryPressure,
kGCCallbackFlagCollectAllAvailableGarbage);
+ EagerlyFreeExternalMemory();
double end = MonotonicallyIncreasingTimeInMs();
// Estimate how much memory we can free.
@@ -3591,10 +3221,9 @@ void Heap::CollectGarbageOnMemoryPressure() {
// If we spent less than half of the time budget, then perform a full GC.
// Otherwise, start incremental marking.
if (end - start < kMaxMemoryPressurePauseMs / 2) {
- CollectAllGarbage(
- kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
- GarbageCollectionReason::kMemoryPressure,
- kGCCallbackFlagCollectAllAvailableGarbage);
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ GarbageCollectionReason::kMemoryPressure,
+ kGCCallbackFlagCollectAllAvailableGarbage);
} else {
if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
StartIncrementalMarking(kReduceMemoryFootprintMask,
@@ -3617,13 +3246,27 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
} else {
ExecutionAccess access(isolate());
isolate()->stack_guard()->RequestGC();
- V8::GetCurrentPlatform()->CallOnForegroundThread(
- reinterpret_cast<v8::Isolate*>(isolate()),
- new MemoryPressureInterruptTask(this));
+ auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
+ reinterpret_cast<v8::Isolate*>(isolate()));
+ taskrunner->PostTask(
+ base::make_unique<MemoryPressureInterruptTask>(this));
}
}
}
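This hunk replaces the deprecated post-to-foreground callback with the task-runner API: fetch the isolate's foreground TaskRunner from the platform, then post an owned task. Both calls appear verbatim in the diff; the Task subclass below is a hypothetical stand-in for MemoryPressureInterruptTask. A minimal embedder-side sketch:

```cpp
#include <memory>

#include "include/v8-platform.h"

// Hypothetical task; v8::Task::Run() is invoked later on the isolate's
// foreground thread by whichever TaskRunner it was posted to.
class PendingInterrupt final : public v8::Task {
 public:
  void Run() override { /* handle the interrupt on the main thread */ }
};

void PostToForeground(v8::Platform* platform, v8::Isolate* isolate) {
  std::shared_ptr<v8::TaskRunner> runner =
      platform->GetForegroundTaskRunner(isolate);
  runner->PostTask(std::make_unique<PendingInterrupt>());
}
```

The same migration is applied to IncrementalMarkingJob::ScheduleTask later in this diff.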
+void Heap::EagerlyFreeExternalMemory() {
+ for (Page* page : *old_space()) {
+ if (!page->SweepingDone()) {
+ base::LockGuard<base::Mutex> guard(page->mutex());
+ if (!page->SweepingDone()) {
+ ArrayBufferTracker::FreeDead(
+ page, mark_compact_collector()->non_atomic_marking_state());
+ }
+ }
+ }
+ memory_allocator()->unmapper()->EnsureUnmappingCompleted();
+}
+
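The new EagerlyFreeExternalMemory uses double-checked locking per page: test SweepingDone() without the lock, and only if the page still looks unswept take its mutex and test again, since a concurrent sweeper may have finished in between. A generic, race-free sketch of that shape (plain std types, not V8's base::Mutex):

```cpp
#include <atomic>
#include <mutex>

// Double-checked work skipping: the unlocked fast path rejects pages that
// already finished sweeping; the locked re-check guards against a sweeper
// completing between the two tests.
struct Page {
  bool SweepingDone() const { return done.load(std::memory_order_acquire); }
  std::mutex mutex;
  std::atomic<bool> done{false};
};

void FreeDeadExternalMemory(Page* page) {
  if (!page->SweepingDone()) {
    std::lock_guard<std::mutex> guard(page->mutex);
    if (!page->SweepingDone()) {
      // ... release dead external backing stores tracked on this page ...
    }
  }
}
```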
void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
void* data) {
const size_t kMaxCallbacks = 100;
@@ -3833,16 +3476,15 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
}
}
-
-bool Heap::RootIsImmortalImmovable(int root_index) {
+bool Heap::RootIsImmortalImmovable(RootIndex root_index) {
switch (root_index) {
-#define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
+#define IMMORTAL_IMMOVABLE_ROOT(name) case RootIndex::k##name:
IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
#undef IMMORTAL_IMMOVABLE_ROOT
-#define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
- INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
+#define INTERNALIZED_STRING(_, name, value) case RootIndex::k##name:
+ INTERNALIZED_STRING_LIST_GENERATOR(INTERNALIZED_STRING, /* not used */)
#undef INTERNALIZED_STRING
-#define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
+#define STRING_TYPE(NAME, size, name, Name) case RootIndex::k##Name##Map:
STRING_TYPE_LIST(STRING_TYPE)
#undef STRING_TYPE
return true;
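Note that the switch above names no case by hand: each *_LIST macro expands a caller-supplied macro once per root, so adding a root to a list automatically extends every switch built from it. A self-contained sketch of the X-macro technique with a made-up list:

```cpp
// COLOR_LIST invokes V once per entry, so the enum and the switch below
// are both generated from a single list definition and cannot drift apart.
#define COLOR_LIST(V) \
  V(Red)              \
  V(Green)            \
  V(Blue)

enum class Color {
#define DECL(name) k##name,
  COLOR_LIST(DECL)
#undef DECL
};

bool IsKnownColor(Color c) {
  switch (c) {
#define CASE(name) case Color::k##name:
    COLOR_LIST(CASE)
#undef CASE
    return true;
  }
  return false;
}
```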
@@ -3867,7 +3509,7 @@ class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
- if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ if ((*current)->GetHeapObject(&object)) {
CHECK(heap_->InReadOnlySpace(object));
}
}
@@ -3963,10 +3605,9 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
: SlotVerifyingVisitor(untyped, typed) {}
bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject* target) override {
- DCHECK_IMPLIES(
- target->IsStrongOrWeakHeapObject() && Heap::InNewSpace(target),
- Heap::InToSpace(target));
- return target->IsStrongOrWeakHeapObject() && Heap::InNewSpace(target) &&
+ DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InNewSpace(target),
+ Heap::InToSpace(target));
+ return target->IsStrongOrWeak() && Heap::InNewSpace(target) &&
!Heap::InNewSpace(host);
}
};
@@ -4077,9 +3718,8 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- v->VisitRootPointer(
- Root::kStringTable, nullptr,
- reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
+ v->VisitRootPointer(Root::kStringTable, nullptr,
+ &roots_[RootIndex::kStringTable]);
v->Synchronize(VisitorSynchronization::kStringTable);
if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
mode != VISIT_FOR_SERIALIZATION) {
@@ -4094,8 +3734,8 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
void Heap::IterateSmiRoots(RootVisitor* v) {
// Acquire execution access since we are going to read stack limit values.
ExecutionAccess access(isolate());
- v->VisitRootPointers(Root::kSmiRootList, nullptr, &roots_[kSmiRootsStart],
- &roots_[kRootListLength]);
+ v->VisitRootPointers(Root::kSmiRootList, nullptr, roots_.smi_roots_begin(),
+ roots_.smi_roots_end());
v->Synchronize(VisitorSynchronization::kSmiRootList);
}
@@ -4152,8 +3792,13 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- v->VisitRootPointers(Root::kStrongRootList, nullptr, &roots_[0],
- &roots_[kStrongRootListLength]);
+ // Garbage collection can skip over the read-only roots.
+ const bool isGC = mode != VISIT_ALL && mode != VISIT_FOR_SERIALIZATION &&
+ mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION;
+ Object** start =
+ isGC ? roots_.read_only_roots_end() : roots_.strong_roots_begin();
+ v->VisitRootPointers(Root::kStrongRootList, nullptr, start,
+ roots_.strong_roots_end());
v->Synchronize(VisitorSynchronization::kStrongRootList);
isolate_->bootstrapper()->Iterate(v);
@@ -4192,17 +3837,19 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// global handles need to be added manually.
break;
case VISIT_ONLY_STRONG:
+ case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
isolate_->global_handles()->IterateStrongRoots(v);
break;
case VISIT_ALL_IN_SCAVENGE:
isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
break;
case VISIT_ALL_IN_MINOR_MC_MARK:
- // Global handles are processed manually be the minor MC.
+ // Global handles are processed manually by the minor MC.
break;
case VISIT_ALL_IN_MINOR_MC_UPDATE:
- // Global handles are processed manually be the minor MC.
+ // Global handles are processed manually by the minor MC.
break;
+ case VISIT_ALL_BUT_READ_ONLY:
case VISIT_ALL_IN_SWEEP_NEWSPACE:
case VISIT_ALL:
isolate_->global_handles()->IterateAllRoots(v);
@@ -4706,6 +4353,9 @@ void Heap::SetUp() {
heap_controller_ = new HeapController(this);
mark_compact_collector_ = new MarkCompactCollector(this);
+
+ scavenger_collector_ = new ScavengerCollector(this);
+
incremental_marking_ =
new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
mark_compact_collector_->weak_objects());
@@ -4715,10 +4365,11 @@ void Heap::SetUp() {
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->bailout(),
- marking_worklist->on_hold(), mark_compact_collector_->weak_objects());
+ marking_worklist->on_hold(), mark_compact_collector_->weak_objects(),
+ marking_worklist->embedder());
} else {
- concurrent_marking_ =
- new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
+ concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr, nullptr,
+ nullptr, nullptr);
}
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
@@ -4727,7 +4378,8 @@ void Heap::SetUp() {
space_[RO_SPACE] = read_only_space_ = new ReadOnlySpace(this);
space_[NEW_SPACE] = new_space_ =
- new NewSpace(this, initial_semispace_size_, max_semi_space_size_);
+ new NewSpace(this, memory_allocator_->data_page_allocator(),
+ initial_semispace_size_, max_semi_space_size_);
space_[OLD_SPACE] = old_space_ = new OldSpace(this);
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
@@ -4809,15 +4461,15 @@ void Heap::SetStackLimits() {
// Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore them.
- roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
+ roots_[RootIndex::kStackLimit] = reinterpret_cast<Object*>(
(isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
- roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
+ roots_[RootIndex::kRealStackLimit] = reinterpret_cast<Object*>(
(isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
void Heap::ClearStackLimits() {
- roots_[kStackLimitRootIndex] = Smi::kZero;
- roots_[kRealStackLimitRootIndex] = Smi::kZero;
+ roots_[RootIndex::kStackLimit] = Smi::kZero;
+ roots_[RootIndex::kRealStackLimit] = Smi::kZero;
}
int Heap::NextAllocationTimeout(int current_timeout) {
@@ -4873,6 +4525,10 @@ void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
local_embedder_heap_tracer()->SetRemoteTracer(tracer);
}
+EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
+ return local_embedder_heap_tracer()->remote_tracer();
+}
+
void Heap::TracePossibleWrapper(JSObject* js_object) {
DCHECK(js_object->IsApiWrapper());
if (js_object->GetEmbedderFieldCount() >= 2 &&
@@ -4961,6 +4617,11 @@ void Heap::TearDown() {
}
#endif // ENABLE_MINOR_MC
+ if (scavenger_collector_ != nullptr) {
+ delete scavenger_collector_;
+ scavenger_collector_ = nullptr;
+ }
+
if (array_buffer_collector_ != nullptr) {
delete array_buffer_collector_;
array_buffer_collector_ = nullptr;
@@ -5100,7 +4761,7 @@ Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
int copy_to = 0;
for (int i = 0; i < array->length(); i++) {
MaybeObject* element = array->Get(i);
- if (element->IsClearedWeakHeapObject()) continue;
+ if (element->IsCleared()) continue;
new_array->Set(copy_to++, element);
}
new_array->set_length(copy_to);
@@ -5174,11 +4835,11 @@ void Heap::CompactRetainedMaps(WeakArrayList* retained_maps) {
// This loop compacts the array by removing cleared weak cells.
for (int i = 0; i < length; i += 2) {
MaybeObject* maybe_object = retained_maps->Get(i);
- if (maybe_object->IsClearedWeakHeapObject()) {
+ if (maybe_object->IsCleared()) {
continue;
}
- DCHECK(maybe_object->IsWeakHeapObject());
+ DCHECK(maybe_object->IsWeak());
MaybeObject* age = retained_maps->Get(i + 1);
DCHECK(age->IsSmi());
@@ -5268,17 +4929,19 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
}
}
-bool Heap::HasRecordedSlot(HeapObject* object, Object** slot) {
- if (InNewSpace(object)) {
- return false;
- }
+#ifdef DEBUG
+void Heap::VerifyClearedSlot(HeapObject* object, Object** slot) {
+ if (InNewSpace(object)) return;
Address slot_addr = reinterpret_cast<Address>(slot);
Page* page = Page::FromAddress(slot_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
- return RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr) ||
- RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr);
+ CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr));
+ // Old to old slots are filtered with invalidated slots.
+ CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr),
+ page->RegisteredObjectWithInvalidatedSlots(object));
}
+#endif
void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
@@ -5308,9 +4971,7 @@ PagedSpace* PagedSpaces::next() {
SpaceIterator::SpaceIterator(Heap* heap)
: heap_(heap), current_space_(FIRST_SPACE - 1) {}
-SpaceIterator::~SpaceIterator() {
-}
-
+SpaceIterator::~SpaceIterator() = default;
bool SpaceIterator::has_next() {
// Iterate until no more spaces.
@@ -5325,7 +4986,7 @@ Space* SpaceIterator::next() {
class HeapObjectsFilter {
public:
- virtual ~HeapObjectsFilter() {}
+ virtual ~HeapObjectsFilter() = default;
virtual bool SkipObject(HeapObject* object) = 0;
};
@@ -5336,14 +4997,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
MarkReachableObjects();
}
- ~UnreachableObjectsFilter() {
+ ~UnreachableObjectsFilter() override {
for (auto it : reachable_) {
delete it.second;
it.second = nullptr;
}
}
- bool SkipObject(HeapObject* object) {
+ bool SkipObject(HeapObject* object) override {
if (object->IsFiller()) return true;
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
if (reachable_.count(chunk) == 0) return true;
@@ -5396,7 +5057,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
// Treat weak references as strong.
for (MaybeObject** p = start; p < end; p++) {
HeapObject* heap_object;
- if ((*p)->ToStrongOrWeakHeapObject(&heap_object)) {
+ if ((*p)->GetHeapObject(&heap_object)) {
if (filter_->MarkAsReachable(heap_object)) {
marking_stack_.push_back(heap_object);
}
@@ -5411,7 +5072,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
void MarkReachableObjects() {
MarkingVisitor visitor(this);
- heap_->IterateRoots(&visitor, VISIT_ALL);
+ heap_->IterateRoots(&visitor, VISIT_ALL_BUT_READ_ONLY);
visitor.TransitiveClosure();
}
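MarkReachableObjects is the standard worklist formulation of reachability: seed the stack from the roots, then pop, mark, and push not-yet-marked successors until the stack drains. MarkAsReachable returning true only on the first visit is what bounds the loop. A compact sketch over a plain pointer graph:

```cpp
#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node*> edges;
};

// Transitive closure with an explicit worklist: insert().second is true
// only on the first visit, so each node is expanded at most once.
std::unordered_set<const Node*> MarkReachable(
    const std::vector<Node*>& roots) {
  std::unordered_set<const Node*> reachable;
  std::vector<Node*> worklist(roots.begin(), roots.end());
  while (!worklist.empty()) {
    Node* n = worklist.back();
    worklist.pop_back();
    if (!reachable.insert(n).second) continue;  // already marked
    for (Node* succ : n->edges) worklist.push_back(succ);
  }
  return reachable;
}
```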
@@ -5597,24 +5258,6 @@ void Heap::UnregisterStrongRoots(Object** start) {
}
}
-bool Heap::IsDeserializeLazyHandler(Code* code) {
- return (code == deserialize_lazy_handler() ||
- code == deserialize_lazy_handler_wide() ||
- code == deserialize_lazy_handler_extra_wide());
-}
-
-void Heap::SetDeserializeLazyHandler(Code* code) {
- set_deserialize_lazy_handler(code);
-}
-
-void Heap::SetDeserializeLazyHandlerWide(Code* code) {
- set_deserialize_lazy_handler_wide(code);
-}
-
-void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
- set_deserialize_lazy_handler_extra_wide(code);
-}
-
void Heap::SetBuiltinsConstantsTable(FixedArray* cache) {
set_builtins_constants_table(cache);
}
@@ -5723,11 +5366,11 @@ void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
MaybeObject** end) {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
- if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ if ((*current)->GetHeapObject(&object)) {
CHECK(heap_->Contains(object));
CHECK(object->map()->IsMap());
} else {
- CHECK((*current)->IsSmi() || (*current)->IsClearedWeakHeapObject());
+ CHECK((*current)->IsSmi() || (*current)->IsCleared());
}
}
}
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 2e750d56fa..c99f0d424e 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -13,6 +13,7 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
+#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/accessors.h"
#include "src/allocation.h"
@@ -40,6 +41,7 @@ class HeapTester;
class TestMemoryAllocatorScope;
} // namespace heap
+class AllocationMemento;
class ObjectBoilerplateDescription;
class BytecodeArray;
class CodeDataContainer;
@@ -50,6 +52,10 @@ class JSArrayBuffer;
class ExternalString;
using v8::MemoryPressureLevel;
+// Adapts a PRIVATE_SYMBOL_LIST_GENERATOR entry to an
+// IMMORTAL_IMMOVABLE_ROOT_LIST entry.
+#define PRIVATE_SYMBOL_LIST_TO_IMMORTAL_IMMOVABLE_LIST_ADAPTER(V, name) V(name)
+
// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete and has omissions.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
@@ -57,6 +63,7 @@ using v8::MemoryPressureLevel;
V(ArgumentsMarkerMap) \
V(ArrayBufferNeuteringProtector) \
V(ArrayIteratorProtector) \
+ V(AwaitContextMap) \
V(BigIntMap) \
V(BlockContextMap) \
V(ObjectBoilerplateDescriptionMap) \
@@ -143,6 +150,7 @@ using v8::MemoryPressureLevel;
V(TypedArraySpeciesProtector) \
V(PromiseSpeciesProtector) \
V(StaleRegister) \
+ V(StringIteratorProtector) \
V(StringLengthProtector) \
V(StringTableMap) \
V(SymbolMap) \
@@ -162,7 +170,8 @@ using v8::MemoryPressureLevel;
V(WeakArrayListMap) \
V(WithContextMap) \
V(empty_string) \
- PRIVATE_SYMBOL_LIST(V)
+ PRIVATE_SYMBOL_LIST_GENERATOR( \
+ PRIVATE_SYMBOL_LIST_TO_IMMORTAL_IMMOVABLE_LIST_ADAPTER, V)
class AllocationObserver;
class ArrayBufferCollector;
@@ -189,6 +198,7 @@ class PagedSpace;
class RootVisitor;
class ScavengeJob;
class Scavenger;
+class ScavengerCollector;
class Space;
class StoreBuffer;
class StressScavengeObserver;
@@ -206,6 +216,8 @@ enum class ClearRecordedSlots { kYes, kNo };
enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
+enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
+
enum class FixedArrayVisitationMode { kRegular, kIncremental };
enum class TraceRetainingPathMode { kEnabled, kDisabled };
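The new ExternalBackingStoreType enum above pairs with the backing_store_bytes_ atomic and the Increment/DecrementExternalBackingStoreBytes declarations later in this header. Only the aggregate counter is visible in this diff; the per-type array below is an illustrative extension, not V8's layout:

```cpp
#include <atomic>
#include <cstddef>

enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };

// Sketch of byte accounting keyed by the enum above, with relaxed atomics
// since the counters are statistics rather than synchronization.
class BackingStoreAccounting {
 public:
  BackingStoreAccounting() {
    for (auto& c : per_type_) c.store(0, std::memory_order_relaxed);
  }
  void Increment(ExternalBackingStoreType type, size_t amount) {
    per_type_[type].fetch_add(amount, std::memory_order_relaxed);
    total_.fetch_add(amount, std::memory_order_relaxed);
  }
  void Decrement(ExternalBackingStoreType type, size_t amount) {
    per_type_[type].fetch_sub(amount, std::memory_order_relaxed);
    total_.fetch_sub(amount, std::memory_order_relaxed);
  }
  size_t total() const { return total_.load(std::memory_order_relaxed); }

 private:
  std::atomic<size_t> per_type_[kNumTypes];
  std::atomic<size_t> total_{0};
};
```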
@@ -305,54 +317,6 @@ struct CommentStatistic {
class Heap {
public:
- // Declare all the root indices. This defines the root list order.
- // clang-format off
- enum RootListIndex {
-#define DECL(type, name, camel_name) k##camel_name##RootIndex,
- STRONG_ROOT_LIST(DECL)
-#undef DECL
-
-#define DECL(name, str) k##name##RootIndex,
- INTERNALIZED_STRING_LIST(DECL)
-#undef DECL
-
-#define DECL(name) k##name##RootIndex,
- PRIVATE_SYMBOL_LIST(DECL)
-#undef DECL
-
-#define DECL(name, description) k##name##RootIndex,
- PUBLIC_SYMBOL_LIST(DECL)
- WELL_KNOWN_SYMBOL_LIST(DECL)
-#undef DECL
-
-#define DECL(accessor_name, AccessorName) k##AccessorName##AccessorRootIndex,
- ACCESSOR_INFO_LIST(DECL)
-#undef DECL
-
-#define DECL(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECL)
-#undef DECL
-
-#define DECL(NAME, Name, Size, name) k##Name##Size##MapRootIndex,
- ALLOCATION_SITE_LIST(DECL)
-#undef DECL
-
-#define DECL(NAME, Name, Size, name) k##Name##Size##MapRootIndex,
- DATA_HANDLER_LIST(DECL)
-#undef DECL
-
- kStringTableRootIndex,
-
-#define DECL(type, name, camel_name) k##camel_name##RootIndex,
- SMI_ROOT_LIST(DECL)
-#undef DECL
-
- kRootListLength,
- kStrongRootListLength = kStringTableRootIndex,
- kSmiRootsStart = kStringTableRootIndex + 1
- };
- // clang-format on
-
enum FindMementoMode { kForRuntime, kForGC };
enum HeapState {
@@ -399,11 +363,6 @@ class Heap {
static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
- static const int kAbortIncrementalMarkingMask = 2;
- static const int kFinalizeIncrementalMarkingMask = 4;
-
- // Making the heap iterable requires us to abort incremental marking.
- static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
// The roots that have an index less than this are always in old space.
static const int kOldSpaceRoots = 0x20;
@@ -413,13 +372,18 @@ class Heap {
static const int kMinPromotedPercentForFastPromotionMode = 90;
- STATIC_ASSERT(kUndefinedValueRootIndex ==
+ STATIC_ASSERT(static_cast<int>(RootIndex::kUndefinedValue) ==
Internals::kUndefinedValueRootIndex);
- STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
- STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
- STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
- STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
- STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
+ STATIC_ASSERT(static_cast<int>(RootIndex::kTheHoleValue) ==
+ Internals::kTheHoleValueRootIndex);
+ STATIC_ASSERT(static_cast<int>(RootIndex::kNullValue) ==
+ Internals::kNullValueRootIndex);
+ STATIC_ASSERT(static_cast<int>(RootIndex::kTrueValue) ==
+ Internals::kTrueValueRootIndex);
+ STATIC_ASSERT(static_cast<int>(RootIndex::kFalseValue) ==
+ Internals::kFalseValueRootIndex);
+ STATIC_ASSERT(static_cast<int>(RootIndex::kempty_string) ==
+ Internals::kEmptyStringRootIndex);
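These STATIC_ASSERTs pin the internal RootIndex enum to the Internals constants from include/v8-internal.h (newly included at the top of this file), so reordering the root list breaks the build instead of silently breaking embedders. A minimal sketch of the technique:

```cpp
// Two declarations that must agree: a public ABI constant and an internal
// enum. The static_assert turns any drift into a compile-time error.
namespace public_api {
constexpr int kUndefinedValueRootIndex = 4;  // illustrative value
}

enum class RootIndex : int { kFirstRoot = 0, kUndefinedValue = 4 };

static_assert(static_cast<int>(RootIndex::kUndefinedValue) ==
                  public_api::kUndefinedValueRootIndex,
              "root list order must match the public API constant");
```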
// Calculates the maximum amount of filler that could be required by the
// given alignment.
@@ -430,14 +394,14 @@ class Heap {
void FatalProcessOutOfMemory(const char* location);
- V8_EXPORT_PRIVATE static bool RootIsImmortalImmovable(int root_index);
+ V8_EXPORT_PRIVATE static bool RootIsImmortalImmovable(RootIndex root_index);
// Checks whether the space is valid.
static bool IsValidAllocationSpace(AllocationSpace space);
// Generated code can embed direct references to non-writable roots if
// they are in new space.
- static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+ static bool RootCanBeWrittenAfterInitialization(RootIndex root_index);
// Zapping is needed for verify heap, and always done in debug builds.
static inline bool ShouldZapGarbage() {
@@ -566,8 +530,8 @@ class Heap {
  // Traverse all the allocation_sites [nested_site and weak_next] in the list
  // and, for each, call the visitor.
- void ForeachAllocationSite(Object* list,
- std::function<void(AllocationSite*)> visitor);
+ void ForeachAllocationSite(
+ Object* list, const std::function<void(AllocationSite*)>& visitor);
// Number of mark-sweeps.
int ms_count() const { return ms_count_; }
@@ -688,8 +652,7 @@ class Heap {
external_memory_concurrently_freed_ = 0;
}
- void ProcessMovedExternalString(Page* old_page, Page* new_page,
- ExternalString* string);
+ size_t backing_store_bytes() const { return backing_store_bytes_; }
void CompactWeakArrayLists(PretenureFlag pretenure);
@@ -811,36 +774,29 @@ class Heap {
friend class ReadOnlyRoots;
public:
+ RootsTable& roots_table() { return roots_; }
+
// Heap root getters.
-#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
+#define ROOT_ACCESSOR(type, name, CamelName) inline type* name();
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
- inline Map* name##_map();
- DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
-#undef DATA_HANDLER_MAP_ACCESSOR
-
-#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
- inline AccessorInfo* accessor_name##_accessor();
- ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
-#undef ACCESSOR_INFO_ACCESSOR
-
- Object* root(RootListIndex index) { return roots_[index]; }
- Handle<Object> root_handle(RootListIndex index) {
+ Object* root(RootIndex index) { return roots_[index]; }
+ Handle<Object> root_handle(RootIndex index) {
return Handle<Object>(&roots_[index]);
}
+
+ bool IsRootHandleLocation(Object** handle_location, RootIndex* index) const {
+ return roots_.IsRootHandleLocation(handle_location, index);
+ }
+
template <typename T>
- bool IsRootHandle(Handle<T> handle, RootListIndex* index) const {
- Object** const handle_location = bit_cast<Object**>(handle.address());
- if (handle_location >= &roots_[kRootListLength]) return false;
- if (handle_location < &roots_[0]) return false;
- *index = static_cast<RootListIndex>(handle_location - &roots_[0]);
- return true;
+ bool IsRootHandle(Handle<T> handle, RootIndex* index) const {
+ return roots_.IsRootHandle(handle, index);
}
// Generated code can embed this address to get access to the roots.
- Object** roots_array_start() { return roots_; }
+ Object** roots_array_start() { return roots_.roots_; }
ExternalReferenceTable* external_reference_table() {
DCHECK(external_reference_table_.is_initialized());
@@ -868,23 +824,23 @@ class Heap {
void SetRootCodeStubs(SimpleNumberDictionary* value);
void SetRootMaterializedObjects(FixedArray* objects) {
- roots_[kMaterializedObjectsRootIndex] = objects;
+ roots_[RootIndex::kMaterializedObjects] = objects;
}
void SetRootScriptList(Object* value) {
- roots_[kScriptListRootIndex] = value;
+ roots_[RootIndex::kScriptList] = value;
}
void SetRootStringTable(StringTable* value) {
- roots_[kStringTableRootIndex] = value;
+ roots_[RootIndex::kStringTable] = value;
}
void SetRootNoScriptSharedFunctionInfos(Object* value) {
- roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
+ roots_[RootIndex::kNoScriptSharedFunctionInfos] = value;
}
void SetMessageListeners(TemplateList* value) {
- roots_[kMessageListenersRootIndex] = value;
+ roots_[RootIndex::kMessageListeners] = value;
}
// Set the stack limit in the roots_ array. Some architectures generate
@@ -897,20 +853,11 @@ class Heap {
void ClearStackLimits();
// Generated code can treat direct references to this root as constant.
- bool RootCanBeTreatedAsConstant(RootListIndex root_index);
-
- Map* MapForFixedTypedArray(ExternalArrayType array_type);
- Map* MapForFixedTypedArray(ElementsKind elements_kind);
- FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);
+ bool RootCanBeTreatedAsConstant(RootIndex root_index);
void RegisterStrongRoots(Object** start, Object** end);
void UnregisterStrongRoots(Object** start);
- bool IsDeserializeLazyHandler(Code* code);
- void SetDeserializeLazyHandler(Code* code);
- void SetDeserializeLazyHandlerWide(Code* code);
- void SetDeserializeLazyHandlerExtraWide(Code* code);
-
void SetBuiltinsConstantsTable(FixedArray* cache);
// ===========================================================================
@@ -935,9 +882,7 @@ class Heap {
AllocationSpace space, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
- // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
- // non-zero, then the slower precise sweeper is used, which leaves the heap
- // in a state where we can iterate over the heap visiting all objects.
+ // Performs a full garbage collection.
V8_EXPORT_PRIVATE void CollectAllGarbage(
int flags, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
@@ -945,6 +890,13 @@ class Heap {
// Last hope GC, should try to squeeze as much as possible.
void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);
+ // Precise garbage collection that potentially finalizes already running
+ // incremental marking before performing an atomic garbage collection.
+ // Only use if absolutely necessary or in tests to avoid floating garbage!
+ void PreciseCollectAllGarbage(
+ int flags, GarbageCollectionReason gc_reason,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
  // Reports an external memory pressure event; either performs a major GC or
// completes incremental marking in order to free external resources.
void ReportExternalMemoryPressure();
@@ -1002,7 +954,9 @@ class Heap {
void ClearRecordedSlot(HeapObject* object, Object** slot);
void ClearRecordedSlotRange(Address start, Address end);
- bool HasRecordedSlot(HeapObject* object, Object** slot);
+#ifdef DEBUG
+ void VerifyClearedSlot(HeapObject* object, Object** slot);
+#endif
// ===========================================================================
// Incremental marking API. ==================================================
@@ -1084,10 +1038,13 @@ class Heap {
// Embedder heap tracer support. =============================================
// ===========================================================================
- LocalEmbedderHeapTracer* local_embedder_heap_tracer() {
+ LocalEmbedderHeapTracer* local_embedder_heap_tracer() const {
return local_embedder_heap_tracer_;
}
+
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+ EmbedderHeapTracer* GetEmbedderHeapTracer() const;
+
void TracePossibleWrapper(JSObject* js_object);
void RegisterExternallyReferencedObject(Object** object);
void SetEmbedderStackStateForNextFinalizaton(
@@ -1109,6 +1066,9 @@ class Heap {
// data and clearing the resource pointer.
inline void FinalizeExternalString(String* string);
+ static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
+ Heap* heap, Object** pointer);
+
// ===========================================================================
// Methods checking/returning the space of a given object/address. ===========
// ===========================================================================
@@ -1538,18 +1498,18 @@ class Heap {
struct StringTypeTable {
InstanceType type;
int size;
- RootListIndex index;
+ RootIndex index;
};
struct ConstantStringTable {
const char* contents;
- RootListIndex index;
+ RootIndex index;
};
struct StructTable {
InstanceType type;
int size;
- RootListIndex index;
+ RootIndex index;
};
struct GCCallbackTuple {
@@ -1584,13 +1544,8 @@ class Heap {
static const int kInitialFeedbackCapacity = 256;
- static const int kMaxScavengerTasks = 8;
-
Heap();
- static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
- Heap* heap, Object** pointer);
-
// Selects the proper allocation space based on the pretenuring decision.
static AllocationSpace SelectSpace(PretenureFlag pretenure) {
switch (pretenure) {
@@ -1609,7 +1564,7 @@ class Heap {
return 0;
}
-#define ROOT_ACCESSOR(type, name, camel_name) \
+#define ROOT_ACCESSOR(type, name, CamelName) \
inline void set_##name(type* value);
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
@@ -1618,22 +1573,12 @@ class Heap {
void set_current_gc_flags(int flags) {
current_gc_flags_ = flags;
- DCHECK(!ShouldFinalizeIncrementalMarking() ||
- !ShouldAbortIncrementalMarking());
}
inline bool ShouldReduceMemory() const {
return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
}
- inline bool ShouldAbortIncrementalMarking() const {
- return (current_gc_flags_ & kAbortIncrementalMarkingMask) != 0;
- }
-
- inline bool ShouldFinalizeIncrementalMarking() const {
- return (current_gc_flags_ & kFinalizeIncrementalMarkingMask) != 0;
- }
-
int NumberOfScavengeTasks();
// Checks whether a global GC is necessary
@@ -1733,6 +1678,8 @@ class Heap {
void CollectGarbageOnMemoryPressure();
+ void EagerlyFreeExternalMemory();
+
bool InvokeNearHeapLimitCallback();
void ComputeFastPromotionMode();
@@ -1841,6 +1788,12 @@ class Heap {
void CheckIneffectiveMarkCompact(size_t old_generation_size,
double mutator_utilization);
+ inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
// ===========================================================================
// Growing strategy. =========================================================
// ===========================================================================
@@ -1962,28 +1915,28 @@ class Heap {
void PrintRetainingPath(HeapObject* object, RetainingPathOption option);
// The amount of external memory registered through the API.
- int64_t external_memory_;
+ int64_t external_memory_ = 0;
// The limit when to trigger memory pressure from the API.
- int64_t external_memory_limit_;
+ int64_t external_memory_limit_ = kExternalAllocationSoftLimit;
// Caches the amount of external memory registered at the last MC.
- int64_t external_memory_at_last_mark_compact_;
+ int64_t external_memory_at_last_mark_compact_ = 0;
// The amount of memory that has been freed concurrently.
- std::atomic<intptr_t> external_memory_concurrently_freed_;
+ std::atomic<intptr_t> external_memory_concurrently_freed_{0};
// This can be calculated directly from a pointer to the heap; however, it is
// more expedient to get at the isolate directly from within Heap methods.
- Isolate* isolate_;
+ Isolate* isolate_ = nullptr;
- Object* roots_[kRootListLength];
+ RootsTable roots_;
// This table is accessed from builtin code compiled into the snapshot, and
// thus its offset from roots_ must remain static. This is verified in
// Isolate::Init() using runtime checks.
static constexpr int kRootsExternalReferenceTableOffset =
- kRootListLength * kPointerSize;
+ static_cast<int>(RootIndex::kRootListLength) * kPointerSize;
ExternalReferenceTable external_reference_table_;
// As external references above, builtins are accessed through an offset from
@@ -2000,25 +1953,28 @@ class Heap {
static constexpr int kRootRegisterAddressableEndOffset =
kRootsBuiltinsOffset + Builtins::builtin_count * kPointerSize;
- size_t code_range_size_;
- size_t max_semi_space_size_;
- size_t initial_semispace_size_;
- size_t max_old_generation_size_;
+ size_t code_range_size_ = 0;
+ size_t max_semi_space_size_ = 8 * (kPointerSize / 4) * MB;
+ size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
+ size_t max_old_generation_size_ = 700ul * (kPointerSize / 4) * MB;
size_t initial_max_old_generation_size_;
size_t initial_old_generation_size_;
- bool old_generation_size_configured_;
- size_t maximum_committed_;
+ bool old_generation_size_configured_ = false;
+ size_t maximum_committed_ = 0;
+
+ // Backing store bytes (array buffers and external strings).
+ std::atomic<size_t> backing_store_bytes_{0};
// For keeping track of how much data has survived
// scavenge since last new space expansion.
- size_t survived_since_last_expansion_;
+ size_t survived_since_last_expansion_ = 0;
// ... and since the last scavenge.
- size_t survived_last_scavenge_;
+ size_t survived_last_scavenge_ = 0;
// This is not the depth of nested AlwaysAllocateScope's but rather a single
// count, as scopes can be acquired from multiple tasks (read: threads).
- std::atomic<size_t> always_allocate_scope_count_;
+ std::atomic<size_t> always_allocate_scope_count_{0};
// Stores the memory pressure level that set by MemoryPressureNotification
// and reset by a mark-compact garbage collection.
@@ -2028,74 +1984,75 @@ class Heap {
near_heap_limit_callbacks_;
// For keeping track of context disposals.
- int contexts_disposed_;
+ int contexts_disposed_ = 0;
// The length of the retained_maps array at the time of context disposal.
// This separates maps in the retained_maps array that were created before
// and after context disposal.
- int number_of_disposed_maps_;
-
- NewSpace* new_space_;
- OldSpace* old_space_;
- CodeSpace* code_space_;
- MapSpace* map_space_;
- LargeObjectSpace* lo_space_;
- NewLargeObjectSpace* new_lo_space_;
- ReadOnlySpace* read_only_space_;
+ int number_of_disposed_maps_ = 0;
+
+ NewSpace* new_space_ = nullptr;
+ OldSpace* old_space_ = nullptr;
+ CodeSpace* code_space_ = nullptr;
+ MapSpace* map_space_ = nullptr;
+ LargeObjectSpace* lo_space_ = nullptr;
+ NewLargeObjectSpace* new_lo_space_ = nullptr;
+ ReadOnlySpace* read_only_space_ = nullptr;
// Map from the space id to the space.
Space* space_[LAST_SPACE + 1];
// Determines whether code space is write-protected. This is essentially a
// race-free copy of the {FLAG_write_protect_code_memory} flag.
- bool write_protect_code_memory_;
+ bool write_protect_code_memory_ = false;
// Holds the number of open CodeSpaceMemoryModificationScopes.
- uintptr_t code_space_memory_modification_scope_depth_;
+ uintptr_t code_space_memory_modification_scope_depth_ = 0;
+
+ HeapState gc_state_ = NOT_IN_GC;
- HeapState gc_state_;
- int gc_post_processing_depth_;
+ int gc_post_processing_depth_ = 0;
// Returns the amount of external memory registered since last global gc.
uint64_t PromotedExternalMemorySize();
// How many "runtime allocations" happened.
- uint32_t allocations_count_;
+ uint32_t allocations_count_ = 0;
// Running hash over allocations performed.
- uint32_t raw_allocations_hash_;
+ uint32_t raw_allocations_hash_ = 0;
// Starts marking when stress_marking_percentage_% of the marking start limit
// is reached.
- int stress_marking_percentage_;
+ int stress_marking_percentage_ = 0;
// Observer that causes more frequent checks for reached incremental marking
// limit.
- AllocationObserver* stress_marking_observer_;
+ AllocationObserver* stress_marking_observer_ = nullptr;
// Observer that can cause early scavenge start.
- StressScavengeObserver* stress_scavenge_observer_;
+ StressScavengeObserver* stress_scavenge_observer_ = nullptr;
- bool allocation_step_in_progress_;
+ bool allocation_step_in_progress_ = false;
  // The maximum percent of the marking limit reached without causing marking.
  // This is tracked when specifying --fuzzer-gc-analysis.
- double max_marking_limit_reached_;
+ double max_marking_limit_reached_ = 0.0;
// How many mark-sweep collections happened.
- unsigned int ms_count_;
+ unsigned int ms_count_ = 0;
// How many gc happened.
- unsigned int gc_count_;
+ unsigned int gc_count_ = 0;
// The number of Mark-Compact garbage collections that are considered as
// ineffective. See IsIneffectiveMarkCompact() predicate.
- int consecutive_ineffective_mark_compacts_;
+ int consecutive_ineffective_mark_compacts_ = 0;
static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
- uintptr_t mmap_region_base_;
+ uintptr_t mmap_region_base_ = 0;
// For post mortem debugging.
- int remembered_unmapped_pages_index_;
+ int remembered_unmapped_pages_index_ = 0;
Address remembered_unmapped_pages_[kRememberedUnmappedPages];
// Limit that triggers a global GC on the next (normally caused) GC. This
@@ -2106,7 +2063,7 @@ class Heap {
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
- bool inline_allocation_disabled_;
+ bool inline_allocation_disabled_ = false;
// Weak list heads, threaded through the objects.
// List heads are initialized lazily and contain the undefined_value at start.
@@ -2120,71 +2077,62 @@ class Heap {
int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
- GCTracer* tracer_;
-
- size_t promoted_objects_size_;
- double promotion_ratio_;
- double promotion_rate_;
- size_t semi_space_copied_object_size_;
- size_t previous_semi_space_copied_object_size_;
- double semi_space_copied_rate_;
- int nodes_died_in_new_space_;
- int nodes_copied_in_new_space_;
- int nodes_promoted_;
+ size_t promoted_objects_size_ = 0;
+ double promotion_ratio_ = 0.0;
+ double promotion_rate_ = 0.0;
+ size_t semi_space_copied_object_size_ = 0;
+ size_t previous_semi_space_copied_object_size_ = 0;
+ double semi_space_copied_rate_ = 0.0;
+ int nodes_died_in_new_space_ = 0;
+ int nodes_copied_in_new_space_ = 0;
+ int nodes_promoted_ = 0;
// This is the pretenuring trigger for allocation sites that are in maybe
// tenure state. When we switched to the maximum new space size we deoptimize
// the code that belongs to the allocation site and derive the lifetime
// of the allocation site.
- unsigned int maximum_size_scavenges_;
+ unsigned int maximum_size_scavenges_ = 0;
// Total time spent in GC.
double total_gc_time_ms_;
// Last time an idle notification happened.
- double last_idle_notification_time_;
+ double last_idle_notification_time_ = 0.0;
// Last time a garbage collection happened.
- double last_gc_time_;
-
- MarkCompactCollector* mark_compact_collector_;
- MinorMarkCompactCollector* minor_mark_compact_collector_;
-
- ArrayBufferCollector* array_buffer_collector_;
-
- MemoryAllocator* memory_allocator_;
-
- StoreBuffer* store_buffer_;
-
- HeapController* heap_controller_;
-
- IncrementalMarking* incremental_marking_;
- ConcurrentMarking* concurrent_marking_;
-
- GCIdleTimeHandler* gc_idle_time_handler_;
-
- MemoryReducer* memory_reducer_;
-
- ObjectStats* live_object_stats_;
- ObjectStats* dead_object_stats_;
-
- ScavengeJob* scavenge_job_;
- base::Semaphore parallel_scavenge_semaphore_;
-
- AllocationObserver* idle_scavenge_observer_;
+ double last_gc_time_ = 0.0;
+
+ GCTracer* tracer_ = nullptr;
+ MarkCompactCollector* mark_compact_collector_ = nullptr;
+ MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
+ ScavengerCollector* scavenger_collector_ = nullptr;
+ ArrayBufferCollector* array_buffer_collector_ = nullptr;
+ MemoryAllocator* memory_allocator_ = nullptr;
+ StoreBuffer* store_buffer_ = nullptr;
+ HeapController* heap_controller_ = nullptr;
+ IncrementalMarking* incremental_marking_ = nullptr;
+ ConcurrentMarking* concurrent_marking_ = nullptr;
+ GCIdleTimeHandler* gc_idle_time_handler_ = nullptr;
+ MemoryReducer* memory_reducer_ = nullptr;
+ ObjectStats* live_object_stats_ = nullptr;
+ ObjectStats* dead_object_stats_ = nullptr;
+ ScavengeJob* scavenge_job_ = nullptr;
+ AllocationObserver* idle_scavenge_observer_ = nullptr;
+ LocalEmbedderHeapTracer* local_embedder_heap_tracer_ = nullptr;
+ StrongRootsList* strong_roots_list_ = nullptr;
// This counter is increased before each GC and never reset.
// To account for the bytes allocated since the last GC, use the
// NewSpaceAllocationCounter() function.
- size_t new_space_allocation_counter_;
+ size_t new_space_allocation_counter_ = 0;
// This counter is increased before each GC and never reset. To
// account for the bytes allocated since the last GC, use the
// OldGenerationAllocationCounter() function.
- size_t old_generation_allocation_counter_at_last_gc_;
+ size_t old_generation_allocation_counter_at_last_gc_ = 0;
// The size of objects in old generation after the last MarkCompact GC.
- size_t old_generation_size_at_last_gc_;
+ size_t old_generation_size_at_last_gc_ = 0;
// The feedback storage is used to store allocation sites (keys) and how often
// they have been visited (values) by finding a memento behind an object. The
@@ -2196,20 +2144,20 @@ class Heap {
char trace_ring_buffer_[kTraceRingBufferSize];
// Used as boolean.
- uint8_t is_marking_flag_;
+ uint8_t is_marking_flag_ = 0;
// If it's not full then the data is from 0 to ring_buffer_end_. If it's
// full then the data is from ring_buffer_end_ to the end of the buffer and
// from 0 to ring_buffer_end_.
- bool ring_buffer_full_;
- size_t ring_buffer_end_;
+ bool ring_buffer_full_ = false;
+ size_t ring_buffer_end_ = 0;
// Flag is set when the heap has been configured. The heap can be repeatedly
// configured through the API until it is set up.
- bool configured_;
+ bool configured_ = false;
// Currently set GC flags that are respected by all GC components.
- int current_gc_flags_;
+ int current_gc_flags_ = Heap::kNoGCFlags;
// Currently set GC callback flags that are used to pass information between
// the embedder and V8's GC.
@@ -2219,34 +2167,30 @@ class Heap {
base::Mutex relocation_mutex_;
- int gc_callbacks_depth_;
-
- bool deserialization_complete_;
+ int gc_callbacks_depth_ = 0;
- StrongRootsList* strong_roots_list_;
+ bool deserialization_complete_ = false;
// The depth of HeapIterator nestings.
- int heap_iterator_depth_;
+ int heap_iterator_depth_ = 0;
- LocalEmbedderHeapTracer* local_embedder_heap_tracer_;
-
- bool fast_promotion_mode_;
+ bool fast_promotion_mode_ = false;
// Used for testing purposes.
- bool force_oom_;
- bool delay_sweeper_tasks_for_testing_;
+ bool force_oom_ = false;
+ bool delay_sweeper_tasks_for_testing_ = false;
- HeapObject* pending_layout_change_object_;
+ HeapObject* pending_layout_change_object_ = nullptr;
base::Mutex unprotected_memory_chunks_mutex_;
std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
- bool unprotected_memory_chunks_registry_enabled_;
+ bool unprotected_memory_chunks_registry_enabled_ = false;
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
  // remaining until the next failure and garbage collection.
- int allocation_timeout_;
+ int allocation_timeout_ = 0;
#endif // V8_ENABLE_ALLOCATION_TIMEOUT
std::map<HeapObject*, HeapObject*> retainer_;
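The bulk of the remaining heap.h changes convert constructor-initialized fields to C++11 in-class default member initializers, keeping each default next to its declaration so every constructor inherits it automatically. A small before/after sketch of the pattern:

```cpp
// Before: defaults live in a constructor initializer list, far from the
// declarations, and must be repeated in every constructor.
class HeapBefore {
 public:
  HeapBefore() : isolate_(nullptr), gc_count_(0) {}

 private:
  void* isolate_;
  unsigned gc_count_;
};

// After: each default sits on the field itself; constructors only mention
// fields whose initial value actually varies.
class HeapAfter {
 private:
  void* isolate_ = nullptr;
  unsigned gc_count_ = 0;
};
```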
@@ -2262,6 +2206,7 @@ class Heap {
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
+ friend class ArrayBufferCollector;
friend class ConcurrentMarking;
friend class EphemeronHashTableMarkingTask;
friend class GCCallbacksScope;
@@ -2283,6 +2228,8 @@ class Heap {
friend class Page;
friend class PagedSpace;
friend class Scavenger;
+ friend class ScavengerCollector;
+ friend class Space;
friend class StoreBuffer;
friend class Sweeper;
friend class heap::TestMemoryAllocatorScope;
@@ -2297,7 +2244,8 @@ class Heap {
friend class heap::HeapTester;
FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
-
+ FRIEND_TEST(HeapTest, ExternalLimitDefault);
+ FRIEND_TEST(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling);
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2417,7 +2365,7 @@ class VerifySmisVisitor : public RootVisitor {
// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old space, code space and optionally read only space. Returns each
// space in turn, and null when it is done.
-class V8_EXPORT_PRIVATE PagedSpaces BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE PagedSpaces {
public:
enum class SpacesSpecifier { kSweepablePagedSpaces, kAllPagedSpaces };
@@ -2460,7 +2408,7 @@ class SpaceIterator : public Malloced {
// nodes filtering uses GC marks, it can't be used during MS/MC GC
// phases. Also, it is forbidden to interrupt iteration in this mode,
// as this will leave heap objects marked (and thus, unusable).
-class HeapIterator BASE_EMBEDDED {
+class HeapIterator {
public:
enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
@@ -2487,7 +2435,7 @@ class HeapIterator BASE_EMBEDDED {
// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
public:
- virtual ~WeakObjectRetainer() {}
+ virtual ~WeakObjectRetainer() = default;
// Return whether this object should be retained. If nullptr is returned the
// object has no references. Otherwise the address of the retained object
@@ -2503,7 +2451,7 @@ class AllocationObserver {
: step_size_(step_size), bytes_to_next_step_(step_size) {
DCHECK_LE(kPointerSize, step_size);
}
- virtual ~AllocationObserver() {}
+ virtual ~AllocationObserver() = default;
// Called each time the observed space does an allocation step. This may be
// more frequently than the step_size we are monitoring (e.g. when there are
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 19d6b22e4d..e19d62f4d4 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -6,6 +6,8 @@
#define V8_HEAP_INCREMENTAL_MARKING_INL_H_
#include "src/heap/incremental-marking.h"
+
+#include "src/heap/mark-compact-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/maybe-object.h"
@@ -13,6 +15,23 @@
namespace v8 {
namespace internal {
+void IncrementalMarking::TransferColor(HeapObject* from, HeapObject* to) {
+ if (atomic_marking_state()->IsBlack(to)) {
+ DCHECK(black_allocation());
+ return;
+ }
+
+ DCHECK(atomic_marking_state()->IsWhite(to));
+ if (atomic_marking_state()->IsGrey(from)) {
+ bool success = atomic_marking_state()->WhiteToGrey(to);
+ DCHECK(success);
+ USE(success);
+ } else if (atomic_marking_state()->IsBlack(from)) {
+ bool success = atomic_marking_state()->WhiteToBlack(to);
+ DCHECK(success);
+ USE(success);
+ }
+}
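TransferColor (moved here from the header) preserves the tri-color invariant when an object's mark must follow it, e.g. on left-trimming: the destination is expected white and inherits grey or black from the source, and an already-black destination is legal only under black allocation. A toy model of the transitions, with none of V8's mark-bitmap machinery:

```cpp
#include <cassert>

enum class Color { kWhite, kGrey, kBlack };

// Tri-color transfer as in TransferColor above: 'to' starts white (or is
// pre-blackened by black allocation) and takes over 'from's progress.
void TransferColor(Color from, Color& to, bool black_allocation) {
  if (to == Color::kBlack) {
    assert(black_allocation);  // only black allocation pre-blackens
    return;
  }
  assert(to == Color::kWhite);
  if (from == Color::kGrey) {
    to = Color::kGrey;   // still queued: body not yet scanned
  } else if (from == Color::kBlack) {
    to = Color::kBlack;  // fully scanned; barriers cover later writes
  }
}
```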
void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
Object* value) {
@@ -30,7 +49,7 @@ void IncrementalMarking::RecordMaybeWeakWrite(HeapObject* obj,
// When writing a weak reference, treat it as strong for the purposes of the
// marking barrier.
HeapObject* heap_object;
- if (IsMarking() && value->ToStrongOrWeakHeapObject(&heap_object)) {
+ if (IsMarking() && value->GetHeapObject(&heap_object)) {
RecordWriteSlow(obj, reinterpret_cast<HeapObjectReference**>(slot),
heap_object);
}
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 7583aaaadf..96eff0508e 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -24,8 +24,9 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
if (!task_pending_ && !heap->IsTearingDown()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
task_pending_ = true;
- auto task = new Task(heap->isolate(), this);
- V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
+ auto taskrunner =
+ V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
+ taskrunner->PostTask(base::make_unique<Task>(heap->isolate(), this));
}
}
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index a58d25fff4..239f416eaf 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -458,13 +458,6 @@ void IncrementalMarking::FinishBlackAllocation() {
}
}
-void IncrementalMarking::AbortBlackAllocation() {
- if (FLAG_trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Black allocation aborted\n");
- }
-}
-
void IncrementalMarking::MarkRoots() {
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
@@ -494,7 +487,6 @@ void IncrementalMarking::RetainMaps() {
// - memory pressure (reduce_memory_footprint_),
// - GC is requested by tests or dev-tools (abort_incremental_marking_).
bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
- heap()->ShouldAbortIncrementalMarking() ||
FLAG_retain_maps_for_n_gc == 0;
WeakArrayList* retained_maps = heap()->retained_maps();
int length = retained_maps->length();
@@ -505,10 +497,10 @@ void IncrementalMarking::RetainMaps() {
for (int i = 0; i < length; i += 2) {
MaybeObject* value = retained_maps->Get(i);
HeapObject* map_heap_object;
- if (!value->ToWeakHeapObject(&map_heap_object)) {
+ if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
continue;
}
- int age = Smi::ToInt(retained_maps->Get(i + 1)->ToSmi());
+ int age = Smi::ToInt(retained_maps->Get(i + 1)->cast<Smi>());
int new_age;
Map* map = Map::cast(map_heap_object);
if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
@@ -801,6 +793,32 @@ intptr_t IncrementalMarking::ProcessMarkingWorklist(
return bytes_processed;
}
+void IncrementalMarking::EmbedderStep(double duration_ms) {
+ constexpr int kObjectsToProcessBeforeInterrupt = 100;
+
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
+
+ const double deadline =
+ heap_->MonotonicallyIncreasingTimeInMs() + duration_ms;
+
+ HeapObject* object;
+ int cnt = 0;
+ while (marking_worklist()->embedder()->Pop(0, &object)) {
+ heap_->TracePossibleWrapper(JSObject::cast(object));
+ if (++cnt == kObjectsToProcessBeforeInterrupt) {
+ cnt = 0;
+ if (heap_->MonotonicallyIncreasingTimeInMs() > deadline) {
+ break;
+ }
+ }
+ }
+
+ heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+ if (!heap_->local_embedder_heap_tracer()
+ ->ShouldFinalizeIncrementalMarking()) {
+ heap_->local_embedder_heap_tracer()->Trace(deadline);
+ }
+}
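The new EmbedderStep bounds wrapper tracing by wall-clock time but consults the clock only every kObjectsToProcessBeforeInterrupt pops, trading deadline precision for fewer timer reads. The shape generalizes to any drainable queue; a sketch assuming a steady-clock deadline:

```cpp
#include <chrono>
#include <deque>

// Deadline-bounded draining: process in batches of batch_size and re-check
// the clock only at batch boundaries, as EmbedderStep does above.
template <typename T, typename Fn>
void DrainUntilDeadline(std::deque<T>& queue, Fn process,
                        std::chrono::steady_clock::time_point deadline,
                        int batch_size = 100) {
  int count = 0;
  while (!queue.empty()) {
    process(queue.front());
    queue.pop_front();
    if (++count == batch_size) {
      count = 0;
      if (std::chrono::steady_clock::now() > deadline) break;
    }
  }
}
```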
void IncrementalMarking::Hurry() {
// A scavenge may have pushed new objects on the marking deque (due to black
@@ -930,14 +948,7 @@ double IncrementalMarking::AdvanceIncrementalMarking(
heap_->local_embedder_heap_tracer()->InUse();
do {
if (incremental_wrapper_tracing && trace_wrappers_toggle_) {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
- const double wrapper_deadline =
- heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
- if (!heap_->local_embedder_heap_tracer()
- ->ShouldFinalizeIncrementalMarking()) {
- heap_->local_embedder_heap_tracer()->Trace(wrapper_deadline);
- }
+ EmbedderStep(kStepSizeInMs);
} else {
Step(step_size_in_bytes, completion_action, step_origin);
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 0fb5e11651..ee774c230f 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -100,23 +100,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void NotifyLeftTrimming(HeapObject* from, HeapObject* to);
- V8_INLINE void TransferColor(HeapObject* from, HeapObject* to) {
- if (atomic_marking_state()->IsBlack(to)) {
- DCHECK(black_allocation());
- return;
- }
-
- DCHECK(atomic_marking_state()->IsWhite(to));
- if (atomic_marking_state()->IsGrey(from)) {
- bool success = atomic_marking_state()->WhiteToGrey(to);
- DCHECK(success);
- USE(success);
- } else if (atomic_marking_state()->IsBlack(from)) {
- bool success = atomic_marking_state()->WhiteToBlack(to);
- DCHECK(success);
- USE(success);
- }
- }
+ V8_INLINE void TransferColor(HeapObject* from, HeapObject* to);
State state() const {
DCHECK(state_ == STOPPED || FLAG_incremental_marking);
@@ -193,6 +177,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t Step(size_t bytes_to_process, CompletionAction action,
StepOrigin step_origin,
WorklistToProcess worklist_to_process = WorklistToProcess::kAll);
+ void EmbedderStep(double duration);
inline void RestartIfNotMarking();
@@ -248,8 +233,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
}
}
- void AbortBlackAllocation();
-
MarkCompactCollector::MarkingWorklist* marking_worklist() const {
return marking_worklist_;
}
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
index e909ef69d7..b536ccc5d4 100644
--- a/deps/v8/src/heap/item-parallel-job.cc
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -58,7 +58,7 @@ ItemParallelJob::~ItemParallelJob() {
}
}
-void ItemParallelJob::Run(std::shared_ptr<Counters> async_counters) {
+void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
DCHECK_GT(tasks_.size(), 0);
const size_t num_items = items_.size();
const size_t num_tasks = tasks_.size();
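Run now takes the counters as const std::shared_ptr<>&, which skips an atomic reference-count bump per call; that is safe here because the callee only reads through the pointer for the duration of the call and keeps no copy. A sketch of the distinction:

```cpp
#include <memory>

struct Counters {
  int gc_events = 0;
};

// By value: copying the shared_ptr costs an atomic increment + decrement.
void RunByValue(std::shared_ptr<Counters> c) { ++c->gc_events; }

// By const reference: no refcount traffic; valid as long as the caller's
// shared_ptr outlives the call and the callee stores no copy.
void RunByRef(const std::shared_ptr<Counters>& c) { ++c->gc_events; }
```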
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 1ad9d22fa4..15351d5d84 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
class V8_EXPORT_PRIVATE Task : public CancelableTask {
public:
explicit Task(Isolate* isolate);
- virtual ~Task();
+ ~Task() override;
virtual void RunInParallel() = 0;
@@ -137,7 +137,7 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
// Runs this job. Reporting metrics in a thread-safe manner to
// |async_counters|.
- void Run(std::shared_ptr<Counters> async_counters);
+ void Run(const std::shared_ptr<Counters>& async_counters);
private:
std::vector<Item*> items_;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 466a89080b..449ca43e50 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -14,6 +14,28 @@
namespace v8 {
namespace internal {
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlack(
+ HeapObject* obj) {
+ MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
+ MarkBit markbit = MarkBitFrom(p, obj->address());
+ if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
+ static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj->Size());
+ return true;
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::WhiteToGrey(
+ HeapObject* obj) {
+ return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::WhiteToBlack(
+ HeapObject* obj) {
+ return WhiteToGrey(obj) && GreyToBlack(obj);
+}
+
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
MarkingVisitor<fixed_array_mode, retaining_path_mode,
@@ -26,16 +48,6 @@ MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitAllocationSite(Map* map,
- AllocationSite* object) {
- int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
- AllocationSite::BodyDescriptorWeak::IterateBody(map, object, size, this);
- return size;
-}
-
-template <FixedArrayVisitationMode fixed_array_mode,
- TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitBytecodeArray(Map* map,
BytecodeArray* array) {
int size = BytecodeArray::BodyDescriptor::SizeOf(map, array);
@@ -46,15 +58,6 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
- VisitCodeDataContainer(Map* map, CodeDataContainer* object) {
- int size = CodeDataContainer::BodyDescriptorWeak::SizeOf(map, object);
- CodeDataContainer::BodyDescriptorWeak::IterateBody(map, object, size, this);
- return size;
-}
-
-template <FixedArrayVisitationMode fixed_array_mode,
- TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitFixedArray(Map* map,
FixedArray* object) {
@@ -65,25 +68,48 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSApiObject(Map* map, JSObject* object) {
+template <typename T>
+V8_INLINE int
+MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitEmbedderTracingSubclass(Map* map,
+ T* object) {
if (heap_->local_embedder_heap_tracer()->InUse()) {
- DCHECK(object->IsJSObject());
heap_->TracePossibleWrapper(object);
}
- int size = JSObject::BodyDescriptor::SizeOf(map, object);
- JSObject::BodyDescriptor::IterateBody(map, object, size, this);
+ int size = T::BodyDescriptor::SizeOf(map, object);
+ T::BodyDescriptor::IterateBody(map, object, size, this);
return size;
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSFunction(Map* map,
- JSFunction* object) {
- int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
- JSFunction::BodyDescriptorWeak::IterateBody(map, object, size, this);
- return size;
+ MarkingState>::VisitJSApiObject(Map* map, JSObject* object) {
+ return VisitEmbedderTracingSubclass(map, object);
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitJSArrayBuffer(Map* map,
+ JSArrayBuffer* object) {
+ return VisitEmbedderTracingSubclass(map, object);
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitJSDataView(Map* map,
+ JSDataView* object) {
+ return VisitEmbedderTracingSubclass(map, object);
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitJSTypedArray(Map* map,
+ JSTypedArray* object) {
+ return VisitEmbedderTracingSubclass(map, object);
}
template <FixedArrayVisitationMode fixed_array_mode,
@@ -141,16 +167,6 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitNativeContext(Map* map,
- Context* context) {
- int size = Context::BodyDescriptorWeak::SizeOf(map, context);
- Context::BodyDescriptorWeak::IterateBody(map, context, size, this);
- return size;
-}
-
-template <FixedArrayVisitationMode fixed_array_mode,
- TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitTransitionArray(Map* map,
TransitionArray* array) {
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
@@ -175,11 +191,11 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitPointer(HeapObject* host,
MaybeObject** p) {
HeapObject* target_object;
- if ((*p)->ToStrongHeapObject(&target_object)) {
+ if ((*p)->GetHeapObjectIfStrong(&target_object)) {
collector_->RecordSlot(host, reinterpret_cast<HeapObjectReference**>(p),
target_object);
MarkObject(host, target_object);
- } else if ((*p)->ToWeakHeapObject(&target_object)) {
+ } else if ((*p)->GetHeapObjectIfWeak(&target_object)) {
if (marking_state()->IsBlackOrGrey(target_object)) {
// Weak references with live values are directly processed here to reduce
// the processing time of weak cells during the main GC pause.
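
The hunks above collapse four copies of the same visit body (JSApiObject, JSArrayBuffer, JSDataView, JSTypedArray) into one templated VisitEmbedderTracingSubclass helper that first hands the object to the embedder tracer and then iterates its body via T::BodyDescriptor. A minimal standalone sketch of the pattern follows; the Map and BodyDescriptor types here are simplified stand-ins, not V8's real definitions.

#include <cstdio>

// Simplified stand-ins for V8's Map and per-type body descriptors
// (assumption: not the real V8 types).
struct Map {};

struct JSArrayBufferLike {
  // Every heap object type exposes a BodyDescriptor with SizeOf/IterateBody.
  struct BodyDescriptor {
    static int SizeOf(Map*, JSArrayBufferLike*) { return 64; }
    template <typename Visitor>
    static void IterateBody(Map*, JSArrayBufferLike*, int, Visitor*) {}
  };
};

class MarkingVisitorSketch {
 public:
  // One templated helper replaces four identical bodies: notify the embedder
  // tracer first, then iterate the object's body via T::BodyDescriptor.
  template <typename T>
  int VisitEmbedderTracingSubclass(Map* map, T* object) {
    if (embedder_tracing_in_use_) TracePossibleWrapper(object);
    int size = T::BodyDescriptor::SizeOf(map, object);
    T::BodyDescriptor::IterateBody(map, object, size, this);
    return size;
  }

  // Each typed visit method shrinks to a one-line forward.
  int VisitJSArrayBuffer(Map* map, JSArrayBufferLike* object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

 private:
  void TracePossibleWrapper(void*) { std::puts("handed to embedder tracer"); }
  bool embedder_tracing_in_use_ = true;
};

int main() {
  Map map;
  JSArrayBufferLike buffer;
  MarkingVisitorSketch visitor;
  return visitor.VisitJSArrayBuffer(&map, &buffer) == 64 ? 0 : 1;
}
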
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index dea105943a..6f46bc57bf 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -17,7 +17,7 @@
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
-#include "src/heap/incremental-marking.h"
+#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/local-allocator-inl.h"
@@ -193,7 +193,7 @@ class FullMarkingVerifier : public MarkingVerifier {
void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
- if ((*current)->ToStrongHeapObject(&object)) {
+ if ((*current)->GetHeapObjectIfStrong(&object)) {
CHECK(marking_state_->IsBlackOrGrey(object));
}
}
@@ -309,7 +309,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
- if ((*current)->ToStrongHeapObject(&object)) {
+ if ((*current)->GetHeapObjectIfStrong(&object)) {
if (Heap::InNewSpace(object)) {
CHECK(Heap::InToSpace(object));
}
@@ -514,27 +514,6 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
#endif // VERIFY_HEAP
-void MarkCompactCollector::ClearMarkbitsInPagedSpace(PagedSpace* space) {
- for (Page* p : *space) {
- non_atomic_marking_state()->ClearLiveness(p);
- }
-}
-
-void MarkCompactCollector::ClearMarkbitsInNewSpace(NewSpace* space) {
- for (Page* p : *space) {
- non_atomic_marking_state()->ClearLiveness(p);
- }
-}
-
-
-void MarkCompactCollector::ClearMarkbits() {
- ClearMarkbitsInPagedSpace(heap_->code_space());
- ClearMarkbitsInPagedSpace(heap_->map_space());
- ClearMarkbitsInPagedSpace(heap_->old_space());
- ClearMarkbitsInNewSpace(heap_->new_space());
- heap_->lo_space()->ClearMarkingStateOfLiveObjects();
-}
-
void MarkCompactCollector::EnsureSweepingCompleted() {
if (!sweeper()->sweeping_in_progress()) return;
@@ -773,20 +752,6 @@ void MarkCompactCollector::Prepare() {
heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();
- // Clear marking bits if incremental marking is aborted.
- if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
- heap()->incremental_marking()->Stop();
- heap()->incremental_marking()->AbortBlackAllocation();
- FinishConcurrentMarking(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
- heap()->incremental_marking()->Deactivate();
- ClearMarkbits();
- AbortWeakObjects();
- AbortCompaction();
- heap_->local_embedder_heap_tracer()->AbortTracing();
- marking_worklist()->Clear();
- was_marked_incrementally_ = false;
- }
-
if (!was_marked_incrementally_) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
heap_->local_embedder_heap_tracer()->TracePrologue();
@@ -1024,7 +989,7 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
MarkCompactCollector::NonAtomicMarkingState* marking_state)
: marking_state_(marking_state) {}
- virtual Object* RetainAs(Object* object) {
+ Object* RetainAs(Object* object) override {
HeapObject* heap_object = HeapObject::cast(object);
DCHECK(!marking_state_->IsGrey(heap_object));
if (marking_state_->IsBlack(heap_object)) {
@@ -1112,7 +1077,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
protected:
inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
Address slot) {
- if (value->IsStrongOrWeakHeapObject()) {
+ if (value->IsStrongOrWeak()) {
Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
@@ -1133,7 +1098,7 @@ class MigrationObserver {
public:
explicit MigrationObserver(Heap* heap) : heap_(heap) {}
- virtual ~MigrationObserver() {}
+ virtual ~MigrationObserver() = default;
virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
int size) = 0;
@@ -1157,7 +1122,7 @@ class ProfilingMigrationObserver final : public MigrationObserver {
class HeapObjectVisitor {
public:
- virtual ~HeapObjectVisitor() {}
+ virtual ~HeapObjectVisitor() = default;
virtual bool Visit(HeapObject* object, int size) = 0;
};
@@ -1387,7 +1352,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
}
}
- inline bool Visit(HeapObject* object, int size) {
+ inline bool Visit(HeapObject* object, int size) override {
if (mode == NEW_TO_NEW) {
heap_->UpdateAllocationSite(object->map(), object,
local_pretenuring_feedback_);
@@ -1429,7 +1394,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
- inline bool Visit(HeapObject* object, int size) {
+ inline bool Visit(HeapObject* object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
object->IterateBodyFast(&visitor);
return true;
@@ -1633,6 +1598,10 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
void MarkCompactCollector::PerformWrapperTracing() {
if (heap_->local_embedder_heap_tracer()->InUse()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
+ HeapObject* object;
+ while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
+ heap_->TracePossibleWrapper(JSObject::cast(object));
+ }
heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
heap_->local_embedder_heap_tracer()->Trace(
std::numeric_limits<double>::infinity());
@@ -1788,10 +1757,14 @@ void MarkCompactCollector::MarkLiveObjects() {
// through ephemerons.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPERS);
- while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone()) {
+ do {
+ // PerformWrapperTracing() also empties the work items collected by
+ // concurrent markers. As a result, this call needs to happen at least

+ // once.
PerformWrapperTracing();
ProcessMarkingWorklist();
- }
+ } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone());
+ DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
}
@@ -1838,6 +1811,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
heap()->local_embedder_heap_tracer()->TraceEpilogue();
}
+ DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
}
@@ -2092,7 +2066,7 @@ void MarkCompactCollector::ClearWeakReferences() {
while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
HeapObject* value;
HeapObjectReference** location = slot.second;
- if ((*location)->ToWeakHeapObject(&value)) {
+ if ((*location)->GetHeapObjectIfWeak(&value)) {
DCHECK(!value->IsCell());
if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
// The value of the weak reference is alive.
@@ -2174,9 +2148,9 @@ template <AccessMode access_mode>
static inline SlotCallbackResult UpdateSlot(MaybeObject** slot) {
MaybeObject* obj = base::AsAtomicPointer::Relaxed_Load(slot);
HeapObject* heap_obj;
- if (obj->ToWeakHeapObject(&heap_obj)) {
+ if (obj->GetHeapObjectIfWeak(&heap_obj)) {
UpdateSlot<access_mode>(slot, obj, heap_obj, HeapObjectReferenceType::WEAK);
- } else if (obj->ToStrongHeapObject(&heap_obj)) {
+ } else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
return UpdateSlot<access_mode>(slot, obj, heap_obj,
HeapObjectReferenceType::STRONG);
}
@@ -2185,7 +2159,7 @@ static inline SlotCallbackResult UpdateSlot(MaybeObject** slot) {
template <AccessMode access_mode>
static inline SlotCallbackResult UpdateStrongSlot(MaybeObject** maybe_slot) {
- DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrongHeapObject());
+ DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrong());
Object** slot = reinterpret_cast<Object**>(maybe_slot);
Object* obj = base::AsAtomicPointer::Relaxed_Load(slot);
if (obj->IsHeapObject()) {
@@ -2248,8 +2222,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
private:
static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
MaybeObject** slot) {
- DCHECK(!(*slot)->IsWeakHeapObject());
- DCHECK(!(*slot)->IsClearedWeakHeapObject());
+ DCHECK(!(*slot)->IsWeakOrCleared());
return UpdateStrongSlotInternal(reinterpret_cast<Object**>(slot));
}
@@ -2274,9 +2247,11 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
String* new_string = String::cast(map_word.ToForwardingAddress());
if (new_string->IsExternalString()) {
- heap->ProcessMovedExternalString(
+ MemoryChunk::MoveExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString,
Page::FromAddress(reinterpret_cast<Address>(*p)),
- Page::FromHeapObject(new_string), ExternalString::cast(new_string));
+ Page::FromHeapObject(new_string),
+ ExternalString::cast(new_string)->ExternalPayloadSize());
}
return new_string;
}
@@ -2364,7 +2339,7 @@ class Evacuator : public Malloced {
duration_(0.0),
bytes_compacted_(0) {}
- virtual ~Evacuator() {}
+ virtual ~Evacuator() = default;
void EvacuatePage(Page* page);
@@ -2520,7 +2495,7 @@ void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
class PageEvacuationItem : public ItemParallelJob::Item {
public:
explicit PageEvacuationItem(Page* page) : page_(page) {}
- virtual ~PageEvacuationItem() {}
+ ~PageEvacuationItem() override = default;
Page* page() const { return page_; }
private:
@@ -2559,11 +2534,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
}
- const bool profiling =
- heap()->isolate()->is_profiling() ||
- heap()->isolate()->logger()->is_listening_to_code_events() ||
- heap()->isolate()->heap_profiler()->is_tracking_object_moves() ||
- heap()->has_heap_object_allocation_tracker();
+ const bool profiling = isolate()->LogObjectRelocation();
ProfilingMigrationObserver profiling_observer(heap());
const int wanted_num_tasks =
@@ -2642,7 +2613,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
- virtual Object* RetainAs(Object* object) {
+ Object* RetainAs(Object* object) override {
if (object->IsHeapObject()) {
HeapObject* heap_object = HeapObject::cast(object);
MapWord map_word = heap_object->map_word();
@@ -2817,7 +2788,7 @@ void MarkCompactCollector::Evacuate() {
class UpdatingItem : public ItemParallelJob::Item {
public:
- virtual ~UpdatingItem() {}
+ ~UpdatingItem() override = default;
virtual void Process() = 0;
};
@@ -2852,7 +2823,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
start_(start),
end_(end),
marking_state_(marking_state) {}
- virtual ~ToSpaceUpdatingItem() {}
+ ~ToSpaceUpdatingItem() override = default;
void Process() override {
if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
@@ -2906,7 +2877,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
marking_state_(marking_state),
chunk_(chunk),
updating_mode_(updating_mode) {}
- virtual ~RememberedSetUpdatingItem() {}
+ ~RememberedSetUpdatingItem() override = default;
void Process() override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
@@ -2921,7 +2892,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
HeapObject* heap_object;
- if (!(*slot)->ToStrongOrWeakHeapObject(&heap_object)) {
+ if (!(*slot)->GetHeapObject(&heap_object)) {
return REMOVE_SLOT;
}
if (Heap::InFromSpace(heap_object)) {
@@ -2931,7 +2902,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
reinterpret_cast<HeapObjectReference**>(slot),
map_word.ToForwardingAddress());
}
- bool success = (*slot)->ToStrongOrWeakHeapObject(&heap_object);
+ bool success = (*slot)->GetHeapObject(&heap_object);
USE(success);
DCHECK(success);
// If the object was in from space before and is after executing the
@@ -3054,7 +3025,7 @@ class GlobalHandlesUpdatingItem : public UpdatingItem {
global_handles_(global_handles),
start_(start),
end_(end) {}
- virtual ~GlobalHandlesUpdatingItem() {}
+ ~GlobalHandlesUpdatingItem() override = default;
void Process() override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
@@ -3081,7 +3052,7 @@ class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
explicit ArrayBufferTrackerUpdatingItem(Page* page, EvacuationState state)
: page_(page), state_(state) {}
- virtual ~ArrayBufferTrackerUpdatingItem() {}
+ ~ArrayBufferTrackerUpdatingItem() override = default;
void Process() override {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
@@ -3255,7 +3226,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run(isolate()->async_counters());
- heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
+ heap()->array_buffer_collector()->FreeAllocations();
}
}
@@ -3426,8 +3397,9 @@ void MarkCompactCollector::MarkingWorklist::PrintWorklist(
count[obj->map()->instance_type()]++;
});
std::vector<std::pair<int, InstanceType>> rank;
- for (auto i : count) {
- rank.push_back(std::make_pair(i.second, i.first));
+ rank.reserve(count.size());
+ for (const auto& i : count) {
+ rank.emplace_back(i.second, i.first);
}
std::map<InstanceType, std::string> instance_type_name;
#define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
@@ -3486,7 +3458,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
// Minor MC treats weak references as strong.
- if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ if ((*current)->GetHeapObject(&object)) {
if (!Heap::InNewSpace(object)) {
continue;
}
@@ -3524,7 +3496,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
- if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ if ((*current)->GetHeapObject(&object)) {
CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
}
}
@@ -3593,7 +3565,7 @@ class YoungGenerationMarkingVisitor final
HeapObject* target_object;
// Treat weak references as strong. TODO(marja): Proper weakness handling
// for minor-mcs.
- if (target->ToStrongOrWeakHeapObject(&target_object)) {
+ if (target->GetHeapObject(&target_object)) {
MarkObjectViaMarkingWorklist(target_object);
}
}
@@ -3697,7 +3669,7 @@ class YoungGenerationRecordMigratedSlotVisitor final
inline void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
Address slot) final {
- if (value->IsStrongOrWeakHeapObject()) {
+ if (value->IsStrongOrWeak()) {
Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
@@ -3758,7 +3730,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
updating_job.Run(isolate()->async_counters());
- heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
+ heap()->array_buffer_collector()->FreeAllocations();
}
{
@@ -3949,7 +3921,7 @@ class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
MinorMarkCompactCollector* collector)
: marking_state_(collector->non_atomic_marking_state()) {}
- virtual Object* RetainAs(Object* object) {
+ Object* RetainAs(Object* object) override {
HeapObject* heap_object = HeapObject::cast(object);
if (!Heap::InNewSpace(heap_object)) return object;
@@ -4024,7 +3996,7 @@ class YoungGenerationMarkingTask;
class MarkingItem : public ItemParallelJob::Item {
public:
- virtual ~MarkingItem() {}
+ ~MarkingItem() override = default;
virtual void Process(YoungGenerationMarkingTask* task) = 0;
};
@@ -4112,7 +4084,7 @@ class PageMarkingItem : public MarkingItem {
public:
explicit PageMarkingItem(MemoryChunk* chunk, std::atomic<int>* global_slots)
: chunk_(chunk), global_slots_(global_slots), slots_(0) {}
- virtual ~PageMarkingItem() { *global_slots_ = *global_slots_ + slots_; }
+ ~PageMarkingItem() override { *global_slots_ = *global_slots_ + slots_; }
void Process(YoungGenerationMarkingTask* task) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
@@ -4152,7 +4124,7 @@ class PageMarkingItem : public MarkingItem {
// has to be in ToSpace.
DCHECK(Heap::InToSpace(object));
HeapObject* heap_object;
- bool success = object->ToStrongOrWeakHeapObject(&heap_object);
+ bool success = object->GetHeapObject(&heap_object);
USE(success);
DCHECK(success);
task->MarkObject(heap_object);
@@ -4172,7 +4144,7 @@ class GlobalHandlesMarkingItem : public MarkingItem {
GlobalHandlesMarkingItem(Heap* heap, GlobalHandles* global_handles,
size_t start, size_t end)
: global_handles_(global_handles), start_(start), end_(end) {}
- virtual ~GlobalHandlesMarkingItem() {}
+ ~GlobalHandlesMarkingItem() override = default;
void Process(YoungGenerationMarkingTask* task) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
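
Most of the mechanical churn in this file is the MaybeObject accessor rename: ToStrongHeapObject becomes GetHeapObjectIfStrong, ToWeakHeapObject becomes GetHeapObjectIfWeak, ToStrongOrWeakHeapObject becomes GetHeapObject, and IsClearedWeakHeapObject becomes IsCleared. Below is a self-contained sketch of the strong/weak/cleared dispatch the new names describe; V8 encodes the tag in the pointer's low bits, while this sketch uses an explicit enum purely for readability.

#include <cassert>

struct HeapObject { int payload; };

// Standalone sketch of a strong/weak/cleared tagged reference. The real
// MaybeObject tags the pointer bits; an explicit enum stands in here.
class MaybeObjectSketch {
 public:
  enum class Tag { kStrong, kWeak, kCleared };

  static MaybeObjectSketch Strong(HeapObject* o) { return {o, Tag::kStrong}; }
  static MaybeObjectSketch Weak(HeapObject* o) { return {o, Tag::kWeak}; }
  static MaybeObjectSketch Cleared() { return {nullptr, Tag::kCleared}; }

  bool IsCleared() const { return tag_ == Tag::kCleared; }
  bool IsStrongOrWeak() const { return !IsCleared(); }

  // The renamed accessors: report whether the tag matched and hand the
  // object back out-of-band, mirroring the new GetHeapObjectIf* names.
  bool GetHeapObjectIfStrong(HeapObject** out) const {
    if (tag_ != Tag::kStrong) return false;
    *out = object_;
    return true;
  }
  bool GetHeapObjectIfWeak(HeapObject** out) const {
    if (tag_ != Tag::kWeak) return false;
    *out = object_;
    return true;
  }
  bool GetHeapObject(HeapObject** out) const {
    if (IsCleared()) return false;
    *out = object_;
    return true;
  }

 private:
  MaybeObjectSketch(HeapObject* o, Tag t) : object_(o), tag_(t) {}
  HeapObject* object_;
  Tag tag_;
};

int main() {
  HeapObject obj{42};
  MaybeObjectSketch weak = MaybeObjectSketch::Weak(&obj);
  HeapObject* out = nullptr;
  assert(!weak.GetHeapObjectIfStrong(&out));
  assert(weak.GetHeapObjectIfWeak(&out) && out->payload == 42);
  assert(weak.GetHeapObject(&out) && weak.IsStrongOrWeak());
  return 0;
}
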
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index d62c964336..c4ab5b2b9c 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -63,21 +63,9 @@ class MarkingStateBase {
return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
}
- V8_INLINE bool WhiteToGrey(HeapObject* obj) {
- return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
- }
-
- V8_INLINE bool WhiteToBlack(HeapObject* obj) {
- return WhiteToGrey(obj) && GreyToBlack(obj);
- }
-
- V8_INLINE bool GreyToBlack(HeapObject* obj) {
- MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
- MarkBit markbit = MarkBitFrom(p, obj->address());
- if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
- static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj->Size());
- return true;
- }
+ V8_INLINE bool WhiteToGrey(HeapObject* obj);
+ V8_INLINE bool WhiteToBlack(HeapObject* obj);
+ V8_INLINE bool GreyToBlack(HeapObject* obj);
void ClearLiveness(MemoryChunk* chunk) {
static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
@@ -250,7 +238,7 @@ enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
public:
- virtual ~MarkCompactCollectorBase() {}
+ virtual ~MarkCompactCollectorBase() = default;
virtual void SetUp() = 0;
virtual void TearDown() = 0;
@@ -464,11 +452,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
#else
using MarkingState = MajorNonAtomicMarkingState;
#endif // V8_CONCURRENT_MARKING
+
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
+
// Wrapper for the shared and bailout worklists.
class MarkingWorklist {
public:
using ConcurrentMarkingWorklist = Worklist<HeapObject*, 64>;
+ using EmbedderTracingWorklist = Worklist<HeapObject*, 16>;
// The heap parameter is not used but needed to match the sequential case.
explicit MarkingWorklist(Heap* heap) {}
@@ -500,8 +491,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
}
HeapObject* PopBailout() {
- HeapObject* result;
#ifdef V8_CONCURRENT_MARKING
+ HeapObject* result;
if (bailout_.Pop(kMainThread, &result)) return result;
#endif
return nullptr;
@@ -511,6 +502,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bailout_.Clear();
shared_.Clear();
on_hold_.Clear();
+ embedder_.Clear();
}
bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
@@ -523,6 +515,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
on_hold_.IsGlobalPoolEmpty();
}
+ bool IsEmbedderEmpty() {
+ return embedder_.IsLocalEmpty(kMainThread) &&
+ embedder_.IsGlobalPoolEmpty();
+ }
+
int Size() {
return static_cast<int>(bailout_.LocalSize(kMainThread) +
shared_.LocalSize(kMainThread) +
@@ -538,11 +535,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bailout_.Update(callback);
shared_.Update(callback);
on_hold_.Update(callback);
+ embedder_.Update(callback);
}
ConcurrentMarkingWorklist* shared() { return &shared_; }
ConcurrentMarkingWorklist* bailout() { return &bailout_; }
ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
+ EmbedderTracingWorklist* embedder() { return &embedder_; }
void Print() {
PrintWorklist("shared", &shared_);
@@ -568,6 +567,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// for new space. This allows the compiler to remove write barriers
// for freshly allocated objects.
ConcurrentMarkingWorklist on_hold_;
+
+ // Worklist for objects that potentially require embedder tracing, i.e.,
+ // these objects need to be handed over to the embedder to find the full
+ // transitive closure.
+ EmbedderTracingWorklist embedder_;
};
class RootMarkingVisitor;
@@ -626,8 +630,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void UpdateSlots(SlotsBuffer* buffer);
void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
- void ClearMarkbits();
-
bool is_compacting() const { return compacting_; }
// Ensures that sweeping is finished.
@@ -703,7 +705,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
private:
explicit MarkCompactCollector(Heap* heap);
- ~MarkCompactCollector();
+ ~MarkCompactCollector() override;
bool WillBeDeoptimized(Code* code);
@@ -835,9 +837,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void PostProcessEvacuationCandidates();
void ReportAbortedEvacuationCandidate(HeapObject* failed_object, Page* page);
- void ClearMarkbitsInPagedSpace(PagedSpace* space);
- void ClearMarkbitsInNewSpace(NewSpace* space);
-
static const int kEphemeronChunkSize = 8 * KB;
int NumberOfParallelEphemeronVisitingTasks(size_t elements);
@@ -910,15 +909,14 @@ class MarkingVisitor final
V8_INLINE bool ShouldVisitMapPointer() { return false; }
- V8_INLINE int VisitAllocationSite(Map* map, AllocationSite* object);
V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
- V8_INLINE int VisitCodeDataContainer(Map* map, CodeDataContainer* object);
V8_INLINE int VisitEphemeronHashTable(Map* map, EphemeronHashTable* object);
V8_INLINE int VisitFixedArray(Map* map, FixedArray* object);
V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
- V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
+ V8_INLINE int VisitJSArrayBuffer(Map* map, JSArrayBuffer* object);
+ V8_INLINE int VisitJSDataView(Map* map, JSDataView* object);
+ V8_INLINE int VisitJSTypedArray(Map* map, JSTypedArray* object);
V8_INLINE int VisitMap(Map* map, Map* object);
- V8_INLINE int VisitNativeContext(Map* map, Context* object);
V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
// ObjectVisitor implementation.
@@ -931,6 +929,11 @@ class MarkingVisitor final
V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
+ // Weak list pointers should be ignored during marking. The lists are
+ // reconstructed after GC.
+ void VisitCustomWeakPointers(HeapObject* host, Object** start,
+ Object** end) final {}
+
private:
// Granularity in which FixedArrays are scanned if |fixed_array_mode|
// is true.
@@ -938,6 +941,9 @@ class MarkingVisitor final
V8_INLINE int VisitFixedArrayIncremental(Map* map, FixedArray* object);
+ template <typename T>
+ V8_INLINE int VisitEmbedderTracingSubclass(Map* map, T* object);
+
V8_INLINE void MarkMapContents(Map* map);
// Marks the object black without pushing it on the marking work list. Returns
@@ -980,7 +986,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
using NonAtomicMarkingState = MinorNonAtomicMarkingState;
explicit MinorMarkCompactCollector(Heap* heap);
- ~MinorMarkCompactCollector();
+ ~MinorMarkCompactCollector() override;
MarkingState* marking_state() { return &marking_state_; }
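
The header change adds a fourth worklist, embedder_, alongside shared_, bailout_ and on_hold_: objects that may wrap embedder data are pushed there during marking and drained by PerformWrapperTracing(). A simplified single-threaded sketch of the wrapper class follows, using std::deque in place of V8's segmented Worklist template; names mirror the patch but the implementation is illustrative only.

#include <deque>
#include <iostream>

struct HeapObject { const char* name; };

// Single-threaded stand-in for V8's segmented Worklist<HeapObject*, N>.
using WorklistSketch = std::deque<HeapObject*>;

// Sketch of the wrapper that the patch extends: every maintenance
// operation (Clear, IsEmpty, Update, ...) now has to touch the new
// embedder worklist as well as the existing three.
class MarkingWorklistSketch {
 public:
  void PushShared(HeapObject* o) { shared_.push_back(o); }
  void PushEmbedder(HeapObject* o) { embedder_.push_back(o); }

  bool IsEmbedderEmpty() const { return embedder_.empty(); }

  void Clear() {
    shared_.clear();
    bailout_.clear();
    on_hold_.clear();
    embedder_.clear();  // The new list must not be forgotten here.
  }

  // Drain the embedder list, handing each object to a tracer callback,
  // mirroring the loop added to PerformWrapperTracing().
  template <typename Tracer>
  void DrainEmbedder(Tracer&& trace) {
    while (!embedder_.empty()) {
      HeapObject* object = embedder_.front();
      embedder_.pop_front();
      trace(object);
    }
  }

 private:
  WorklistSketch shared_, bailout_, on_hold_, embedder_;
};

int main() {
  MarkingWorklistSketch worklist;
  HeapObject wrapper{"api-object"};
  worklist.PushEmbedder(&wrapper);
  worklist.DrainEmbedder([](HeapObject* o) {
    std::cout << "tracing possible wrapper: " << o->name << "\n";
  });
  return worklist.IsEmbedderEmpty() ? 0 : 1;
}
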
diff --git a/deps/v8/src/heap/marking.cc b/deps/v8/src/heap/marking.cc
index 23fbdd3465..93b5c06a45 100644
--- a/deps/v8/src/heap/marking.cc
+++ b/deps/v8/src/heap/marking.cc
@@ -28,6 +28,9 @@ void Bitmap::MarkAllBits() {
}
void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
+ if (start_index >= end_index) return;
+ end_index--;
+
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
@@ -43,10 +46,11 @@ void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
base::Relaxed_Store(cell_base + i, ~0u);
}
// Finally, fill all bits until the end address in the last cell with 1s.
- SetBitsInCell<AccessMode::ATOMIC>(end_cell_index, (end_index_mask - 1));
+ SetBitsInCell<AccessMode::ATOMIC>(end_cell_index,
+ end_index_mask | (end_index_mask - 1));
} else {
- SetBitsInCell<AccessMode::ATOMIC>(start_cell_index,
- end_index_mask - start_index_mask);
+ SetBitsInCell<AccessMode::ATOMIC>(
+ start_cell_index, end_index_mask | (end_index_mask - start_index_mask));
}
// This fence prevents re-ordering of publishing stores with the mark-
// bit setting stores.
@@ -54,6 +58,9 @@ void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
}
void Bitmap::ClearRange(uint32_t start_index, uint32_t end_index) {
+ if (start_index >= end_index) return;
+ end_index--;
+
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
@@ -71,10 +78,11 @@ void Bitmap::ClearRange(uint32_t start_index, uint32_t end_index) {
base::Relaxed_Store(cell_base + i, 0);
}
// Finally, set all bits until the end address in the last cell with 0s.
- ClearBitsInCell<AccessMode::ATOMIC>(end_cell_index, (end_index_mask - 1));
+ ClearBitsInCell<AccessMode::ATOMIC>(end_cell_index,
+ end_index_mask | (end_index_mask - 1));
} else {
- ClearBitsInCell<AccessMode::ATOMIC>(start_cell_index,
- (end_index_mask - start_index_mask));
+ ClearBitsInCell<AccessMode::ATOMIC>(
+ start_cell_index, end_index_mask | (end_index_mask - start_index_mask));
}
// This fence prevents re-ordering of publishing stores with the mark-
// bit clearing stores.
@@ -82,6 +90,9 @@ void Bitmap::ClearRange(uint32_t start_index, uint32_t end_index) {
}
bool Bitmap::AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
+ if (start_index >= end_index) return false;
+ end_index--;
+
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
@@ -97,21 +108,18 @@ bool Bitmap::AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
if (cells()[i] != ~0u) return false;
}
- matching_mask = (end_index_mask - 1);
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) ||
- ((cells()[end_cell_index] & matching_mask) == matching_mask);
+ matching_mask = end_index_mask | (end_index_mask - 1);
+ return ((cells()[end_cell_index] & matching_mask) == matching_mask);
} else {
- matching_mask = end_index_mask - start_index_mask;
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) ||
- (cells()[end_cell_index] & matching_mask) == matching_mask;
+ matching_mask = end_index_mask | (end_index_mask - start_index_mask);
+ return (cells()[end_cell_index] & matching_mask) == matching_mask;
}
}
bool Bitmap::AllBitsClearInRange(uint32_t start_index, uint32_t end_index) {
+ if (start_index >= end_index) return true;
+ end_index--;
+
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
@@ -125,15 +133,11 @@ bool Bitmap::AllBitsClearInRange(uint32_t start_index, uint32_t end_index) {
for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
if (cells()[i]) return false;
}
- matching_mask = (end_index_mask - 1);
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) || !(cells()[end_cell_index] & matching_mask);
+ matching_mask = end_index_mask | (end_index_mask - 1);
+ return !(cells()[end_cell_index] & matching_mask);
} else {
- matching_mask = end_index_mask - start_index_mask;
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) || !(cells()[end_cell_index] & matching_mask);
+ matching_mask = end_index_mask | (end_index_mask - start_index_mask);
+ return !(cells()[end_cell_index] & matching_mask);
}
}
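
All four bitmap range helpers above switch from an exclusive to an inclusive end index: each returns early for an empty range, decrements end_index, and widens the last-cell mask from (end_index_mask - 1) to end_index_mask | (end_index_mask - 1) so the final bit is included. This removes the old special case where a range ending exactly on a cell boundary produced a zero mask and risked dereferencing one cell past the bitmap. A worked sketch of the mask arithmetic with 32-bit cells, matching V8's cell size:

#include <cassert>
#include <cstdint>

constexpr uint32_t kBitsPerCellLog2 = 5;  // 32 mark bits per cell.

uint32_t IndexInCell(uint32_t index) {
  return index & ((1u << kBitsPerCellLog2) - 1);
}

// Mask selecting bits [0, end bit] of the last cell, where end_index is
// now *inclusive* (the caller already decremented it).
uint32_t LastCellMask(uint32_t end_index) {
  uint32_t end_index_mask = 1u << IndexInCell(end_index);
  // end_index_mask - 1 selects every bit below the end bit; OR-ing the end
  // bit back in is what makes the range inclusive.
  return end_index_mask | (end_index_mask - 1);
}

int main() {
  // Old exclusive range [0, 8) becomes inclusive end_index = 7:
  // bits 0..7 of the first cell, i.e. 0xFF.
  assert(LastCellMask(7) == 0xFFu);

  // A range ending exactly on a cell boundary, e.g. [0, 32): the old code
  // computed (1 << 0) - 1 == 0 as the mask for the *next* cell and needed
  // the (matching_mask == 0) guard to avoid reading past the bitmap; the
  // inclusive end_index = 31 now yields a full in-bounds cell mask.
  assert(LastCellMask(31) == 0xFFFFFFFFu);
  return 0;
}
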
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index ac7bcb8087..bb069d19f4 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -547,7 +547,7 @@ void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject* object) {
static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
MaybeObject* maybe_obj, FeedbackSlotKind kind, Isolate* isolate) {
- if (maybe_obj->IsClearedWeakHeapObject())
+ if (maybe_obj->IsCleared())
return ObjectStats::FEEDBACK_VECTOR_SLOT_OTHER_TYPE;
Object* obj = maybe_obj->GetHeapObjectOrSmi();
switch (kind) {
@@ -623,11 +623,12 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
// Log the monomorphic/polymorphic helper objects that this slot owns.
for (int i = 0; i < it.entry_size(); i++) {
MaybeObject* raw_object = vector->get(slot.ToInt() + i);
- if (!raw_object->IsStrongOrWeakHeapObject()) continue;
- HeapObject* object = raw_object->GetHeapObject();
- if (object->IsCell() || object->IsWeakFixedArray()) {
- RecordSimpleVirtualObjectStats(
- vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
+ HeapObject* object;
+ if (raw_object->GetHeapObject(&object)) {
+ if (object->IsCell() || object->IsWeakFixedArray()) {
+ RecordSimpleVirtualObjectStats(
+ vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
+ }
}
}
}
@@ -677,8 +678,6 @@ void ObjectStatsCollectorImpl::CollectStatistics(
RecordVirtualContext(Context::cast(obj));
} else if (obj->IsScript()) {
RecordVirtualScriptDetails(Script::cast(obj));
- } else if (obj->IsExternalString()) {
- RecordVirtualExternalStringDetails(ExternalString::cast(obj));
} else if (obj->IsArrayBoilerplateDescription()) {
RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription::cast(obj));
@@ -688,6 +687,11 @@ void ObjectStatsCollectorImpl::CollectStatistics(
}
break;
case kPhase2:
+ if (obj->IsExternalString()) {
+ // This has to be in Phase2 to avoid conflicting with recording Script
+ // sources. We still want to run RecordObjectStats afterwards, though.
+ RecordVirtualExternalStringDetails(ExternalString::cast(obj));
+ }
RecordObjectStats(obj, map->instance_type(), obj->Size());
if (collect_field_stats == CollectFieldStats::kYes) {
field_stats_collector_.RecordStats(obj);
@@ -808,7 +812,7 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
} else if (raw_source->IsString()) {
String* source = String::cast(raw_source);
RecordSimpleVirtualObjectStats(
- script, HeapObject::cast(raw_source),
+ script, source,
source->IsOneByteRepresentation()
? ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_ONE_BYTE_TYPE
: ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_TWO_BYTE_TYPE);
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index f32bbc1914..c7a4f70f01 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -170,15 +170,6 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
}
template <typename ConcreteVisitor>
-int NewSpaceVisitor<ConcreteVisitor>::VisitJSFunction(Map* map,
- JSFunction* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
- JSFunction::BodyDescriptorWeak::IterateBody(map, object, size, visitor);
- return size;
-}
-
-template <typename ConcreteVisitor>
int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map* map,
Context* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 63ef8fb353..147af52c7e 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -21,7 +21,9 @@ class BigInt;
class BytecodeArray;
class DataHandler;
class JSArrayBuffer;
+class JSDataView;
class JSRegExp;
+class JSTypedArray;
class JSWeakCollection;
class UncompiledDataWithoutPreParsedScope;
class UncompiledDataWithPreParsedScope;
@@ -44,8 +46,9 @@ class UncompiledDataWithPreParsedScope;
V(FixedFloat64Array) \
V(FixedTypedArrayBase) \
V(JSArrayBuffer) \
- V(JSFunction) \
+ V(JSDataView) \
V(JSObject) \
+ V(JSTypedArray) \
V(JSWeakCollection) \
V(Map) \
V(Oddball) \
@@ -119,7 +122,6 @@ class NewSpaceVisitor : public HeapVisitor<int, ConcreteVisitor> {
// Special cases for young generation.
- V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
V8_INLINE int VisitNativeContext(Map* map, Context* object);
V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
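
Each V(...) entry in the typed visitor list expands, via an X-macro, into a dedicated Visit method on the visitor classes, which is why adding JSDataView and JSTypedArray (and dropping JSFunction) only requires editing this list plus the handful of method bodies earlier in the diff. A generic sketch of the expansion pattern with simplified placeholder types:

#include <iostream>

struct Map {};
struct JSArrayBuffer {};
struct JSDataView {};
struct JSTypedArray {};

// Each entry expands into one typed Visit method, so adding a type to the
// list is the only change needed to give it a dedicated visitor hook.
#define TYPED_VISITOR_LIST(V) \
  V(JSArrayBuffer)            \
  V(JSDataView)               \
  V(JSTypedArray)

class VisitorSketch {
 public:
#define DECLARE_VISIT(TypeName)                     \
  int Visit##TypeName(Map* map, TypeName* object) { \
    std::cout << "visiting " #TypeName "\n";        \
    return 0;                                       \
  }
  TYPED_VISITOR_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
};

int main() {
  Map map;
  JSDataView view;
  VisitorSketch visitor;
  return visitor.VisitJSDataView(&map, &view);
}
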
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index 9feebbf4d5..5848d5342e 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -107,8 +107,9 @@ void ScavengeJob::ScheduleIdleTask(Heap* heap) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
idle_task_pending_ = true;
- auto task = new IdleTask(heap->isolate(), this);
- V8::GetCurrentPlatform()->CallIdleOnForegroundThread(isolate, task);
+ auto task = base::make_unique<IdleTask>(heap->isolate(), this);
+ V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate)->PostIdleTask(
+ std::move(task));
}
}
}
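
The scavenge job moves off the deprecated CallIdleOnForegroundThread to the per-isolate foreground task runner, with ownership expressed as a unique_ptr. Below is a hedged sketch against the public v8::Platform interface; GetForegroundTaskRunner, PostIdleTask and IdleTask are part of v8-platform.h in this V8 series, while MyIdleTask, ScheduleIdleWork and the include paths are illustrative assumptions.

#include <memory>

#include "include/v8-platform.h"
#include "include/v8.h"

// Illustrative idle task (assumption: the class name and body are ours;
// v8::IdleTask itself is the public interface from v8-platform.h).
class MyIdleTask : public v8::IdleTask {
 public:
  void Run(double deadline_in_seconds) override {
    // Perform incremental work until deadline_in_seconds is reached.
  }
};

// Sketch of the new posting style: ownership travels as std::unique_ptr
// through the per-isolate foreground task runner instead of the removed
// CallIdleOnForegroundThread entry point.
void ScheduleIdleWork(v8::Platform* platform, v8::Isolate* isolate) {
  if (!platform->IdleTasksEnabled(isolate)) return;
  std::shared_ptr<v8::TaskRunner> runner =
      platform->GetForegroundTaskRunner(isolate);
  runner->PostIdleTask(std::make_unique<MyIdleTask>());
}
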
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 649292085a..376b5e75aa 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -7,6 +7,7 @@
#include "src/heap/scavenger.h"
+#include "src/heap/incremental-marking-inl.h"
#include "src/heap/local-allocator-inl.h"
#include "src/objects-inl.h"
#include "src/objects/map.h"
@@ -14,6 +15,81 @@
namespace v8 {
namespace internal {
+void Scavenger::PromotionList::View::PushRegularObject(HeapObject* object,
+ int size) {
+ promotion_list_->PushRegularObject(task_id_, object, size);
+}
+
+void Scavenger::PromotionList::View::PushLargeObject(HeapObject* object,
+ Map* map, int size) {
+ promotion_list_->PushLargeObject(task_id_, object, map, size);
+}
+
+bool Scavenger::PromotionList::View::IsEmpty() {
+ return promotion_list_->IsEmpty();
+}
+
+size_t Scavenger::PromotionList::View::LocalPushSegmentSize() {
+ return promotion_list_->LocalPushSegmentSize(task_id_);
+}
+
+bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
+ return promotion_list_->Pop(task_id_, entry);
+}
+
+bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
+ return promotion_list_->IsGlobalPoolEmpty();
+}
+
+bool Scavenger::PromotionList::View::ShouldEagerlyProcessPromotionList() {
+ return promotion_list_->ShouldEagerlyProcessPromotionList(task_id_);
+}
+
+void Scavenger::PromotionList::PushRegularObject(int task_id,
+ HeapObject* object, int size) {
+ regular_object_promotion_list_.Push(task_id, ObjectAndSize(object, size));
+}
+
+void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject* object,
+ Map* map, int size) {
+ large_object_promotion_list_.Push(task_id, {object, map, size});
+}
+
+bool Scavenger::PromotionList::IsEmpty() {
+ return regular_object_promotion_list_.IsEmpty() &&
+ large_object_promotion_list_.IsEmpty();
+}
+
+size_t Scavenger::PromotionList::LocalPushSegmentSize(int task_id) {
+ return regular_object_promotion_list_.LocalPushSegmentSize(task_id) +
+ large_object_promotion_list_.LocalPushSegmentSize(task_id);
+}
+
+bool Scavenger::PromotionList::Pop(int task_id,
+ struct PromotionListEntry* entry) {
+ ObjectAndSize regular_object;
+ if (regular_object_promotion_list_.Pop(task_id, &regular_object)) {
+ entry->heap_object = regular_object.first;
+ entry->size = regular_object.second;
+ entry->map = entry->heap_object->map();
+ return true;
+ }
+ return large_object_promotion_list_.Pop(task_id, entry);
+}
+
+bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
+ return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
+ large_object_promotion_list_.IsGlobalPoolEmpty();
+}
+
+bool Scavenger::PromotionList::ShouldEagerlyProcessPromotionList(int task_id) {
+ // Threshold for prioritizing processing of the promotion list. Right
+ // now we only look into the regular object list.
+ const int kProcessPromotionListThreshold =
+ kRegularObjectPromotionListSegmentSize / 2;
+ return LocalPushSegmentSize(task_id) < kProcessPromotionListThreshold;
+}
+
// Whitelist for objects that are guaranteed to contain only data.
bool Scavenger::ContainsOnlyData(VisitorId visitor_id) {
switch (visitor_id) {
@@ -38,7 +114,7 @@ void Scavenger::PageMemoryFence(MaybeObject* object) {
// Perform a dummy acquire load to tell TSAN that there is no data race
// with page initialization.
HeapObject* heap_object;
- if (object->ToStrongOrWeakHeapObject(&heap_object)) {
+ if (object->GetHeapObject(&heap_object)) {
MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
CHECK_NOT_NULL(chunk->synchronized_heap());
}
@@ -71,8 +147,10 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
return true;
}
-bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObjectReference** slot,
- HeapObject* object, int object_size) {
+CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map* map,
+ HeapObjectReference** slot,
+ HeapObject* object,
+ int object_size) {
DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
@@ -85,21 +163,26 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObjectReference** slot,
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
- MapWord map_word = object->map_word();
+ MapWord map_word = object->synchronized_map_word();
HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
- return true;
+ DCHECK(!Heap::InFromSpace(*slot));
+ return Heap::InToSpace(*slot)
+ ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
+ : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
}
HeapObjectReference::Update(slot, target);
copied_list_.Push(ObjectAndSize(target, object_size));
copied_size_ += object_size;
- return true;
+ return CopyAndForwardResult::SUCCESS_YOUNG_GENERATION;
}
- return false;
+ return CopyAndForwardResult::FAILURE;
}
-bool Scavenger::PromoteObject(Map* map, HeapObjectReference** slot,
- HeapObject* object, int object_size) {
+CopyAndForwardResult Scavenger::PromoteObject(Map* map,
+ HeapObjectReference** slot,
+ HeapObject* object,
+ int object_size) {
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(OLD_SPACE, object_size, alignment);
@@ -111,61 +194,112 @@ bool Scavenger::PromoteObject(Map* map, HeapObjectReference** slot,
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(OLD_SPACE, target, object_size);
- MapWord map_word = object->map_word();
+ MapWord map_word = object->synchronized_map_word();
HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
- return true;
+ DCHECK(!Heap::InFromSpace(*slot));
+ return Heap::InToSpace(*slot)
+ ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
+ : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
}
HeapObjectReference::Update(slot, target);
if (!ContainsOnlyData(map->visitor_id())) {
- promotion_list_.Push(ObjectAndSize(target, object_size));
+ promotion_list_.PushRegularObject(target, object_size);
}
promoted_size_ += object_size;
+ return CopyAndForwardResult::SUCCESS_OLD_GENERATION;
+ }
+ return CopyAndForwardResult::FAILURE;
+}
+
+SlotCallbackResult Scavenger::RememberedSetEntryNeeded(
+ CopyAndForwardResult result) {
+ DCHECK_NE(CopyAndForwardResult::FAILURE, result);
+ return result == CopyAndForwardResult::SUCCESS_YOUNG_GENERATION ? KEEP_SLOT
+ : REMOVE_SLOT;
+}
+
+bool Scavenger::HandleLargeObject(Map* map, HeapObject* object,
+ int object_size) {
+ if (V8_UNLIKELY(FLAG_young_generation_large_objects &&
+ object_size > kMaxNewSpaceHeapObjectSize)) {
+ DCHECK_EQ(NEW_LO_SPACE,
+ MemoryChunk::FromHeapObject(object)->owner()->identity());
+ if (base::AsAtomicPointer::Release_CompareAndSwap(
+ reinterpret_cast<HeapObject**>(object->address()), map,
+ MapWord::FromForwardingAddress(object).ToMap()) == map) {
+ surviving_new_large_objects_.insert({object, map});
+
+ if (!ContainsOnlyData(map->visitor_id())) {
+ promotion_list_.PushLargeObject(object, map, object_size);
+ }
+ }
return true;
}
return false;
}
-void Scavenger::EvacuateObjectDefault(Map* map, HeapObjectReference** slot,
- HeapObject* object, int object_size) {
+SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
+ HeapObjectReference** slot,
+ HeapObject* object,
+ int object_size) {
SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
SLOW_DCHECK(object->SizeFromMap(map) == object_size);
+ CopyAndForwardResult result;
+
+ if (HandleLargeObject(map, object, object_size)) {
+ return REMOVE_SLOT;
+ }
if (!heap()->ShouldBePromoted(object->address())) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
- if (SemiSpaceCopyObject(map, slot, object, object_size)) return;
+ result = SemiSpaceCopyObject(map, slot, object, object_size);
+ if (result != CopyAndForwardResult::FAILURE) {
+ return RememberedSetEntryNeeded(result);
+ }
}
- if (PromoteObject(map, slot, object, object_size)) return;
+ // We may want to promote this object if the object was already semi-space
+ // copied in a previous young generation GC or if the semi-space copy above
+ // failed.
+ result = PromoteObject(map, slot, object, object_size);
+ if (result != CopyAndForwardResult::FAILURE) {
+ return RememberedSetEntryNeeded(result);
+ }
- // If promotion failed, we try to copy the object to the other semi-space
- if (SemiSpaceCopyObject(map, slot, object, object_size)) return;
+ // If promotion failed, we try to copy the object to the other semi-space.
+ result = SemiSpaceCopyObject(map, slot, object, object_size);
+ if (result != CopyAndForwardResult::FAILURE) {
+ return RememberedSetEntryNeeded(result);
+ }
heap()->FatalProcessOutOfMemory("Scavenger: semi-space copy");
+ UNREACHABLE();
}
-void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
- ThinString* object, int object_size) {
+SlotCallbackResult Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
+ ThinString* object,
+ int object_size) {
if (!is_incremental_marking_) {
- // Loading actual is fine in a parallel setting is there is no write.
+ // The ThinString should die after Scavenge, so avoid writing the proper
+ // forwarding pointer and instead just record the actual string as the
+ // forwarded reference.
String* actual = object->actual();
- object->set_length(0);
- *slot = actual;
- // ThinStrings always refer to internalized strings, which are
- // always in old space.
+ // ThinStrings always refer to internalized strings, which are always in old
+ // space.
DCHECK(!Heap::InNewSpace(actual));
- base::AsAtomicPointer::Relaxed_Store(
- reinterpret_cast<Map**>(object->address()),
- MapWord::FromForwardingAddress(actual).ToMap());
- return;
+ *slot = actual;
+ return REMOVE_SLOT;
}
- EvacuateObjectDefault(map, reinterpret_cast<HeapObjectReference**>(slot),
- object, object_size);
+ return EvacuateObjectDefault(
+ map, reinterpret_cast<HeapObjectReference**>(slot), object, object_size);
}
-void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
- ConsString* object, int object_size) {
+SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map* map,
+ HeapObject** slot,
+ ConsString* object,
+ int object_size) {
DCHECK(IsShortcutCandidate(map->instance_type()));
if (!is_incremental_marking_ &&
object->unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
@@ -174,37 +308,38 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
*slot = first;
if (!Heap::InNewSpace(first)) {
- base::AsAtomicPointer::Relaxed_Store(
+ base::AsAtomicPointer::Release_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(first).ToMap());
- return;
+ return REMOVE_SLOT;
}
- MapWord first_word = first->map_word();
+ MapWord first_word = first->synchronized_map_word();
if (first_word.IsForwardingAddress()) {
HeapObject* target = first_word.ToForwardingAddress();
*slot = target;
- base::AsAtomicPointer::Relaxed_Store(
+ base::AsAtomicPointer::Release_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(target).ToMap());
- return;
+ return Heap::InToSpace(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map* map = first_word.ToMap();
- EvacuateObjectDefault(map, reinterpret_cast<HeapObjectReference**>(slot),
- first, first->SizeFromMap(map));
- base::AsAtomicPointer::Relaxed_Store(
+ SlotCallbackResult result = EvacuateObjectDefault(
+ map, reinterpret_cast<HeapObjectReference**>(slot), first,
+ first->SizeFromMap(map));
+ base::AsAtomicPointer::Release_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(*slot).ToMap());
- return;
+ return result;
}
- EvacuateObjectDefault(map, reinterpret_cast<HeapObjectReference**>(slot),
- object, object_size);
+ return EvacuateObjectDefault(
+ map, reinterpret_cast<HeapObjectReference**>(slot), object, object_size);
}
-void Scavenger::EvacuateObject(HeapObjectReference** slot, Map* map,
- HeapObject* source) {
+SlotCallbackResult Scavenger::EvacuateObject(HeapObjectReference** slot,
+ Map* map, HeapObject* source) {
SLOW_DCHECK(Heap::InFromSpace(source));
SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
int size = source->SizeFromMap(map);
@@ -213,23 +348,22 @@ void Scavenger::EvacuateObject(HeapObjectReference** slot, Map* map,
switch (map->visitor_id()) {
case kVisitThinString:
// At the moment we don't allow weak pointers to thin strings.
- DCHECK(!(*slot)->IsWeakHeapObject());
- EvacuateThinString(map, reinterpret_cast<HeapObject**>(slot),
- reinterpret_cast<ThinString*>(source), size);
- break;
+ DCHECK(!(*slot)->IsWeak());
+ return EvacuateThinString(map, reinterpret_cast<HeapObject**>(slot),
+ reinterpret_cast<ThinString*>(source), size);
case kVisitShortcutCandidate:
- DCHECK(!(*slot)->IsWeakHeapObject());
+ DCHECK(!(*slot)->IsWeak());
// At the moment we don't allow weak pointers to cons strings.
- EvacuateShortcutCandidate(map, reinterpret_cast<HeapObject**>(slot),
- reinterpret_cast<ConsString*>(source), size);
- break;
+ return EvacuateShortcutCandidate(
+ map, reinterpret_cast<HeapObject**>(slot),
+ reinterpret_cast<ConsString*>(source), size);
default:
- EvacuateObjectDefault(map, slot, source, size);
- break;
+ return EvacuateObjectDefault(map, slot, source, size);
}
}
-void Scavenger::ScavengeObject(HeapObjectReference** p, HeapObject* object) {
+SlotCallbackResult Scavenger::ScavengeObject(HeapObjectReference** p,
+ HeapObject* object) {
DCHECK(Heap::InFromSpace(object));
// Synchronized load that consumes the publishing CAS of MigrateObject.
@@ -240,20 +374,21 @@ void Scavenger::ScavengeObject(HeapObjectReference** p, HeapObject* object) {
if (first_word.IsForwardingAddress()) {
HeapObject* dest = first_word.ToForwardingAddress();
DCHECK(Heap::InFromSpace(*p));
- if ((*p)->IsWeakHeapObject()) {
+ if ((*p)->IsWeak()) {
*p = HeapObjectReference::Weak(dest);
} else {
- DCHECK((*p)->IsStrongHeapObject());
+ DCHECK((*p)->IsStrong());
*p = HeapObjectReference::Strong(dest);
}
- return;
+ DCHECK(Heap::InToSpace(dest) || !Heap::InNewSpace(dest));
+ return Heap::InToSpace(dest) ? KEEP_SLOT : REMOVE_SLOT;
}
Map* map = first_word.ToMap();
// AllocationMementos are unrooted and shouldn't survive a scavenge
DCHECK_NE(ReadOnlyRoots(heap()).allocation_memento_map(), map);
// Call the slow part of scavenge object.
- EvacuateObject(p, map, object);
+ return EvacuateObject(p, map, object);
}
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
@@ -261,23 +396,13 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
MaybeObject* object = *slot;
if (Heap::InFromSpace(object)) {
- HeapObject* heap_object;
- bool success = object->ToStrongOrWeakHeapObject(&heap_object);
- USE(success);
- DCHECK(success);
+ HeapObject* heap_object = object->GetHeapObject();
DCHECK(heap_object->IsHeapObject());
- ScavengeObject(reinterpret_cast<HeapObjectReference**>(slot), heap_object);
-
- object = *slot;
- // If the object was in from space before and is after executing the
- // callback in to space, the object is still live.
- // Unfortunately, we do not know about the slot. It could be in a
- // just freed free space object.
- PageMemoryFence(object);
- if (Heap::InToSpace(object)) {
- return KEEP_SLOT;
- }
+ SlotCallbackResult result = ScavengeObject(
+ reinterpret_cast<HeapObjectReference**>(slot), heap_object);
+ DCHECK_IMPLIES(result == REMOVE_SLOT, !Heap::InNewSpace(*slot));
+ return result;
} else if (Heap::InToSpace(object)) {
// Already updated slot. This can happen when processing of the work list
// is interleaved with processing roots.
@@ -305,7 +430,7 @@ void ScavengeVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
if (!Heap::InNewSpace(object)) continue;
// Treat the weak reference as strong.
HeapObject* heap_object;
- if (object->ToStrongOrWeakHeapObject(&heap_object)) {
+ if (object->GetHeapObject(&heap_object)) {
scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
heap_object);
} else {
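
The evacuation helpers in this file change their return type from void/bool to SlotCallbackResult, so CheckAndScavengeObject no longer has to reload the slot and re-test which space it points into: the copy path itself reports whether the survivor stayed in the young generation (KEEP_SLOT, the old-to-new remembered-set entry is still needed) or was promoted (REMOVE_SLOT). A minimal sketch mirroring the added RememberedSetEntryNeeded helper; the enums here are standalone stand-ins, not V8's definitions:

#include <cassert>

// Names mirror the patch; this is a standalone sketch, not V8's types.
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

enum class CopyAndForwardResult {
  SUCCESS_YOUNG_GENERATION,  // survivor still in new space
  SUCCESS_OLD_GENERATION,    // survivor promoted to old space
  FAILURE
};

// A remembered-set entry (old-to-new pointer) is only needed while the
// target still lives in the young generation.
SlotCallbackResult RememberedSetEntryNeeded(CopyAndForwardResult result) {
  assert(result != CopyAndForwardResult::FAILURE);
  return result == CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
             ? KEEP_SLOT
             : REMOVE_SLOT;
}

int main() {
  assert(RememberedSetEntryNeeded(
             CopyAndForwardResult::SUCCESS_YOUNG_GENERATION) == KEEP_SLOT);
  assert(RememberedSetEntryNeeded(
             CopyAndForwardResult::SUCCESS_OLD_GENERATION) == REMOVE_SLOT);
  return 0;
}
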
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index f8c6d496ce..4c63ed099a 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -4,17 +4,72 @@
#include "src/heap/scavenger.h"
+#include "src/heap/array-buffer-collector.h"
#include "src/heap/barrier.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects-body-descriptors-inl.h"
+#include "src/utils-inl.h"
namespace v8 {
namespace internal {
+class PageScavengingItem final : public ItemParallelJob::Item {
+ public:
+ explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+ ~PageScavengingItem() override = default;
+
+ void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
+
+ private:
+ MemoryChunk* const chunk_;
+};
+
+class ScavengingTask final : public ItemParallelJob::Task {
+ public:
+ ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
+ : ItemParallelJob::Task(heap->isolate()),
+ heap_(heap),
+ scavenger_(scavenger),
+ barrier_(barrier) {}
+
+ void RunInParallel() final {
+ TRACE_BACKGROUND_GC(
+ heap_->tracer(),
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+ double scavenging_time = 0.0;
+ {
+ barrier_->Start();
+ TimedScope scope(&scavenging_time);
+ PageScavengingItem* item = nullptr;
+ while ((item = GetItem<PageScavengingItem>()) != nullptr) {
+ item->Process(scavenger_);
+ item->MarkFinished();
+ }
+ do {
+ scavenger_->Process(barrier_);
+ } while (!barrier_->Wait());
+ scavenger_->Process();
+ }
+ if (FLAG_trace_parallel_scavenge) {
+ PrintIsolate(heap_->isolate(),
+ "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
+ static_cast<void*>(this), scavenging_time,
+ scavenger_->bytes_copied(), scavenger_->bytes_promoted());
+ }
+ };
+
+ private:
+ Heap* const heap_;
+ Scavenger* const scavenger_;
+ OneshotBarrier* const barrier_;
+};
+
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
public:
IterateAndScavengePromotedObjectsVisitor(Heap* heap, Scavenger* scavenger,
@@ -40,7 +95,7 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
for (MaybeObject** slot = start; slot < end; ++slot) {
MaybeObject* target = *slot;
HeapObject* heap_object;
- if (target->ToStrongOrWeakHeapObject(&heap_object)) {
+ if (target->GetHeapObject(&heap_object)) {
HandleSlot(host, reinterpret_cast<Address>(slot), heap_object);
}
}
@@ -53,15 +108,13 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
scavenger_->PageMemoryFence(reinterpret_cast<MaybeObject*>(target));
if (Heap::InFromSpace(target)) {
- scavenger_->ScavengeObject(slot, target);
- bool success = (*slot)->ToStrongOrWeakHeapObject(&target);
+ SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
+ bool success = (*slot)->GetHeapObject(&target);
USE(success);
DCHECK(success);
- scavenger_->PageMemoryFence(reinterpret_cast<MaybeObject*>(target));
- if (Heap::InNewSpace(target)) {
+ if (result == KEEP_SLOT) {
SLOW_DCHECK(target->IsHeapObject());
- SLOW_DCHECK(Heap::InToSpace(target));
RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot_address),
slot_address);
}
@@ -79,9 +132,204 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
const bool record_slots_;
};
-Scavenger::Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
- PromotionList* promotion_list, int task_id)
- : heap_(heap),
+static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
+ return Heap::InFromSpace(*p) &&
+ !HeapObject::cast(*p)->map_word().IsForwardingAddress();
+}
+
+class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+ Object* RetainAs(Object* object) override {
+ if (!Heap::InFromSpace(object)) {
+ return object;
+ }
+
+ MapWord map_word = HeapObject::cast(object)->map_word();
+ if (map_word.IsForwardingAddress()) {
+ return map_word.ToForwardingAddress();
+ }
+ return nullptr;
+ }
+};
+
+ScavengerCollector::ScavengerCollector(Heap* heap)
+ : isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}
+
+void ScavengerCollector::CollectGarbage() {
+ ItemParallelJob job(isolate_->cancelable_task_manager(),
+ &parallel_scavenge_semaphore_);
+ const int kMainThreadId = 0;
+ Scavenger* scavengers[kMaxScavengerTasks];
+ const bool is_logging = isolate_->LogObjectRelocation();
+ const int num_scavenge_tasks = NumberOfScavengeTasks();
+ OneshotBarrier barrier;
+ Scavenger::CopiedList copied_list(num_scavenge_tasks);
+ Scavenger::PromotionList promotion_list(num_scavenge_tasks);
+ for (int i = 0; i < num_scavenge_tasks; i++) {
+ scavengers[i] = new Scavenger(this, heap_, is_logging, &copied_list,
+ &promotion_list, i);
+ job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
+ }
+
+ {
+ Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
+ // Pause the concurrent sweeper.
+ Sweeper::PauseOrCompleteScope pause_scope(sweeper);
+ // Filter out pages from the sweeper that need to be processed for
+ // old-to-new slots by the Scavenger. After processing, the Scavenger adds
+ // back pages that are still unswept. This way the Scavenger has exclusive
+ // access to the slots of a page and can completely avoid any locks on
+ // the page itself.
+ Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
+ filter_scope.FilterOldSpaceSweepingPages(
+ [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap_, [&job](MemoryChunk* chunk) {
+ job.AddItem(new PageScavengingItem(chunk));
+ });
+
+ RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);
+
+ {
+ // Identify weak unmodified handles. Requires an unmodified graph.
+ TRACE_GC(
+ heap_->tracer(),
+ GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
+ isolate_->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &JSObject::IsUnmodifiedApiObject);
+ }
+ {
+ // Copy roots.
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
+ heap_->IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+ }
+ {
+ // Parallel phase scavenging all copied and promoted objects.
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
+ job.Run(isolate_->async_counters());
+ DCHECK(copied_list.IsEmpty());
+ DCHECK(promotion_list.IsEmpty());
+ }
+ {
+ // Scavenge weak global handles.
+ TRACE_GC(heap_->tracer(),
+ GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
+ isolate_->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnscavengedHeapObject);
+ isolate_->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
+ &root_scavenge_visitor);
+ scavengers[kMainThreadId]->Process();
+
+ DCHECK(copied_list.IsEmpty());
+ DCHECK(promotion_list.IsEmpty());
+ isolate_->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ &root_scavenge_visitor, &IsUnscavengedHeapObject);
+ }
+
+ {
+ // Finalize parallel scavenging.
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);
+
+ for (int i = 0; i < num_scavenge_tasks; i++) {
+ scavengers[i]->Finalize();
+ delete scavengers[i];
+ }
+
+ HandleSurvivingNewLargeObjects();
+ }
+ }
+
+ {
+ // Update references into new space.
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
+ heap_->UpdateNewSpaceReferencesInExternalStringTable(
+ &Heap::UpdateNewSpaceReferenceInExternalStringTableEntry);
+
+ heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+ }
+
+ if (FLAG_concurrent_marking) {
+ // Ensure that the concurrent marker does not track pages that are going
+ // to be unmapped.
+ for (Page* p :
+ PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
+ heap_->concurrent_marking()->ClearLiveness(p);
+ }
+ }
+
+ ScavengeWeakObjectRetainer weak_object_retainer;
+ heap_->ProcessYoungWeakReferences(&weak_object_retainer);
+
+ // Set age mark.
+ heap_->new_space_->set_age_mark(heap_->new_space()->top());
+
+ {
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS);
+ ArrayBufferTracker::PrepareToFreeDeadInNewSpace(heap_);
+ }
+ heap_->array_buffer_collector()->FreeAllocations();
+
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) {
+ if (chunk->SweepingDone()) {
+ RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
+ } else {
+ RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
+ }
+ });
+
+ // Update how much has survived scavenge.
+ heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedNewSpaceObjectSize());
+
+ // The Scavenger may find new wrappers by iterating objects promoted onto
+ // a black page.
+ heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+}
+
+void ScavengerCollector::HandleSurvivingNewLargeObjects() {
+ for (SurvivingNewLargeObjectMapEntry update_info :
+ surviving_new_large_objects_) {
+ HeapObject* object = update_info.first;
+ Map* map = update_info.second;
+ // Order is important here. We have to re-install the map to have access
+ // to meta-data like size during page promotion.
+ object->set_map_word(MapWord::FromMap(map));
+ LargePage* page = LargePage::FromHeapObject(object);
+ heap_->lo_space()->PromoteNewLargeObject(page);
+ }
+ DCHECK(heap_->new_lo_space()->IsEmpty());
+}
+
+void ScavengerCollector::MergeSurvivingNewLargeObjects(
+ const SurvivingNewLargeObjectsMap& objects) {
+ for (SurvivingNewLargeObjectMapEntry object : objects) {
+ bool success = surviving_new_large_objects_.insert(object).second;
+ USE(success);
+ DCHECK(success);
+ }
+}
+
+int ScavengerCollector::NumberOfScavengeTasks() {
+ if (!FLAG_parallel_scavenge) return 1;
+ const int num_scavenge_tasks =
+ static_cast<int>(heap_->new_space()->TotalCapacity()) / MB;
+ static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
+ int tasks =
+ Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
+ if (!heap_->CanExpandOldGeneration(
+ static_cast<size_t>(tasks * Page::kPageSize))) {
+ // Optimize for memory usage near the heap limit.
+ tasks = 1;
+ }
+ return tasks;
+}
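
The heuristic above amounts to one task per MB of new-space capacity, clamped by kMaxScavengerTasks and by the worker-thread count plus the main thread, falling back to a single task when the old generation cannot absorb a page of slack per task. A standalone re-derivation (the values in main() are made up for illustration):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int NumberOfScavengeTasksSketch(std::size_t new_space_capacity_bytes,
                                int worker_threads,
                                bool can_expand_old_generation) {
  const std::size_t kMB = 1024 * 1024;
  const int kMaxTasks = 8;  // mirrors ScavengerCollector::kMaxScavengerTasks
  // One task per MB of new-space capacity, clamped by the task and core caps.
  int by_capacity = static_cast<int>(new_space_capacity_bytes / kMB);
  int cores = worker_threads + 1;  // worker threads plus the main thread
  int tasks = std::max(1, std::min(std::min(by_capacity, kMaxTasks), cores));
  // Near the heap limit, trade parallelism for memory: each task may grow
  // the old generation by up to a page.
  if (!can_expand_old_generation) tasks = 1;
  return tasks;
}

int main() {
  // 16 MB of new space with 7 worker threads available: prints 8.
  std::printf("%d\n", NumberOfScavengeTasksSketch(16u * 1024 * 1024, 7, true));
}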
+
+Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
+ CopiedList* copied_list, PromotionList* promotion_list,
+ int task_id)
+ : collector_(collector),
+ heap_(heap),
promotion_list_(promotion_list, task_id),
copied_list_(copied_list, task_id),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
@@ -92,7 +340,8 @@ Scavenger::Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
is_compacting_(heap->incremental_marking()->IsCompacting()) {}
-void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
+void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, Map* map,
+ int size) {
// We are not collecting slots on new space objects during mutation thus we
// have to scan for pointers to evacuation candidates when we promote
// objects. But we should not record any slots in non-black objects. Grey
@@ -103,7 +352,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
is_compacting_ &&
heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
- target->IterateBodyFast(target->map(), size, &visitor);
+ target->IterateBodyFast(map, size, &visitor);
}
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
@@ -136,9 +385,6 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
void Scavenger::Process(OneshotBarrier* barrier) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::Process");
- // Threshold when to switch processing the promotion list to avoid
- // allocating too much backing store in the worklist.
- const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
ScavengeVisitor scavenge_visitor(this);
const bool have_barrier = barrier != nullptr;
@@ -147,8 +393,7 @@ void Scavenger::Process(OneshotBarrier* barrier) {
do {
done = true;
ObjectAndSize object_and_size;
- while ((promotion_list_.LocalPushSegmentSize() <
- kProcessPromotionListThreshold) &&
+ while (promotion_list_.ShouldEagerlyProcessPromotionList() &&
copied_list_.Pop(&object_and_size)) {
scavenge_visitor.Visit(object_and_size.first);
done = false;
@@ -159,11 +404,11 @@ void Scavenger::Process(OneshotBarrier* barrier) {
}
}
- while (promotion_list_.Pop(&object_and_size)) {
- HeapObject* target = object_and_size.first;
- int size = object_and_size.second;
+ struct PromotionListEntry entry;
+ while (promotion_list_.Pop(&entry)) {
+ HeapObject* target = entry.heap_object;
DCHECK(!target->IsMap());
- IterateAndScavengePromotedObject(target, size);
+ IterateAndScavengePromotedObject(target, entry.map, entry.size);
done = false;
if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
if (!promotion_list_.IsGlobalPoolEmpty()) {
@@ -178,6 +423,7 @@ void Scavenger::Finalize() {
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
heap()->IncrementPromotedObjectsSize(promoted_size_);
+ collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
allocator_.Finalize();
}
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 4e6753f6ce..b984102c6b 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -16,17 +16,101 @@ namespace internal {
class OneshotBarrier;
+enum class CopyAndForwardResult {
+ SUCCESS_YOUNG_GENERATION,
+ SUCCESS_OLD_GENERATION,
+ FAILURE
+};
+
+using ObjectAndSize = std::pair<HeapObject*, int>;
+using SurvivingNewLargeObjectsMap = std::unordered_map<HeapObject*, Map*>;
+using SurvivingNewLargeObjectMapEntry = std::pair<HeapObject*, Map*>;
+
+class ScavengerCollector {
+ public:
+ static const int kMaxScavengerTasks = 8;
+
+ explicit ScavengerCollector(Heap* heap);
+
+ void CollectGarbage();
+
+ private:
+ void MergeSurvivingNewLargeObjects(
+ const SurvivingNewLargeObjectsMap& objects);
+
+ int NumberOfScavengeTasks();
+
+ void HandleSurvivingNewLargeObjects();
+
+ Isolate* const isolate_;
+ Heap* const heap_;
+ base::Semaphore parallel_scavenge_semaphore_;
+ SurvivingNewLargeObjectsMap surviving_new_large_objects_;
+
+ friend class Scavenger;
+};
+
class Scavenger {
public:
+ struct PromotionListEntry {
+ HeapObject* heap_object;
+ Map* map;
+ int size;
+ };
+
+ class PromotionList {
+ public:
+ class View {
+ public:
+ View(PromotionList* promotion_list, int task_id)
+ : promotion_list_(promotion_list), task_id_(task_id) {}
+
+ inline void PushRegularObject(HeapObject* object, int size);
+ inline void PushLargeObject(HeapObject* object, Map* map, int size);
+ inline bool IsEmpty();
+ inline size_t LocalPushSegmentSize();
+ inline bool Pop(struct PromotionListEntry* entry);
+ inline bool IsGlobalPoolEmpty();
+ inline bool ShouldEagerlyProcessPromotionList();
+
+ private:
+ PromotionList* promotion_list_;
+ int task_id_;
+ };
+
+ explicit PromotionList(int num_tasks)
+ : regular_object_promotion_list_(num_tasks),
+ large_object_promotion_list_(num_tasks) {}
+
+ inline void PushRegularObject(int task_id, HeapObject* object, int size);
+ inline void PushLargeObject(int task_id, HeapObject* object, Map* map,
+ int size);
+ inline bool IsEmpty();
+ inline size_t LocalPushSegmentSize(int task_id);
+ inline bool Pop(int task_id, struct PromotionListEntry* entry);
+ inline bool IsGlobalPoolEmpty();
+ inline bool ShouldEagerlyProcessPromotionList(int task_id);
+
+ private:
+ static const int kRegularObjectPromotionListSegmentSize = 256;
+ static const int kLargeObjectPromotionListSegmentSize = 4;
+
+ using RegularObjectPromotionList =
+ Worklist<ObjectAndSize, kRegularObjectPromotionListSegmentSize>;
+ using LargeObjectPromotionList =
+ Worklist<PromotionListEntry, kLargeObjectPromotionListSegmentSize>;
+
+ RegularObjectPromotionList regular_object_promotion_list_;
+ LargeObjectPromotionList large_object_promotion_list_;
+ };
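
The promotion list is now split in two: ordinary promotions stay compact (object, size) pairs in 256-entry segments, while the rare large-object promotions carry their Map* explicitly (the map word may already be stashed away by the time HandleSurvivingNewLargeObjects() runs, as seen earlier in this diff) and use tiny 4-entry segments. A hedged sketch of the shape of this structure, with std::deque standing in for V8's segmented Worklist and the pop order chosen arbitrarily:

#include <deque>
#include <utility>

struct HeapObjectStub {};
struct MapStub {};

struct PromotionEntry {
  HeapObjectStub* object;
  MapStub* map;  // only meaningful for large-object promotions
  int size;
};

class SplitPromotionList {
 public:
  void PushRegular(HeapObjectStub* o, int size) {
    regular_.push_back({o, size});
  }
  void PushLarge(HeapObjectStub* o, MapStub* m, int size) {
    large_.push_back({o, m, size});
  }
  bool Pop(PromotionEntry* entry) {
    if (!regular_.empty()) {
      // For a regular object the map can be re-read from the object itself;
      // this sketch just leaves it null.
      *entry = {regular_.front().first, nullptr, regular_.front().second};
      regular_.pop_front();
      return true;
    }
    if (!large_.empty()) {
      *entry = large_.front();
      large_.pop_front();
      return true;
    }
    return false;
  }
  bool IsEmpty() const { return regular_.empty() && large_.empty(); }

 private:
  std::deque<std::pair<HeapObjectStub*, int>> regular_;  // segment size 256
  std::deque<PromotionEntry> large_;                     // segment size 4
};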
+
static const int kCopiedListSegmentSize = 256;
- static const int kPromotionListSegmentSize = 256;
- using ObjectAndSize = std::pair<HeapObject*, int>;
using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
- using PromotionList = Worklist<ObjectAndSize, kPromotionListSegmentSize>;
- Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
- PromotionList* promotion_list, int task_id);
+ Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
+ CopiedList* copied_list, PromotionList* promotion_list,
+ int task_id);
// Entry point for scavenging an old generation page. For scavenging single
 // objects see RootScavengeVisitor and ScavengeVisitor below.
@@ -61,39 +145,52 @@ class Scavenger {
// Scavenges an object |object| referenced from slot |p|. |object| is required
// to be in from space.
- inline void ScavengeObject(HeapObjectReference** p, HeapObject* object);
+ inline SlotCallbackResult ScavengeObject(HeapObjectReference** p,
+ HeapObject* object);
// Copies |source| to |target| and sets the forwarding pointer in |source|.
V8_INLINE bool MigrateObject(Map* map, HeapObject* source, HeapObject* target,
int size);
- V8_INLINE bool SemiSpaceCopyObject(Map* map, HeapObjectReference** slot,
- HeapObject* object, int object_size);
+ V8_INLINE SlotCallbackResult
+ RememberedSetEntryNeeded(CopyAndForwardResult result);
- V8_INLINE bool PromoteObject(Map* map, HeapObjectReference** slot,
- HeapObject* object, int object_size);
+ V8_INLINE CopyAndForwardResult SemiSpaceCopyObject(Map* map,
+ HeapObjectReference** slot,
+ HeapObject* object,
+ int object_size);
- V8_INLINE void EvacuateObject(HeapObjectReference** slot, Map* map,
- HeapObject* source);
+ V8_INLINE CopyAndForwardResult PromoteObject(Map* map,
+ HeapObjectReference** slot,
+ HeapObject* object,
+ int object_size);
- // Different cases for object evacuation.
+ V8_INLINE SlotCallbackResult EvacuateObject(HeapObjectReference** slot,
+ Map* map, HeapObject* source);
- V8_INLINE void EvacuateObjectDefault(Map* map, HeapObjectReference** slot,
- HeapObject* object, int object_size);
+ V8_INLINE bool HandleLargeObject(Map* map, HeapObject* object,
+ int object_size);
- V8_INLINE void EvacuateJSFunction(Map* map, HeapObject** slot,
- JSFunction* object, int object_size);
+ // Different cases for object evacuation.
+ V8_INLINE SlotCallbackResult EvacuateObjectDefault(Map* map,
+ HeapObjectReference** slot,
+ HeapObject* object,
+ int object_size);
- inline void EvacuateThinString(Map* map, HeapObject** slot,
- ThinString* object, int object_size);
+ inline SlotCallbackResult EvacuateThinString(Map* map, HeapObject** slot,
+ ThinString* object,
+ int object_size);
- inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
- ConsString* object, int object_size);
+ inline SlotCallbackResult EvacuateShortcutCandidate(Map* map,
+ HeapObject** slot,
+ ConsString* object,
+ int object_size);
- void IterateAndScavengePromotedObject(HeapObject* target, int size);
+ void IterateAndScavengePromotedObject(HeapObject* target, Map* map, int size);
static inline bool ContainsOnlyData(VisitorId visitor_id);
+ ScavengerCollector* const collector_;
Heap* const heap_;
PromotionList::View promotion_list_;
CopiedList::View copied_list_;
@@ -101,6 +198,7 @@ class Scavenger {
size_t copied_size_;
size_t promoted_size_;
LocalAllocator allocator_;
+ SurvivingNewLargeObjectsMap surviving_new_large_objects_;
const bool is_logging_;
const bool is_incremental_marking_;
const bool is_compacting_;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 2742cd9c9d..5790b82907 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -23,11 +23,13 @@
#include "src/objects/dictionary.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/map.h"
+#include "src/objects/microtask-queue.h"
#include "src/objects/microtask.h"
#include "src/objects/module.h"
#include "src/objects/promise.h"
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/stack-frame-info.h"
#include "src/objects/string.h"
#include "src/regexp/jsregexp.h"
#include "src/wasm/wasm-objects.h"
@@ -56,33 +58,34 @@ bool Heap::CreateHeapObjects() {
}
const Heap::StringTypeTable Heap::string_type_table[] = {
-#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
- {type, size, k##camel_name##MapRootIndex},
+#define STRING_TYPE_ELEMENT(type, size, name, CamelName) \
+ {type, size, RootIndex::k##CamelName##Map},
STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};
const Heap::ConstantStringTable Heap::constant_string_table[] = {
- {"", kempty_stringRootIndex},
-#define CONSTANT_STRING_ELEMENT(name, contents) {contents, k##name##RootIndex},
- INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
+ {"", RootIndex::kempty_string},
+#define CONSTANT_STRING_ELEMENT(_, name, contents) \
+ {contents, RootIndex::k##name},
+ INTERNALIZED_STRING_LIST_GENERATOR(CONSTANT_STRING_ELEMENT, /* not used */)
#undef CONSTANT_STRING_ELEMENT
};
const Heap::StructTable Heap::struct_table[] = {
-#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
- {NAME##_TYPE, Name::kSize, k##Name##MapRootIndex},
+#define STRUCT_TABLE_ELEMENT(TYPE, Name, name) \
+ {TYPE, Name::kSize, RootIndex::k##Name##Map},
STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
-#define ALLOCATION_SITE_ELEMENT(NAME, Name, Size, name) \
- {NAME##_TYPE, Name::kSize##Size, k##Name##Size##MapRootIndex},
- ALLOCATION_SITE_LIST(ALLOCATION_SITE_ELEMENT)
+#define ALLOCATION_SITE_ELEMENT(_, TYPE, Name, Size, name) \
+ {TYPE, Name::kSize##Size, RootIndex::k##Name##Size##Map},
+ ALLOCATION_SITE_LIST(ALLOCATION_SITE_ELEMENT, /* not used */)
#undef ALLOCATION_SITE_ELEMENT
-#define DATA_HANDLER_ELEMENT(NAME, Name, Size, name) \
- {NAME##_TYPE, Name::kSizeWithData##Size, k##Name##Size##MapRootIndex},
- DATA_HANDLER_LIST(DATA_HANDLER_ELEMENT)
+#define DATA_HANDLER_ELEMENT(_, TYPE, Name, Size, name) \
+ {TYPE, Name::kSizeWithData##Size, RootIndex::k##Name##Size##Map},
+ DATA_HANDLER_LIST(DATA_HANDLER_ELEMENT, /* not used */)
#undef DATA_HANDLER_ELEMENT
};
@@ -91,7 +94,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
ElementsKind elements_kind,
int inobject_properties) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- bool is_js_object = Map::IsJSObject(instance_type);
+ bool is_js_object = InstanceTypeChecker::IsJSObject(instance_type);
DCHECK_IMPLIES(is_js_object &&
!Map::CanHaveFastTransitionableElementsKind(instance_type),
IsDictionaryElementsKind(elements_kind) ||
@@ -119,8 +122,8 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
Map* map = reinterpret_cast<Map*>(result);
- map->set_map_after_allocation(reinterpret_cast<Map*>(root(kMetaMapRootIndex)),
- SKIP_WRITE_BARRIER);
+ map->set_map_after_allocation(
+ reinterpret_cast<Map*>(root(RootIndex::kMetaMap)), SKIP_WRITE_BARRIER);
map->set_instance_type(instance_type);
map->set_instance_size(instance_size);
// Initialize to only containing tagged fields.
@@ -179,8 +182,9 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
if (!allocation.To(&object)) return allocation;
- object->set_map_after_allocation(MapForFixedTypedArray(array_type),
- SKIP_WRITE_BARRIER);
+ object->set_map_after_allocation(
+ ReadOnlyRoots(this).MapForFixedTypedArray(array_type),
+ SKIP_WRITE_BARRIER);
FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
elements->set_external_pointer(
@@ -390,8 +394,8 @@ bool Heap::CreateInitialMaps() {
{ // Create a separate external one byte string map for native sources.
AllocationResult allocation =
- AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
- ExternalOneByteString::kShortSize);
+ AllocateMap(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE,
+ ExternalOneByteString::kUncachedSize);
if (!allocation.To(&obj)) return false;
Map* map = Map::cast(obj);
map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
@@ -461,6 +465,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(CATCH_CONTEXT_TYPE, catch_context)
ALLOCATE_VARSIZE_MAP(WITH_CONTEXT_TYPE, with_context)
ALLOCATE_VARSIZE_MAP(DEBUG_EVALUATE_CONTEXT_TYPE, debug_evaluate_context)
+ ALLOCATE_VARSIZE_MAP(AWAIT_CONTEXT_TYPE, await_context)
ALLOCATE_VARSIZE_MAP(BLOCK_CONTEXT_TYPE, block_context)
ALLOCATE_VARSIZE_MAP(MODULE_CONTEXT_TYPE, module_context)
ALLOCATE_VARSIZE_MAP(EVAL_CONTEXT_TYPE, eval_context)
@@ -696,35 +701,35 @@ void Heap::CreateInitialObjects() {
{
HandleScope scope(isolate());
-#define SYMBOL_INIT(name) \
+#define SYMBOL_INIT(_, name) \
{ \
Handle<Symbol> symbol( \
isolate()->factory()->NewPrivateSymbol(TENURED_READ_ONLY)); \
- roots_[k##name##RootIndex] = *symbol; \
+ roots_[RootIndex::k##name] = *symbol; \
}
- PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
+ PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
}
{
HandleScope scope(isolate());
-#define SYMBOL_INIT(name, description) \
+#define SYMBOL_INIT(_, name, description) \
Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
Handle<String> name##d = \
factory->NewStringFromStaticChars(#description, TENURED_READ_ONLY); \
name->set_name(*name##d); \
- roots_[k##name##RootIndex] = *name;
- PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
+ roots_[RootIndex::k##name] = *name;
+ PUBLIC_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
-#define SYMBOL_INIT(name, description) \
+#define SYMBOL_INIT(_, name, description) \
Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
Handle<String> name##d = \
factory->NewStringFromStaticChars(#description, TENURED_READ_ONLY); \
name->set_is_well_known_symbol(true); \
name->set_name(*name##d); \
- roots_[k##name##RootIndex] = *name;
- WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
+ roots_[RootIndex::k##name] = *name;
+ WELL_KNOWN_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
// Mark "Interesting Symbols" appropriately.
@@ -754,9 +759,7 @@ void Heap::CreateInitialObjects() {
factory->NewManyClosuresCell(factory->undefined_value());
set_many_closures_cell(*many_closures_cell);
- // Microtask queue uses the empty fixed array as a sentinel for "empty".
- // Number of queued microtasks stored in Isolate::pending_microtask_count().
- set_microtask_queue(roots.empty_fixed_array());
+ set_default_microtask_queue(*factory->NewMicrotaskQueue());
{
Handle<FixedArray> empty_sloppy_arguments_elements =
@@ -815,6 +818,9 @@ void Heap::CreateInitialObjects() {
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
script->set_type(Script::TYPE_NATIVE);
+ // This is used for exceptions thrown with no stack frames. Such exceptions
+ // can be shared everywhere.
+ script->set_origin_options(ScriptOriginOptions(true, false));
set_empty_script(*script);
Handle<Cell> array_constructor_cell = factory->NewCell(
@@ -849,6 +855,10 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_promise_species_protector(*cell);
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_string_iterator_protector(*cell);
+
Handle<Cell> string_length_overflow_cell = factory->NewCell(
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_string_length_protector(*string_length_overflow_cell);
@@ -874,11 +884,6 @@ void Heap::CreateInitialObjects() {
set_noscript_shared_function_infos(roots.empty_weak_array_list());
- STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
- set_deserialize_lazy_handler(Smi::kZero);
- set_deserialize_lazy_handler_wide(Smi::kZero);
- set_deserialize_lazy_handler_extra_wide(Smi::kZero);
-
// Evaluate the hash values which will then be cached in the strings.
isolate()->factory()->zero_string()->Hash();
isolate()->factory()->one_string()->Hash();
@@ -901,16 +906,19 @@ void Heap::CreateInternalAccessorInfoObjects() {
HandleScope scope(isolate);
Handle<AccessorInfo> acessor_info;
-#define INIT_ACCESSOR_INFO(accessor_name, AccessorName) \
- acessor_info = Accessors::Make##AccessorName##Info(isolate); \
- roots_[k##AccessorName##AccessorRootIndex] = *acessor_info;
- ACCESSOR_INFO_LIST(INIT_ACCESSOR_INFO)
+#define INIT_ACCESSOR_INFO(_, accessor_name, AccessorName, ...) \
+ acessor_info = Accessors::Make##AccessorName##Info(isolate); \
+ roots_[RootIndex::k##AccessorName##Accessor] = *acessor_info;
+ ACCESSOR_INFO_LIST_GENERATOR(INIT_ACCESSOR_INFO, /* not used */)
#undef INIT_ACCESSOR_INFO
-#define INIT_SIDE_EFFECT_FLAG(AccessorName) \
- AccessorInfo::cast(roots_[k##AccessorName##AccessorRootIndex]) \
- ->set_has_no_side_effect(true);
- SIDE_EFFECT_FREE_ACCESSOR_INFO_LIST(INIT_SIDE_EFFECT_FLAG)
+#define INIT_SIDE_EFFECT_FLAG(_, accessor_name, AccessorName, GetterType, \
+ SetterType) \
+ AccessorInfo::cast(roots_[RootIndex::k##AccessorName##Accessor]) \
+ ->set_getter_side_effect_type(SideEffectType::GetterType); \
+ AccessorInfo::cast(roots_[RootIndex::k##AccessorName##Accessor]) \
+ ->set_setter_side_effect_type(SideEffectType::SetterType);
+ ACCESSOR_INFO_LIST_GENERATOR(INIT_SIDE_EFFECT_FLAG, /* not used */)
#undef INIT_SIDE_EFFECT_FLAG
}
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 9e86905d00..7162769e5e 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_
+#include "src/base/atomic-utils.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/base/v8-fallthrough.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
@@ -92,6 +94,27 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
return nullptr;
}
+void Space::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount) {
+ base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
+ heap()->IncrementExternalBackingStoreBytes(type, amount);
+}
+
+void Space::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount) {
+ base::CheckedDecrement(&external_backing_store_bytes_[type], amount);
+ heap()->DecrementExternalBackingStoreBytes(type, amount);
+}
+
+void Space::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
+ Space* from, Space* to,
+ size_t amount) {
+ if (from == to) return;
+
+ base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
+ base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
+}
+
// -----------------------------------------------------------------------------
// SemiSpace
@@ -189,6 +212,28 @@ MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
return chunk;
}
+void MemoryChunk::IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType type, size_t amount) {
+ base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
+ owner()->IncrementExternalBackingStoreBytes(type, amount);
+}
+
+void MemoryChunk::DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType type, size_t amount) {
+ base::CheckedDecrement(&external_backing_store_bytes_[type], amount);
+ owner()->DecrementExternalBackingStoreBytes(type, amount);
+}
+
+void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
+ MemoryChunk* from,
+ MemoryChunk* to,
+ size_t amount) {
+ base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
+ base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
+ Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
+ amount);
+}
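
The new Increment/Decrement/Move helpers above funnel all accounting through base::CheckedIncrement and base::CheckedDecrement. A minimal model of what those helpers are assumed to do (a relaxed atomic adjustment plus an underflow check):

#include <atomic>
#include <cassert>
#include <cstddef>

inline void CheckedIncrement(std::atomic<std::size_t>* counter,
                             std::size_t amount) {
  counter->fetch_add(amount, std::memory_order_relaxed);
}

inline void CheckedDecrement(std::atomic<std::size_t>* counter,
                             std::size_t amount) {
  std::size_t old_value = counter->fetch_sub(amount, std::memory_order_relaxed);
  // Subtracting more than was ever added means the bookkeeping is broken.
  assert(old_value >= amount);
  (void)old_value;
}

The from == to early return in Space::MoveExternalBackingStoreBytes then makes intra-space moves a true no-op instead of a paired decrement and increment on the same counter.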
+
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner()->identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index ff28ab56b2..dcacea0afc 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -15,7 +15,7 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
-#include "src/heap/incremental-marking.h"
+#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
@@ -94,227 +94,115 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
}
}
-// -----------------------------------------------------------------------------
-// CodeRange
-
static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
LAZY_INSTANCE_INITIALIZER;
-CodeRange::CodeRange(Isolate* isolate, size_t requested)
+Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ auto it = recently_freed_.find(code_range_size);
+ if (it == recently_freed_.end() || it->second.empty()) {
+ return reinterpret_cast<Address>(GetRandomMmapAddr());
+ }
+ Address result = it->second.back();
+ it->second.pop_back();
+ return result;
+}
+
+void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
+ size_t code_range_size) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ recently_freed_[code_range_size].push_back(code_range_start);
+}
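
GetAddressHint() and NotifyFreedCodeRange() above now traffic in Address rather than void*. Isolated into a standalone sketch, the cache's behavior is easy to see; RandomAddress() is a placeholder for GetRandomMmapAddr():

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>
#include <mutex>
#include <vector>

using Address = std::uintptr_t;

class CodeRangeAddressHintSketch {
 public:
  // Prefer an address where a code range of exactly this size was freed.
  Address GetAddressHint(std::size_t code_range_size) {
    std::lock_guard<std::mutex> guard(mutex_);
    auto it = recently_freed_.find(code_range_size);
    if (it == recently_freed_.end() || it->second.empty()) {
      return RandomAddress();
    }
    Address result = it->second.back();
    it->second.pop_back();
    return result;
  }

  void NotifyFreedCodeRange(Address start, std::size_t code_range_size) {
    std::lock_guard<std::mutex> guard(mutex_);
    recently_freed_[code_range_size].push_back(start);
  }

 private:
  static Address RandomAddress() { return 0; }  // stand-in for an mmap hint

  std::mutex mutex_;
  std::map<std::size_t, std::vector<Address>> recently_freed_;
};

int main() {
  CodeRangeAddressHintSketch hint;
  hint.NotifyFreedCodeRange(0x40000000u, std::size_t{128} << 20);
  // A new 128MB range is steered back to the address that was just freed...
  assert(hint.GetAddressHint(std::size_t{128} << 20) == 0x40000000u);
  // ...while an unseen size falls back to the random hint.
  assert(hint.GetAddressHint(std::size_t{64} << 20) == 0);
}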
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+//
+
+MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
+ size_t code_range_size)
: isolate_(isolate),
- free_list_(0),
- allocation_list_(0),
- current_allocation_block_index_(0),
- requested_code_range_size_(0) {
- DCHECK(!virtual_memory_.IsReserved());
+ data_page_allocator_(GetPlatformPageAllocator()),
+ code_page_allocator_(nullptr),
+ capacity_(RoundUp(capacity, Page::kPageSize)),
+ size_(0),
+ size_executable_(0),
+ lowest_ever_allocated_(static_cast<Address>(-1ll)),
+ highest_ever_allocated_(kNullAddress),
+ unmapper_(isolate->heap(), this) {
+ InitializeCodePageAllocator(data_page_allocator_, code_range_size);
+}
+
+void MemoryAllocator::InitializeCodePageAllocator(
+ v8::PageAllocator* page_allocator, size_t requested) {
+ DCHECK_NULL(code_page_allocator_instance_.get());
+
+ code_page_allocator_ = page_allocator;
if (requested == 0) {
+ if (!kRequiresCodeRange) return;
// When a target requires the code range feature, we put all code objects
// in a kMaximalCodeRangeSize range of virtual address space, so that
// they can call each other with near calls.
- if (kRequiresCodeRange) {
- requested = kMaximalCodeRangeSize;
- } else {
- return;
- }
- }
-
- if (requested <= kMinimumCodeRangeSize) {
+ requested = kMaximalCodeRangeSize;
+ } else if (requested <= kMinimumCodeRangeSize) {
requested = kMinimumCodeRangeSize;
}
const size_t reserved_area =
kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
- if (requested < (kMaximalCodeRangeSize - reserved_area))
- requested += reserved_area;
-
+ if (requested < (kMaximalCodeRangeSize - reserved_area)) {
+ requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
+ // Fulfilling both the reserved pages requirement and the huge code area
+ // alignment is not supported (requires re-implementation).
+ DCHECK_LE(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize());
+ }
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
- requested_code_range_size_ = requested;
-
- VirtualMemory reservation;
- void* hint = code_range_address_hint.Pointer()->GetAddressHint(requested);
- if (!AlignedAllocVirtualMemory(
- requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()), hint,
- &reservation)) {
- V8::FatalProcessOutOfMemory(isolate,
+ Address hint =
+ RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
+ page_allocator->AllocatePageSize());
+ VirtualMemory reservation(
+ page_allocator, requested, reinterpret_cast<void*>(hint),
+ Max(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize()));
+ if (!reservation.IsReserved()) {
+ V8::FatalProcessOutOfMemory(isolate_,
"CodeRange setup: allocate virtual memory");
}
+ code_range_ = reservation.region();
// We are sure that we have mapped a block of requested addresses.
DCHECK_GE(reservation.size(), requested);
Address base = reservation.address();
// On some platforms, specifically Win64, we need to reserve some pages at
- // the beginning of an executable space.
+ // the beginning of an executable space. See
+ // https://cs.chromium.org/chromium/src/components/crash/content/
+ // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
+ // for details.
if (reserved_area > 0) {
if (!reservation.SetPermissions(base, reserved_area,
PageAllocator::kReadWrite))
- V8::FatalProcessOutOfMemory(isolate, "CodeRange setup: set permissions");
+ V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
base += reserved_area;
}
- Address aligned_base = ::RoundUp(base, MemoryChunk::kAlignment);
- size_t size = reservation.size() - (aligned_base - base) - reserved_area;
- allocation_list_.emplace_back(aligned_base, size);
- current_allocation_block_index_ = 0;
+ Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
+ size_t size =
+ RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
+ MemoryChunk::kPageSize);
+ DCHECK(IsAligned(aligned_base, kCodeRangeAreaAlignment));
LOG(isolate_,
NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
requested));
- virtual_memory_.TakeControl(&reservation);
-}
-
-CodeRange::~CodeRange() {
- if (virtual_memory_.IsReserved()) {
- Address addr = start();
- virtual_memory_.Free();
- code_range_address_hint.Pointer()->NotifyFreedCodeRange(
- reinterpret_cast<void*>(addr), requested_code_range_size_);
- }
-}
-
-bool CodeRange::CompareFreeBlockAddress(const FreeBlock& left,
- const FreeBlock& right) {
- return left.start < right.start;
-}
-
-bool CodeRange::GetNextAllocationBlock(size_t requested) {
- for (current_allocation_block_index_++;
- current_allocation_block_index_ < allocation_list_.size();
- current_allocation_block_index_++) {
- if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return true; // Found a large enough allocation block.
- }
- }
-
- // Sort and merge the free blocks on the free list and the allocation list.
- free_list_.insert(free_list_.end(), allocation_list_.begin(),
- allocation_list_.end());
- allocation_list_.clear();
- std::sort(free_list_.begin(), free_list_.end(), &CompareFreeBlockAddress);
- for (size_t i = 0; i < free_list_.size();) {
- FreeBlock merged = free_list_[i];
- i++;
- // Add adjacent free blocks to the current merged block.
- while (i < free_list_.size() &&
- free_list_[i].start == merged.start + merged.size) {
- merged.size += free_list_[i].size;
- i++;
- }
- if (merged.size > 0) {
- allocation_list_.push_back(merged);
- }
- }
- free_list_.clear();
-
- for (current_allocation_block_index_ = 0;
- current_allocation_block_index_ < allocation_list_.size();
- current_allocation_block_index_++) {
- if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return true; // Found a large enough allocation block.
- }
- }
- current_allocation_block_index_ = 0;
- // Code range is full or too fragmented.
- return false;
-}
-
-
-Address CodeRange::AllocateRawMemory(const size_t requested_size,
- const size_t commit_size,
- size_t* allocated) {
- // requested_size includes the header and two guard regions, while commit_size
- // only includes the header.
- DCHECK_LE(commit_size,
- requested_size - 2 * MemoryAllocator::CodePageGuardSize());
- FreeBlock current;
- if (!ReserveBlock(requested_size, &current)) {
- *allocated = 0;
- return kNullAddress;
- }
- *allocated = current.size;
- DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
- &virtual_memory_, current.start, commit_size, *allocated)) {
- *allocated = 0;
- ReleaseBlock(&current);
- return kNullAddress;
- }
- return current.start;
-}
-
-void CodeRange::FreeRawMemory(Address address, size_t length) {
- DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
- base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- free_list_.emplace_back(address, length);
- virtual_memory_.SetPermissions(address, length, PageAllocator::kNoAccess);
-}
-
-bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
- base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- DCHECK(allocation_list_.empty() ||
- current_allocation_block_index_ < allocation_list_.size());
- if (allocation_list_.empty() ||
- requested_size > allocation_list_[current_allocation_block_index_].size) {
- // Find an allocation block large enough.
- if (!GetNextAllocationBlock(requested_size)) return false;
- }
- // Commit the requested memory at the start of the current allocation block.
- size_t aligned_requested = ::RoundUp(requested_size, MemoryChunk::kAlignment);
- *block = allocation_list_[current_allocation_block_index_];
- // Don't leave a small free block, useless for a large object or chunk.
- if (aligned_requested < (block->size - Page::kPageSize)) {
- block->size = aligned_requested;
- }
- DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
- allocation_list_[current_allocation_block_index_].start += block->size;
- allocation_list_[current_allocation_block_index_].size -= block->size;
- return true;
-}
-
-
-void CodeRange::ReleaseBlock(const FreeBlock* block) {
- base::LockGuard<base::Mutex> guard(&code_range_mutex_);
- free_list_.push_back(*block);
-}
-
-void* CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- auto it = recently_freed_.find(code_range_size);
- if (it == recently_freed_.end() || it->second.empty()) {
- return GetRandomMmapAddr();
- }
- void* result = it->second.back();
- it->second.pop_back();
- return result;
-}
-
-void CodeRangeAddressHint::NotifyFreedCodeRange(void* code_range_start,
- size_t code_range_size) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- recently_freed_[code_range_size].push_back(code_range_start);
-}
-
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-//
-
-MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
- size_t code_range_size)
- : isolate_(isolate),
- code_range_(nullptr),
- capacity_(RoundUp(capacity, Page::kPageSize)),
- size_(0),
- size_executable_(0),
- lowest_ever_allocated_(static_cast<Address>(-1ll)),
- highest_ever_allocated_(kNullAddress),
- unmapper_(isolate->heap(), this) {
- code_range_ = new CodeRange(isolate_, code_range_size);
+ heap_reservation_.TakeControl(&reservation);
+ code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
+ page_allocator, aligned_base, size,
+ static_cast<size_t>(MemoryChunk::kAlignment));
+ code_page_allocator_ = code_page_allocator_instance_.get();
}
-
void MemoryAllocator::TearDown() {
unmapper()->TearDown();
@@ -328,8 +216,15 @@ void MemoryAllocator::TearDown() {
last_chunk_.Free();
}
- delete code_range_;
- code_range_ = nullptr;
+ if (code_page_allocator_instance_.get()) {
+ DCHECK(!code_range_.is_empty());
+ code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
+ code_range_.size());
+ code_range_ = base::AddressRegion();
+ code_page_allocator_instance_.reset();
+ }
+ code_page_allocator_ = nullptr;
+ data_page_allocator_ = nullptr;
}
class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
@@ -489,61 +384,41 @@ size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
return sum;
}
-bool MemoryAllocator::CommitMemory(Address base, size_t size) {
- if (!SetPermissions(base, size, PageAllocator::kReadWrite)) {
+bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
+ Address base = reservation->address();
+ size_t size = reservation->size();
+ if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
+ isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
return true;
}
-void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
- Executability executable) {
- // TODO(gc) make code_range part of memory allocator?
- // Code which is part of the code-range does not have its own VirtualMemory.
- DCHECK(code_range() == nullptr ||
- !code_range()->contains(reservation->address()));
- DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
- reservation->size() <= Page::kPageSize);
-
- reservation->Free();
-}
-
-
-void MemoryAllocator::FreeMemory(Address base, size_t size,
- Executability executable) {
- // TODO(gc) make code_range part of memory allocator?
- if (code_range() != nullptr && code_range()->contains(base)) {
- DCHECK(executable == EXECUTABLE);
- code_range()->FreeRawMemory(base, size);
- } else {
- DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
- CHECK(FreePages(reinterpret_cast<void*>(base), size));
+bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
+ size_t size = reservation->size();
+ if (!reservation->SetPermissions(reservation->address(), size,
+ PageAllocator::kNoAccess)) {
+ return false;
}
+ isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+ return true;
}
-Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
- void* hint,
- VirtualMemory* controller) {
- VirtualMemory reservation;
- if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation)) {
- return kNullAddress;
- }
-
- Address result = reservation.address();
- size_ += reservation.size();
- controller->TakeControl(&reservation);
- return result;
+void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
+ Address base, size_t size) {
+ CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
}
Address MemoryAllocator::AllocateAlignedMemory(
size_t reserve_size, size_t commit_size, size_t alignment,
Executability executable, void* hint, VirtualMemory* controller) {
+ v8::PageAllocator* page_allocator = this->page_allocator(executable);
DCHECK(commit_size <= reserve_size);
- VirtualMemory reservation;
- Address base =
- ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
- if (base == kNullAddress) return kNullAddress;
+ VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
+ if (!reservation.IsReserved()) return kNullAddress;
+ Address base = reservation.address();
+ size_ += reservation.size();
if (executable == EXECUTABLE) {
if (!CommitExecutableMemory(&reservation, base, commit_size,
@@ -608,8 +483,8 @@ void MemoryChunk::SetReadAndExecutable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(protect_start, page_size));
size_t protect_size = RoundUp(area_size(), page_size);
- CHECK(SetPermissions(protect_start, protect_size,
- PageAllocator::kReadExecute));
+ CHECK(reservation_.SetPermissions(protect_start, protect_size,
+ PageAllocator::kReadExecute));
}
}
@@ -627,15 +502,15 @@ void MemoryChunk::SetReadAndWritable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
- CHECK(SetPermissions(unprotect_start, unprotect_size,
- PageAllocator::kReadWrite));
+ CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
+ PageAllocator::kReadWrite));
}
}
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
- VirtualMemory* reservation) {
+ VirtualMemory reservation) {
MemoryChunk* chunk = FromAddress(base);
DCHECK(base == chunk->address());
@@ -696,14 +571,12 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(area_start, page_size));
size_t area_size = RoundUp(area_end - area_start, page_size);
- CHECK(SetPermissions(area_start, area_size,
- PageAllocator::kReadWriteExecute));
+ CHECK(reservation.SetPermissions(area_start, area_size,
+ PageAllocator::kReadWriteExecute));
}
}
- if (reservation != nullptr) {
- chunk->reservation_.TakeControl(reservation);
- }
+ chunk->reservation_ = std::move(reservation);
return chunk;
}
@@ -863,29 +736,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
// Size of header (not executable) plus area (executable).
size_t commit_size = ::RoundUp(
CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());
-// Allocate executable memory either from code range or from the OS.
-#ifdef V8_TARGET_ARCH_MIPS64
- // Use code range only for large object space on mips64 to keep address
- // range within 256-MB memory region.
- if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
-#else
- if (code_range()->valid()) {
-#endif
- base =
- code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
- DCHECK(IsAligned(base, MemoryChunk::kAlignment));
- if (base == kNullAddress) return nullptr;
- size_ += chunk_size;
- // Update executable memory size.
- size_executable_ += chunk_size;
- } else {
- base = AllocateAlignedMemory(chunk_size, commit_size,
- MemoryChunk::kAlignment, executable,
- address_hint, &reservation);
- if (base == kNullAddress) return nullptr;
- // Update executable memory size.
- size_executable_ += reservation.size();
- }
+ base =
+ AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
+ executable, address_hint, &reservation);
+ if (base == kNullAddress) return nullptr;
+ // Update executable memory size.
+ size_executable_ += reservation.size();
if (Heap::ShouldZapGarbage()) {
ZapBlock(base, CodePageGuardStartOffset(), kZapValue);
@@ -928,7 +784,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
if ((base + chunk_size) == 0u) {
CHECK(!last_chunk_.IsReserved());
last_chunk_.TakeControl(&reservation);
- UncommitBlock(last_chunk_.address(), last_chunk_.size());
+ UncommitMemory(&last_chunk_);
size_ -= chunk_size;
if (executable == EXECUTABLE) {
size_executable_ -= chunk_size;
@@ -940,7 +796,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
MemoryChunk* chunk =
MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
- executable, owner, &reservation);
+ executable, owner, std::move(reservation));
if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
return chunk;
@@ -1128,12 +984,15 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
- UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
+ UncommitMemory(reservation);
} else {
if (reservation->IsReserved()) {
- FreeMemory(reservation, chunk->executable());
+ reservation->Free();
} else {
- FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+ // Only read-only pages can have an uninitialized reservation object.
+ DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
+ FreeMemory(page_allocator(chunk->executable()), chunk->address(),
+ chunk->size());
}
}
}
@@ -1147,8 +1006,9 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
break;
case kAlreadyPooled:
// Pooled pages cannot be touched anymore as their memory is uncommitted.
- FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
- Executability::NOT_EXECUTABLE);
+ // Pooled pages are non-executable.
+ FreeMemory(data_page_allocator(), chunk->address(),
+ static_cast<size_t>(MemoryChunk::kPageSize));
break;
case kPooledAndQueue:
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
@@ -1216,34 +1076,19 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
const Address start = reinterpret_cast<Address>(chunk);
const Address area_start = start + MemoryChunk::kObjectStartOffset;
const Address area_end = start + size;
- if (!CommitBlock(start, size)) {
- return nullptr;
+ // Pooled pages are always regular data pages.
+ DCHECK_NE(CODE_SPACE, owner->identity());
+ VirtualMemory reservation(data_page_allocator(), start, size);
+ if (!CommitMemory(&reservation)) return nullptr;
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(start, size, kZapValue);
}
- VirtualMemory reservation(start, size);
MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
- NOT_EXECUTABLE, owner, &reservation);
+ NOT_EXECUTABLE, owner, std::move(reservation));
size_ += size;
return chunk;
}
-bool MemoryAllocator::CommitBlock(Address start, size_t size) {
- if (!CommitMemory(start, size)) return false;
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(start, size, kZapValue);
- }
-
- isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
- return true;
-}
-
-
-bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- if (!SetPermissions(start, size, PageAllocator::kNoAccess)) return false;
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
- return true;
-}
-
void MemoryAllocator::ZapBlock(Address start, size_t size,
uintptr_t zap_value) {
DCHECK_EQ(start % kPointerSize, 0);
@@ -1441,6 +1286,17 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
}
}
+bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject* object) {
+ if (ShouldSkipEvacuationSlotRecording()) {
+ // Invalidated slots do not matter if we are not recording slots.
+ return true;
+ }
+ if (invalidated_slots() == nullptr) {
+ return false;
+ }
+ return invalidated_slots()->find(object) != invalidated_slots()->end();
+}
+
void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject* old_start,
HeapObject* new_start) {
DCHECK_LT(old_start, new_start);
@@ -1474,19 +1330,6 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
young_generation_bitmap_ = nullptr;
}
-void MemoryChunk::IncrementExternalBackingStoreBytes(
- ExternalBackingStoreType type, size_t amount) {
- external_backing_store_bytes_[type] += amount;
- owner()->IncrementExternalBackingStoreBytes(type, amount);
-}
-
-void MemoryChunk::DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType type, size_t amount) {
- DCHECK_GE(external_backing_store_bytes_[type], amount);
- external_backing_store_bytes_[type] -= amount;
- owner()->DecrementExternalBackingStoreBytes(type, amount);
-}
-
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -2027,7 +1870,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
} else if (object->IsJSArrayBuffer()) {
JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = NumberToSize(array_buffer->byte_length());
+ size_t size = array_buffer->byte_length();
external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -2119,12 +1962,12 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
// -----------------------------------------------------------------------------
// NewSpace implementation
-NewSpace::NewSpace(Heap* heap, size_t initial_semispace_capacity,
+NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+ size_t initial_semispace_capacity,
size_t max_semispace_capacity)
: SpaceWithLinearArea(heap, NEW_SPACE),
to_space_(heap, kToSpace),
- from_space_(heap, kFromSpace),
- reservation_() {
+ from_space_(heap, kFromSpace) {
DCHECK(initial_semispace_capacity <= max_semispace_capacity);
DCHECK(
base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
@@ -2515,7 +2358,7 @@ void NewSpace::Verify(Isolate* isolate) {
} else if (object->IsJSArrayBuffer()) {
JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = NumberToSize(array_buffer->byte_length());
+ size_t size = array_buffer->byte_length();
external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -3130,7 +2973,7 @@ size_t FreeListCategory::SumFreeList() {
size_t sum = 0;
FreeSpace* cur = top();
while (cur != nullptr) {
- DCHECK(cur->map() == page()->heap()->root(Heap::kFreeSpaceMapRootIndex));
+ DCHECK(cur->map() == page()->heap()->root(RootIndex::kFreeSpaceMap));
sum += cur->relaxed_read_size();
cur = cur->next();
}
@@ -3337,12 +3180,18 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
const size_t page_size = MemoryAllocator::GetCommitPageSize();
const size_t area_start_offset = RoundUp(Page::kObjectStartOffset, page_size);
+ MemoryAllocator* memory_allocator = heap()->memory_allocator();
for (Page* p : *this) {
ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
if (access == PageAllocator::kRead) {
page->MakeHeaderRelocatable();
}
- CHECK(SetPermissions(page->address() + area_start_offset,
+
+ // Read-only pages don't have a valid reservation object, so we get the
+ // proper page allocator manually.
+ v8::PageAllocator* page_allocator =
+ memory_allocator->page_allocator(page->executable());
+ CHECK(SetPermissions(page_allocator, page->address() + area_start_offset,
page->size() - area_start_offset, access));
}
}
@@ -3473,13 +3322,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
- size_ += static_cast<int>(page->size());
- AccountCommitted(page->size());
- objects_size_ += object_size;
- page_count_++;
- memory_chunk_list_.PushBack(page);
-
- InsertChunkMapEntries(page);
+ Register(page, object_size);
HeapObject* object = page->GetObject();
@@ -3572,6 +3415,39 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
}
}
+void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
+ DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
+ DCHECK(page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
+ DCHECK(!page->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+ size_t object_size = static_cast<size_t>(page->GetObject()->Size());
+ reinterpret_cast<NewLargeObjectSpace*>(page->owner())
+ ->Unregister(page, object_size);
+ Register(page, object_size);
+ page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+ page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ page->set_owner(this);
+}
+
+void LargeObjectSpace::Register(LargePage* page, size_t object_size) {
+ size_ += static_cast<int>(page->size());
+ AccountCommitted(page->size());
+ objects_size_ += object_size;
+ page_count_++;
+ memory_chunk_list_.PushBack(page);
+
+ InsertChunkMapEntries(page);
+}
+
+void LargeObjectSpace::Unregister(LargePage* page, size_t object_size) {
+ size_ -= static_cast<int>(page->size());
+ AccountUncommitted(page->size());
+ objects_size_ -= object_size;
+ page_count_--;
+ memory_chunk_list_.Remove(page);
+
+ RemoveChunkMapEntries(page);
+}
+
void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* current = first_page();
IncrementalMarking::NonAtomicMarkingState* marking_state =
@@ -3759,5 +3635,13 @@ size_t NewLargeObjectSpace::Available() {
// TODO(hpayer): Update as soon as we have a growing strategy.
return 0;
}
+
+void NewLargeObjectSpace::Flip() {
+ for (LargePage* chunk = first_page(); chunk != nullptr;
+ chunk = chunk->next_page()) {
+ chunk->SetFlag(MemoryChunk::IN_FROM_SPACE);
+ chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
+ }
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 47272501f3..018e9da47b 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -14,6 +14,7 @@
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/base/iterator.h"
#include "src/base/list.h"
#include "src/base/platform/mutex.h"
@@ -32,7 +33,7 @@ namespace internal {
namespace heap {
class HeapTester;
-class TestCodeRangeScope;
+class TestCodePageAllocatorScope;
} // namespace heap
class AllocationObserver;
@@ -109,11 +110,10 @@ class Space;
// Some assertion macros used in the debugging mode.
-#define DCHECK_PAGE_ALIGNED(address) \
- DCHECK((OffsetFrom(address) & kPageAlignmentMask) == 0)
+#define DCHECK_PAGE_ALIGNED(address) DCHECK_EQ(0, (address)&kPageAlignmentMask)
#define DCHECK_OBJECT_ALIGNED(address) \
- DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
+ DCHECK_EQ(0, (address)&kObjectAlignmentMask)
#define DCHECK_OBJECT_SIZE(size) \
DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
@@ -142,12 +142,6 @@ enum FreeMode { kLinkCategory, kDoNotLinkCategory };
enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
-enum ExternalBackingStoreType {
- kArrayBuffer,
- kExternalString,
- kNumTypes
-};
-
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
@@ -363,7 +357,7 @@ class MemoryChunk {
+ kUIntptrSize // uintptr_t flags_
+ kPointerSize // Address area_start_
+ kPointerSize // Address area_end_
- + 2 * kPointerSize // VirtualMemory reservation_
+ + 3 * kPointerSize // VirtualMemory reservation_
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
+ kIntptrSize // intptr_t progress_bar_
@@ -378,7 +372,7 @@ class MemoryChunk {
kPointerSize // std::atomic<ConcurrentSweepingState> concurrent_sweeping_
+ kPointerSize // base::Mutex* page_protection_change_mutex_
+ kPointerSize // unitptr_t write_unprotect_counter_
- + kSizetSize * kNumTypes
+ + kSizetSize * ExternalBackingStoreType::kNumTypes
// std::atomic<size_t> external_backing_store_bytes_
+ kSizetSize // size_t allocated_bytes_
+ kSizetSize // size_t wasted_memory_
@@ -416,7 +410,7 @@ class MemoryChunk {
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
- return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
+ return reinterpret_cast<MemoryChunk*>(a & ~kAlignmentMask);
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromHeapObject(const HeapObject* o) {
@@ -444,6 +438,10 @@ class MemoryChunk {
!chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
}
+ static inline void MoveExternalBackingStoreBytes(
+ ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
+ size_t amount);
+
Address address() const {
return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
}
@@ -518,6 +516,7 @@ class MemoryChunk {
// Updates invalidated_slots after array left-trimming.
void MoveObjectWithInvalidatedSlots(HeapObject* old_start,
HeapObject* new_start);
+ bool RegisteredObjectWithInvalidatedSlots(HeapObject* object);
InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
void ReleaseLocalTracker();
@@ -550,10 +549,12 @@ class MemoryChunk {
}
}
- void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount);
- void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount);
+ inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
return external_backing_store_bytes_[type];
}
@@ -652,7 +653,7 @@ class MemoryChunk {
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
- VirtualMemory* reservation);
+ VirtualMemory reservation);
// Should be called when memory chunk is about to be freed.
void ReleaseAllocatedMemory();
@@ -775,7 +776,7 @@ class Page : public MemoryChunk {
// from [page_addr .. page_addr + kPageSize[. This only works if the object
// is in fact in a page.
static Page* FromAddress(Address addr) {
- return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
+ return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
}
static Page* FromHeapObject(const HeapObject* o) {
return reinterpret_cast<Page*>(reinterpret_cast<Address>(o) &
@@ -797,7 +798,7 @@ class Page : public MemoryChunk {
// Checks whether an address is page aligned.
static bool IsAlignedToPageSize(Address addr) {
- return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
+ return (addr & kPageAlignmentMask) == 0;
}
static bool IsAtObjectStart(Address addr) {
@@ -894,10 +895,23 @@ class ReadOnlyPage : public Page {
// Clears any pointers in the header that point out of the page that would
// otherwise make the header non-relocatable.
void MakeHeaderRelocatable();
+
+ private:
+ friend class ReadOnlySpace;
};
class LargePage : public MemoryChunk {
public:
+ // A limit to guarantee that we do not overflow the typed slot offset in
+ // the old-to-old remembered set.
+ // Note that this limit is higher than what the assembler already imposes
+ // on x64 and ia32 architectures.
+ static const int kMaxCodePageSize = 512 * MB;
+
+ static LargePage* FromHeapObject(const HeapObject* o) {
+ return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
+ }
+
HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
inline LargePage* next_page() {
@@ -910,12 +924,6 @@ class LargePage : public MemoryChunk {
void ClearOutOfLiveRangeSlots(Address free_start);
- // A limit to guarantee that we do not overflow typed slot offset in
- // the old to old remembered set.
- // Note that this limit is higher than what assembler already imposes on
- // x64 and ia32 architectures.
- static const int kMaxCodePageSize = 512 * MB;
-
private:
static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable);
@@ -941,6 +949,9 @@ class Space : public Malloced {
0;
}
+ static inline void MoveExternalBackingStoreBytes(
+ ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
+
virtual ~Space() {
delete[] external_backing_store_bytes_;
external_backing_store_bytes_ = nullptr;
@@ -980,12 +991,6 @@ class Space : public Malloced {
// (e.g. see LargeObjectSpace).
virtual size_t SizeOfObjects() { return Size(); }
- // Returns amount of off-heap memory in-use by objects in this Space.
- virtual size_t ExternalBackingStoreBytes(
- ExternalBackingStoreType type) const {
- return external_backing_store_bytes_[type];
- }
-
// Approximate amount of physical memory committed for this space.
virtual size_t CommittedPhysicalMemory() = 0;
@@ -1015,14 +1020,16 @@ class Space : public Malloced {
committed_ -= bytes;
}
- void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount) {
- external_backing_store_bytes_[type] += amount;
- }
- void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount) {
- DCHECK_GE(external_backing_store_bytes_[type], amount);
- external_backing_store_bytes_[type] -= amount;
+ inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ // Returns amount of off-heap memory in-use by objects in this Space.
+ virtual size_t ExternalBackingStoreBytes(
+ ExternalBackingStoreType type) const {
+ return external_backing_store_bytes_[type];
}
V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
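
The inline Increment/Decrement helpers declared above, together with the new MoveExternalBackingStoreBytes, keep per-space byte counters for off-heap memory. A toy model (not V8 code) of the bookkeeping, showing that a move conserves the global total:

#include <cassert>
#include <cstddef>

enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };

struct ToySpace {
  size_t bytes[kNumTypes] = {};
  void Increment(ExternalBackingStoreType t, size_t n) { bytes[t] += n; }
  void Decrement(ExternalBackingStoreType t, size_t n) {
    assert(bytes[t] >= n);  // mirrors the DCHECK_GE in the old inline body
    bytes[t] -= n;
  }
};

// A move is a paired decrement/increment, so the global total is conserved.
void Move(ExternalBackingStoreType t, ToySpace& from, ToySpace& to, size_t n) {
  from.Decrement(t, n);
  to.Increment(t, n);
}

int main() {
  ToySpace new_space, old_space;
  new_space.Increment(kArrayBuffer, 4096);
  Move(kArrayBuffer, new_space, old_space, 4096);  // e.g. during promotion
  assert(new_space.bytes[kArrayBuffer] == 0);
  assert(old_space.bytes[kArrayBuffer] == 4096);
}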
@@ -1074,94 +1081,6 @@ class MemoryChunkValidator {
};
-// ----------------------------------------------------------------------------
-// All heap objects containing executable code (code objects) must be allocated
-// from a 2 GB range of memory, so that they can call each other using 32-bit
-// displacements. This happens automatically on 32-bit platforms, where 32-bit
-// displacements cover the entire 4GB virtual address space. On 64-bit
-// platforms, we support this using the CodeRange object, which reserves and
-// manages a range of virtual memory.
-class CodeRange {
- public:
- CodeRange(Isolate* isolate, size_t requested_size);
- ~CodeRange();
-
- bool valid() { return virtual_memory_.IsReserved(); }
- Address start() {
- DCHECK(valid());
- return virtual_memory_.address();
- }
- size_t size() {
- DCHECK(valid());
- return virtual_memory_.size();
- }
- bool contains(Address address) {
- if (!valid()) return false;
- Address start = virtual_memory_.address();
- return start <= address && address < start + virtual_memory_.size();
- }
-
- // Allocates a chunk of memory from the large-object portion of
- // the code range. On platforms with no separate code range, should
- // not be called.
- V8_WARN_UNUSED_RESULT Address AllocateRawMemory(const size_t requested_size,
- const size_t commit_size,
- size_t* allocated);
- void FreeRawMemory(Address buf, size_t length);
-
- private:
- class FreeBlock {
- public:
- FreeBlock() : start(0), size(0) {}
- FreeBlock(Address start_arg, size_t size_arg)
- : start(start_arg), size(size_arg) {
- DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
- DCHECK(size >= static_cast<size_t>(Page::kPageSize));
- }
- FreeBlock(void* start_arg, size_t size_arg)
- : start(reinterpret_cast<Address>(start_arg)), size(size_arg) {
- DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
- DCHECK(size >= static_cast<size_t>(Page::kPageSize));
- }
-
- Address start;
- size_t size;
- };
-
- // Finds a block on the allocation list that contains at least the
- // requested amount of memory. If none is found, sorts and merges
- // the existing free memory blocks, and searches again.
- // If none can be found, returns false.
- bool GetNextAllocationBlock(size_t requested);
- // Compares the start addresses of two free blocks.
- static bool CompareFreeBlockAddress(const FreeBlock& left,
- const FreeBlock& right);
- bool ReserveBlock(const size_t requested_size, FreeBlock* block);
- void ReleaseBlock(const FreeBlock* block);
-
- Isolate* isolate_;
-
- // The reserved range of virtual memory that all code objects are put in.
- VirtualMemory virtual_memory_;
-
- // The global mutex guards free_list_ and allocation_list_ as GC threads may
- // access both lists concurrently to the main thread.
- base::Mutex code_range_mutex_;
-
- // Freed blocks of memory are added to the free list. When the allocation
- // list is exhausted, the free list is sorted and merged to make the new
- // allocation list.
- std::vector<FreeBlock> free_list_;
-
- // Memory is allocated from the free blocks on the allocation list.
- // The block at current_allocation_block_index_ is the current block.
- std::vector<FreeBlock> allocation_list_;
- size_t current_allocation_block_index_;
- size_t requested_code_range_size_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeRange);
-};
-
// The process-wide singleton that keeps track of code range regions with the
// intention to reuse free code range regions as a workaround for CFG memory
// leaks (see crbug.com/870054).
@@ -1169,9 +1088,9 @@ class CodeRangeAddressHint {
public:
// Returns the most recently freed code range start address for the given
// size. If there is no such entry, then a random address is returned.
- V8_EXPORT_PRIVATE void* GetAddressHint(size_t code_range_size);
+ V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
- V8_EXPORT_PRIVATE void NotifyFreedCodeRange(void* code_range_start,
+ V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
size_t code_range_size);
private:
@@ -1180,7 +1099,7 @@ class CodeRangeAddressHint {
// addresses. There should be O(1) different code range sizes.
// The length of each array is limited by the peak number of code ranges,
// which should be also O(1).
- std::map<size_t, std::vector<void*>> recently_freed_;
+ std::unordered_map<size_t, std::vector<Address>> recently_freed_;
};
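
A sketch of the hint-cache behaviour this class describes: freed code-range start addresses are filed under their size and handed back on the next request of the same size, falling back to a random address otherwise. Names and the fallback stub are illustrative, not V8's implementation:

#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

using Address = uintptr_t;

// Stand-in for asking the OS for a randomized mmap hint.
Address GetRandomAddressFallback() { return 0; }

class CodeRangeHintCache {
 public:
  Address GetAddressHint(size_t code_range_size) {
    auto it = recently_freed_.find(code_range_size);
    if (it == recently_freed_.end() || it->second.empty()) {
      return GetRandomAddressFallback();
    }
    Address hint = it->second.back();
    it->second.pop_back();
    return hint;
  }
  void NotifyFreedCodeRange(Address start, size_t size) {
    recently_freed_[size].push_back(start);
  }

 private:
  std::unordered_map<size_t, std::vector<Address>> recently_freed_;
};

int main() {
  CodeRangeHintCache cache;
  cache.NotifyFreedCodeRange(0x200000, size_t{1} << 20);
  return cache.GetAddressHint(size_t{1} << 20) == 0x200000 ? 0 : 1;
}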
class SkipList {
@@ -1211,7 +1130,7 @@ class SkipList {
}
static inline int RegionNumber(Address addr) {
- return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
+ return (addr & kPageAlignmentMask) >> kRegionSizeLog2;
}
static void Update(Address addr, int size) {
@@ -1422,16 +1341,11 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
Executability executable, Space* space);
- Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
- VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
size_t alignment, Executability executable,
void* hint, VirtualMemory* controller);
- bool CommitMemory(Address addr, size_t size);
-
- void FreeMemory(VirtualMemory* reservation, Executability executable);
- void FreeMemory(Address addr, size_t size, Executability executable);
+ void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
// internally memory is freed from |start_free| to the end of the reservation.
@@ -1440,23 +1354,19 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
size_t bytes_to_free, Address new_area_end);
- // Commit a contiguous block of memory from the initial chunk. Assumes that
- // the address is not kNullAddress, the size is greater than zero, and that
- // the block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool CommitBlock(Address start, size_t size);
-
// Checks if an allocated MemoryChunk was intended to be used for executable
// memory.
bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
return executable_memory_.find(chunk) != executable_memory_.end();
}
- // Uncommit a contiguous block of memory [start..(start+size)[.
- // start is not kNullAddress, the size is greater than zero, and the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool UncommitBlock(Address start, size_t size);
+ // Commit memory region owned by given reservation object. Returns true if
+ // it succeeded and false otherwise.
+ bool CommitMemory(VirtualMemory* reservation);
+
+ // Uncommit memory region owned by given reservation object. Returns true if
+ // it succeeded and false otherwise.
+ bool UncommitMemory(VirtualMemory* reservation);
// Zaps a contiguous block of memory [start..(start+size)[ with
// a given zap value.
@@ -1467,10 +1377,38 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
size_t commit_size,
size_t reserved_size);
- CodeRange* code_range() { return code_range_; }
+ // Page allocator instance for allocating non-executable pages.
+ // Guaranteed to be a valid pointer.
+ v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
+
+ // Page allocator instance for allocating executable pages.
+ // Guaranteed to be a valid pointer.
+ v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
+
+ // Returns page allocator suitable for allocating pages with requested
+ // executability.
+ v8::PageAllocator* page_allocator(Executability executable) {
+ return executable == EXECUTABLE ? code_page_allocator_
+ : data_page_allocator_;
+ }
+
+ // A region of memory that may contain executable code, including a reserved
+ // OS page with read-write access at the beginning.
+ const base::AddressRegion& code_range() const {
+ // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
+ DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
+ DCHECK_IMPLIES(!code_range_.is_empty(),
+ code_range_.contains(code_page_allocator_instance_->begin(),
+ code_page_allocator_instance_->size()));
+ return code_range_;
+ }
+
Unmapper* unmapper() { return &unmapper_; }
private:
+ void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
+ size_t requested);
+
// PreFree logically frees the object, i.e., it takes care of the size
// bookkeeping and calls the allocation callback.
void PreFreeMemory(MemoryChunk* chunk);
@@ -1518,7 +1456,43 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
}
Isolate* isolate_;
- CodeRange* code_range_;
+
+ // This object controls the virtual space reserved for the V8 heap instance.
+ // Depending on the configuration, it may contain the following:
+ // - no reservation (on 32-bit architectures)
+ // - code range reservation used by the bounded code page allocator (on
+ // 64-bit architectures without pointer compression in the V8 heap)
+ // - data + code range reservation (on 64-bit architectures with pointer
+ // compression in the V8 heap)
+ VirtualMemory heap_reservation_;
+
+ // Page allocator used for allocating data pages. Depending on the
+ // configuration it may be a page allocator instance provided by v8::Platform
+ // or a BoundedPageAllocator (when pointer compression is enabled).
+ v8::PageAllocator* data_page_allocator_;
+
+ // Page allocator used for allocating code pages. Depending on the
+ // configuration it may be a page allocator instance provided by v8::Platform
+ // or a BoundedPageAllocator (when pointer compression is enabled or
+ // on those 64-bit architectures where pc-relative 32-bit displacement
+ // can be used for call and jump instructions).
+ v8::PageAllocator* code_page_allocator_;
+
+ // A part of the |heap_reservation_| that may contain executable code,
+ // including a reserved page with read-write access at the beginning.
+ // See details below.
+ base::AddressRegion code_range_;
+
+ // This unique pointer owns the instance of the bounded code allocator
+ // that controls executable page allocation. It does not control the
+ // optional page at the beginning of the |code_range_|.
+ // Summarizing the above, the following conditions hold:
+ // 1) |heap_reservation_| >= |code_range_|
+ // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
+ // 3) |heap_reservation_| is AllocatePageSize()-aligned
+ // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
+ // 5) |code_range_| is CommitPageSize()-aligned
+ std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
// Maximum space size in bytes.
size_t capacity_;
@@ -1542,7 +1516,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// Data structure to remember allocated executable memory chunks.
std::unordered_set<MemoryChunk*> executable_memory_;
- friend class heap::TestCodeRangeScope;
+ friend class heap::TestCodePageAllocatorScope;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
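
A toy model (not V8 code) of containment conditions 1 and 2 documented in the class: the heap reservation encloses the code range, which in turn encloses the optional RW prefix plus the area handed to the bounded code page allocator:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Region {
  uintptr_t begin;
  size_t size;
  uintptr_t end() const { return begin + size; }
  bool contains(const Region& other) const {
    return begin <= other.begin && other.end() <= end();
  }
};

int main() {
  // Illustrative addresses and sizes only.
  Region heap_reservation{0x10000000, 128 * 1024 * 1024};
  Region code_range{0x12000000, 16 * 1024 * 1024};
  const size_t kRWPrefix = 64 * 1024;  // optional read-write page(s) up front
  Region code_allocator_area{code_range.begin + kRWPrefix,
                             code_range.size - kRWPrefix};
  assert(heap_reservation.contains(code_range));     // condition 1
  assert(code_range.contains(code_allocator_area));  // condition 2
}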
@@ -1567,7 +1541,7 @@ MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
public:
- virtual ~ObjectIterator() {}
+ virtual ~ObjectIterator() = default;
virtual HeapObject* Next() = 0;
};
@@ -1701,7 +1675,7 @@ class LinearAllocationArea {
// functions increase or decrease one of the non-capacity stats in conjunction
// with capacity, or else they always balance increases and decreases to the
// non-capacity stats.
-class AllocationStats BASE_EMBEDDED {
+class AllocationStats {
public:
AllocationStats() { Clear(); }
@@ -2604,8 +2578,8 @@ class NewSpace : public SpaceWithLinearArea {
public:
typedef PageIterator iterator;
- NewSpace(Heap* heap, size_t initial_semispace_capacity,
- size_t max_semispace_capacity);
+ NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+ size_t initial_semispace_capacity, size_t max_semispace_capacity);
~NewSpace() override { TearDown(); }
@@ -3037,6 +3011,8 @@ class LargeObjectSpace : public Space {
void RemoveChunkMapEntries(LargePage* page);
void RemoveChunkMapEntries(LargePage* page, Address free_start);
+ void PromoteNewLargeObject(LargePage* page);
+
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject* obj);
// Checks whether an address is in the object area in this space. Iterates
@@ -3046,6 +3022,9 @@ class LargeObjectSpace : public Space {
// Checks whether the space is empty.
bool IsEmpty() { return first_page() == nullptr; }
+ void Register(LargePage* page, size_t object_size);
+ void Unregister(LargePage* page, size_t object_size);
+
LargePage* first_page() {
return reinterpret_cast<LargePage*>(Space::first_page());
}
@@ -3094,6 +3073,8 @@ class NewLargeObjectSpace : public LargeObjectSpace {
// Available bytes for objects in this space.
size_t Available() override;
+
+ void Flip();
};
class LargeObjectIterator : public ObjectIterator {
@@ -3108,7 +3089,7 @@ class LargeObjectIterator : public ObjectIterator {
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
-class MemoryChunkIterator BASE_EMBEDDED {
+class MemoryChunkIterator {
public:
inline explicit MemoryChunkIterator(Heap* heap);
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index b428a82046..f737eb099d 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -31,15 +31,15 @@ StoreBuffer::StoreBuffer(Heap* heap)
}
void StoreBuffer::SetUp() {
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
const size_t requested_size = kStoreBufferSize * kStoreBuffers;
// Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
// use a bit test to detect the ends of the buffers.
const size_t alignment =
- std::max<size_t>(kStoreBufferSize, AllocatePageSize());
+ std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
- VirtualMemory reservation;
- if (!AlignedAllocVirtualMemory(requested_size, alignment, hint,
- &reservation)) {
+ VirtualMemory reservation(page_allocator, requested_size, hint, alignment);
+ if (!reservation.IsReserved()) {
heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
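
The rewritten SetUp keeps the pre-existing alignment trick: reserving at an alignment of at least kStoreBufferSize means a buffer's end can be found by testing address bits. A small sketch of the hint computation (constants are illustrative stand-ins for the real ones):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t kStoreBufferSize = size_t{1} << 19;  // illustrative value
  const size_t kPageSize = size_t{1} << 16;         // AllocatePageSize() stand-in
  const size_t alignment = std::max(kStoreBufferSize, kPageSize);
  uintptr_t random = 0x71234560;                    // GetRandomMmapAddr() stand-in
  uintptr_t hint = random & ~uintptr_t(alignment - 1);  // AlignedAddress()
  assert(hint % alignment == 0);
  // With this alignment, the end of each kStoreBufferSize chunk can be
  // detected with a single bit test on the current pointer.
}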
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index d2c0f9b75f..4dbb471b7a 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -123,7 +123,7 @@ class StoreBuffer {
: CancelableTask(isolate),
store_buffer_(store_buffer),
tracer_(isolate->heap()->tracer()) {}
- virtual ~Task() {}
+ ~Task() override = default;
private:
void RunInternal() override {
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 9e622c3385..4f5ad18bec 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -76,7 +76,7 @@ class Sweeper::SweeperTask final : public CancelableTask {
space_to_start_(space_to_start),
tracer_(isolate->heap()->tracer()) {}
- virtual ~SweeperTask() {}
+ ~SweeperTask() override = default;
private:
void RunInternal() final {
@@ -111,7 +111,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
: CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}
- virtual ~IncrementalSweeperTask() {}
+ ~IncrementalSweeperTask() override = default;
private:
void RunInternal() final {
@@ -447,10 +447,11 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
void Sweeper::ScheduleIncrementalSweepingTask() {
if (!incremental_sweeper_pending_) {
incremental_sweeper_pending_ = true;
- IncrementalSweeperTask* task =
- new IncrementalSweeperTask(heap_->isolate(), this);
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
- V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
+ auto taskrunner =
+ V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
+ taskrunner->PostTask(
+ base::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
}
}
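
This hunk migrates from the deprecated Platform::CallOnForegroundThread to posting through the isolate's foreground task runner. A minimal sketch of the new pattern against the public v8::Platform API (assumes V8's include/ directory is on the include path; the real code uses base::make_unique rather than std::make_unique):

#include <memory>

#include "v8-platform.h"

class SweepTask : public v8::Task {
 public:
  void Run() override {
    // ... perform one slice of incremental sweeping ...
  }
};

void Schedule(v8::Platform* platform, v8::Isolate* isolate) {
  std::shared_ptr<v8::TaskRunner> runner =
      platform->GetForegroundTaskRunner(isolate);
  runner->PostTask(std::make_unique<SweepTask>());
}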
@@ -530,7 +531,7 @@ class Sweeper::IterabilityTask final : public CancelableTask {
pending_iterability_task_(pending_iterability_task),
tracer_(isolate->heap()->tracer()) {}
- virtual ~IterabilityTask() {}
+ ~IterabilityTask() override = default;
private:
void RunInternal() final {
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 1ce23129e5..4598395642 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -323,6 +323,10 @@ void Assembler::deserialization_set_target_internal_reference_at(
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+#ifdef DEBUG
+ AddUsedRegister(index);
+ AddUsedRegister(base);
+#endif
DCHECK_EQ(len_, 1);
DCHECK_EQ(scale & -4, 0);
// Use SIB with no index register only for base esp.
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 38b65c583f..ff589c820b 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -55,6 +55,7 @@
#include "src/deoptimizer.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
+#include "src/string-constants.h"
#include "src/v8.h"
namespace v8 {
@@ -76,6 +77,13 @@ Immediate Immediate::EmbeddedCode(CodeStub* stub) {
return result;
}
+Immediate Immediate::EmbeddedStringConstant(const StringConstantBase* str) {
+ Immediate result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(str);
+ return result;
+}
+
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
@@ -300,6 +308,7 @@ Register Operand::reg() const {
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
@@ -311,6 +320,12 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
+ case HeapObjectRequest::kStringConstant: {
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ object = str->AllocateStringConstant(isolate);
+ break;
+ }
}
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
Memory<Handle<Object>>(pc) = object;
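
The kStringConstant case plugs into the existing heap-object-request machinery: while emitting, the assembler only records a (kind, offset) pair, and the handle is patched into the code buffer once an isolate is available. A toy model (not V8 code) of that deferred-patch idea:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Request {
  int kind;       // e.g. heap number, code stub, string constant
  size_t offset;  // where in the code buffer the handle slot lives
};

// Once allocation becomes possible, walk the requests and patch each slot.
void Patch(uint8_t* buffer, const std::vector<Request>& requests,
           uintptr_t (*allocate)(int kind)) {
  for (const Request& r : requests) {
    uintptr_t handle_location = allocate(r.kind);
    std::memcpy(buffer + r.offset, &handle_location, sizeof(handle_location));
  }
}

int main() {
  uint8_t buffer[64] = {};
  std::vector<Request> requests = {{0, 8}, {2, 24}};
  Patch(buffer, requests,
        [](int kind) -> uintptr_t { return 0x1000 + 0x100 * kind; });
}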
@@ -486,6 +501,7 @@ void Assembler::pushad() {
void Assembler::popad() {
EnsureSpace ensure_space(this);
+ AssertIsAddressable(ebx);
EMIT(0x61);
}
@@ -522,6 +538,7 @@ void Assembler::push_imm32(int32_t imm32) {
void Assembler::push(Register src) {
+ AssertIsAddressable(src);
EnsureSpace ensure_space(this);
EMIT(0x50 | src.code());
}
@@ -534,6 +551,7 @@ void Assembler::push(Operand src) {
void Assembler::pop(Register dst) {
+ AssertIsAddressable(dst);
DCHECK_NOT_NULL(reloc_info_writer.last_pc());
EnsureSpace ensure_space(this);
EMIT(0x58 | dst.code());
@@ -605,6 +623,7 @@ void Assembler::mov_w(Operand dst, const Immediate& src) {
void Assembler::mov(Register dst, int32_t imm32) {
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(imm32);
@@ -612,12 +631,14 @@ void Assembler::mov(Register dst, int32_t imm32) {
void Assembler::mov(Register dst, const Immediate& x) {
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(x);
}
void Assembler::mov(Register dst, Handle<HeapObject> handle) {
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(handle);
@@ -631,6 +652,8 @@ void Assembler::mov(Register dst, Operand src) {
void Assembler::mov(Register dst, Register src) {
+ AssertIsAddressable(src);
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x89);
EMIT(0xC0 | src.code() << 3 | dst.code());
@@ -735,6 +758,8 @@ void Assembler::stos() {
void Assembler::xchg(Register dst, Register src) {
+ AssertIsAddressable(src);
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
if (src == eax || dst == eax) { // Single-byte encoding.
EMIT(0x90 | (src == eax ? dst.code() : src.code()));
@@ -965,6 +990,7 @@ void Assembler::cmpw_ax(Operand op) {
void Assembler::dec_b(Register dst) {
+ AssertIsAddressable(dst);
CHECK(dst.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0xFE);
@@ -979,6 +1005,7 @@ void Assembler::dec_b(Operand dst) {
void Assembler::dec(Register dst) {
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x48 | dst.code());
}
@@ -1009,6 +1036,7 @@ void Assembler::div(Operand src) {
void Assembler::imul(Register reg) {
+ AssertIsAddressable(reg);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xE8 | reg.code());
@@ -1041,6 +1069,7 @@ void Assembler::imul(Register dst, Operand src, int32_t imm32) {
void Assembler::inc(Register dst) {
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x40 | dst.code());
}
@@ -1059,6 +1088,7 @@ void Assembler::lea(Register dst, Operand src) {
void Assembler::mul(Register src) {
+ AssertIsAddressable(src);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xE0 | src.code());
@@ -1066,12 +1096,14 @@ void Assembler::mul(Register src) {
void Assembler::neg(Register dst) {
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xD8 | dst.code());
}
void Assembler::neg(Operand dst) {
+ AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(ebx, dst);
@@ -1079,6 +1111,7 @@ void Assembler::neg(Operand dst) {
void Assembler::not_(Register dst) {
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xD0 | dst.code());
@@ -1115,6 +1148,7 @@ void Assembler::or_(Operand dst, Register src) {
void Assembler::rcl(Register dst, uint8_t imm8) {
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1129,6 +1163,7 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
void Assembler::rcr(Register dst, uint8_t imm8) {
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1286,6 +1321,7 @@ void Assembler::test(Register reg, const Immediate& imm) {
return;
}
+ AssertIsAddressable(reg);
EnsureSpace ensure_space(this);
// This is not using emit_arith because test doesn't support
// sign-extension of 8-bit operands.
@@ -1326,6 +1362,7 @@ void Assembler::test(Operand op, const Immediate& imm) {
}
void Assembler::test_b(Register reg, Immediate imm8) {
+ AssertIsAddressable(reg);
DCHECK(imm8.is_uint8());
EnsureSpace ensure_space(this);
// Only use test against byte for registers that have a byte
@@ -1355,6 +1392,7 @@ void Assembler::test_b(Operand op, Immediate imm8) {
}
void Assembler::test_w(Register reg, Immediate imm16) {
+ AssertIsAddressable(reg);
DCHECK(imm16.is_int16() || imm16.is_uint16());
EnsureSpace ensure_space(this);
if (reg == eax) {
@@ -1411,6 +1449,7 @@ void Assembler::xor_(Operand dst, const Immediate& x) {
}
void Assembler::bswap(Register dst) {
+ AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xC8 + dst.code());
@@ -1839,6 +1878,7 @@ void Assembler::fld_d(Operand adr) {
}
void Assembler::fstp_s(Operand adr) {
+ AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xD9);
emit_operand(ebx, adr);
@@ -1851,6 +1891,7 @@ void Assembler::fst_s(Operand adr) {
}
void Assembler::fstp_d(Operand adr) {
+ AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(ebx, adr);
@@ -1875,6 +1916,7 @@ void Assembler::fild_d(Operand adr) {
}
void Assembler::fistp_s(Operand adr) {
+ AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(ebx, adr);
@@ -2162,6 +2204,7 @@ void Assembler::sahf() {
void Assembler::setcc(Condition cc, Register reg) {
+ AssertIsAddressable(reg);
DCHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2358,7 +2401,7 @@ void Assembler::maxps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::cmpps(XMMRegister dst, Operand src, int8_t cmp) {
+void Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xC2);
@@ -2617,7 +2660,7 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
EMIT(imm8);
}
-void Assembler::psllw(XMMRegister reg, int8_t shift) {
+void Assembler::psllw(XMMRegister reg, uint8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2626,7 +2669,7 @@ void Assembler::psllw(XMMRegister reg, int8_t shift) {
EMIT(shift);
}
-void Assembler::pslld(XMMRegister reg, int8_t shift) {
+void Assembler::pslld(XMMRegister reg, uint8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2635,7 +2678,7 @@ void Assembler::pslld(XMMRegister reg, int8_t shift) {
EMIT(shift);
}
-void Assembler::psrlw(XMMRegister reg, int8_t shift) {
+void Assembler::psrlw(XMMRegister reg, uint8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2644,7 +2687,7 @@ void Assembler::psrlw(XMMRegister reg, int8_t shift) {
EMIT(shift);
}
-void Assembler::psrld(XMMRegister reg, int8_t shift) {
+void Assembler::psrld(XMMRegister reg, uint8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2653,7 +2696,7 @@ void Assembler::psrld(XMMRegister reg, int8_t shift) {
EMIT(shift);
}
-void Assembler::psraw(XMMRegister reg, int8_t shift) {
+void Assembler::psraw(XMMRegister reg, uint8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2662,7 +2705,7 @@ void Assembler::psraw(XMMRegister reg, int8_t shift) {
EMIT(shift);
}
-void Assembler::psrad(XMMRegister reg, int8_t shift) {
+void Assembler::psrad(XMMRegister reg, uint8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2671,7 +2714,7 @@ void Assembler::psrad(XMMRegister reg, int8_t shift) {
EMIT(shift);
}
-void Assembler::psllq(XMMRegister reg, int8_t shift) {
+void Assembler::psllq(XMMRegister reg, uint8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2689,8 +2732,7 @@ void Assembler::psllq(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::psrlq(XMMRegister reg, int8_t shift) {
+void Assembler::psrlq(XMMRegister reg, uint8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2757,7 +2799,7 @@ void Assembler::palignr(XMMRegister dst, Operand src, uint8_t mask) {
EMIT(mask);
}
-void Assembler::pextrb(Operand dst, XMMRegister src, int8_t offset) {
+void Assembler::pextrb(Operand dst, XMMRegister src, uint8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2768,7 +2810,7 @@ void Assembler::pextrb(Operand dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pextrw(Operand dst, XMMRegister src, int8_t offset) {
+void Assembler::pextrw(Operand dst, XMMRegister src, uint8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2779,7 +2821,7 @@ void Assembler::pextrw(Operand dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pextrd(Operand dst, XMMRegister src, int8_t offset) {
+void Assembler::pextrd(Operand dst, XMMRegister src, uint8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2790,7 +2832,7 @@ void Assembler::pextrd(Operand dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
-void Assembler::insertps(XMMRegister dst, Operand src, int8_t offset) {
+void Assembler::insertps(XMMRegister dst, Operand src, uint8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2801,7 +2843,7 @@ void Assembler::insertps(XMMRegister dst, Operand src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pinsrb(XMMRegister dst, Operand src, int8_t offset) {
+void Assembler::pinsrb(XMMRegister dst, Operand src, uint8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2812,7 +2854,7 @@ void Assembler::pinsrb(XMMRegister dst, Operand src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pinsrw(XMMRegister dst, Operand src, int8_t offset) {
+void Assembler::pinsrw(XMMRegister dst, Operand src, uint8_t offset) {
DCHECK(is_uint8(offset));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2822,7 +2864,7 @@ void Assembler::pinsrw(XMMRegister dst, Operand src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pinsrd(XMMRegister dst, Operand src, int8_t offset) {
+void Assembler::pinsrd(XMMRegister dst, Operand src, uint8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2933,7 +2975,7 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
}
void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, Operand src2,
- int8_t cmp) {
+ uint8_t cmp) {
vps(0xC2, dst, src1, src2);
EMIT(cmp);
}
@@ -2945,37 +2987,37 @@ void Assembler::vshufps(XMMRegister dst, XMMRegister src1, Operand src2,
EMIT(imm8);
}
-void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+void Assembler::vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
XMMRegister iop = XMMRegister::from_code(6);
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
-void Assembler::vpslld(XMMRegister dst, XMMRegister src, int8_t imm8) {
+void Assembler::vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
XMMRegister iop = XMMRegister::from_code(6);
vinstr(0x72, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
-void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
XMMRegister iop = XMMRegister::from_code(2);
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
-void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int8_t imm8) {
+void Assembler::vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
XMMRegister iop = XMMRegister::from_code(2);
vinstr(0x72, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
-void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+void Assembler::vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
XMMRegister iop = XMMRegister::from_code(4);
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
}
-void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) {
+void Assembler::vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8) {
XMMRegister iop = XMMRegister::from_code(4);
vinstr(0x72, iop, dst, Operand(src), k66, k0F, kWIG);
EMIT(imm8);
@@ -3008,41 +3050,41 @@ void Assembler::vpalignr(XMMRegister dst, XMMRegister src1, Operand src2,
EMIT(mask);
}
-void Assembler::vpextrb(Operand dst, XMMRegister src, int8_t offset) {
+void Assembler::vpextrb(Operand dst, XMMRegister src, uint8_t offset) {
vinstr(0x14, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vpextrw(Operand dst, XMMRegister src, int8_t offset) {
+void Assembler::vpextrw(Operand dst, XMMRegister src, uint8_t offset) {
vinstr(0x15, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vpextrd(Operand dst, XMMRegister src, int8_t offset) {
+void Assembler::vpextrd(Operand dst, XMMRegister src, uint8_t offset) {
vinstr(0x16, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
}
void Assembler::vinsertps(XMMRegister dst, XMMRegister src1, Operand src2,
- int8_t offset) {
+ uint8_t offset) {
vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
EMIT(offset);
}
void Assembler::vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
- int8_t offset) {
+ uint8_t offset) {
vinstr(0x20, dst, src1, src2, k66, k0F3A, kWIG);
EMIT(offset);
}
void Assembler::vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
- int8_t offset) {
+ uint8_t offset) {
vinstr(0xC4, dst, src1, src2, k66, k0F, kWIG);
EMIT(offset);
}
void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
- int8_t offset) {
+ uint8_t offset) {
vinstr(0x22, dst, src1, src2, k66, k0F3A, kWIG);
EMIT(offset);
}
@@ -3143,6 +3185,7 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
}
void Assembler::emit_sse_operand(XMMRegister reg, Operand adr) {
+ AllowExplicitEbxAccessScope accessing_xmm_register(this);
Register ireg = Register::from_code(reg.code());
emit_operand(ireg, adr);
}
@@ -3154,11 +3197,13 @@ void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
+ AssertIsAddressable(dst);
EMIT(0xC0 | dst.code() << 3 | src.code());
}
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
+ AssertIsAddressable(src);
EMIT(0xC0 | (dst.code() << 3) | src.code());
}
@@ -3244,6 +3289,7 @@ void Assembler::GrowBuffer() {
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
+ AssertIsAddressable(dst);
DCHECK(is_uint8(op1) && is_uint8(op2)); // wrong opcode
DCHECK(is_uint8(imm8));
DCHECK_EQ(op1 & 0x01, 0); // should be 8bit operation
@@ -3254,6 +3300,7 @@ void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
+ AssertIsAddressable(dst);
DCHECK((0 <= sel) && (sel <= 7));
Register ireg = Register::from_code(sel);
if (x.is_int8()) {
@@ -3280,6 +3327,8 @@ void Assembler::emit_operand(XMMRegister reg, Operand adr) {
}
void Assembler::emit_operand(int code, Operand adr) {
+ AssertIsAddressable(adr);
+ AssertIsAddressable(Register::from_code(code));
// Isolate-independent code may not embed relocatable addresses.
DCHECK(!options().isolate_independent_code ||
adr.rmode_ != RelocInfo::CODE_TARGET);
@@ -3356,17 +3405,20 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- DCHECK(!RelocInfo::IsNone(rmode));
- if (options().disable_reloc_info_for_patching) return;
- // Don't record external references unless the heap will be serialized.
- if (RelocInfo::IsOnlyForSerializer(rmode) &&
- !options().record_reloc_info_for_serialization && !emit_debug_code()) {
- return;
- }
+ if (!ShouldRecordRelocInfo(rmode)) return;
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
reloc_info_writer.Write(&rinfo);
}
+#ifdef DEBUG
+void Assembler::AssertIsAddressable(const Operand& operand) {
+ DCHECK(is_ebx_addressable_ || !operand.UsesEbx());
+}
+
+void Assembler::AssertIsAddressable(const Register& reg) {
+ DCHECK(is_ebx_addressable_ || reg != ebx);
+}
+#endif // DEBUG
} // namespace internal
} // namespace v8
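
The wholesale int8_t-to-uint8_t change for shift counts and lane offsets, here and in the header below, matches the instruction encoding: these immediates are unsigned byte fields. A small illustration of the representability argument (the motivation as read from the encoding, not stated in the commit):

#include <cstdint>
#include <iostream>

int main() {
  uint8_t shift = 200;                          // emitted byte is exactly 200
  int8_t as_signed = static_cast<int8_t>(200);  // -56 on typical targets
  std::cout << int(shift) << " vs " << int(as_signed) << "\n";
}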
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 69d243a749..b721542f13 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -140,7 +140,6 @@ typedef XMMRegister Simd128Register;
constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
DOUBLE_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
-constexpr DoubleRegister no_double_reg = DoubleRegister::no_reg();
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
// Note that the bit values must match those used in actual instruction encoding
@@ -208,7 +207,7 @@ enum RoundingMode {
// -----------------------------------------------------------------------------
// Machine instruction Immediates
-class Immediate BASE_EMBEDDED {
+class Immediate {
public:
// Calls where x is an Address (uintptr_t) resolve to this overload.
inline explicit Immediate(int x, RelocInfo::Mode rmode = RelocInfo::NONE) {
@@ -224,6 +223,7 @@ class Immediate BASE_EMBEDDED {
static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
static Immediate EmbeddedCode(CodeStub* code);
+ static Immediate EmbeddedStringConstant(const StringConstantBase* str);
static Immediate CodeRelativeOffset(Label* label) {
return Immediate(label);
@@ -361,10 +361,17 @@ class V8_EXPORT_PRIVATE Operand {
// register.
Register reg() const;
+#ifdef DEBUG
+ bool UsesEbx() const { return uses_ebx_; }
+#endif // DEBUG
+
private:
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
inline void set_modrm(int mod, Register rm) {
+#ifdef DEBUG
+ AddUsedRegister(rm);
+#endif
DCHECK_EQ(mod & -4, 0);
buf_[0] = mod << 6 | rm.code();
len_ = 1;
@@ -391,12 +398,23 @@ class V8_EXPORT_PRIVATE Operand {
// Only valid if len_ > 4.
RelocInfo::Mode rmode_ = RelocInfo::NONE;
+#ifdef DEBUG
+ // TODO(v8:6666): Remove once kRootRegister support is complete.
+ bool uses_ebx_ = false;
+ void AddUsedRegister(Register reg) {
+ if (reg == ebx) uses_ebx_ = true;
+ }
+#endif // DEBUG
+
// TODO(clemensh): Get rid of this friendship, or make Operand immutable.
friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
+// TODO(v8:6666): Re-enable globally once kRootRegister support is complete.
+#ifndef DEBUG
static_assert(sizeof(Operand) <= 2 * kPointerSize,
"Operand must be small enough to pass it by value");
+#endif
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
@@ -417,7 +435,7 @@ static_assert(sizeof(Operand) <= 2 * kPointerSize,
// |31.....2|1......0|
// [ next | type |
-class Displacement BASE_EMBEDDED {
+class Displacement {
public:
enum Type { UNCONDITIONAL_JUMP, CODE_RELATIVE, OTHER, CODE_ABSOLUTE };
@@ -667,7 +685,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void and_(Operand dst, Register src);
void and_(Operand dst, const Immediate& x);
- void cmpb(Register reg, Immediate imm8) { cmpb(Operand(reg), imm8); }
+ void cmpb(Register reg, Immediate imm8) {
+ DCHECK(reg.is_byte_register());
+ cmpb(Operand(reg), imm8);
+ }
void cmpb(Operand op, Immediate imm8);
void cmpb(Register reg, Operand op);
void cmpb(Operand op, Register reg);
@@ -983,7 +1004,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void maxps(XMMRegister dst, Operand src);
void maxps(XMMRegister dst, XMMRegister src) { maxps(dst, Operand(src)); }
- void cmpps(XMMRegister dst, Operand src, int8_t cmp);
+ void cmpps(XMMRegister dst, Operand src, uint8_t cmp);
#define SSE_CMP_P(instr, imm8) \
void instr##ps(XMMRegister dst, XMMRegister src) { \
cmpps(dst, Operand(src), imm8); \
@@ -1088,15 +1109,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movss(XMMRegister dst, XMMRegister src) { movss(dst, Operand(src)); }
void extractps(Register dst, XMMRegister src, byte imm8);
- void psllw(XMMRegister reg, int8_t shift);
- void pslld(XMMRegister reg, int8_t shift);
- void psrlw(XMMRegister reg, int8_t shift);
- void psrld(XMMRegister reg, int8_t shift);
- void psraw(XMMRegister reg, int8_t shift);
- void psrad(XMMRegister reg, int8_t shift);
- void psllq(XMMRegister reg, int8_t shift);
+ void psllw(XMMRegister reg, uint8_t shift);
+ void pslld(XMMRegister reg, uint8_t shift);
+ void psrlw(XMMRegister reg, uint8_t shift);
+ void psrld(XMMRegister reg, uint8_t shift);
+ void psraw(XMMRegister reg, uint8_t shift);
+ void psrad(XMMRegister reg, uint8_t shift);
+ void psllq(XMMRegister reg, uint8_t shift);
void psllq(XMMRegister dst, XMMRegister src);
- void psrlq(XMMRegister reg, int8_t shift);
+ void psrlq(XMMRegister reg, uint8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
@@ -1122,36 +1143,36 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void palignr(XMMRegister dst, Operand src, uint8_t mask);
- void pextrb(Register dst, XMMRegister src, int8_t offset) {
+ void pextrb(Register dst, XMMRegister src, uint8_t offset) {
pextrb(Operand(dst), src, offset);
}
- void pextrb(Operand dst, XMMRegister src, int8_t offset);
+ void pextrb(Operand dst, XMMRegister src, uint8_t offset);
// Use SSE4_1 encoding for pextrw reg, xmm, imm8 for consistency
- void pextrw(Register dst, XMMRegister src, int8_t offset) {
+ void pextrw(Register dst, XMMRegister src, uint8_t offset) {
pextrw(Operand(dst), src, offset);
}
- void pextrw(Operand dst, XMMRegister src, int8_t offset);
- void pextrd(Register dst, XMMRegister src, int8_t offset) {
+ void pextrw(Operand dst, XMMRegister src, uint8_t offset);
+ void pextrd(Register dst, XMMRegister src, uint8_t offset) {
pextrd(Operand(dst), src, offset);
}
- void pextrd(Operand dst, XMMRegister src, int8_t offset);
+ void pextrd(Operand dst, XMMRegister src, uint8_t offset);
- void insertps(XMMRegister dst, XMMRegister src, int8_t offset) {
+ void insertps(XMMRegister dst, XMMRegister src, uint8_t offset) {
insertps(dst, Operand(src), offset);
}
- void insertps(XMMRegister dst, Operand src, int8_t offset);
- void pinsrb(XMMRegister dst, Register src, int8_t offset) {
+ void insertps(XMMRegister dst, Operand src, uint8_t offset);
+ void pinsrb(XMMRegister dst, Register src, uint8_t offset) {
pinsrb(dst, Operand(src), offset);
}
- void pinsrb(XMMRegister dst, Operand src, int8_t offset);
- void pinsrw(XMMRegister dst, Register src, int8_t offset) {
+ void pinsrb(XMMRegister dst, Operand src, uint8_t offset);
+ void pinsrw(XMMRegister dst, Register src, uint8_t offset) {
pinsrw(dst, Operand(src), offset);
}
- void pinsrw(XMMRegister dst, Operand src, int8_t offset);
- void pinsrd(XMMRegister dst, Register src, int8_t offset) {
+ void pinsrw(XMMRegister dst, Operand src, uint8_t offset);
+ void pinsrd(XMMRegister dst, Register src, uint8_t offset) {
pinsrd(dst, Operand(src), offset);
}
- void pinsrd(XMMRegister dst, Operand src, int8_t offset);
+ void pinsrd(XMMRegister dst, Operand src, uint8_t offset);
// AVX instructions
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1414,12 +1435,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vshufps(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8);
- void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8);
- void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8);
- void vpsrlw(XMMRegister dst, XMMRegister src, int8_t imm8);
- void vpsrld(XMMRegister dst, XMMRegister src, int8_t imm8);
- void vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8);
- void vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8);
+ void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8);
+ void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8);
+ void vpsrlw(XMMRegister dst, XMMRegister src, uint8_t imm8);
+ void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8);
+ void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8);
+ void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
vpshufhw(dst, Operand(src), shuffle);
@@ -1446,40 +1467,40 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vpalignr(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask);
- void vpextrb(Register dst, XMMRegister src, int8_t offset) {
+ void vpextrb(Register dst, XMMRegister src, uint8_t offset) {
vpextrb(Operand(dst), src, offset);
}
- void vpextrb(Operand dst, XMMRegister src, int8_t offset);
- void vpextrw(Register dst, XMMRegister src, int8_t offset) {
+ void vpextrb(Operand dst, XMMRegister src, uint8_t offset);
+ void vpextrw(Register dst, XMMRegister src, uint8_t offset) {
vpextrw(Operand(dst), src, offset);
}
- void vpextrw(Operand dst, XMMRegister src, int8_t offset);
- void vpextrd(Register dst, XMMRegister src, int8_t offset) {
+ void vpextrw(Operand dst, XMMRegister src, uint8_t offset);
+ void vpextrd(Register dst, XMMRegister src, uint8_t offset) {
vpextrd(Operand(dst), src, offset);
}
- void vpextrd(Operand dst, XMMRegister src, int8_t offset);
+ void vpextrd(Operand dst, XMMRegister src, uint8_t offset);
void vinsertps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- int8_t offset) {
+ uint8_t offset) {
vinsertps(dst, src1, Operand(src2), offset);
}
void vinsertps(XMMRegister dst, XMMRegister src1, Operand src2,
- int8_t offset);
+ uint8_t offset);
void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2,
- int8_t offset) {
+ uint8_t offset) {
vpinsrb(dst, src1, Operand(src2), offset);
}
- void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, int8_t offset);
+ void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2,
- int8_t offset) {
+ uint8_t offset) {
vpinsrw(dst, src1, Operand(src2), offset);
}
- void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t offset);
+ void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2,
- int8_t offset) {
+ uint8_t offset) {
vpinsrd(dst, src1, Operand(src2), offset);
}
- void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t offset);
+ void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
void vcvtdq2ps(XMMRegister dst, XMMRegister src) {
vcvtdq2ps(dst, Operand(src));
@@ -1612,7 +1633,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
- void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp);
+ void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp);
#define AVX_CMP_P(instr, imm8) \
void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vcmpps(dst, src1, Operand(src2), imm8); \
@@ -1757,6 +1778,30 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
UNREACHABLE();
}
+ // Temporary helper data structures while adding kRootRegister support to ia32
+ // builtins. The SupportsRootRegisterScope is intended to mark each builtin
+ // and helper that fully supports the root register, i.e. that does not
+ // clobber ebx. The AllowExplicitEbxAccessScope marks regions that are allowed
+ // to clobber ebx, e.g. when ebx is spilled and restored.
+ // TODO(v8:6666): Remove once kRootRegister is fully supported.
+ template <bool new_value>
+ class SetRootRegisterSupportScope final {
+ public:
+ explicit SetRootRegisterSupportScope(Assembler* assembler)
+ : assembler_(assembler), old_value_(assembler->is_ebx_addressable_) {
+ assembler_->is_ebx_addressable_ = new_value;
+ }
+ ~SetRootRegisterSupportScope() {
+ assembler_->is_ebx_addressable_ = old_value_;
+ }
+
+ private:
+ Assembler* assembler_;
+ const bool old_value_;
+ };
+ typedef SetRootRegisterSupportScope<false> SupportsRootRegisterScope;
+ typedef SetRootRegisterSupportScope<true> AllowExplicitEbxAccessScope;
+
protected:
void emit_sse_operand(XMMRegister reg, Operand adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
@@ -1765,6 +1810,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
byte* addr_at(int pos) { return buffer_ + pos; }
+#ifdef DEBUG
+ // TODO(v8:6666): Remove once kRootRegister is fully supported.
+ void AssertIsAddressable(const Register& reg);
+ void AssertIsAddressable(const Operand& operand);
+#else
+ // An empty inline definition to avoid slowing down release builds.
+ void AssertIsAddressable(const Register&) {}
+ void AssertIsAddressable(const Operand&) {}
+#endif // DEBUG
+ bool is_ebx_addressable_ = true;
private:
uint32_t long_at(int pos) {
@@ -1868,7 +1923,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// instructions and relocation information. The constructor makes
// sure that there is enough space and (in debug mode) the destructor
// checks that we did not generate too much.
-class EnsureSpace BASE_EMBEDDED {
+class EnsureSpace {
public:
explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
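
A self-contained toy model (not V8 code) of the scope mechanism defined above: an RAII guard flips the assembler's is_ebx_addressable_ flag and restores it on exit, and the debug assertions consult that flag:

#include <cassert>

struct ToyAssembler {
  bool is_ebx_addressable = true;
  void AssertIsAddressable(bool uses_ebx) const {
    assert(is_ebx_addressable || !uses_ebx);
  }
};

template <bool kNewValue>
class SetScope {
 public:
  explicit SetScope(ToyAssembler* a)
      : a_(a), old_(a->is_ebx_addressable) {
    a_->is_ebx_addressable = kNewValue;
  }
  ~SetScope() { a_->is_ebx_addressable = old_; }

 private:
  ToyAssembler* a_;
  const bool old_;
};

int main() {
  ToyAssembler masm;
  SetScope<false> supports_root_register(&masm);  // ebx now off-limits
  masm.AssertIsAddressable(/*uses_ebx=*/false);   // fine
  {
    SetScope<true> allow_explicit_ebx(&masm);     // narrow spill window
    masm.AssertIsAddressable(/*uses_ebx=*/true);  // allowed inside the window
  }
  // On scope exit the previous value is restored, as in the real class.
}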
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 7bfc0875cb..63cd5a9621 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -47,6 +47,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ push(ebx);
__ InitializeRootRegister();
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// Save copies of the top frame descriptor on the stack.
ExternalReference c_entry_fp =
@@ -95,6 +96,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ VerifyRootRegister();
// Check if the current stack frame is marked as the outermost JS frame.
+ Assembler::AllowExplicitEbxAccessScope exiting_js(masm);
__ pop(ebx);
__ cmp(ebx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
@@ -132,6 +134,8 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// Save volatile registers.
const int kNumSavedRegisters = 3;
__ push(eax);
@@ -180,9 +184,8 @@ static void PrepareCallApiFunction(MacroAssembler* masm, int argc) {
}
}
-
// Calls an API function. Allocates HandleScope, extracts returned value
-// from handle and propagates exceptions. Clobbers ebx, edi and
+// from handle and propagates exceptions. Clobbers esi, edi and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
static void CallApiFunctionAndReturn(MacroAssembler* masm,
@@ -191,6 +194,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Operand thunk_last_arg, int stack_space,
Operand* stack_space_operand,
Operand return_value_operand) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
Isolate* isolate = masm->isolate();
ExternalReference next_address =
@@ -202,7 +206,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
DCHECK(edx == function_address);
// Allocate HandleScope in callee-save registers.
- __ mov(ebx, __ StaticVariable(next_address));
+ __ mov(esi, __ StaticVariable(next_address));
__ mov(edi, __ StaticVariable(limit_address));
__ add(__ StaticVariable(level_address), Immediate(1));
@@ -256,7 +260,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
- __ mov(__ StaticVariable(next_address), ebx);
+ __ mov(__ StaticVariable(next_address), esi);
__ sub(__ StaticVariable(level_address), Immediate(1));
__ Assert(above_equal, AbortReason::kInvalidHandleScopeLevel);
__ cmp(edi, __ StaticVariable(limit_address));
@@ -265,7 +269,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Leave the API exit frame.
__ bind(&leave_exit_frame);
if (stack_space_operand != nullptr) {
- __ mov(ebx, *stack_space_operand);
+ __ mov(edx, *stack_space_operand);
}
__ LeaveApiExitFrame();
@@ -314,7 +318,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (stack_space_operand != nullptr) {
DCHECK_EQ(0, stack_space);
__ pop(ecx);
- __ add(esp, ebx);
+ __ add(esp, edx);
__ jmp(ecx);
} else {
__ ret(stack_space * kPointerSize);
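
The esi/edx substitutions above exist because CallApiFunctionAndReturn keeps
the HandleScope bookkeeping (next, limit, level) live across the API call,
and ebx is no longer available to hold it. The save/restore protocol itself
is unchanged; a rough standalone model of it, with simplified types and an
invented field layout:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for the isolate's handle scope data.
    struct HandleScopeData {
      uintptr_t* next = nullptr;
      uintptr_t* limit = nullptr;
      int level = 0;
    };

    template <typename Callback>
    void CallApiFunctionModel(HandleScopeData* data, Callback callback) {
      // Prologue: snapshot next/limit, bump level (the mov/mov/add sequence).
      uintptr_t* saved_next = data->next;
      uintptr_t* saved_limit = data->limit;
      data->level++;

      callback();  // the actual API function call

      // Epilogue: restore next, drop level; if limit moved, the real stub
      // calls DeleteExtensions, modeled here by restoring the old limit.
      data->next = saved_next;
      data->level--;
      assert(data->level >= 0);  // mirrors kInvalidHandleScopeLevel
      if (data->limit != saved_limit) data->limit = saved_limit;
    }
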
@@ -339,8 +343,10 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
}
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// ----------- S t a t e -------------
- // -- ebx : call_data
+ // -- eax : call_data
// -- ecx : holder
// -- edx : api_function_address
// -- esi : context
@@ -352,10 +358,10 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- Register call_data = ebx;
+ Register call_data = eax;
Register holder = ecx;
Register api_function_address = edx;
- Register return_address = eax;
+ Register return_address = edi;
typedef FunctionCallbackArguments FCA;
@@ -370,15 +376,15 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ pop(return_address);
// new target
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// call data
__ push(call_data);
// return value
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// return value default
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// isolate
__ push(Immediate(ExternalReference::isolate_address(isolate())));
// holder
@@ -429,6 +435,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
@@ -443,15 +451,15 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
Register receiver = ApiGetterDescriptor::ReceiverRegister();
Register holder = ApiGetterDescriptor::HolderRegister();
Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = ebx;
+ Register scratch = edi;
DCHECK(!AreAliased(receiver, holder, callback, scratch));
__ pop(scratch); // Pop return address to extend the frame.
__ push(receiver);
__ push(FieldOperand(callback, AccessorInfo::kDataOffset));
- __ PushRoot(Heap::kUndefinedValueRootIndex); // ReturnValue
+ __ PushRoot(RootIndex::kUndefinedValue); // ReturnValue
// ReturnValue default value
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
__ push(Immediate(ExternalReference::isolate_address(isolate())));
__ push(holder);
__ push(Immediate(Smi::kZero)); // should_throw_on_error -> false
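
The Assembler::SupportsRootRegisterScope and AllowExplicitEbxAccessScope
annotations sprinkled through these stubs mark regions where ebx is expected
to hold the root array pointer, or where raw ebx access is deliberately
permitted. A minimal sketch of how such RAII marker scopes can be wired up,
assuming they are assembler-side bookkeeping only (the real classes live in
src/ia32/assembler-ia32.h and may differ):

    // Stand-in for the assembler state the two marker scopes toggle.
    struct FakeAssemblerState {
      bool supports_root_register = false;
      bool explicit_ebx_access_allowed = false;
    };

    class SupportsRootRegisterScope {
     public:
      explicit SupportsRootRegisterScope(FakeAssemblerState* state)
          : state_(state), previous_(state->supports_root_register) {
        state_->supports_root_register = true;  // ebx holds the root array
      }
      ~SupportsRootRegisterScope() {
        state_->supports_root_register = previous_;
      }

     private:
      FakeAssemblerState* state_;
      bool previous_;
    };

    class AllowExplicitEbxAccessScope {
     public:
      explicit AllowExplicitEbxAccessScope(FakeAssemblerState* state)
          : state_(state), previous_(state->explicit_ebx_access_allowed) {
        state_->explicit_ebx_access_allowed = true;  // e.g. around pop(ebx)
      }
      ~AllowExplicitEbxAccessScope() {
        state_->explicit_ebx_access_allowed = previous_;
      }

     private:
      FakeAssemblerState* state_;
      bool previous_;
    };
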
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 2a66676c9c..78790b75d0 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -7,7 +7,6 @@
#include "src/codegen.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -15,13 +14,14 @@ namespace internal {
#define __ masm.
-UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+UnaryMathFunction CreateSqrtFunction() {
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
// Move double input into registers.
@@ -35,12 +35,13 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
}
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
@@ -129,14 +130,14 @@ class LabelConverter {
byte* buffer_;
};
-
-MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
+MemMoveFunction CreateMemMoveFunction() {
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
LabelConverter conv(buffer);
// Generated code is put into a fixed, unmovable buffer, and not into
@@ -447,10 +448,11 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
MemMoveEmitPopAndReturn(&masm);
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index c9cc71f161..2c68241fc2 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -18,6 +18,8 @@ const int Deoptimizer::table_entry_size_ = 10;
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm());
+
GeneratePrologue();
// Save all general purpose registers before messing with them.
@@ -53,7 +55,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
kNumberOfRegisters * kPointerSize + kDoubleRegsSize + kFloatRegsSize;
// Get the bailout id from the stack.
- __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
+ __ mov(esi, Operand(esp, kSavedRegistersAreaSize));
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
@@ -74,7 +76,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kPointerSize),
Immediate(static_cast<int>(deopt_kind())));
- __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
+ __ mov(Operand(esp, 2 * kPointerSize), esi); // Bailout id.
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize),
@@ -86,19 +88,19 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
- __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
+ __ mov(esi, Operand(eax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(ebx, offset));
+ __ pop(Operand(esi, offset));
}
int float_regs_offset = FrameDescription::float_registers_offset();
// Fill in the float input registers.
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
int dst_offset = i * kFloatSize + float_regs_offset;
- __ pop(Operand(ebx, dst_offset));
+ __ pop(Operand(esi, dst_offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
@@ -108,7 +110,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset = code * kDoubleSize;
__ movsd(xmm0, Operand(esp, src_offset));
- __ movsd(Operand(ebx, dst_offset), xmm0);
+ __ movsd(Operand(esi, dst_offset), xmm0);
}
// Clear FPU all exceptions.
@@ -121,13 +123,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
- __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+ __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
__ add(ecx, esp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
- __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+ __ lea(edx, Operand(esi, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
@@ -140,7 +142,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Compute the output frame in the deoptimizer.
__ push(eax);
- __ PrepareCallCFunction(1, ebx);
+ __ PrepareCallCFunction(1, esi);
__ mov(Operand(esp, 0 * kPointerSize), eax);
{
AllowExternalCallThatCantCauseGC scope(masm());
@@ -153,20 +155,21 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
- // Outer loop state: eax = current FrameDescription**, edx = one past the
- // last FrameDescription**.
+ // Outer loop state: eax = current FrameDescription**, edx = one
+ // past the last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
__ lea(edx, Operand(eax, edx, times_4, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
- // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
- __ mov(ebx, Operand(eax, 0));
- __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+ // Inner loop state: esi = current FrameDescription*, ecx = loop
+ // index.
+ __ mov(esi, Operand(eax, 0));
+ __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t)));
- __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+ __ push(Operand(esi, ecx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
@@ -180,20 +183,21 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(ebx, src_offset));
+ __ movsd(xmm_reg, Operand(esi, src_offset));
}
// Push pc and continuation from the last output frame.
- __ push(Operand(ebx, FrameDescription::pc_offset()));
- __ push(Operand(ebx, FrameDescription::continuation_offset()));
+ __ push(Operand(esi, FrameDescription::pc_offset()));
+ __ push(Operand(esi, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(ebx, offset));
+ __ push(Operand(esi, offset));
}
// Restore the registers from the stack.
+ Assembler::AllowExplicitEbxAccessScope restoring_spilled_value(masm());
__ popad();
// Return to the continuation point.
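
Every ebx-to-esi substitution above is mechanical: the pointer to the current
FrameDescription must live somewhere other than the future root register. The
copy protocol itself is untouched; a simplified standalone model of its two
phases (unwind the live stack into the input FrameDescription, then push the
computed output frames back), with invented container types:

    #include <cstdint>
    #include <vector>

    // Simplified FrameDescription: just the frame contents, word by word.
    struct FrameDescription {
      std::vector<uint32_t> contents;
    };

    // Phase 1 (the pop_loop): unwind the stack into the input frame.
    void UnwindIntoInputFrame(std::vector<uint32_t>* stack,
                              FrameDescription* input, size_t frame_words) {
      input->contents.resize(frame_words);
      for (size_t i = 0; i < frame_words; i++) {
        input->contents[i] = stack->back();  // like pop [esi + offset]
        stack->pop_back();
      }
    }

    // Phase 2 (the outer/inner push loops): materialize each output frame
    // on the stack, highest offset first, as in inner_push_loop above.
    void PushOutputFrames(std::vector<uint32_t>* stack,
                          const std::vector<FrameDescription>& outputs) {
      for (const FrameDescription& frame : outputs) {
        for (size_t i = frame.contents.size(); i > 0; i--) {
          stack->push_back(frame.contents[i - 1]);
        }
      }
    }
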
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 71205b10d0..e2c04f7525 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -13,13 +13,7 @@ const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
CallInterfaceDescriptorData* data, int register_parameter_count) {
-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
- // TODO(jgruber,v8:6666): Keep kRootRegister free unconditionally.
constexpr Register default_stub_registers[] = {eax, ecx, edx, edi};
- DCHECK(!AreAliased(eax, ecx, edx, edi, kRootRegister));
-#else
- constexpr Register default_stub_registers[] = {eax, ebx, ecx, edx, edi};
-#endif
STATIC_ASSERT(arraysize(default_stub_registers) == kMaxBuiltinRegisterParams);
CHECK_LE(static_cast<size_t>(register_parameter_count),
arraysize(default_stub_registers));
@@ -29,7 +23,7 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- static const Register default_stub_registers[] = {ebx, ecx, edx, edi,
+ static const Register default_stub_registers[] = {ecx, edx, edi,
kReturnRegister0};
data->RestrictAllocatableRegisters(default_stub_registers,
@@ -49,32 +43,31 @@ const Register LoadDescriptor::ReceiverRegister() { return edx; }
const Register LoadDescriptor::NameRegister() { return ecx; }
const Register LoadDescriptor::SlotRegister() { return eax; }
-const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return no_reg; }
const Register StoreDescriptor::ReceiverRegister() { return edx; }
const Register StoreDescriptor::NameRegister() { return ecx; }
-const Register StoreDescriptor::ValueRegister() { return eax; }
-const Register StoreDescriptor::SlotRegister() { return edi; }
+const Register StoreDescriptor::ValueRegister() { return no_reg; }
+const Register StoreDescriptor::SlotRegister() { return no_reg; }
-const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
+const Register StoreWithVectorDescriptor::VectorRegister() { return no_reg; }
const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
-const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
+const Register StoreTransitionDescriptor::VectorRegister() { return no_reg; }
const Register StoreTransitionDescriptor::MapRegister() { return edi; }
const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-
+const Register GrowArrayElementsDescriptor::KeyRegister() { return ecx; }
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ebx};
+ Register registers[] = {ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -96,9 +89,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments (on the stack, not including receiver)
// edi : the target to call
- // ebx : arguments list (FixedArray)
// ecx : arguments list length (untagged)
- Register registers[] = {edi, eax, ebx, ecx};
+ // On the stack : arguments list (FixedArray)
+ Register registers[] = {edi, eax, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -115,16 +108,16 @@ void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments (on the stack, not including receiver)
// edi : the target to call
- // ebx : the object to spread
- Register registers[] = {edi, eax, ebx};
+ // ecx : the object to spread
+ Register registers[] = {edi, eax, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// edi : the target to call
- // ebx : the arguments list
- Register registers[] = {edi, ebx};
+ // edx : the arguments list
+ Register registers[] = {edi, edx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -133,9 +126,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific(
// eax : number of arguments (on the stack, not including receiver)
// edi : the target to call
// edx : the new target
- // ebx : arguments list (FixedArray)
// ecx : arguments list length (untagged)
- Register registers[] = {edi, edx, eax, ebx, ecx};
+ // On the stack : arguments list (FixedArray)
+ Register registers[] = {edi, edx, eax, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -154,8 +147,8 @@ void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
// eax : number of arguments (on the stack, not including receiver)
// edi : the target to call
// edx : the new target
- // ebx : the object to spread
- Register registers[] = {edi, edx, eax, ebx};
+ // ecx : the object to spread
+ Register registers[] = {edi, edx, eax, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -163,8 +156,8 @@ void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// edi : the target to call
// edx : the new target
- // ebx : the arguments list
- Register registers[] = {edi, edx, ebx};
+ // ecx : the arguments list
+ Register registers[] = {edi, edx, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -173,8 +166,9 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
// eax : number of arguments
// edx : the new target
// edi : the target to call
- // ebx : allocation site or undefined
- Register registers[] = {edi, edx, eax, ebx};
+ // ecx : allocation site or undefined
+ // TODO(jgruber): Remove the unused allocation site parameter.
+ Register registers[] = {edi, edx, eax, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -202,13 +196,13 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // JSFunction
edx, // the new target
eax, // actual number of arguments
- ebx, // expected number of arguments
+ ecx, // expected number of arguments
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -217,7 +211,7 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
JavaScriptFrame::context_register(), // callee context
- ebx, // call_data
+ eax, // call_data
ecx, // holder
edx, // api_function_address
};
@@ -236,7 +230,7 @@ void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (not including receiver)
- ebx, // address of first argument
+ ecx, // address of first argument
      edi  // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -246,9 +240,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (not including receiver)
- edx, // new target
- edi, // constructor
- ebx, // allocation site feedback
ecx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -266,7 +257,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- ebx, // loaded new FP
+ eax, // loaded new FP
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
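
Every descriptor in this file follows the same recipe: list the parameter
registers (now never ebx) and hand them to the platform-specific initializer.
A compilable stand-alone sketch of that shape, with Register reduced to an
enum and the data class stubbed out:

    #include <cstddef>

    enum Register { eax, ecx, edx, ebx, esi, edi, no_reg };  // stand-in

    template <typename T, size_t N>
    constexpr size_t arraysize(T (&)[N]) {
      return N;
    }

    struct CallInterfaceDescriptorData {
      // Records the calling convention; the real body lives elsewhere.
      void InitializePlatformSpecific(size_t count, const Register* regs) {}
    };

    void DefaultInitializePlatformSpecific(CallInterfaceDescriptorData* data) {
      // ebx is reserved as kRootRegister on ia32, so it is never listed.
      static const Register default_stub_registers[] = {eax, ecx, edx, edi};
      data->InitializePlatformSpecific(arraysize(default_stub_registers),
                                       default_stub_registers);
    }
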
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index bb806edebd..82cea88ac4 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -51,7 +51,7 @@ MacroAssembler::MacroAssembler(Isolate* isolate,
#endif // V8_EMBEDDED_BUILTINS
}
-void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
// TODO(jgruber, v8:6666): Support loads through the root register once it
// exists.
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
@@ -67,22 +67,20 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
}
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
- mov(destination, Immediate(index));
+ mov(destination, Immediate(static_cast<int>(index)));
mov(destination,
StaticArray(destination, times_pointer_size, roots_array_start));
}
-void MacroAssembler::CompareRoot(Register with,
- Register scratch,
- Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Register with, Register scratch,
+ RootIndex index) {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
- mov(scratch, Immediate(index));
+ mov(scratch, Immediate(static_cast<int>(index)));
cmp(with, StaticArray(scratch, times_pointer_size, roots_array_start));
}
-
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Register with, RootIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
Handle<Object> object = isolate()->heap()->root_handle(index);
if (object->IsHeapObject()) {
@@ -92,7 +90,7 @@ void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
}
}
-void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Operand with, RootIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
Handle<Object> object = isolate()->heap()->root_handle(index);
if (object->IsHeapObject()) {
@@ -102,7 +100,7 @@ void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
}
}
-void MacroAssembler::PushRoot(Heap::RootListIndex index) {
+void MacroAssembler::PushRoot(RootIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
Handle<Object> object = isolate()->heap()->root_handle(index);
if (object->IsHeapObject()) {
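
The Heap::RootListIndex -> RootIndex renames also change the type from a
plain enum to an enum class, which is why each roots-array access now spells
out static_cast<int>(index). In miniature, with illustrative enumerator
values:

    #include <cstdint>

    enum class RootIndex : uint16_t { kUndefinedValue, kTheHoleValue };

    uintptr_t LoadRootModel(const uintptr_t* roots_array_start,
                            RootIndex index) {
      // enum class values no longer convert implicitly to integers, so the
      // scaled-index load needs the explicit cast, as in LoadRoot above.
      return roots_array_start[static_cast<int>(index)];
    }
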
@@ -114,11 +112,12 @@ void MacroAssembler::PushRoot(Heap::RootListIndex index) {
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
+ DCHECK(!is_ebx_addressable_);
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- Heap::kBuiltinsConstantsTableRootIndex));
+ RootIndex::kBuiltinsConstantsTable));
// TODO(jgruber): LoadRoot should be a register-relative load once we have
// the kRootRegister.
- LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
mov(destination,
FieldOperand(destination,
FixedArray::kHeaderSize + constant_index * kPointerSize));
@@ -126,6 +125,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
+ DCHECK(!is_ebx_addressable_);
DCHECK(is_int32(offset));
// TODO(jgruber): Register-relative load once kRootRegister exists.
mov(destination, Immediate(ExternalReference::roots_array_start(isolate())));
@@ -135,6 +135,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ DCHECK(!is_ebx_addressable_);
// TODO(jgruber): Register-relative load once kRootRegister exists.
LoadRootRegisterOffset(destination, offset);
mov(destination, Operand(destination, 0));
@@ -326,8 +327,6 @@ void TurboAssembler::CallRecordWriteStub(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register isolate_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
@@ -339,8 +338,6 @@ void TurboAssembler::CallRecordWriteStub(
pop(slot_parameter);
pop(object_parameter);
- mov(isolate_parameter,
- Immediate(ExternalReference::isolate_address(isolate())));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -410,8 +407,8 @@ void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
- mov(ebx, StaticVariable(restart_fp));
- test(ebx, ebx);
+ mov(eax, StaticVariable(restart_fp));
+ test(eax, eax);
j(not_zero, BUILTIN_CODE(isolate(), FrameDropperTrampoline),
RelocInfo::CODE_TARGET);
}
@@ -733,6 +730,9 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
+ STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
+ STATIC_ASSERT(esi == kContextRegister);
+
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
@@ -1035,6 +1035,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance done_near) {
+ DCHECK_IMPLIES(expected.is_reg(), expected.reg() == ecx);
+ DCHECK_IMPLIES(actual.is_reg(), actual.reg() == eax);
+
bool definitely_matches = false;
*definitely_mismatches = false;
Label invoke;
@@ -1053,7 +1056,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
definitely_matches = true;
} else {
*definitely_mismatches = true;
- mov(ebx, expected.immediate());
+ mov(ecx, expected.immediate());
}
}
} else {
@@ -1064,14 +1067,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
mov(eax, actual.immediate());
cmp(expected.reg(), actual.immediate());
j(equal, &invoke);
- DCHECK(expected.reg() == ebx);
+ DCHECK(expected.reg() == ecx);
} else if (expected.reg() != actual.reg()) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
cmp(expected.reg(), actual.reg());
j(equal, &invoke);
DCHECK(actual.reg() == eax);
- DCHECK(expected.reg() == ebx);
+ DCHECK(expected.reg() == ecx);
} else {
definitely_matches = true;
Move(eax, actual.reg());
@@ -1150,6 +1153,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(function == edi);
DCHECK_IMPLIES(new_target.is_valid(), new_target == edx);
+ DCHECK_IMPLIES(expected.is_reg(), expected.reg() == ecx);
+ DCHECK_IMPLIES(actual.is_reg(), actual.reg() == eax);
// On function call, call into the debugger if necessary.
CheckDebugHook(function, new_target, expected, actual);
@@ -1187,28 +1192,15 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(fun == edi);
- mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- movzx_w(ebx,
- FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
+ movzx_w(ecx,
+ FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- ParameterCount expected(ebx);
+ ParameterCount expected(ecx);
InvokeFunctionCode(edi, new_target, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Register fun,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- // You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- DCHECK(fun == edi);
- mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- InvokeFunctionCode(edi, no_reg, expected, actual, flag);
-}
-
void MacroAssembler::LoadGlobalProxy(Register dst) {
mov(dst, NativeContextOperand());
mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
@@ -1365,7 +1357,7 @@ void TurboAssembler::Pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
}
}
-void TurboAssembler::Psraw(XMMRegister dst, int8_t shift) {
+void TurboAssembler::Psraw(XMMRegister dst, uint8_t shift) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpsraw(dst, dst, shift);
@@ -1374,7 +1366,7 @@ void TurboAssembler::Psraw(XMMRegister dst, int8_t shift) {
}
}
-void TurboAssembler::Psrlw(XMMRegister dst, int8_t shift) {
+void TurboAssembler::Psrlw(XMMRegister dst, uint8_t shift) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpsrlw(dst, dst, shift);
@@ -1394,7 +1386,7 @@ void TurboAssembler::Psignb(XMMRegister dst, Operand src) {
psignb(dst, src);
return;
}
- UNREACHABLE();
+ FATAL("no AVX or SSE3 support");
}
void TurboAssembler::Psignw(XMMRegister dst, Operand src) {
@@ -1408,7 +1400,7 @@ void TurboAssembler::Psignw(XMMRegister dst, Operand src) {
psignw(dst, src);
return;
}
- UNREACHABLE();
+ FATAL("no AVX or SSE3 support");
}
void TurboAssembler::Psignd(XMMRegister dst, Operand src) {
@@ -1422,7 +1414,7 @@ void TurboAssembler::Psignd(XMMRegister dst, Operand src) {
psignd(dst, src);
return;
}
- UNREACHABLE();
+ FATAL("no AVX or SSE3 support");
}
void TurboAssembler::Pshufb(XMMRegister dst, Operand src) {
@@ -1436,7 +1428,7 @@ void TurboAssembler::Pshufb(XMMRegister dst, Operand src) {
pshufb(dst, src);
return;
}
- UNREACHABLE();
+ FATAL("no AVX or SSE3 support");
}
void TurboAssembler::Pblendw(XMMRegister dst, Operand src, uint8_t imm8) {
@@ -1450,7 +1442,7 @@ void TurboAssembler::Pblendw(XMMRegister dst, Operand src, uint8_t imm8) {
pblendw(dst, src, imm8);
return;
}
- UNREACHABLE();
+ FATAL("no AVX or SSE4.1 support");
}
void TurboAssembler::Palignr(XMMRegister dst, Operand src, uint8_t imm8) {
@@ -1464,10 +1456,10 @@ void TurboAssembler::Palignr(XMMRegister dst, Operand src, uint8_t imm8) {
palignr(dst, src, imm8);
return;
}
- UNREACHABLE();
+ FATAL("no AVX or SSE3 support");
}
-void TurboAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
+void TurboAssembler::Pextrb(Register dst, XMMRegister src, uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpextrb(dst, src, imm8);
@@ -1478,10 +1470,10 @@ void TurboAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
pextrb(dst, src, imm8);
return;
}
- UNREACHABLE();
+ FATAL("no AVX or SSE4.1 support");
}
-void TurboAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
+void TurboAssembler::Pextrw(Register dst, XMMRegister src, uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpextrw(dst, src, imm8);
@@ -1492,10 +1484,10 @@ void TurboAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
pextrw(dst, src, imm8);
return;
}
- UNREACHABLE();
+ FATAL("no AVX or SSE4.1 support");
}
-void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
+void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
return;
@@ -1510,37 +1502,44 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
pextrd(dst, src, imm8);
return;
}
- DCHECK_LT(imm8, 4);
- pshufd(xmm0, src, imm8);
- movd(dst, xmm0);
+ // Without AVX or SSE, we can only have 64-bit values in xmm registers.
+ // We don't have an xmm scratch register, so move the data via the stack. This
+ // path is rarely required, so it's acceptable to be slow.
+ DCHECK_LT(imm8, 2);
+ sub(esp, Immediate(kDoubleSize));
+ movsd(Operand(esp, 0), src);
+ mov(dst, Operand(esp, imm8 * kUInt32Size));
+ add(esp, Immediate(kDoubleSize));
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8,
- bool is_64_bits) {
+void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrd(dst, dst, src, imm8);
+ return;
+ }
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pinsrd(dst, src, imm8);
return;
}
- if (is_64_bits) {
- movd(xmm0, src);
- if (imm8 == 1) {
- punpckldq(dst, xmm0);
- } else {
- DCHECK_EQ(0, imm8);
- psrlq(dst, 32);
- punpckldq(xmm0, dst);
- movaps(dst, xmm0);
- }
+ // Without AVX or SSE, we can only have 64-bit values in xmm registers.
+ // We don't have an xmm scratch register, so move the data via the stack. This
+ // path is rarely required, so it's acceptable to be slow.
+ DCHECK_LT(imm8, 2);
+ sub(esp, Immediate(kDoubleSize));
+ // Write original content of {dst} to the stack.
+ movsd(Operand(esp, 0), dst);
+ // Overwrite the portion specified in {imm8}.
+ if (src.is_reg_only()) {
+ mov(Operand(esp, imm8 * kUInt32Size), src.reg());
} else {
- DCHECK_LT(imm8, 4);
- push(eax);
- mov(eax, src);
- pinsrw(dst, eax, imm8 * 2);
- shr(eax, 16);
- pinsrw(dst, eax, imm8 * 2 + 1);
- pop(eax);
+ movss(dst, src);
+ movss(Operand(esp, imm8 * kUInt32Size), dst);
}
+ // Load back the full value into {dst}.
+ movsd(dst, Operand(esp, 0));
+ add(esp, Immediate(kDoubleSize));
}
void TurboAssembler::Lzcnt(Register dst, Operand src) {
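
The new SSE2-only fallbacks for Pextrd/Pinsrd no longer borrow xmm0: they
spill the low 64 bits to a stack slot, touch the requested 32-bit lane in
memory, and reload. The same trick expressed with intrinsics (a sketch, not
V8 code; the uint64_t variable plays the role of the esp slot):

    #include <emmintrin.h>  // SSE2
    #include <cstdint>
    #include <cstring>

    // Extract lane 0 or 1, mirroring the movsd-store / mov-load pair above.
    uint32_t Pextrd_sse2(__m128i src, int imm8) {
      uint64_t spill;  // stands in for the kDoubleSize stack slot
      _mm_storel_epi64(reinterpret_cast<__m128i*>(&spill), src);
      return static_cast<uint32_t>(spill >> (imm8 * 32));
    }

    // Insert into lane 0 or 1: spill, overwrite the lane in memory, reload.
    __m128i Pinsrd_sse2(__m128i dst, uint32_t value, int imm8) {
      uint64_t spill;
      _mm_storel_epi64(reinterpret_cast<__m128i*>(&spill), dst);
      std::memcpy(reinterpret_cast<uint8_t*>(&spill) + imm8 * 4, &value, 4);
      return _mm_loadl_epi64(reinterpret_cast<const __m128i*>(&spill));
    }
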
@@ -1576,7 +1575,7 @@ void TurboAssembler::Popcnt(Register dst, Operand src) {
popcnt(dst, src);
return;
}
- UNREACHABLE();
+ FATAL("no POPCNT support");
}
void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
@@ -1719,14 +1718,16 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (FLAG_embedded_builtins) {
- // TODO(jgruber): Figure out which register we can clobber here.
// TODO(jgruber): Pc-relative builtin-to-builtin calls.
- Register scratch = kOffHeapTrampolineRegister;
if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadConstant(scratch, code_object);
- lea(scratch, FieldOperand(scratch, Code::kHeaderSize));
- call(scratch);
- return;
+ // TODO(jgruber): There's no scratch register on ia32. Any call that
+ // requires loading a code object from the builtins constant table must:
+ // 1) spill two scratch registers, 2) load the target into scratch1, 3)
+ // store the target into a virtual register on the isolate using scratch2,
+ // 4) restore both scratch registers, and finally 5) call through the
+ // virtual register. All affected call sites should vanish once all
+ // builtins are embedded on ia32.
+ UNREACHABLE();
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
@@ -1747,14 +1748,16 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (FLAG_embedded_builtins) {
- // TODO(jgruber): Figure out which register we can clobber here.
// TODO(jgruber): Pc-relative builtin-to-builtin calls.
- Register scratch = kOffHeapTrampolineRegister;
if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadConstant(scratch, code_object);
- lea(scratch, FieldOperand(scratch, Code::kHeaderSize));
- jmp(scratch);
- return;
+ // TODO(jgruber): There's no scratch register on ia32. Any call that
+ // requires loading a code object from the builtins constant table must:
+ // 1) spill two scratch registers, 2) load the target into scratch1, 3)
+ // store the target into a virtual register on the isolate using scratch2,
+ // 4) restore both scratch registers, and finally 5) call through the
+ // virtual register. All affected call sites should vanish once all
+ // builtins are embedded on ia32.
+ UNREACHABLE();
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
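
The UNREACHABLE() in Call and Jump is backed by the five-step scheme the TODO
describes: with no spare GPR on ia32, an isolate-independent call would have
to route its target through a memory slot ("virtual register") on the
isolate. A plain-C++ model of that idea, with every name invented for
illustration:

    // Hypothetical per-isolate slot acting as the "virtual register".
    struct FakeIsolateData {
      void (*virtual_call_target)() = nullptr;
    };

    void CallThroughVirtualRegister(FakeIsolateData* isolate,
                                    void (*target)()) {
      // Steps 1-4 (spill two scratch registers, load the target, store it
      // into the slot, restore the registers) collapse to one store here...
      isolate->virtual_call_target = target;
      // ...and step 5 is an indirect call through memory, clobbering no GPR.
      isolate->virtual_call_target();
    }
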
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 8ad92a9d1d..bdb04fb222 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -58,6 +58,9 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
+ TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
+ : TurboAssemblerBase(options, buffer, buffer_size) {}
+
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
@@ -116,6 +119,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void VerifyRootRegister() {
if (FLAG_ia32_verify_root_register && FLAG_embedded_builtins) {
+ Assembler::AllowExplicitEbxAccessScope read_only_access(this);
Label root_register_ok;
cmp(kRootRegister, kRootRegisterSentinel);
j(equal, &root_register_ok);
@@ -238,7 +242,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret();
- void LoadRoot(Register destination, Heap::RootListIndex index) override;
+ void LoadRoot(Register destination, RootIndex index) override;
+
+ void MoveForRootRegisterRefactoring(Register dst, Register src) {
+ // TODO(v8:6666): When rewriting ia32 ASM builtins to not clobber the
+ // kRootRegister ebx, most call sites of this wrapper function can probably
+ // be removed.
+ Move(dst, src);
+ }
// Indirect root-relative loads.
void LoadFromConstantsTable(Register destination,
@@ -248,6 +259,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadAddress(Register destination, ExternalReference source);
+ void PushRootRegister() {
+ // Check that a NoRootArrayScope exists.
+ CHECK(!root_array_available());
+ push(kRootRegister);
+ }
+ void PopRootRegister() {
+ // Check that a NoRootArrayScope exists.
+ CHECK(!root_array_available());
+ pop(kRootRegister);
+ }
+
// Wrapper functions to ensure external reference operands produce
// isolate-independent code if needed.
Operand StaticVariable(const ExternalReference& ext);
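
The CHECKs in PushRootRegister/PopRootRegister enforce that kRootRegister is
only pushed or popped while the root array is marked unavailable. Assuming
NoRootArrayScope is a simple RAII toggle (a sketch, not the real
implementation):

    #include <cassert>

    struct FakeTurboAssembler {
      bool root_array_available = true;
      void PushRootRegister() {
        assert(!root_array_available);  // CHECK(!root_array_available())
        // push(kRootRegister) would be emitted here
      }
      void PopRootRegister() {
        assert(!root_array_available);
        // pop(kRootRegister) would be emitted here
      }
    };

    class NoRootArrayScope {
     public:
      explicit NoRootArrayScope(FakeTurboAssembler* tasm)
          : tasm_(tasm), previous_(tasm->root_array_available) {
        tasm_->root_array_available = false;  // ebx is free for explicit use
      }
      ~NoRootArrayScope() { tasm_->root_array_available = previous_; }

     private:
      FakeTurboAssembler* tasm_;
      bool previous_;
    };
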
@@ -270,8 +292,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Pshufd(dst, Operand(src), shuffle);
}
void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
- void Psraw(XMMRegister dst, int8_t shift);
- void Psrlw(XMMRegister dst, int8_t shift);
+ void Psraw(XMMRegister dst, uint8_t shift);
+ void Psrlw(XMMRegister dst, uint8_t shift);
// SSE/SSE2 instructions with AVX version.
#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
@@ -380,15 +402,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void Palignr(XMMRegister dst, Operand src, uint8_t imm8);
- void Pextrb(Register dst, XMMRegister src, int8_t imm8);
- void Pextrw(Register dst, XMMRegister src, int8_t imm8);
- void Pextrd(Register dst, XMMRegister src, int8_t imm8);
- void Pinsrd(XMMRegister dst, Register src, int8_t imm8,
- bool is_64_bits = false) {
- Pinsrd(dst, Operand(src), imm8, is_64_bits);
+ void Pextrb(Register dst, XMMRegister src, uint8_t imm8);
+ void Pextrw(Register dst, XMMRegister src, uint8_t imm8);
+ void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
+ void Pinsrd(XMMRegister dst, Register src, uint8_t imm8) {
+ Pinsrd(dst, Operand(src), imm8);
}
- void Pinsrd(XMMRegister dst, Operand src, int8_t imm8,
- bool is_64_bits = false);
+ void Pinsrd(XMMRegister dst, Operand src, uint8_t imm8);
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
@@ -460,10 +480,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
+ MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
+ : TurboAssembler(options, buffer, size) {}
+
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
+
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
@@ -478,34 +502,32 @@ class MacroAssembler : public TurboAssembler {
void Set(Operand dst, int32_t x) { mov(dst, Immediate(x)); }
// Operations on roots in the root-array.
- void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
+ void CompareRoot(Register with, Register scratch, RootIndex index);
// These methods can only be used with constant roots (i.e. non-writable
// and not in new space).
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(Operand with, Heap::RootListIndex index);
- void PushRoot(Heap::RootListIndex index);
+ void CompareRoot(Register with, RootIndex index);
+ void CompareRoot(Operand with, RootIndex index);
+ void PushRoot(RootIndex index);
// Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal,
Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
- void JumpIfRoot(Operand with, Heap::RootListIndex index, Label* if_equal,
+ void JumpIfRoot(Operand with, RootIndex index, Label* if_equal,
Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
// Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(Register with, Heap::RootListIndex index,
- Label* if_not_equal,
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal,
Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
- void JumpIfNotRoot(Operand with, Heap::RootListIndex index,
- Label* if_not_equal,
+ void JumpIfNotRoot(Operand with, RootIndex index, Label* if_not_equal,
Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
@@ -585,9 +607,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, Register new_target,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Register function, const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 0b5e58b92e..f730c50555 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -4,11 +4,13 @@
#include "src/ic/accessor-assembler.h"
+#include "src/ast/ast.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/ic/handler-configuration.h"
#include "src/ic/ic.h"
+#include "src/ic/keyed-store-generic.h"
#include "src/ic/stub-cache.h"
#include "src/objects-inl.h"
#include "src/objects/module.h"
@@ -127,7 +129,7 @@ void AccessorAssembler::HandlePolymorphicCase(
Label next_entry(this);
TNode<MaybeObject> maybe_cached_map =
LoadWeakFixedArrayElement(feedback, map_index);
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_cached_map));
+ CSA_ASSERT(this, IsWeakOrCleared(maybe_cached_map));
GotoIf(IsNotWeakReferenceTo(maybe_cached_map, CAST(receiver_map)),
&next_entry);
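
The IsWeakOrClearedHeapObject -> IsWeakOrCleared and ToWeakHeapObject ->
GetHeapObjectAssumeWeak changes used throughout this file are renames of the
MaybeObject predicates, not behavior changes. As a reminder of what they
test, a standalone model of low-bit-tagged weak pointers; the tag values are
assumptions for illustration, not V8's documented encoding:

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;
    constexpr Address kWeakHeapObjectMask = 0x3;  // assumed tag bits
    constexpr Address kWeakTag = 0x3;             // assumed weak tag
    constexpr Address kClearedWeakValue = 0x3;    // assumed cleared sentinel

    bool IsWeakOrCleared(Address maybe_object) {
      return (maybe_object & kWeakHeapObjectMask) == kWeakTag;
    }

    bool IsCleared(Address maybe_object) {
      return maybe_object == kClearedWeakValue;
    }

    Address GetHeapObjectAssumeWeak(Address maybe_object) {
      assert(IsWeakOrCleared(maybe_object) && !IsCleared(maybe_object));
      return maybe_object & ~kWeakHeapObjectMask;  // strip the tag bits
    }
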
@@ -151,7 +153,7 @@ void AccessorAssembler::HandlePolymorphicCase(
Label next_entry(this);
TNode<MaybeObject> maybe_cached_map =
LoadWeakFixedArrayElement(feedback, index);
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_cached_map));
+ CSA_ASSERT(this, IsWeakOrCleared(maybe_cached_map));
GotoIf(IsNotWeakReferenceTo(maybe_cached_map, CAST(receiver_map)),
&next_entry);
@@ -241,9 +243,9 @@ void AccessorAssembler::HandleLoadAccessor(
[=] { return LoadHandlerDataField(handler, 3); },
[=] { return LoadHandlerDataField(handler, 2); });
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_context));
- CSA_CHECK(this, IsNotClearedWeakHeapObject(maybe_context));
- TNode<Object> context = ToWeakHeapObject(maybe_context);
+ CSA_ASSERT(this, IsWeakOrCleared(maybe_context));
+ CSA_CHECK(this, IsNotCleared(maybe_context));
+ TNode<Object> context = GetHeapObjectAssumeWeak(maybe_context);
GotoIf(IsRuntimeCallStatsEnabled(), &runtime);
{
@@ -700,8 +702,9 @@ Node* AccessorAssembler::HandleProtoHandler(
BIND(&if_do_access_check);
{
TNode<MaybeObject> data2 = LoadHandlerDataField(handler, 2);
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(data2));
- TNode<Object> expected_native_context = ToWeakHeapObject(data2, miss);
+ CSA_ASSERT(this, IsWeakOrCleared(data2));
+ TNode<Object> expected_native_context =
+ GetHeapObjectAssumeWeak(data2, miss);
EmitAccessCheck(expected_native_context, p->context, p->receiver, &done,
miss);
}
@@ -773,8 +776,8 @@ void AccessorAssembler::HandleLoadICProtoHandler(
// For regular holders, having passed the receiver map check and the
// validity cell check implies that |holder| is alive. However, for global
// object receivers, |maybe_holder| may be cleared.
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_holder));
- Node* holder = ToWeakHeapObject(maybe_holder, miss);
+ CSA_ASSERT(this, IsWeakOrCleared(maybe_holder));
+ Node* holder = GetHeapObjectAssumeWeak(maybe_holder, miss);
var_holder->Bind(holder);
Goto(&done);
@@ -807,9 +810,14 @@ void AccessorAssembler::EmitAccessCheck(Node* expected_native_context,
void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable,
Label* readonly) {
- // Accessor properties never have the READ_ONLY attribute set.
- GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
- readonly);
+ if (readonly) {
+ // Accessor properties never have the READ_ONLY attribute set.
+ GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
+ readonly);
+ } else {
+ CSA_ASSERT(this, IsNotSetWord32(details,
+ PropertyDetails::kAttributesReadOnlyMask));
+ }
Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
GotoIf(Word32Equal(kind, Int32Constant(kData)), writable);
// Fall through if it's an accessor property.
@@ -907,7 +915,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_nonsmi_handler);
{
- GotoIf(IsWeakOrClearedHeapObject(handler), &store_transition_or_global);
+ GotoIf(IsWeakOrCleared(handler), &store_transition_or_global);
TNode<HeapObject> strong_handler = CAST(handler);
TNode<Map> handler_map = LoadMap(strong_handler);
Branch(IsCodeMap(handler_map), &call_handler, &if_proto_handler);
@@ -930,8 +938,9 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&store_transition_or_global);
{
// Load value or miss if the {handler} weak cell is cleared.
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(handler));
- TNode<HeapObject> map_or_property_cell = ToWeakHeapObject(handler, miss);
+ CSA_ASSERT(this, IsWeakOrCleared(handler));
+ TNode<HeapObject> map_or_property_cell =
+ GetHeapObjectAssumeWeak(handler, miss);
Label store_global(this), store_transition(this);
Branch(IsMap(map_or_property_cell), &store_transition, &store_global);
@@ -946,7 +955,8 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&store_transition);
{
TNode<Map> map = CAST(map_or_property_cell);
- HandleStoreICTransitionMapHandlerCase(p, map, miss, false);
+ HandleStoreICTransitionMapHandlerCase(p, map, miss,
+ kCheckPrototypeValidity);
Return(p->value);
}
}
@@ -954,10 +964,13 @@ void AccessorAssembler::HandleStoreICHandlerCase(
void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
const StoreICParameters* p, TNode<Map> transition_map, Label* miss,
- bool validate_transition_handler) {
- Node* maybe_validity_cell =
- LoadObjectField(transition_map, Map::kPrototypeValidityCellOffset);
- CheckPrototypeValidityCell(maybe_validity_cell, miss);
+ StoreTransitionMapFlags flags) {
+ DCHECK_EQ(0, flags & ~kStoreTransitionMapFlagsMask);
+ if (flags & kCheckPrototypeValidity) {
+ Node* maybe_validity_cell =
+ LoadObjectField(transition_map, Map::kPrototypeValidityCellOffset);
+ CheckPrototypeValidityCell(maybe_validity_cell, miss);
+ }
TNode<Uint32T> bitfield3 = LoadMapBitField3(transition_map);
CSA_ASSERT(this, IsClearWord32<Map::IsDictionaryMapBit>(bitfield3));
@@ -971,7 +984,7 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
Node* factor = IntPtrConstant(DescriptorArray::kEntrySize);
TNode<IntPtrT> last_key_index = UncheckedCast<IntPtrT>(IntPtrAdd(
IntPtrConstant(DescriptorArray::ToKeyIndex(-1)), IntPtrMul(nof, factor)));
- if (validate_transition_handler) {
+ if (flags & kValidateTransitionHandler) {
Node* key = LoadWeakFixedArrayElement(descriptors, last_key_index);
GotoIf(WordNotEqual(key, p->name), miss);
} else {
@@ -981,16 +994,20 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
p->name));
}
Node* details = LoadDetailsByKeyIndex(descriptors, last_key_index);
- if (validate_transition_handler) {
+ if (flags & kValidateTransitionHandler) {
// Follow transitions only in the following cases:
// 1) name is a non-private symbol and attributes equal to NONE,
// 2) name is a private symbol and attributes equal to DONT_ENUM.
Label attributes_ok(this);
- const int kAttributesDontDeleteReadOnlyMask =
+ const int kKindAndAttributesDontDeleteReadOnlyMask =
+ PropertyDetails::KindField::kMask |
PropertyDetails::kAttributesDontDeleteMask |
PropertyDetails::kAttributesReadOnlyMask;
- // Both DontDelete and ReadOnly attributes must not be set.
- GotoIf(IsSetWord32(details, kAttributesDontDeleteReadOnlyMask), miss);
+ STATIC_ASSERT(kData == 0);
+ // Both DontDelete and ReadOnly attributes must not be set and it has to be
+ // a kData property.
+ GotoIf(IsSetWord32(details, kKindAndAttributesDontDeleteReadOnlyMask),
+ miss);
// DontEnum attribute is allowed only for private symbols and vice versa.
Branch(Word32Equal(
@@ -1035,9 +1052,8 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
Node* value_map = LoadMap(value);
// While supporting mutable HeapNumbers would be straightforward, such
// objects should not end up here anyway.
- CSA_ASSERT(this,
- WordNotEqual(value_map,
- LoadRoot(Heap::kMutableHeapNumberMapRootIndex)));
+ CSA_ASSERT(this, WordNotEqual(value_map,
+ LoadRoot(RootIndex::kMutableHeapNumberMap)));
Branch(IsHeapNumberMap(value_map), &all_fine, bailout);
}
@@ -1060,7 +1076,8 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
&all_fine);
// Cleared weak references count as FieldType::None, which can't hold any
// value.
- TNode<Map> field_type_map = CAST(ToWeakHeapObject(field_type, bailout));
+ TNode<Map> field_type_map =
+ CAST(GetHeapObjectAssumeWeak(field_type, bailout));
// FieldType::Class(...) performs a map check.
Branch(WordEqual(LoadMap(value), field_type_map), &all_fine, bailout);
}
@@ -1270,7 +1287,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
TNode<MaybeObject> maybe_transition_map =
LoadHandlerDataField(handler, 1);
TNode<Map> transition_map =
- CAST(ToWeakHeapObject(maybe_transition_map, miss));
+ CAST(GetHeapObjectAssumeWeak(maybe_transition_map, miss));
GotoIf(IsDeprecatedMap(transition_map), miss);
@@ -1312,8 +1329,8 @@ void AccessorAssembler::HandleStoreICProtoHandler(
&if_add_normal);
TNode<MaybeObject> maybe_holder = LoadHandlerDataField(handler, 1);
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_holder));
- TNode<Object> holder = ToWeakHeapObject(maybe_holder, miss);
+ CSA_ASSERT(this, IsWeakOrCleared(maybe_holder));
+ TNode<Object> holder = GetHeapObjectAssumeWeak(maybe_holder, miss);
GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kGlobalProxy)),
&if_store_global_proxy);
@@ -1374,11 +1391,10 @@ void AccessorAssembler::HandleStoreICProtoHandler(
[=] { return LoadHandlerDataField(handler, 3); },
[=] { return LoadHandlerDataField(handler, 2); });
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_context));
- TNode<Object> context =
- Select<Object>(IsClearedWeakHeapObject(maybe_context),
- [=] { return SmiConstant(0); },
- [=] { return ToWeakHeapObject(maybe_context); });
+ CSA_ASSERT(this, IsWeakOrCleared(maybe_context));
+ TNode<Object> context = Select<Object>(
+ IsCleared(maybe_context), [=] { return SmiConstant(0); },
+ [=] { return GetHeapObjectAssumeWeak(maybe_context); });
Node* foreign = LoadObjectField(call_handler_info,
CallHandlerInfo::kJsCallbackOffset);
@@ -1455,7 +1471,7 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
TailCallRuntime(Runtime::kSetPropertyWithReceiver, p->context, proxy,
p->name, p->value, p->receiver, language_mode);
} else {
- Node* name = ToName(p->context, p->name);
+ Node* name = CallBuiltin(Builtins::kToName, p->context, p->name);
TailCallBuiltin(Builtins::kProxySetProperty, p->context, proxy, name,
p->value, p->receiver, language_mode);
}
@@ -1575,7 +1591,7 @@ Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
GotoIf(TaggedIsSmi(maybe_field_type), &done);
// Check that value type matches the field type.
{
- Node* field_type = ToWeakHeapObject(maybe_field_type, bailout);
+ Node* field_type = GetHeapObjectAssumeWeak(maybe_field_type, bailout);
Branch(WordEqual(LoadMap(value), field_type), &done, bailout);
}
BIND(&done);
@@ -1855,7 +1871,7 @@ void AccessorAssembler::EmitElementLoad(
GotoIf(IsDetachedBuffer(buffer), miss);
// Bounds check.
- Node* length = SmiUntag(LoadTypedArrayLength(CAST(object)));
+ Node* length = SmiUntag(LoadJSTypedArrayLength(CAST(object)));
GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
@@ -2082,7 +2098,7 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
Comment("check if string");
GotoIfNot(IsStringInstanceType(instance_type), slow);
Comment("load string character");
- Node* length = LoadAndUntagObjectField(receiver, String::kLengthOffset);
+ TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
GotoIfNot(UintPtrLessThan(index, length), slow);
IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
TailCallBuiltin(Builtins::kStringCharAt, NoContextConstant(), receiver,
@@ -2395,7 +2411,8 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
BIND(&try_polymorphic);
{
- TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
+ TNode<HeapObject> strong_feedback =
+ GetHeapObjectIfStrong(feedback, &miss);
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &stub_call);
HandlePolymorphicCase(recv_map, CAST(strong_feedback), &if_handler,
&var_handler, &miss, 2);
@@ -2443,7 +2460,7 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
HandleLoadICHandlerCase(p, CAST(var_handler.value()), &miss, &direct_exit);
BIND(&try_polymorphic);
- TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
+ TNode<HeapObject> strong_feedback = GetHeapObjectIfStrong(feedback, &miss);
{
// Check polymorphic case.
Comment("LoadIC_try_polymorphic");
@@ -2480,7 +2497,7 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
{
// Check megamorphic case.
- GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ GotoIfNot(WordEqual(feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
&try_uninitialized);
TryProbeStubCache(isolate()->load_stub_cache(), p->receiver, p->name,
@@ -2490,9 +2507,8 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
BIND(&try_uninitialized);
{
// Check uninitialized case.
- GotoIfNot(
- WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
- miss);
+ GotoIfNot(WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol)),
+ miss);
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kLoadIC_Uninitialized),
p->context, p->receiver, p->name, p->slot, p->vector);
@@ -2508,7 +2524,7 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
// Optimistically write the state transition to the vector.
StoreFeedbackVectorSlot(p->vector, p->slot,
- LoadRoot(Heap::kpremonomorphic_symbolRootIndex),
+ LoadRoot(RootIndex::kpremonomorphic_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
StoreWeakReferenceInFeedbackVector(p->vector, p->slot, receiver_map,
kPointerSize, SMI_PARAMETERS);
@@ -2534,7 +2550,7 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
{
// Undo the optimistic state transition.
StoreFeedbackVectorSlot(p->vector, p->slot,
- LoadRoot(Heap::kuninitialized_symbolRootIndex),
+ LoadRoot(RootIndex::kuninitialized_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
@@ -2580,9 +2596,9 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
BIND(&if_property_cell);
{
// Load value or try handler case if the weak reference is cleared.
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_weak_ref));
+ CSA_ASSERT(this, IsWeakOrCleared(maybe_weak_ref));
TNode<PropertyCell> property_cell =
- CAST(ToWeakHeapObject(maybe_weak_ref, try_handler));
+ CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, try_handler));
TNode<Object> value =
LoadObjectField(property_cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(value, TheHoleConstant()), miss);
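
For orientation, the fast path above reads as: dereference the weak pointer to the PropertyCell, bailing to try_handler if it was cleared by GC, then bail to miss if the cell holds the hole sentinel. A compilable sketch under those assumptions (CellModel and Outcome are invented names, and nullopt stands in for TheHole):

    #include <optional>

    struct CellModel {
      bool cleared;              // weak reference already collected
      std::optional<int> value;  // nullopt stands in for TheHole
    };

    enum class Outcome { kValue, kTryHandler, kMiss };

    Outcome LoadGlobalFast(const CellModel& cell, int* out) {
      if (cell.cleared) return Outcome::kTryHandler;  // cleared weak ref
      if (!cell.value) return Outcome::kMiss;         // hole: invalidated
      *out = *cell.value;
      return Outcome::kValue;
    }
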
@@ -2616,8 +2632,7 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(vector, slot, kPointerSize, slot_mode);
TNode<Object> handler = CAST(feedback_element);
- GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
- miss);
+ GotoIf(WordEqual(handler, LoadRoot(RootIndex::kuninitialized_symbol)), miss);
OnNonExistent on_nonexistent = typeof_mode == NOT_INSIDE_TYPEOF
? OnNonExistent::kThrowReferenceError
@@ -2660,7 +2675,7 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
}
BIND(&try_polymorphic);
- TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
+ TNode<HeapObject> strong_feedback = GetHeapObjectIfStrong(feedback, &miss);
{
// Check polymorphic case.
Comment("KeyedLoadIC_try_polymorphic");
@@ -2673,9 +2688,9 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
{
// Check megamorphic case.
Comment("KeyedLoadIC_try_megamorphic");
- GotoIfNot(WordEqual(strong_feedback,
- LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &try_polymorphic_name);
+ GotoIfNot(
+ WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
+ &try_polymorphic_name);
// TODO(jkummerow): Inline this? Or some of it?
TailCallBuiltin(Builtins::kKeyedLoadIC_Megamorphic, p->context, p->receiver,
p->name, p->slot, p->vector);
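
The same feedback-dispatch ladder recurs in every IC in this file: monomorphic, then polymorphic, then megamorphic, then uninitialized, then miss. A rough sketch of the ordering; the states are real feedback states, but the dispatch bodies are paraphrases, not the CSA code:

    #include <cstdio>

    enum class Feedback {
      kMonomorphic, kPolymorphic, kMegamorphic, kUninitialized, kOther
    };

    const char* Dispatch(Feedback f) {
      switch (f) {
        case Feedback::kMonomorphic:
          return "weak (map, handler) pair matches the receiver map";
        case Feedback::kPolymorphic:
          return "search a WeakFixedArray of (map, handler) pairs";
        case Feedback::kMegamorphic:
          return "probe the stub cache (see TryProbeStubCache)";
        case Feedback::kUninitialized:
          return "take the _Uninitialized path";
        default:
          return "miss: fall back to the runtime";
      }
    }

    int main() { std::puts(Dispatch(Feedback::kMegamorphic)); }
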
@@ -2738,29 +2753,40 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
VARIABLE(var_index, MachineType::PointerRepresentation());
- VARIABLE(var_unique, MachineRepresentation::kTagged);
- var_unique.Bind(p->name); // Dummy initialization.
- Label if_index(this), if_unique_name(this), if_notunique(this), slow(this);
+ VARIABLE(var_unique, MachineRepresentation::kTagged, p->name);
+ Label if_index(this), if_unique_name(this), if_notunique(this),
+ if_other(this, Label::kDeferred), if_runtime(this, Label::kDeferred);
Node* receiver = p->receiver;
- GotoIf(TaggedIsSmi(receiver), &slow);
- Node* receiver_map = LoadMap(receiver);
- Node* instance_type = LoadMapInstanceType(receiver_map);
+ GotoIf(TaggedIsSmi(receiver), &if_runtime);
+
+ TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+ &if_other, &if_notunique);
- TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique, &slow,
- &if_notunique);
+ BIND(&if_other);
+ {
+ Node* name = CallBuiltin(Builtins::kToName, p->context, p->name);
+ var_unique.Bind(name);
+ TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique,
+ &if_runtime, &if_notunique);
+ }
BIND(&if_index);
{
+ Node* receiver_map = LoadMap(receiver);
+ Node* instance_type = LoadMapInstanceType(receiver_map);
GenericElementLoad(receiver, receiver_map, instance_type, var_index.value(),
- &slow);
+ &if_runtime);
}
BIND(&if_unique_name);
{
LoadICParameters pp = *p;
pp.name = var_unique.value();
- GenericPropertyLoad(receiver, receiver_map, instance_type, &pp, &slow);
+ Node* receiver_map = LoadMap(receiver);
+ Node* instance_type = LoadMapInstanceType(receiver_map);
+ GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
+ &if_runtime);
}
BIND(&if_notunique);
@@ -2769,10 +2795,11 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
// Ideally we could return undefined directly here if the name is not
// found in the string table, i.e. it was never internalized, but that
// invariant doesn't hold with named property interceptors (at this
- // point), so we take the {slow} path instead.
+ // point), so we take the {if_runtime} path instead.
Label if_in_string_table(this);
- TryInternalizeString(p->name, &if_index, &var_index, &if_in_string_table,
- &var_unique, &slow, &slow);
+ TryInternalizeString(var_unique.value(), &if_index, &var_index,
+ &if_in_string_table, &var_unique, &if_runtime,
+ &if_runtime);
BIND(&if_in_string_table);
{
@@ -2783,21 +2810,23 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
// cache. We may want to re-evaluate that in the future.
LoadICParameters pp = *p;
pp.name = var_unique.value();
- GenericPropertyLoad(receiver, receiver_map, instance_type, &pp, &slow,
- kDontUseStubCache);
+ Node* receiver_map = LoadMap(receiver);
+ Node* instance_type = LoadMapInstanceType(receiver_map);
+ GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
+ &if_runtime, kDontUseStubCache);
}
} else {
- Goto(&slow);
+ Goto(&if_runtime);
}
}
- BIND(&slow);
+ BIND(&if_runtime);
{
Comment("KeyedLoadGeneric_slow");
IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
// TODO(jkummerow): Should we use the GetProperty TF stub instead?
- TailCallRuntime(Runtime::kKeyedGetProperty, p->context, p->receiver,
- p->name);
+ TailCallRuntime(Runtime::kGetProperty, p->context, p->receiver,
+ var_unique.value());
}
}
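
One effect visible in this hunk: a key that is neither an index nor a unique name is now converted with ToName exactly once, and the runtime fallback receives the converted key (var_unique) via Runtime::kGetProperty instead of re-converting the raw key. A sketch of that convert-once shape (ToNameModel and the map-based object are stand-ins, not V8 types):

    #include <map>
    #include <string>

    // Stand-in for ToName: a potentially side-effecting conversion
    // that must run exactly once per access.
    std::string ToNameModel(int raw_key, int* conversions) {
      ++*conversions;
      return "k" + std::to_string(raw_key);
    }

    int LoadGenericModel(const std::map<std::string, int>& obj,
                         int raw_key, int* conversions) {
      std::string unique = ToNameModel(raw_key, conversions);  // once
      auto it = obj.find(unique);                              // fast path
      if (it != obj.end()) return it->second;
      // The slow path sees the *converted* key, mirroring the switch
      // from Runtime::kKeyedGetProperty(p->name) to
      // Runtime::kGetProperty(var_unique).
      return 0;
    }
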
@@ -2868,7 +2897,7 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
}
BIND(&try_polymorphic);
- TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
+ TNode<HeapObject> strong_feedback = GetHeapObjectIfStrong(feedback, &miss);
{
// Check polymorphic case.
Comment("StoreIC_try_polymorphic");
@@ -2880,9 +2909,9 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
BIND(&try_megamorphic);
{
// Check megamorphic case.
- GotoIfNot(WordEqual(strong_feedback,
- LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &try_uninitialized);
+ GotoIfNot(
+ WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
+ &try_uninitialized);
TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
&if_handler, &var_handler, &miss);
@@ -2890,9 +2919,9 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
BIND(&try_uninitialized);
{
// Check uninitialized case.
- GotoIfNot(WordEqual(strong_feedback,
- LoadRoot(Heap::kuninitialized_symbolRootIndex)),
- &miss);
+ GotoIfNot(
+ WordEqual(strong_feedback, LoadRoot(RootIndex::kuninitialized_symbol)),
+ &miss);
TailCallBuiltin(Builtins::kStoreIC_Uninitialized, p->context, p->receiver,
p->name, p->value, p->slot, p->vector);
}
@@ -2912,9 +2941,9 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
BIND(&if_property_cell);
{
Label try_handler(this), miss(this, Label::kDeferred);
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_weak_ref));
+ CSA_ASSERT(this, IsWeakOrCleared(maybe_weak_ref));
TNode<PropertyCell> property_cell =
- CAST(ToWeakHeapObject(maybe_weak_ref, &try_handler));
+ CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, &try_handler));
ExitPoint direct_exit(this);
StoreGlobalIC_PropertyCellCase(property_cell, pp->value, &direct_exit,
@@ -2926,7 +2955,7 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
TNode<MaybeObject> handler = LoadFeedbackVectorSlot(
pp->vector, pp->slot, kPointerSize, SMI_PARAMETERS);
- GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ GotoIf(WordEqual(handler, LoadRoot(RootIndex::kuninitialized_symbol)),
&miss);
StoreICParameters p = *pp;
@@ -3049,7 +3078,7 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
}
BIND(&try_polymorphic);
- TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
+ TNode<HeapObject> strong_feedback = GetHeapObjectIfStrong(feedback, &miss);
{
// CheckPolymorphic case.
Comment("KeyedStoreIC_try_polymorphic");
@@ -3063,9 +3092,9 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
{
// Check megamorphic case.
Comment("KeyedStoreIC_try_megamorphic");
- GotoIfNot(WordEqual(strong_feedback,
- LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &try_polymorphic_name);
+ GotoIfNot(
+ WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
+ &try_polymorphic_name);
TailCallBuiltin(Builtins::kKeyedStoreIC_Megamorphic, p->context,
p->receiver, p->name, p->value, p->slot, p->vector);
}
@@ -3123,7 +3152,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
TNode<MaybeObject> maybe_transition_map =
LoadHandlerDataField(CAST(handler), 1);
TNode<Map> transition_map =
- CAST(ToWeakHeapObject(maybe_transition_map, &miss));
+ CAST(GetHeapObjectAssumeWeak(maybe_transition_map, &miss));
GotoIf(IsDeprecatedMap(transition_map), &miss);
Node* code = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
CSA_ASSERT(this, IsCode(code));
@@ -3133,7 +3162,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
}
BIND(&try_polymorphic);
- TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
+ TNode<HeapObject> strong_feedback = GetHeapObjectIfStrong(feedback, &miss);
{
Comment("StoreInArrayLiteralIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)),
@@ -3145,15 +3174,14 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
BIND(&try_megamorphic);
{
Comment("StoreInArrayLiteralIC_try_megamorphic");
- CSA_ASSERT(
- this,
- Word32Or(WordEqual(strong_feedback,
- LoadRoot(Heap::kuninitialized_symbolRootIndex)),
- WordEqual(strong_feedback,
- LoadRoot(Heap::kmegamorphic_symbolRootIndex))));
- GotoIfNot(WordEqual(strong_feedback,
- LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &miss);
+ CSA_ASSERT(this,
+ Word32Or(WordEqual(strong_feedback,
+ LoadRoot(RootIndex::kuninitialized_symbol)),
+ WordEqual(strong_feedback,
+ LoadRoot(RootIndex::kmegamorphic_symbol))));
+ GotoIfNot(
+ WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
+ &miss);
TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, p->context,
p->value, p->receiver, p->name);
}
@@ -3183,6 +3211,31 @@ void AccessorAssembler::GenerateLoadIC() {
LoadIC(&p);
}
+void AccessorAssembler::GenerateLoadIC_Megamorphic() {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ ExitPoint direct_exit(this);
+ TVARIABLE(MaybeObject, var_handler);
+ Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
+
+ TryProbeStubCache(isolate()->load_stub_cache(), receiver, name, &if_handler,
+ &var_handler, &miss);
+
+ BIND(&if_handler);
+ LoadICParameters p(context, receiver, name, slot, vector);
+ HandleLoadICHandlerCase(&p, CAST(var_handler.value()), &miss, &direct_exit);
+
+ BIND(&miss);
+ direct_exit.ReturnCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
+ slot, vector);
+}
+
void AccessorAssembler::GenerateLoadIC_Noninlined() {
typedef LoadWithVectorDescriptor Descriptor;
@@ -3238,6 +3291,19 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
}
+void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
+ typedef LoadDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* vector = LoadFeedbackVectorForStub();
+
+ TailCallBuiltin(Builtins::kLoadIC_Megamorphic, context, receiver, name, slot,
+ vector);
+}
+
void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
typedef LoadGlobalWithVectorDescriptor Descriptor;
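
The new _Megamorphic trampoline follows the existing trampoline convention in this file: it takes no vector parameter, recovers the feedback vector from the caller's frame (LoadFeedbackVectorForStub), and tail-calls the full builtin. The shape, as plain functions; all names here are stand-ins for the CSA builtins:

    #include <cstdio>

    static int g_vector_from_frame = 42;  // stand-in for the frame walk

    int* LoadFeedbackVectorForStubModel() { return &g_vector_from_frame; }

    int LoadIC_MegamorphicModel(int receiver, int name, int slot,
                                int* vector) {
      return receiver + name + slot + *vector;  // placeholder IC body
    }

    // Trampoline: same signature minus the vector, which it derives.
    int LoadICTrampoline_MegamorphicModel(int receiver, int name, int slot) {
      return LoadIC_MegamorphicModel(receiver, name, slot,
                                     LoadFeedbackVectorForStubModel());
    }

    int main() {
      std::printf("%d\n", LoadICTrampoline_MegamorphicModel(1, 2, 3));
    }
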
@@ -3280,6 +3346,19 @@ void AccessorAssembler::GenerateKeyedLoadIC() {
KeyedLoadIC(&p);
}
+void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ KeyedLoadICGeneric(&p);
+}
+
void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
typedef LoadDescriptor Descriptor;
@@ -3293,17 +3372,17 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
vector);
}
-void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
- typedef LoadWithVectorDescriptor Descriptor;
+void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
+ typedef LoadDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
Node* context = Parameter(Descriptor::kContext);
+ Node* vector = LoadFeedbackVectorForStub();
- LoadICParameters p(context, receiver, name, slot, vector);
- KeyedLoadICGeneric(&p);
+ TailCallBuiltin(Builtins::kKeyedLoadIC_Megamorphic, context, receiver, name,
+ slot, vector);
}
void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
@@ -3414,6 +3493,76 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
StoreInArrayLiteralIC(&p);
}
+void AccessorAssembler::GenerateCloneObjectIC_Slow() {
+ typedef CloneObjectWithVectorDescriptor Descriptor;
+ TNode<HeapObject> source = CAST(Parameter(Descriptor::kSource));
+ TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ // The Slow case uses the same call interface as CloneObjectIC, so that it
+ // can be tail called from it. However, the feedback slot and vector are not
+ // used.
+
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<JSFunction> object_fn =
+ CAST(LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
+ TNode<Map> initial_map = CAST(
+ LoadObjectField(object_fn, JSFunction::kPrototypeOrInitialMapOffset));
+ CSA_ASSERT(this, IsMap(initial_map));
+
+ TNode<JSObject> result = CAST(AllocateJSObjectFromMap(initial_map));
+
+ {
+ Label did_set_proto_if_needed(this);
+ TNode<BoolT> is_null_proto = SmiNotEqual(
+ SmiAnd(flags, SmiConstant(ObjectLiteral::kHasNullPrototype)),
+ SmiConstant(Smi::kZero));
+ GotoIfNot(is_null_proto, &did_set_proto_if_needed);
+
+ CallRuntime(Runtime::kInternalSetPrototype, context, result,
+ NullConstant());
+
+ Goto(&did_set_proto_if_needed);
+ BIND(&did_set_proto_if_needed);
+ }
+
+ ReturnIf(IsNullOrUndefined(source), result);
+
+ CSA_ASSERT(this, IsJSReceiver(source));
+
+ Label call_runtime(this, Label::kDeferred);
+ Label done(this);
+
+ TNode<Map> map = LoadMap(source);
+ TNode<Int32T> type = LoadMapInstanceType(map);
+ {
+ Label cont(this);
+ GotoIf(IsJSObjectInstanceType(type), &cont);
+ GotoIfNot(IsStringInstanceType(type), &done);
+ Branch(SmiEqual(LoadStringLengthAsSmi(CAST(source)), SmiConstant(0)), &done,
+ &call_runtime);
+ BIND(&cont);
+ }
+
+ GotoIfNot(IsEmptyFixedArray(LoadElements(CAST(source))), &call_runtime);
+
+ ForEachEnumerableOwnProperty(
+ context, map, CAST(source),
+ [=](TNode<Name> key, TNode<Object> value) {
+ KeyedStoreGenericGenerator::SetPropertyInLiteral(state(), context,
+ result, key, value);
+ },
+ &call_runtime);
+ Goto(&done);
+
+ BIND(&call_runtime);
+ CallRuntime(Runtime::kCopyDataProperties, context, result, source);
+
+ Goto(&done);
+ BIND(&done);
+ Return(result);
+}
+
void AccessorAssembler::GenerateCloneObjectIC() {
typedef CloneObjectWithVectorDescriptor Descriptor;
Node* source = Parameter(Descriptor::kSource);
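
In outline, the new CloneObjectIC_Slow allocates a fresh object from the initial Object map, optionally gives it a null prototype per the literal flags, returns it unchanged for null/undefined sources, shallow-copies enumerable own data properties, and punts to Runtime::kCopyDataProperties for anything harder (accessors, non-empty elements, non-empty strings). A semantics-only sketch with invented container types:

    #include <map>
    #include <optional>
    #include <string>

    struct ObjModel {
      bool null_prototype = false;
      std::map<std::string, int> own_enumerable;  // data properties only
    };

    ObjModel CloneSlowModel(const std::optional<ObjModel>& source,
                            bool has_null_proto) {
      ObjModel result;
      result.null_prototype = has_null_proto;
      if (!source) return result;  // null/undefined source: empty clone
      // The CSA fast path handles exactly this shallow, own-enumerable
      // copy; everything else goes to Runtime::kCopyDataProperties.
      result.own_enumerable = source->own_enumerable;
      return result;
    }
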
@@ -3502,7 +3651,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
}
BIND(&try_polymorphic);
- TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
+ TNode<HeapObject> strong_feedback = GetHeapObjectIfStrong(feedback, &miss);
{
Comment("CloneObjectIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &try_megamorphic);
@@ -3513,16 +3662,16 @@ void AccessorAssembler::GenerateCloneObjectIC() {
BIND(&try_megamorphic);
{
Comment("CloneObjectIC_try_megamorphic");
- CSA_ASSERT(
- this,
- Word32Or(WordEqual(strong_feedback,
- LoadRoot(Heap::kuninitialized_symbolRootIndex)),
- WordEqual(strong_feedback,
- LoadRoot(Heap::kmegamorphic_symbolRootIndex))));
- GotoIfNot(WordEqual(strong_feedback,
- LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &miss);
- TailCallRuntime(Runtime::kCloneObjectIC_Slow, context, source, flags);
+ CSA_ASSERT(this,
+ Word32Or(WordEqual(strong_feedback,
+ LoadRoot(RootIndex::kuninitialized_symbol)),
+ WordEqual(strong_feedback,
+ LoadRoot(RootIndex::kmegamorphic_symbol))));
+ GotoIfNot(
+ WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
+ &miss);
+ TailCallBuiltin(Builtins::kCloneObjectIC_Slow, context, source, flags, slot,
+ vector);
}
BIND(&miss);
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 0de48e021a..3d92ab26c3 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -28,18 +28,22 @@ class AccessorAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
void GenerateLoadIC();
+ void GenerateLoadIC_Megamorphic();
void GenerateLoadIC_Noninlined();
void GenerateLoadIC_Uninitialized();
void GenerateLoadICTrampoline();
+ void GenerateLoadICTrampoline_Megamorphic();
void GenerateKeyedLoadIC();
- void GenerateKeyedLoadICTrampoline();
void GenerateKeyedLoadIC_Megamorphic();
void GenerateKeyedLoadIC_PolymorphicName();
+ void GenerateKeyedLoadICTrampoline();
+ void GenerateKeyedLoadICTrampoline_Megamorphic();
void GenerateStoreIC();
void GenerateStoreICTrampoline();
void GenerateStoreGlobalIC();
void GenerateStoreGlobalICTrampoline();
void GenerateCloneObjectIC();
+ void GenerateCloneObjectIC_Slow();
void GenerateLoadGlobalIC(TypeofMode typeof_mode);
void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
@@ -106,10 +110,16 @@ class AccessorAssembler : public CodeStubAssembler {
void HandleStoreICHandlerCase(
const StoreICParameters* p, TNode<MaybeObject> handler, Label* miss,
ICMode ic_mode, ElementSupport support_elements = kOnlyProperties);
+ enum StoreTransitionMapFlags {
+ kCheckPrototypeValidity = 1 << 0,
+ kValidateTransitionHandler = 1 << 1,
+ kStoreTransitionMapFlagsMask =
+ kCheckPrototypeValidity | kValidateTransitionHandler,
+ };
void HandleStoreICTransitionMapHandlerCase(const StoreICParameters* p,
TNode<Map> transition_map,
Label* miss,
- bool validate_transition_handler);
+ StoreTransitionMapFlags flags);
void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
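
The bool parameter becomes a two-bit flag set, so call sites can request prototype-validity checking and handler validation independently. A minimal model of the combination pattern used at the keyed-store call site further down (names suffixed Model to mark them as stand-ins):

    #include <cstdio>

    enum StoreTransitionMapFlagsModel {
      kCheckPrototypeValidityModel = 1 << 0,
      kValidateTransitionHandlerModel = 1 << 1,
    };

    void HandleTransitionModel(unsigned flags) {
      if (flags & kCheckPrototypeValidityModel)
        std::puts("check prototype chain validity");
      if (flags & kValidateTransitionHandlerModel)
        std::puts("re-validate the transition handler");
    }

    int main() {
      HandleTransitionModel(kCheckPrototypeValidityModel |
                            kValidateTransitionHandlerModel);
    }
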
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index 9016e9ba18..ebe64437c6 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -162,9 +162,8 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
&call_with_any_feedback);
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kString));
- Callable callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
- var_result.Bind(CallStub(callable, context, lhs, rhs));
+ var_result.Bind(
+ CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs));
Goto(&end);
}
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index d87ec4fdb1..e3115bdbcd 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -12,7 +12,7 @@
namespace v8 {
namespace internal {
// Holds information about possible function call optimizations.
-class CallOptimization BASE_EMBEDDED {
+class CallOptimization {
public:
CallOptimization(Isolate* isolate, Handle<Object> function);
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 86ea4f9d3b..73ab0de645 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -140,7 +140,7 @@ Handle<Object> LoadHandler::LoadFromPrototype(Isolate* isolate,
// static
Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
Handle<Map> receiver_map,
- MaybeObjectHandle holder,
+ const MaybeObjectHandle& holder,
Handle<Smi> smi_handler) {
Handle<JSReceiver> end; // null handle, means full prototype chain lookup.
MaybeObjectHandle data1 = holder;
@@ -168,7 +168,7 @@ Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
KeyedAccessLoadMode LoadHandler::GetKeyedAccessLoadMode(MaybeObject* handler) {
DisallowHeapAllocation no_gc;
if (handler->IsSmi()) {
- int const raw_handler = Smi::cast(handler->ToSmi())->value();
+ int const raw_handler = handler->cast<Smi>()->value();
Kind const kind = KindBits::decode(raw_handler);
if ((kind == kElement || kind == kIndexedString) &&
AllowOutOfBoundsBits::decode(raw_handler)) {
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 305577a2df..72ab68140e 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -150,7 +150,7 @@ class LoadHandler final : public DataHandler {
// needed (e.g., for "nonexistent"), null_value() may be passed in.
static Handle<Object> LoadFullChain(Isolate* isolate,
Handle<Map> receiver_map,
- MaybeObjectHandle holder,
+ const MaybeObjectHandle& holder,
Handle<Smi> smi_handler);
// Creates a data handler that represents a prototype chain check followed
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 640bf7250c..101703dc28 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -50,11 +50,10 @@ void IC::update_receiver_map(Handle<Object> receiver) {
bool IC::IsHandler(MaybeObject* object) {
HeapObject* heap_object;
return (object->IsSmi() && (object != nullptr)) ||
- (object->ToWeakHeapObject(&heap_object) &&
+ (object->GetHeapObjectIfWeak(&heap_object) &&
(heap_object->IsMap() || heap_object->IsPropertyCell())) ||
- (object->ToStrongHeapObject(&heap_object) &&
- (heap_object->IsDataHandler() ||
- heap_object->IsCode()));
+ (object->GetHeapObjectIfStrong(&heap_object) &&
+ (heap_object->IsDataHandler() || heap_object->IsCode()));
}
bool IC::AddressIsDeoptimizedCode() const {
diff --git a/deps/v8/src/ic/ic-stats.cc b/deps/v8/src/ic/ic-stats.cc
index c305209d48..0c33863d3d 100644
--- a/deps/v8/src/ic/ic-stats.cc
+++ b/deps/v8/src/ic/ic-stats.cc
@@ -95,7 +95,7 @@ ICInfo::ICInfo()
is_constructor(false),
is_optimized(false),
map(nullptr),
- is_dictionary_map(0),
+ is_dictionary_map(false),
number_of_own_descriptors(0) {}
void ICInfo::Reset() {
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 9237441ac9..3ca62d0bb4 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -1260,9 +1260,8 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
return result;
}
-
bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode) {
+ StoreOrigin store_origin) {
// Disable ICs for non-JSObjects for now.
Handle<Object> object = it->GetReceiver();
if (object->IsJSProxy()) return true;
@@ -1319,7 +1318,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
if (it->ExtendingNonExtensible(receiver)) return false;
it->PrepareTransitionToDataProperty(receiver, value, NONE,
- store_mode);
+ store_origin);
return it->IsCacheableTransition();
}
}
@@ -1328,7 +1327,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
receiver = it->GetStoreTarget<JSObject>();
if (it->ExtendingNonExtensible(receiver)) return false;
- it->PrepareTransitionToDataProperty(receiver, value, NONE, store_mode);
+ it->PrepareTransitionToDataProperty(receiver, value, NONE, store_origin);
return it->IsCacheableTransition();
}
@@ -1381,7 +1380,7 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode) {
+ StoreOrigin store_origin) {
// TODO(verwaest): Let SetProperty do the migration, since storing a property
// might deprecate the current map again, if value does not fit.
if (MigrateDeprecated(object)) {
@@ -1424,15 +1423,15 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
use_ic = false;
}
}
- if (use_ic) UpdateCaches(&it, value, store_mode);
+ if (use_ic) UpdateCaches(&it, value, store_origin);
MAYBE_RETURN_NULL(
- Object::SetProperty(&it, value, language_mode(), store_mode));
+ Object::SetProperty(&it, value, language_mode(), store_origin));
return value;
}
void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode) {
+ StoreOrigin store_origin) {
if (state() == UNINITIALIZED && !IsStoreGlobalIC()) {
// This is the first time we execute this inline cache. Transition
// to premonomorphic state to delay setting the monomorphic state.
@@ -1443,7 +1442,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
}
MaybeObjectHandle handler;
- if (LookupForWrite(lookup, value, store_mode)) {
+ if (LookupForWrite(lookup, value, store_origin)) {
if (IsStoreGlobalIC()) {
if (lookup->state() == LookupIterator::DATA &&
lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
@@ -1988,8 +1987,9 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (MigrateDeprecated(object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Runtime::SetObjectProperty(isolate(), object, key,
- value, language_mode()),
+ isolate(), result,
+ Runtime::SetObjectProperty(isolate(), object, key, value,
+ language_mode(), StoreOrigin::kMaybeKeyed),
Object);
return result;
}
@@ -2004,11 +2004,10 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if ((key->IsInternalizedString() &&
!String::cast(*key)->AsArrayIndex(&index)) ||
key->IsSymbol()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), store_handle,
- StoreIC::Store(object, Handle<Name>::cast(key), value,
- JSReceiver::MAY_BE_STORE_FROM_KEYED),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), store_handle,
+ StoreIC::Store(object, Handle<Name>::cast(key),
+ value, StoreOrigin::kMaybeKeyed),
+ Object);
if (vector_needs_update()) {
if (ConfigureVectorState(MEGAMORPHIC, key)) {
set_slow_stub_reason("unhandled internalized string key");
@@ -2062,10 +2061,11 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
bool receiver_was_cow =
object->IsJSArray() &&
Handle<JSArray>::cast(object)->elements()->IsCowArray();
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), store_handle,
- Runtime::SetObjectProperty(isolate(), object, key,
- value, language_mode()),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), store_handle,
+ Runtime::SetObjectProperty(isolate(), object, key, value, language_mode(),
+ StoreOrigin::kMaybeKeyed),
+ Object);
if (use_ic) {
if (!old_receiver_map.is_null()) {
@@ -2359,7 +2359,8 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
RETURN_RESULT_OR_FAILURE(
isolate,
- Runtime::SetObjectProperty(isolate, global, name, value, language_mode));
+ Runtime::SetObjectProperty(isolate, global, name, value, language_mode,
+ StoreOrigin::kMaybeKeyed));
}
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
@@ -2406,7 +2407,8 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
LanguageMode language_mode = GetLanguageModeFromSlotKind(kind);
RETURN_RESULT_OR_FAILURE(
isolate,
- Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
+ Runtime::SetObjectProperty(isolate, object, key, value, language_mode,
+ StoreOrigin::kMaybeKeyed));
}
RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Slow) {
@@ -2446,7 +2448,8 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
LanguageMode language_mode = GetLanguageModeFromSlotKind(kind);
RETURN_RESULT_OR_FAILURE(
isolate,
- Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
+ Runtime::SetObjectProperty(isolate, object, key, value, language_mode,
+ StoreOrigin::kMaybeKeyed));
}
}
@@ -2595,8 +2598,9 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
if (V8_UNLIKELY(FLAG_runtime_stats)) {
RETURN_RESULT_OR_FAILURE(
- isolate, Runtime::SetObjectProperty(isolate, receiver, name, value,
- language_mode));
+ isolate,
+ Runtime::SetObjectProperty(isolate, receiver, name, value,
+ language_mode, StoreOrigin::kMaybeKeyed));
}
DCHECK(info->IsCompatibleReceiver(*receiver));
@@ -2747,9 +2751,9 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
DCHECK_EQ(LookupIterator::INTERCEPTOR, it.state());
it.Next();
- MAYBE_RETURN(Object::SetProperty(&it, value, language_mode,
- JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED),
- ReadOnlyRoots(isolate).exception());
+ MAYBE_RETURN(
+ Object::SetProperty(&it, value, language_mode, StoreOrigin::kNamed),
+ ReadOnlyRoots(isolate).exception());
return *value;
}
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 0a831b757f..05bde1ff61 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -36,7 +36,7 @@ class IC {
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot);
- virtual ~IC() {}
+ virtual ~IC() = default;
State state() const { return state_; }
inline Address address() const;
@@ -88,7 +88,7 @@ class IC {
bool vector_needs_update() {
return (!vector_set_ &&
(state() != MEGAMORPHIC ||
- Smi::ToInt(nexus()->GetFeedbackExtra()->ToSmi()) != ELEMENT));
+ Smi::ToInt(nexus()->GetFeedbackExtra()->cast<Smi>()) != ELEMENT));
}
// Configure for most states.
@@ -296,11 +296,10 @@ class StoreIC : public IC {
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode =
- JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
+ StoreOrigin store_origin = StoreOrigin::kNamed);
bool LookupForWrite(LookupIterator* it, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode);
+ StoreOrigin store_origin);
protected:
// Stub accessors.
@@ -312,7 +311,7 @@ class StoreIC : public IC {
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode);
+ StoreOrigin store_origin);
private:
MaybeObjectHandle ComputeHandler(LookupIterator* lookup);
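
Collected in one place, the substitutions visible across the ic.cc and ic.h hunks above give the mapping from the retired JSReceiver enum to StoreOrigin; the Model suffix below marks this as a sketch, not the real declaration:

    // JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED -> StoreOrigin::kNamed
    // JSReceiver::MAY_BE_STORE_FROM_KEYED        -> StoreOrigin::kMaybeKeyed
    enum class StoreOriginModel { kMaybeKeyed, kNamed };

    // Named stores (o.x = v) default to kNamed; keyed stores (o[k] = v)
    // pass kMaybeKeyed explicitly, as KeyedStoreIC::Store now does.
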
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 23c49c8d73..2b2f15bb82 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -43,6 +43,18 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
TNode<Object> key, TNode<Object> value,
LanguageMode language_mode);
+  // Sets an own property; does not invoke setters or traverse the
+  // prototype chain.
+ void SetPropertyInLiteral(TNode<Context> context, TNode<JSObject> receiver,
+ TNode<Map> map, TNode<Name> key,
+ TNode<Object> value) {
+ Label done(this);
+ ExitPoint exit_point(this,
+ [this, &done](Node* result) { this->Goto(&done); });
+ EmitGenericPropertyStoreInLiteral(context, receiver, map, key, value,
+ &exit_point);
+ BIND(&done);
+ }
+
private:
enum UpdateLength {
kDontChangeLength,
@@ -78,6 +90,12 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
Nothing<LanguageMode>());
}
+ void EmitGenericPropertyStoreInLiteral(TNode<Context> context,
+ TNode<JSObject> receiver,
+ TNode<Map> map, TNode<Name> key,
+ TNode<Object> value,
+ ExitPoint* exit_point);
+
void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
Label* non_fast_elements,
Label* only_fast_elements);
@@ -111,6 +129,10 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
Variable* var_accessor_pair,
Variable* var_accessor_holder,
Label* readonly, Label* bailout);
+
+ TNode<Map> FindCandidateStoreICTransitionMapHandler(TNode<Map> map,
+ TNode<Name> name,
+ Label* slow);
};
void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state) {
@@ -141,6 +163,14 @@ void KeyedStoreGenericGenerator::SetProperty(
assembler.SetProperty(context, receiver, key, value, language_mode);
}
+void KeyedStoreGenericGenerator::SetPropertyInLiteral(
+ compiler::CodeAssemblerState* state, TNode<Context> context,
+ TNode<JSObject> receiver, TNode<Name> key, TNode<Object> value) {
+ KeyedStoreGenericAssembler assembler(state);
+ TNode<Map> map = assembler.LoadMap(receiver);
+ assembler.SetPropertyInLiteral(context, receiver, map, key, value);
+}
+
void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
Node* receiver_map, Label* non_fast_elements, Label* only_fast_elements) {
VARIABLE(var_map, MachineRepresentation::kTagged);
@@ -294,7 +324,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
Label check_double_elements(this), check_cow_elements(this);
Node* elements_map = LoadMap(elements);
- GotoIf(WordNotEqual(elements_map, LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ GotoIf(WordNotEqual(elements_map, LoadRoot(RootIndex::kFixedArrayMap)),
&check_double_elements);
// FixedArray backing store -> Smi or object elements.
@@ -355,7 +385,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
{
Label transition_to_double(this), transition_to_object(this);
Node* native_context = LoadNativeContext(context);
- Branch(WordEqual(LoadMap(value), LoadRoot(Heap::kHeapNumberMapRootIndex)),
+ Branch(WordEqual(LoadMap(value), LoadRoot(RootIndex::kHeapNumberMap)),
&transition_to_double, &transition_to_object);
BIND(&transition_to_double);
{
@@ -398,7 +428,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
}
BIND(&check_double_elements);
- Node* fixed_double_array_map = LoadRoot(Heap::kFixedDoubleArrayMapRootIndex);
+ Node* fixed_double_array_map = LoadRoot(RootIndex::kFixedDoubleArrayMap);
GotoIf(WordNotEqual(elements_map, fixed_double_array_map),
&check_cow_elements);
// FixedDoubleArray backing store -> double elements.
@@ -656,6 +686,71 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
BIND(&ok_to_write);
}
+TNode<Map> KeyedStoreGenericAssembler::FindCandidateStoreICTransitionMapHandler(
+ TNode<Map> map, TNode<Name> name, Label* slow) {
+ TVARIABLE(Map, var_transition_map);
+ Label simple_transition(this), transition_array(this),
+ found_handler_candidate(this);
+
+ TNode<MaybeObject> maybe_handler =
+ LoadMaybeWeakObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
+
+  // Smi -> slow
+  // Cleared weak reference -> slow
+  // Weak reference -> simple_transition
+  // Strong reference -> transition_array
+ TVARIABLE(Object, var_transition_map_or_array);
+ DispatchMaybeObject(maybe_handler, slow, slow, &simple_transition,
+ &transition_array, &var_transition_map_or_array);
+
+ BIND(&simple_transition);
+ {
+ var_transition_map = CAST(var_transition_map_or_array.value());
+ Goto(&found_handler_candidate);
+ }
+
+ BIND(&transition_array);
+ {
+ TNode<Map> maybe_handler_map =
+ LoadMap(CAST(var_transition_map_or_array.value()));
+ GotoIfNot(IsTransitionArrayMap(maybe_handler_map), slow);
+
+ TVARIABLE(IntPtrT, var_name_index);
+ Label if_found_candidate(this);
+ TNode<TransitionArray> transitions =
+ CAST(var_transition_map_or_array.value());
+ TransitionLookup(name, transitions, &if_found_candidate, &var_name_index,
+ slow);
+
+ BIND(&if_found_candidate);
+ {
+ // Given that
+ // 1) transitions with the same name are ordered in the transition
+ // array by PropertyKind and then by PropertyAttributes values,
+ // 2) kData < kAccessor,
+ // 3) NONE == 0,
+ // 4) properties with private symbol names are guaranteed to be
+ // non-enumerable (so DONT_ENUM bit in attributes is always set),
+      // the resulting map of a transitioning store, if it exists in the
+      // transition array, is expected to be the first among the transitions
+      // with the same name.
+ // See TransitionArray::CompareDetails() for details.
+ STATIC_ASSERT(kData == 0);
+ STATIC_ASSERT(NONE == 0);
+ const int kKeyToTargetOffset = (TransitionArray::kEntryTargetIndex -
+ TransitionArray::kEntryKeyIndex) *
+ kPointerSize;
+ var_transition_map = CAST(GetHeapObjectAssumeWeak(
+ LoadArrayElement(transitions, WeakFixedArray::kHeaderSize,
+ var_name_index.value(), kKeyToTargetOffset)));
+ Goto(&found_handler_candidate);
+ }
+ }
+
+ BIND(&found_handler_candidate);
+ return var_transition_map.value();
+}
+
void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TNode<JSReceiver> receiver, TNode<Map> receiver_map,
const StoreICParameters* p, ExitPoint* exit_point, Label* slow,
@@ -705,71 +800,15 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&lookup_transition);
{
Comment("lookup transition");
- TVARIABLE(Map, var_transition_map);
- Label simple_transition(this), transition_array(this),
- found_handler_candidate(this);
- TNode<MaybeObject> maybe_handler = LoadMaybeWeakObjectField(
- receiver_map, Map::kTransitionsOrPrototypeInfoOffset);
-
- // SMI -> slow
- // cleared weak reference -> slow
- // weak reference -> simple_transition
- // strong reference -> transition_array
- TVARIABLE(Object, var_transition_map_or_array);
- DispatchMaybeObject(maybe_handler, slow, slow, &simple_transition,
- &transition_array, &var_transition_map_or_array);
-
- BIND(&simple_transition);
- {
- var_transition_map = CAST(var_transition_map_or_array.value());
- Goto(&found_handler_candidate);
- }
-
- BIND(&transition_array);
- {
- TNode<Map> maybe_handler_map =
- LoadMap(CAST(var_transition_map_or_array.value()));
- GotoIfNot(IsTransitionArrayMap(maybe_handler_map), slow);
-
- TVARIABLE(IntPtrT, var_name_index);
- Label if_found_candidate(this);
- TNode<TransitionArray> transitions =
- CAST(var_transition_map_or_array.value());
- TransitionLookup(p->name, transitions, &if_found_candidate,
- &var_name_index, slow);
-
- BIND(&if_found_candidate);
- {
- // Given that
- // 1) transitions with the same name are ordered in the transition
- // array by PropertyKind and then by PropertyAttributes values,
- // 2) kData < kAccessor,
- // 3) NONE == 0,
- // 4) properties with private symbol names are guaranteed to be
- // non-enumerable (so DONT_ENUM bit in attributes is always set),
- // the resulting map of transitioning store if it exists in the
- // transition array is expected to be the first among the transitions
- // with the same name.
- // See TransitionArray::CompareDetails() for details.
- STATIC_ASSERT(kData == 0);
- STATIC_ASSERT(NONE == 0);
- const int kKeyToTargetOffset = (TransitionArray::kEntryTargetIndex -
- TransitionArray::kEntryKeyIndex) *
- kPointerSize;
- var_transition_map = CAST(ToWeakHeapObject(
- LoadArrayElement(transitions, WeakFixedArray::kHeaderSize,
- var_name_index.value(), kKeyToTargetOffset)));
- Goto(&found_handler_candidate);
- }
- }
-
- BIND(&found_handler_candidate);
- {
- // Validate the transition handler candidate and apply the transition.
- HandleStoreICTransitionMapHandlerCase(p, var_transition_map.value(),
- slow, true);
- exit_point->Return(p->value);
- }
+ TNode<Map> transition_map = FindCandidateStoreICTransitionMapHandler(
+ receiver_map, CAST(p->name), slow);
+
+ // Validate the transition handler candidate and apply the transition.
+ HandleStoreICTransitionMapHandlerCase(
+ p, transition_map, slow,
+ StoreTransitionMapFlags(kCheckPrototypeValidity |
+ kValidateTransitionHandler));
+ exit_point->Return(p->value);
}
}
@@ -952,7 +991,7 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
{
Comment("KeyedStoreGeneric_slow");
if (language_mode.IsJust()) {
- TailCallRuntime(Runtime::kSetProperty, context, receiver, key, value,
+ TailCallRuntime(Runtime::kSetKeyedProperty, context, receiver, key, value,
SmiConstant(language_mode.FromJust()));
} else {
TVARIABLE(Smi, var_language_mode, SmiConstant(LanguageMode::kStrict));
@@ -961,7 +1000,7 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
var_language_mode = SmiConstant(LanguageMode::kSloppy);
Goto(&call_runtime);
BIND(&call_runtime);
- TailCallRuntime(Runtime::kSetProperty, context, receiver, key, value,
+ TailCallRuntime(Runtime::kSetKeyedProperty, context, receiver, key, value,
var_language_mode.value());
}
}
@@ -1011,7 +1050,7 @@ void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
// Optimistically write the state transition to the vector.
StoreFeedbackVectorSlot(vector, slot,
- LoadRoot(Heap::kpremonomorphic_symbolRootIndex),
+ LoadRoot(RootIndex::kpremonomorphic_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
StoreICParameters p(context, receiver, name, value, slot, vector);
@@ -1021,7 +1060,7 @@ void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
{
// Undo the optimistic state transition.
StoreFeedbackVectorSlot(vector, slot,
- LoadRoot(Heap::kuninitialized_symbolRootIndex),
+ LoadRoot(RootIndex::kuninitialized_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector,
receiver, name);
@@ -1048,13 +1087,132 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
BIND(&slow);
{
- CallRuntime(Runtime::kSetProperty, context, receiver, unique_name, value,
- SmiConstant(language_mode));
+ CallRuntime(Runtime::kSetKeyedProperty, context, receiver, unique_name,
+ value, SmiConstant(language_mode));
Goto(&done);
}
BIND(&done);
}
+// Sets data properties as in PropertyDefinitionEvaluation; it does not
+// invoke own setters or traverse the prototype chain.
+void KeyedStoreGenericAssembler::EmitGenericPropertyStoreInLiteral(
+ TNode<Context> context, TNode<JSObject> receiver, TNode<Map> map,
+ TNode<Name> key, TNode<Object> value, ExitPoint* exit_point) {
+ CSA_ASSERT(this, IsSimpleObjectMap(map));
+
+ // This should only be used for storing data properties in object literals.
+ CSA_ASSERT(this, HasInstanceType(receiver, JS_OBJECT_TYPE));
+
+ Label stub_cache(this), fast_properties(this), dictionary_properties(this),
+ accessor(this), call_runtime(this, Label::kDeferred), done(this);
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+ Branch(IsSetWord32<Map::IsDictionaryMapBit>(bit_field3),
+ &dictionary_properties, &fast_properties);
+
+ BIND(&fast_properties);
+ {
+ Comment("fast property store");
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ Label descriptor_found(this), lookup_transition(this);
+
+ TVARIABLE(IntPtrT, var_name_index);
+ DescriptorLookup(key, descriptors, bit_field3, &descriptor_found,
+ &var_name_index, &lookup_transition);
+
+ BIND(&descriptor_found);
+ {
+ TNode<IntPtrT> name_index = var_name_index.value();
+ TNode<Uint32T> details = LoadDetailsByKeyIndex(descriptors, name_index);
+ Label data_property(this);
+ JumpIfDataProperty(details, &data_property, nullptr);
+
+ // Reconfigure the accessor to a data property via runtime call.
+      // TODO(caitp): reconfigure the property details inline here.
+ Goto(&call_runtime);
+
+ BIND(&data_property);
+ {
+ // TODO(caitp): consider only checking for names associated with
+ // protectors that can apply to non-prototype JSObjects (currently, only
+ // [Symbol.isConcatSpreadable]), and request this behaviour with an
+ // enum parameter.
+ CheckForAssociatedProtector(key, &call_runtime);
+ OverwriteExistingFastDataProperty(receiver, map, descriptors,
+ name_index, details, value,
+ &call_runtime, false);
+ exit_point->Return(value);
+ }
+ }
+
+ BIND(&lookup_transition);
+ {
+ Comment("lookup transition");
+ TNode<Map> transition_map =
+ FindCandidateStoreICTransitionMapHandler(map, key, &call_runtime);
+
+ // Validate the transition handler candidate and apply the transition.
+ StoreICParameters p(context, receiver, key, value, nullptr, nullptr);
+ HandleStoreICTransitionMapHandlerCase(&p, transition_map, &call_runtime,
+ kValidateTransitionHandler);
+ exit_point->Return(value);
+ }
+ }
+
+ BIND(&dictionary_properties);
+ {
+ Comment("dictionary property store");
+ TVARIABLE(IntPtrT, var_name_index);
+ Label dictionary_found(this, &var_name_index), not_found(this);
+ TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
+ NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
+ &var_name_index, &not_found);
+ BIND(&dictionary_found);
+ {
+ Label overwrite(this);
+ TNode<Uint32T> details = LoadDetailsByKeyIndex<NameDictionary>(
+ properties, var_name_index.value());
+ JumpIfDataProperty(details, &overwrite, nullptr);
+
+ // Reconfigure the accessor to a data property via runtime call.
+ Goto(&call_runtime);
+
+ BIND(&overwrite);
+ {
+        // See the TODO above regarding non-pertinent checks.
+ CheckForAssociatedProtector(key, &call_runtime);
+ StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
+ value);
+ exit_point->Return(value);
+ }
+ }
+
+ BIND(&not_found);
+ {
+      // See the TODO above regarding non-pertinent checks.
+ CheckForAssociatedProtector(key, &call_runtime);
+
+      // This method should always be invoked on a new JSObject literal;
+      // it should be impossible for the object to be made non-extensible,
+      // or to be a prototype map.
+ CSA_ASSERT(this, IsExtensibleNonPrototypeMap(map));
+
+ Label add_dictionary_property_slow(this);
+ Add<NameDictionary>(properties, key, value,
+ &add_dictionary_property_slow);
+ exit_point->Return(value);
+
+ BIND(&add_dictionary_property_slow);
+ exit_point->ReturnCallRuntime(Runtime::kAddDictionaryProperty, context,
+ receiver, key, value);
+ }
+ }
+
+ BIND(&call_runtime);
+ exit_point->ReturnCallRuntime(Runtime::kStoreDataPropertyInLiteral, context,
+ receiver, key, value);
+}
+
} // namespace internal
} // namespace v8
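
The defining property of EmitGenericPropertyStoreInLiteral, per its comment above, is that it always creates or overwrites an own data property and never consults accessors on the receiver or anything on the prototype chain. In miniature, with an invented one-field object model holding data properties only:

    #include <map>
    #include <string>

    struct LiteralReceiverModel {
      std::map<std::string, int> own;  // own data properties
    };

    // An ordinary store would first look for setters here and on the
    // prototypes; the in-literal store defines the own property
    // unconditionally.
    void SetPropertyInLiteralModel(LiteralReceiverModel& o,
                                   const std::string& key, int value) {
      o.own[key] = value;
    }
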
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index 0934c96cc8..9442a54935 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -30,6 +30,11 @@ class KeyedStoreGenericGenerator {
TNode<Context> context, TNode<Object> receiver,
TNode<Object> key, TNode<Object> value,
LanguageMode language_mode);
+
+ static void SetPropertyInLiteral(compiler::CodeAssemblerState* state,
+ TNode<Context> context,
+ TNode<JSObject> receiver, TNode<Name> key,
+ TNode<Object> value);
};
class StoreICUninitializedGenerator {
diff --git a/deps/v8/src/identity-map.h b/deps/v8/src/identity-map.h
index ec2b558dc2..8598c6c1da 100644
--- a/deps/v8/src/identity-map.h
+++ b/deps/v8/src/identity-map.h
@@ -120,7 +120,7 @@ class IdentityMap : public IdentityMapBase {
void* v = nullptr;
bool deleted_something = DeleteEntry(key, &v);
if (deleted_value != nullptr && deleted_something) {
- *deleted_value = (V) reinterpret_cast<intptr_t>(v);
+ *deleted_value = *reinterpret_cast<V*>(&v);
}
return deleted_something;
}
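
The old cast chain round-tripped the stored void* through intptr_t, which requires V to be constructible from an integer; the new form reinterprets the slot's bits as V directly, so V can be any pointer-sized trivially copyable type. A well-defined equivalent of that bit reinterpretation using memcpy (FromSlot is an invented helper, not the IdentityMap API):

    #include <cassert>
    #include <cstring>

    template <typename V>
    V FromSlot(void* v) {
      static_assert(sizeof(V) <= sizeof(void*), "value must fit in a slot");
      V out;
      std::memcpy(&out, &v, sizeof(V));  // copy the slot's bits themselves
      return out;
    }

    int main() {
      int x = 7;
      void* slot = &x;                     // values are stored as void*
      assert(FromSlot<int*>(slot) == &x);  // recover the typed value
    }
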
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
index ea0d871248..380091cb4a 100644
--- a/deps/v8/src/inspector/injected-script-source.js
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -661,16 +661,15 @@ InjectedScript.prototype = {
if (InjectedScriptHost.subtype(obj) === "error") {
try {
- var stack = obj.stack;
- var message = obj.message && obj.message.length ? ": " + obj.message : "";
- var firstCallFrame = /^\s+at\s/m.exec(stack);
- var stackMessageEnd = firstCallFrame ? firstCallFrame.index : -1;
- if (stackMessageEnd !== -1) {
- var stackTrace = stack.substr(stackMessageEnd);
- return className + message + "\n" + stackTrace;
- }
- return className + message;
+ const stack = obj.stack;
+ if (stack.substr(0, className.length) === className)
+ return stack;
+ const message = obj.message;
+ const index = /* suppressBlacklist */ stack.indexOf(message);
+ const messageWithStack = index !== -1 ? stack.substr(index) : message;
+ return className + ': ' + messageWithStack;
} catch(e) {
+ return className;
}
}
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index 0d1b8d6e89..296dc4c631 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -738,7 +738,7 @@ InjectedScript::ContextScope::ContextScope(V8InspectorSessionImpl* session,
: InjectedScript::Scope(session),
m_executionContextId(executionContextId) {}
-InjectedScript::ContextScope::~ContextScope() {}
+InjectedScript::ContextScope::~ContextScope() = default;
Response InjectedScript::ContextScope::findInjectedScript(
V8InspectorSessionImpl* session) {
@@ -749,7 +749,7 @@ InjectedScript::ObjectScope::ObjectScope(V8InspectorSessionImpl* session,
const String16& remoteObjectId)
: InjectedScript::Scope(session), m_remoteObjectId(remoteObjectId) {}
-InjectedScript::ObjectScope::~ObjectScope() {}
+InjectedScript::ObjectScope::~ObjectScope() = default;
Response InjectedScript::ObjectScope::findInjectedScript(
V8InspectorSessionImpl* session) {
@@ -770,7 +770,7 @@ InjectedScript::CallFrameScope::CallFrameScope(V8InspectorSessionImpl* session,
const String16& remoteObjectId)
: InjectedScript::Scope(session), m_remoteCallFrameId(remoteObjectId) {}
-InjectedScript::CallFrameScope::~CallFrameScope() {}
+InjectedScript::CallFrameScope::~CallFrameScope() = default;
Response InjectedScript::CallFrameScope::findInjectedScript(
V8InspectorSessionImpl* session) {
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 32969a6e7c..a5fb681060 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -60,7 +60,7 @@ class EvaluateCallback {
protocol::Maybe<protocol::Runtime::ExceptionDetails>
exceptionDetails) = 0;
virtual void sendFailure(const protocol::DispatchResponse& response) = 0;
- virtual ~EvaluateCallback() {}
+ virtual ~EvaluateCallback() = default;
};
class InjectedScript final {
@@ -153,7 +153,7 @@ class InjectedScript final {
class ContextScope : public Scope {
public:
ContextScope(V8InspectorSessionImpl*, int executionContextId);
- ~ContextScope();
+ ~ContextScope() override;
private:
Response findInjectedScript(V8InspectorSessionImpl*) override;
@@ -165,7 +165,7 @@ class InjectedScript final {
class ObjectScope : public Scope {
public:
ObjectScope(V8InspectorSessionImpl*, const String16& remoteObjectId);
- ~ObjectScope();
+ ~ObjectScope() override;
const String16& objectGroupName() const { return m_objectGroupName; }
v8::Local<v8::Value> object() const { return m_object; }
@@ -181,7 +181,7 @@ class InjectedScript final {
class CallFrameScope : public Scope {
public:
CallFrameScope(V8InspectorSessionImpl*, const String16& remoteCallFrameId);
- ~CallFrameScope();
+ ~CallFrameScope() override;
size_t frameOrdinal() const { return m_frameOrdinal; }
private:
diff --git a/deps/v8/src/inspector/remote-object-id.h b/deps/v8/src/inspector/remote-object-id.h
index 923274236d..b199032359 100644
--- a/deps/v8/src/inspector/remote-object-id.h
+++ b/deps/v8/src/inspector/remote-object-id.h
@@ -17,7 +17,7 @@ class RemoteObjectIdBase {
protected:
RemoteObjectIdBase();
- ~RemoteObjectIdBase() {}
+ ~RemoteObjectIdBase() = default;
std::unique_ptr<protocol::DictionaryValue> parseInjectedScriptId(
const String16&);
@@ -28,7 +28,7 @@ class RemoteObjectIdBase {
class RemoteObjectId final : public RemoteObjectIdBase {
public:
static Response parse(const String16&, std::unique_ptr<RemoteObjectId>*);
- ~RemoteObjectId() {}
+ ~RemoteObjectId() = default;
int id() const { return m_id; }
private:
@@ -40,7 +40,7 @@ class RemoteObjectId final : public RemoteObjectIdBase {
class RemoteCallFrameId final : public RemoteObjectIdBase {
public:
static Response parse(const String16&, std::unique_ptr<RemoteCallFrameId>*);
- ~RemoteCallFrameId() {}
+ ~RemoteCallFrameId() = default;
int frameOrdinal() const { return m_frameOrdinal; }
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index fbcb0f4338..eb77ddd5fb 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -116,13 +116,13 @@ ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
}
}
// Figure out how many bytes the result will require
- if (ch < (UChar32)0x80) {
+ if (ch < static_cast<UChar32>(0x80)) {
bytesToWrite = 1;
- } else if (ch < (UChar32)0x800) {
+ } else if (ch < static_cast<UChar32>(0x800)) {
bytesToWrite = 2;
- } else if (ch < (UChar32)0x10000) {
+ } else if (ch < static_cast<UChar32>(0x10000)) {
bytesToWrite = 3;
- } else if (ch < (UChar32)0x110000) {
+ } else if (ch < static_cast<UChar32>(0x110000)) {
bytesToWrite = 4;
} else {
bytesToWrite = 3;
@@ -370,10 +370,9 @@ static inline void putUTF8Triple(char*& buffer, UChar ch) {
} // namespace
-String16::String16() {}
+String16::String16() = default;
-String16::String16(const String16& other)
- : m_impl(other.m_impl), hash_code(other.hash_code) {}
+String16::String16(const String16& other) = default;
String16::String16(String16&& other) V8_NOEXCEPT
: m_impl(std::move(other.m_impl)),
@@ -394,11 +393,7 @@ String16::String16(const char* characters, size_t size) {
String16::String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}
-String16& String16::operator=(const String16& other) {
- m_impl = other.m_impl;
- hash_code = other.hash_code;
- return *this;
-}
+String16& String16::operator=(const String16& other) = default;
String16& String16::operator=(String16&& other) V8_NOEXCEPT {
m_impl = std::move(other.m_impl);
@@ -471,7 +466,7 @@ String16 String16::stripWhiteSpace() const {
return String16(characters16() + start, end + 1 - start);
}
-String16Builder::String16Builder() {}
+String16Builder::String16Builder() = default;
void String16Builder::append(const String16& s) {
m_buffer.insert(m_buffer.end(), s.characters16(),
@@ -547,7 +542,7 @@ String16 String16::fromUTF8(const char* stringStart, size_t length) {
UChar* bufferCurrent = bufferStart;
const char* stringCurrent = stringStart;
if (convertUTF8ToUTF16(&stringCurrent, stringStart + length, &bufferCurrent,
- bufferCurrent + buffer.size(), 0,
+ bufferCurrent + buffer.size(), nullptr,
true) != conversionOK)
return String16();
diff --git a/deps/v8/src/inspector/v8-console-agent-impl.cc b/deps/v8/src/inspector/v8-console-agent-impl.cc
index 96ffdc593c..66c96110d7 100644
--- a/deps/v8/src/inspector/v8-console-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-console-agent-impl.cc
@@ -24,7 +24,7 @@ V8ConsoleAgentImpl::V8ConsoleAgentImpl(
m_frontend(frontendChannel),
m_enabled(false) {}
-V8ConsoleAgentImpl::~V8ConsoleAgentImpl() {}
+V8ConsoleAgentImpl::~V8ConsoleAgentImpl() = default;
Response V8ConsoleAgentImpl::enable() {
if (m_enabled) return Response::OK();
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 4bb0bf904e..6d39deeb4c 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -13,6 +13,7 @@
#include "src/inspector/v8-inspector-session-impl.h"
#include "src/inspector/v8-runtime-agent-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
+#include "src/tracing/trace-event.h"
#include "include/v8-inspector.h"
@@ -202,7 +203,7 @@ V8ConsoleMessage::V8ConsoleMessage(V8MessageOrigin origin, double timestamp,
m_exceptionId(0),
m_revokedExceptionId(0) {}
-V8ConsoleMessage::~V8ConsoleMessage() {}
+V8ConsoleMessage::~V8ConsoleMessage() = default;
void V8ConsoleMessage::setLocation(const String16& url, unsigned lineNumber,
unsigned columnNumber,
@@ -477,12 +478,34 @@ V8ConsoleMessageStorage::V8ConsoleMessageStorage(V8InspectorImpl* inspector,
V8ConsoleMessageStorage::~V8ConsoleMessageStorage() { clear(); }
+namespace {
+
+void TraceV8ConsoleMessageEvent(V8MessageOrigin origin, ConsoleAPIType type) {
+  // Changes to this function require adjusting the Catapult/Telemetry metric
+  // tracing/tracing/metrics/console_error_metric.html.
+ // See https://crbug.com/880432
+ if (origin == V8MessageOrigin::kException) {
+ TRACE_EVENT_INSTANT0("v8.console", "V8ConsoleMessage::Exception",
+ TRACE_EVENT_SCOPE_THREAD);
+ } else if (type == ConsoleAPIType::kError) {
+ TRACE_EVENT_INSTANT0("v8.console", "V8ConsoleMessage::Error",
+ TRACE_EVENT_SCOPE_THREAD);
+ } else if (type == ConsoleAPIType::kAssert) {
+ TRACE_EVENT_INSTANT0("v8.console", "V8ConsoleMessage::Assert",
+ TRACE_EVENT_SCOPE_THREAD);
+ }
+}
+
+} // anonymous namespace
+
void V8ConsoleMessageStorage::addMessage(
std::unique_ptr<V8ConsoleMessage> message) {
int contextGroupId = m_contextGroupId;
V8InspectorImpl* inspector = m_inspector;
if (message->type() == ConsoleAPIType::kClear) clear();
+ TraceV8ConsoleMessageEvent(message->origin(), message->type());
+
inspector->forEachSession(
contextGroupId, [&message](V8InspectorSessionImpl* session) {
if (message->origin() == V8MessageOrigin::kConsole)
@@ -542,6 +565,13 @@ bool V8ConsoleMessageStorage::countReset(int contextId, const String16& id) {
return true;
}
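+// Reports the time elapsed since console.time() was called for |id|
+// without stopping the timer, unlike timeEnd() below.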
+double V8ConsoleMessageStorage::timeLog(int contextId, const String16& id) {
+ std::map<String16, double>& time = m_data[contextId].m_time;
+ auto it = time.find(id);
+ if (it == time.end()) return 0.0;
+ return m_inspector->client()->currentTimeMS() - it->second;
+}
+
double V8ConsoleMessageStorage::timeEnd(int contextId, const String16& id) {
std::map<String16, double>& time = m_data[contextId].m_time;
auto it = time.find(id);
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
index d030778a5b..cca5b47265 100644
--- a/deps/v8/src/inspector/v8-console-message.h
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -120,6 +120,7 @@ class V8ConsoleMessageStorage {
int count(int contextId, const String16& id);
bool countReset(int contextId, const String16& id);
void time(int contextId, const String16& id);
+ double timeLog(int contextId, const String16& id);
double timeEnd(int contextId, const String16& id);
bool hasTimer(int contextId, const String16& id);
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 752b50fa36..ef4c7ccd1d 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -63,6 +63,7 @@ class ConsoleHelper {
void reportCall(ConsoleAPIType type) {
if (!m_info.Length()) return;
std::vector<v8::Local<v8::Value>> arguments;
+ arguments.reserve(m_info.Length());
for (int i = 0; i < m_info.Length(); ++i) arguments.push_back(m_info[i]);
reportCall(type, arguments);
}
@@ -75,6 +76,14 @@ class ConsoleHelper {
reportCall(type, arguments);
}
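+ // Like reportCall(), but replaces the first argument (the user-supplied
+ // label) with |message| while forwarding any additional data arguments,
+ // as console.timeLog(label, ...data) requires.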
+ void reportCallAndReplaceFirstArgument(ConsoleAPIType type,
+ const String16& message) {
+ std::vector<v8::Local<v8::Value>> arguments;
+ arguments.push_back(toV8String(m_isolate, message));
+ for (int i = 1; i < m_info.Length(); ++i) arguments.push_back(m_info[i]);
+ reportCall(type, arguments);
+ }
+
void reportCallWithArgument(ConsoleAPIType type, const String16& message) {
std::vector<v8::Local<v8::Value>> arguments(1,
toV8String(m_isolate, message));
@@ -106,7 +115,7 @@ class ConsoleHelper {
bool firstArgToBoolean(bool defaultValue) {
if (m_info.Length() < 1) return defaultValue;
if (m_info[0]->IsBoolean()) return m_info[0].As<v8::Boolean>()->Value();
- return m_info[0]->BooleanValue(m_context).FromMaybe(defaultValue);
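+ // ToBoolean cannot throw, so the isolate-taking overload returning a
+ // plain bool suffices here.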
+ return m_info[0]->BooleanValue(m_context->GetIsolate());
}
String16 firstArgToString(const String16& defaultValue,
@@ -143,7 +152,7 @@ class ConsoleHelper {
}
void forEachSession(std::function<void(V8InspectorSessionImpl*)> callback) {
- m_inspector->forEachSession(m_groupId, callback);
+ m_inspector->forEachSession(m_groupId, std::move(callback));
}
private:
@@ -385,10 +394,9 @@ static void timeFunction(const v8::debug::ConsoleCallArguments& info,
static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext,
- bool timelinePrefix, V8InspectorImpl* inspector) {
+ bool timeLog, V8InspectorImpl* inspector) {
ConsoleHelper helper(info, consoleContext, inspector);
String16 protocolTitle = helper.firstArgToString("default", false);
- if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
const String16& timerId =
protocolTitle + "@" +
consoleContextToString(inspector->isolate(), consoleContext);
@@ -399,13 +407,22 @@ static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
return;
}
inspector->client()->consoleTimeEnd(toStringView(protocolTitle));
- double elapsed = helper.consoleMessageStorage()->timeEnd(
- helper.contextId(),
- protocolTitle + "@" +
- consoleContextToString(inspector->isolate(), consoleContext));
+ String16 title = protocolTitle + "@" +
+ consoleContextToString(inspector->isolate(), consoleContext);
+ double elapsed;
+ if (timeLog) {
+ elapsed =
+ helper.consoleMessageStorage()->timeLog(helper.contextId(), title);
+ } else {
+ elapsed =
+ helper.consoleMessageStorage()->timeEnd(helper.contextId(), title);
+ }
String16 message =
protocolTitle + ": " + String16::fromDouble(elapsed) + "ms";
- helper.reportCallWithArgument(ConsoleAPIType::kTimeEnd, message);
+ if (timeLog)
+ helper.reportCallAndReplaceFirstArgument(ConsoleAPIType::kLog, message);
+ else
+ helper.reportCallWithArgument(ConsoleAPIType::kTimeEnd, message);
}
void V8Console::Time(const v8::debug::ConsoleCallArguments& info,
@@ -413,6 +430,11 @@ void V8Console::Time(const v8::debug::ConsoleCallArguments& info,
timeFunction(info, consoleContext, false, m_inspector);
}
+void V8Console::TimeLog(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ timeEndFunction(info, consoleContext, true, m_inspector);
+}
+
void V8Console::TimeEnd(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
timeEndFunction(info, consoleContext, false, m_inspector);
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index 2e47012807..03d89ced10 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -88,6 +88,8 @@ class V8Console : public v8::debug::ConsoleDelegate {
const v8::debug::ConsoleContext& consoleContext) override;
void Time(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext& consoleContext) override;
+ void TimeLog(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
void TimeEnd(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext& consoleContext) override;
void TimeStamp(const v8::debug::ConsoleCallArguments&,
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index a27af98d8d..d227526d64 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -160,9 +160,8 @@ String16 breakpointHint(const V8DebuggerScript& script, int lineNumber,
int columnNumber) {
int offset = script.offset(lineNumber, columnNumber);
if (offset == V8DebuggerScript::kNoOffset) return String16();
- const String16& source = script.source();
String16 hint =
- source.substring(offset, kBreakpointHintMaxLength).stripWhiteSpace();
+ script.source(offset, kBreakpointHintMaxLength).stripWhiteSpace();
for (size_t i = 0; i < hint.length(); ++i) {
if (hint[i] == '\r' || hint[i] == '\n' || hint[i] == ';') {
return hint.substring(0, i);
@@ -183,8 +182,8 @@ void adjustBreakpointLocation(const V8DebuggerScript& script,
intptr_t searchRegionOffset = std::max(
sourceOffset - kBreakpointHintMaxSearchOffset, static_cast<intptr_t>(0));
size_t offset = sourceOffset - searchRegionOffset;
- String16 searchArea = script.source().substring(
- searchRegionOffset, offset + kBreakpointHintMaxSearchOffset);
+ String16 searchArea = script.source(searchRegionOffset,
+ offset + kBreakpointHintMaxSearchOffset);
size_t nextMatch = searchArea.find(hint, offset);
size_t prevMatch = searchArea.reverseFind(hint, offset);
@@ -317,7 +316,7 @@ V8DebuggerAgentImpl::V8DebuggerAgentImpl(
m_frontend(frontendChannel),
m_isolate(m_inspector->isolate()) {}
-V8DebuggerAgentImpl::~V8DebuggerAgentImpl() {}
+V8DebuggerAgentImpl::~V8DebuggerAgentImpl() = default;
void V8DebuggerAgentImpl::enableImpl() {
m_enabled = true;
@@ -334,7 +333,7 @@ void V8DebuggerAgentImpl::enableImpl() {
if (isPaused()) {
didPause(0, v8::Local<v8::Value>(), std::vector<v8::debug::BreakpointId>(),
- false, false, false, false);
+ v8::debug::kException, false, false, false);
}
}
@@ -837,7 +836,7 @@ Response V8DebuggerAgentImpl::searchInContent(
return Response::Error("No script for id: " + scriptId);
std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
- searchInTextByLinesImpl(m_session, it->second->source(), query,
+ searchInTextByLinesImpl(m_session, it->second->source(0), query,
optionalCaseSensitive.fromMaybe(false),
optionalIsRegex.fromMaybe(false));
*results = protocol::Array<protocol::Debugger::SearchMatch>::create();
@@ -859,10 +858,6 @@ Response V8DebuggerAgentImpl::setScriptSource(
if (it == m_scripts.end()) {
return Response::Error("No script with given id found");
}
- if (it->second->isModule()) {
- // TODO(kozyatinskiy): LiveEdit should support ES6 module
- return Response::Error("Editing module's script is not supported.");
- }
int contextId = it->second->executionContextId();
InspectedContext* inspected = m_inspector->getContext(contextId);
if (!inspected) {
@@ -927,7 +922,7 @@ Response V8DebuggerAgentImpl::getScriptSource(const String16& scriptId,
ScriptsMap::iterator it = m_scripts.find(scriptId);
if (it == m_scripts.end())
return Response::Error("No script for id: " + scriptId);
- *scriptSource = it->second->source();
+ *scriptSource = it->second->source(0);
return Response::OK();
}
@@ -1380,7 +1375,7 @@ void V8DebuggerAgentImpl::didParseSource(
v8::HandleScope handles(m_isolate);
if (!success) {
DCHECK(!script->isSourceLoadedLazily());
- String16 scriptSource = script->source();
+ String16 scriptSource = script->source(0);
script->setSourceURL(findSourceURL(scriptSource, false));
script->setSourceMappingURL(findSourceMapURL(scriptSource, false));
}
@@ -1440,8 +1435,7 @@ void V8DebuggerAgentImpl::didParseSource(
scriptRef->endLine(), scriptRef->endColumn(), contextId,
scriptRef->hash(), std::move(executionContextAuxDataParam),
isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam,
- isModuleParam, static_cast<int>(scriptRef->source().length()),
- std::move(stackTrace));
+ isModuleParam, scriptRef->length(), std::move(stackTrace));
}
} else {
m_frontend.scriptFailedToParse(
@@ -1449,7 +1443,7 @@ void V8DebuggerAgentImpl::didParseSource(
scriptRef->endLine(), scriptRef->endColumn(), contextId,
scriptRef->hash(), std::move(executionContextAuxDataParam),
std::move(sourceMapURLParam), hasSourceURLParam, isModuleParam,
- static_cast<int>(scriptRef->source().length()), std::move(stackTrace));
+ scriptRef->length(), std::move(stackTrace));
}
if (!success) {
@@ -1512,7 +1506,8 @@ void V8DebuggerAgentImpl::didParseSource(
void V8DebuggerAgentImpl::didPause(
int contextId, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& hitBreakpoints,
- bool isPromiseRejection, bool isUncaught, bool isOOMBreak, bool isAssert) {
+ v8::debug::ExceptionType exceptionType, bool isUncaught, bool isOOMBreak,
+ bool isAssert) {
v8::HandleScope handles(m_isolate);
std::vector<BreakReason> hitReasons;
@@ -1528,7 +1523,7 @@ void V8DebuggerAgentImpl::didPause(
m_session->findInjectedScript(contextId, injectedScript);
if (injectedScript) {
String16 breakReason =
- isPromiseRejection
+ exceptionType == v8::debug::kPromiseRejection
? protocol::Debugger::Paused::ReasonEnum::PromiseRejection
: protocol::Debugger::Paused::ReasonEnum::Exception;
std::unique_ptr<protocol::Runtime::RemoteObject> obj;
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 65f7677b47..9806c85f48 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -142,8 +142,8 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
// Interface for V8InspectorImpl
void didPause(int contextId, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& hitBreakpoints,
- bool isPromiseRejection, bool isUncaught, bool isOOMBreak,
- bool isAssert);
+ v8::debug::ExceptionType exceptionType, bool isUncaught,
+ bool isOOMBreak, bool isAssert);
void didContinue();
void didParseSource(std::unique_ptr<V8DebuggerScript>, bool success);
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index babb7700c6..c1efd2dba1 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -20,7 +20,7 @@ const char kGlobalDebuggerScriptHandleLabel[] = "DevTools debugger";
// Multiplikation in
// eingeschränkten Branchingprogrammmodellen" by Woelfel.
// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
-String16 calculateHash(const String16& str) {
+String16 calculateHash(v8::Isolate* isolate, v8::Local<v8::String> source) {
static uint64_t prime[] = {0x3FB75161, 0xAB1F4E4F, 0x82675BC5, 0xCD924D35,
0x81ABE279};
static uint64_t random[] = {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476,
@@ -34,9 +34,14 @@ String16 calculateHash(const String16& str) {
const size_t hashesSize = arraysize(hashes);
size_t current = 0;
+
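+ // Copy the script source into a flat UTF-16 buffer: the v8::String may
+ // be a cons or external string, so its payload cannot be hashed in place.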
+ std::unique_ptr<UChar[]> buffer(new UChar[source->Length()]);
+ int written = source->Write(
+ isolate, reinterpret_cast<uint16_t*>(buffer.get()), 0, source->Length());
+
const uint32_t* data = nullptr;
- size_t sizeInBytes = sizeof(UChar) * str.length();
- data = reinterpret_cast<const uint32_t*>(str.characters16());
+ size_t sizeInBytes = sizeof(UChar) * written;
+ data = reinterpret_cast<const uint32_t*>(buffer.get());
for (size_t i = 0; i < sizeInBytes / 4; ++i) {
uint32_t d = v8::internal::ReadUnalignedUInt32(
reinterpret_cast<v8::internal::Address>(data + i));
@@ -76,7 +81,7 @@ String16 calculateHash(const String16& str) {
String16Builder hash;
for (size_t i = 0; i < hashesSize; ++i)
- hash.appendUnsignedAsHex((uint32_t)hashes[i]);
+ hash.appendUnsignedAsHex(static_cast<uint32_t>(hashes[i]));
return hash.toString();
}
@@ -121,12 +126,29 @@ class ActualScript : public V8DebuggerScript {
bool isLiveEdit() const override { return m_isLiveEdit; }
bool isModule() const override { return m_isModule; }
- const String16& source() const override { return m_source; }
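+ // Extracts the requested range directly from the v8::String on demand;
+ // the inspector no longer keeps its own full copy of the source.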
+ String16 source(size_t pos, size_t len) const override {
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::String> v8Source;
+ if (!script()->Source().ToLocal(&v8Source)) return String16();
+ if (pos >= static_cast<size_t>(v8Source->Length())) return String16();
+ size_t substringLength =
+ std::min(len, static_cast<size_t>(v8Source->Length()) - pos);
+ std::unique_ptr<UChar[]> buffer(new UChar[substringLength]);
+ v8Source->Write(m_isolate, reinterpret_cast<uint16_t*>(buffer.get()),
+ static_cast<int>(pos), static_cast<int>(substringLength));
+ return String16(buffer.get(), substringLength);
+ }
int startLine() const override { return m_startLine; }
int startColumn() const override { return m_startColumn; }
int endLine() const override { return m_endLine; }
int endColumn() const override { return m_endColumn; }
bool isSourceLoadedLazily() const override { return false; }
+ int length() const override {
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::String> v8Source;
+ if (!script()->Source().ToLocal(&v8Source)) return 0;
+ return v8Source->Length();
+ }
const String16& sourceMappingURL() const override {
return m_sourceMappingURL;
@@ -138,7 +160,6 @@ class ActualScript : public V8DebuggerScript {
void setSource(const String16& newSource, bool preview,
v8::debug::LiveEditResult* result) override {
- DCHECK(!isModule());
v8::EscapableHandleScope scope(m_isolate);
v8::Local<v8::String> v8Source = toV8String(m_isolate, newSource);
if (!m_script.Get(m_isolate)->SetScriptSource(v8Source, preview, result)) {
@@ -213,7 +234,13 @@ class ActualScript : public V8DebuggerScript {
}
const String16& hash() const override {
- if (m_hash.isEmpty()) m_hash = calculateHash(source());
+ if (m_hash.isEmpty()) {
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::String> v8Source;
+ if (script()->Source().ToLocal(&v8Source)) {
+ m_hash = calculateHash(m_isolate, v8Source);
+ }
+ }
DCHECK(!m_hash.isEmpty());
return m_hash;
}
@@ -264,10 +291,6 @@ class ActualScript : public V8DebuggerScript {
USE(script->ContextId().To(&m_executionContextId));
- if (script->Source().ToLocal(&tmp)) {
- m_source = toProtocolString(m_isolate, tmp);
- }
-
m_isModule = script->IsModule();
m_script.Reset(m_isolate, script);
@@ -277,7 +300,6 @@ class ActualScript : public V8DebuggerScript {
String16 m_sourceMappingURL;
bool m_isLiveEdit = false;
bool m_isModule = false;
- String16 m_source;
mutable String16 m_hash;
int m_startLine = 0;
int m_startColumn = 0;
@@ -309,8 +331,9 @@ class WasmVirtualScript : public V8DebuggerScript {
UNREACHABLE();
}
bool isSourceLoadedLazily() const override { return true; }
- const String16& source() const override {
- return m_wasmTranslation->GetSource(m_id, m_functionIndex);
+ String16 source(size_t pos, size_t len) const override {
+ return m_wasmTranslation->GetSource(m_id, m_functionIndex)
+ .substring(pos, len);
}
int startLine() const override {
return m_wasmTranslation->GetStartLine(m_id, m_functionIndex);
@@ -324,6 +347,9 @@ class WasmVirtualScript : public V8DebuggerScript {
int endColumn() const override {
return m_wasmTranslation->GetEndColumn(m_id, m_functionIndex);
}
+ int length() const override {
+ return static_cast<int>(source(0, UINT_MAX).length());
+ }
bool getPossibleBreakpoints(
const v8::debug::Location& start, const v8::debug::Location& end,
@@ -427,7 +453,7 @@ V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate, String16 id,
String16 url)
: m_id(std::move(id)), m_url(std::move(url)), m_isolate(isolate) {}
-V8DebuggerScript::~V8DebuggerScript() {}
+V8DebuggerScript::~V8DebuggerScript() = default;
void V8DebuggerScript::setSourceURL(const String16& sourceURL) {
if (sourceURL.length() > 0) {
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index 38e6448f48..116b965afc 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -60,7 +60,7 @@ class V8DebuggerScript {
const String16& sourceURL() const { return m_url; }
virtual const String16& sourceMappingURL() const = 0;
- virtual const String16& source() const = 0;
+ virtual String16 source(size_t pos, size_t len = UINT_MAX) const = 0;
virtual const String16& hash() const = 0;
virtual int startLine() const = 0;
virtual int startColumn() const = 0;
@@ -70,6 +70,7 @@ class V8DebuggerScript {
virtual bool isLiveEdit() const = 0;
virtual bool isModule() const = 0;
virtual bool isSourceLoadedLazily() const = 0;
+ virtual int length() const = 0;
void setSourceURL(const String16&);
virtual void setSourceMappingURL(const String16&) = 0;
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index ccc674af43..5f826b56a9 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -480,7 +480,7 @@ void V8Debugger::clearContinueToLocation() {
void V8Debugger::handleProgramBreak(
v8::Local<v8::Context> pausedContext, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& breakpointIds,
- bool isPromiseRejection, bool isUncaught) {
+ v8::debug::ExceptionType exceptionType, bool isUncaught) {
// Don't allow nested breaks.
if (isPaused()) return;
@@ -523,12 +523,12 @@ void V8Debugger::handleProgramBreak(
m_inspector->forEachSession(
contextGroupId, [&pausedContext, &exception, &breakpointIds,
- &isPromiseRejection, &isUncaught, &scheduledOOMBreak,
+ &exceptionType, &isUncaught, &scheduledOOMBreak,
&scheduledAssertBreak](V8InspectorSessionImpl* session) {
if (session->debuggerAgent()->acceptsPause(scheduledOOMBreak)) {
session->debuggerAgent()->didPause(
InspectedContext::contextId(pausedContext), exception,
- breakpointIds, isPromiseRejection, isUncaught, scheduledOOMBreak,
+ breakpointIds, exceptionType, isUncaught, scheduledOOMBreak,
scheduledAssertBreak);
}
});
@@ -576,7 +576,7 @@ void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
bool is_live_edited, bool has_compile_error) {
int contextId;
if (!script->ContextId().To(&contextId)) return;
- if (script->IsWasm()) {
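+ // Only wasm scripts without a source map go through the disassembling
+ // translation; scripts that carry a source map are reported as-is.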
+ if (script->IsWasm() && script->SourceMappingURL().IsEmpty()) {
WasmTranslation* wasmTranslation = &m_wasmTranslation;
m_inspector->forEachSession(
m_inspector->contextGroupId(contextId),
@@ -608,12 +608,11 @@ void V8Debugger::BreakProgramRequested(
void V8Debugger::ExceptionThrown(v8::Local<v8::Context> pausedContext,
v8::Local<v8::Value> exception,
- v8::Local<v8::Value> promise,
- bool isUncaught) {
- bool isPromiseRejection = promise->IsPromise();
+ v8::Local<v8::Value> promise, bool isUncaught,
+ v8::debug::ExceptionType exceptionType) {
std::vector<v8::debug::BreakpointId> break_points_hit;
- handleProgramBreak(pausedContext, exception, break_points_hit,
- isPromiseRejection, isUncaught);
+ handleProgramBreak(pausedContext, exception, break_points_hit, exceptionType,
+ isUncaught);
}
bool V8Debugger::IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
@@ -751,11 +750,37 @@ v8::MaybeLocal<v8::Value> V8Debugger::generatorScopes(
return getTargetScopes(context, generator, GENERATOR);
}
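+// Lazily assigns |value| a monotonically increasing id and stores it in a
+// v8::debug::WeakMap, so the id stays stable for the object's lifetime
+// without keeping the object alive.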
+v8::MaybeLocal<v8::Uint32> V8Debugger::stableObjectId(
+ v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
+ DCHECK(value->IsObject());
+ if (m_stableObjectId.IsEmpty()) {
+ m_stableObjectId.Reset(m_isolate, v8::debug::WeakMap::New(m_isolate));
+ }
+ v8::Local<v8::debug::WeakMap> stableObjectId =
+ m_stableObjectId.Get(m_isolate);
+ v8::Local<v8::Value> idValue;
+ if (!stableObjectId->Get(context, value).ToLocal(&idValue) ||
+ !idValue->IsUint32()) {
+ idValue = v8::Integer::NewFromUnsigned(m_isolate, ++m_lastStableObjectId);
+ stableObjectId->Set(context, value, idValue).ToLocalChecked();
+ }
+ return idValue.As<v8::Uint32>();
+}
+
v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
v8::Local<v8::Array> properties;
if (!v8::debug::GetInternalProperties(m_isolate, value).ToLocal(&properties))
return v8::MaybeLocal<v8::Array>();
+ if (value->IsObject()) {
+ v8::Local<v8::Uint32> id;
+ if (stableObjectId(context, value).ToLocal(&id)) {
+ createDataProperty(
+ context, properties, properties->Length(),
+ toV8StringInternalized(m_isolate, "[[StableObjectId]]"));
+ createDataProperty(context, properties, properties->Length(), id);
+ }
+ }
if (value->IsFunction()) {
v8::Local<v8::Function> function = value.As<v8::Function>();
v8::Local<v8::Object> location;
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 72962dde31..a99653add6 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -40,7 +40,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
public v8::debug::AsyncEventDelegate {
public:
V8Debugger(v8::Isolate*, V8InspectorImpl*);
- ~V8Debugger();
+ ~V8Debugger() override;
bool enabled() const;
v8::Isolate* isolate() const { return m_isolate; }
@@ -145,7 +145,8 @@ class V8Debugger : public v8::debug::DebugDelegate,
void handleProgramBreak(
v8::Local<v8::Context> pausedContext, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& hitBreakpoints,
- bool isPromiseRejection = false, bool isUncaught = false);
+ v8::debug::ExceptionType exception_type = v8::debug::kException,
+ bool isUncaught = false);
enum ScopeTargetKind {
FUNCTION,
@@ -181,7 +182,8 @@ class V8Debugger : public v8::debug::DebugDelegate,
const std::vector<v8::debug::BreakpointId>& break_points_hit) override;
void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Value> exception,
- v8::Local<v8::Value> promise, bool is_uncaught) override;
+ v8::Local<v8::Value> promise, bool is_uncaught,
+ v8::debug::ExceptionType exception_type) override;
bool IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
const v8::debug::Location& start,
const v8::debug::Location& end) override;
@@ -189,6 +191,9 @@ class V8Debugger : public v8::debug::DebugDelegate,
int currentContextGroupId();
bool asyncStepOutOfFunction(int targetContextGroupId, bool onlyAtReturn);
+ v8::MaybeLocal<v8::Uint32> stableObjectId(v8::Local<v8::Context>,
+ v8::Local<v8::Value>);
+
v8::Isolate* m_isolate;
V8InspectorImpl* m_inspector;
int m_enableCount;
@@ -245,6 +250,9 @@ class V8Debugger : public v8::debug::DebugDelegate,
std::unique_ptr<TerminateExecutionCallback> m_terminateExecutionCallback;
+ uint32_t m_lastStableObjectId = 0;
+ v8::Global<v8::debug::WeakMap> m_stableObjectId;
+
WasmTranslation m_wasmTranslation;
DISALLOW_COPY_AND_ASSIGN(V8Debugger);
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index f255287c03..e50fe0e893 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -153,7 +153,7 @@ V8HeapProfilerAgentImpl::V8HeapProfilerAgentImpl(
m_state(state),
m_hasTimer(false) {}
-V8HeapProfilerAgentImpl::~V8HeapProfilerAgentImpl() {}
+V8HeapProfilerAgentImpl::~V8HeapProfilerAgentImpl() = default;
void V8HeapProfilerAgentImpl::restore() {
if (m_state->booleanProperty(HeapProfilerAgentState::heapProfilerEnabled,
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index 62790a6335..5422b5e12f 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -359,7 +359,8 @@ V8Console* V8InspectorImpl::console() {
}
void V8InspectorImpl::forEachContext(
- int contextGroupId, std::function<void(InspectedContext*)> callback) {
+ int contextGroupId,
+ const std::function<void(InspectedContext*)>& callback) {
auto it = m_contexts.find(contextGroupId);
if (it == m_contexts.end()) return;
std::vector<int> ids;
@@ -376,7 +377,8 @@ void V8InspectorImpl::forEachContext(
}
void V8InspectorImpl::forEachSession(
- int contextGroupId, std::function<void(V8InspectorSessionImpl*)> callback) {
+ int contextGroupId,
+ const std::function<void(V8InspectorSessionImpl*)>& callback) {
auto it = m_sessions.find(contextGroupId);
if (it == m_sessions.end()) return;
std::vector<int> ids;
@@ -411,9 +413,9 @@ V8InspectorImpl::EvaluateScope::~EvaluateScope() {
class V8InspectorImpl::EvaluateScope::TerminateTask : public v8::Task {
public:
TerminateTask(v8::Isolate* isolate, std::shared_ptr<CancelToken> token)
- : m_isolate(isolate), m_token(token) {}
+ : m_isolate(isolate), m_token(std::move(token)) {}
- void Run() {
+ void Run() override {
// CancelToken contains m_canceled bool which may be changed from main
// thread, so lock mutex first.
v8::base::LockGuard<v8::base::Mutex> lock(&m_token->m_mutex);
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index 2124ba6250..70eaf0eb20 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -120,9 +120,10 @@ class V8InspectorImpl : public V8Inspector {
InspectedContext* getContext(int contextId) const;
V8Console* console();
void forEachContext(int contextGroupId,
- std::function<void(InspectedContext*)> callback);
- void forEachSession(int contextGroupId,
- std::function<void(V8InspectorSessionImpl*)> callback);
+ const std::function<void(InspectedContext*)>& callback);
+ void forEachSession(
+ int contextGroupId,
+ const std::function<void(V8InspectorSessionImpl*)>& callback);
class EvaluateScope {
public:
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 85861a05bf..5053d4dd78 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -34,7 +34,7 @@ class V8InspectorSessionImpl : public V8InspectorSession,
static std::unique_ptr<V8InspectorSessionImpl> create(
V8InspectorImpl*, int contextGroupId, int sessionId,
V8Inspector::Channel*, const StringView& state);
- ~V8InspectorSessionImpl();
+ ~V8InspectorSessionImpl() override;
V8InspectorImpl* inspector() const { return m_inspector; }
V8ConsoleAgentImpl* consoleAgent() { return m_consoleAgent.get(); }
diff --git a/deps/v8/src/inspector/v8-regex.h b/deps/v8/src/inspector/v8-regex.h
index 0c4136fc8b..0ce779542d 100644
--- a/deps/v8/src/inspector/v8-regex.h
+++ b/deps/v8/src/inspector/v8-regex.h
@@ -20,7 +20,8 @@ class V8Regex {
public:
V8Regex(V8InspectorImpl*, const String16&, bool caseSensitive,
bool multiline = false);
- int match(const String16&, int startFrom = 0, int* matchLength = 0) const;
+ int match(const String16&, int startFrom = 0,
+ int* matchLength = nullptr) const;
bool isValid() const { return !m_regex.IsEmpty(); }
const String16& errorMessage() const { return m_errorMessage; }
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index d0ae633945..9e3697cf9e 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -226,7 +226,7 @@ V8RuntimeAgentImpl::V8RuntimeAgentImpl(
m_inspector(session->inspector()),
m_enabled(false) {}
-V8RuntimeAgentImpl::~V8RuntimeAgentImpl() {}
+V8RuntimeAgentImpl::~V8RuntimeAgentImpl() = default;
void V8RuntimeAgentImpl::evaluate(
const String16& expression, Maybe<String16> objectGroup,
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.cc b/deps/v8/src/inspector/v8-schema-agent-impl.cc
index d7b6cdcb01..07bbd35d97 100644
--- a/deps/v8/src/inspector/v8-schema-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.cc
@@ -14,7 +14,7 @@ V8SchemaAgentImpl::V8SchemaAgentImpl(V8InspectorSessionImpl* session,
protocol::DictionaryValue* state)
: m_session(session), m_frontend(frontendChannel) {}
-V8SchemaAgentImpl::~V8SchemaAgentImpl() {}
+V8SchemaAgentImpl::~V8SchemaAgentImpl() = default;
Response V8SchemaAgentImpl::getDomains(
std::unique_ptr<protocol::Array<protocol::Schema::Domain>>* result) {
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 21ca98d911..ae41344a7b 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -223,10 +223,10 @@ V8StackTraceImpl::V8StackTraceImpl(
const V8StackTraceId& externalParent)
: m_frames(std::move(frames)),
m_maxAsyncDepth(maxAsyncDepth),
- m_asyncParent(asyncParent),
+ m_asyncParent(std::move(asyncParent)),
m_externalParent(externalParent) {}
-V8StackTraceImpl::~V8StackTraceImpl() {}
+V8StackTraceImpl::~V8StackTraceImpl() = default;
std::unique_ptr<V8StackTrace> V8StackTraceImpl::clone() {
return std::unique_ptr<V8StackTrace>(new V8StackTraceImpl(
@@ -392,7 +392,7 @@ AsyncStackTrace::AsyncStackTrace(
m_suspendedTaskId(nullptr),
m_description(description),
m_frames(std::move(frames)),
- m_asyncParent(asyncParent),
+ m_asyncParent(std::move(asyncParent)),
m_externalParent(externalParent) {
DCHECK(m_contextGroupId || (!externalParent.IsInvalid() && m_frames.empty()));
}
diff --git a/deps/v8/src/inspector/wasm-translation.cc b/deps/v8/src/inspector/wasm-translation.cc
index f049871202..c5d1f8c6a2 100644
--- a/deps/v8/src/inspector/wasm-translation.cc
+++ b/deps/v8/src/inspector/wasm-translation.cc
@@ -62,7 +62,7 @@ class WasmTranslation::TranslatorImpl {
TransLocation(WasmTranslation* translation, String16 script_id, int line,
int column)
: translation(translation),
- script_id(script_id),
+ script_id(std::move(script_id)),
line(line),
column(column) {}
};
@@ -74,7 +74,7 @@ class WasmTranslation::TranslatorImpl {
int index) = 0;
virtual const String16 GetHash(v8::Isolate*, int index) = 0;
- virtual ~TranslatorImpl() {}
+ virtual ~TranslatorImpl() = default;
class RawTranslator;
class DisassemblingTranslator;
@@ -238,7 +238,7 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
return builder.toString();
}
- String16 GetFakeScriptId(const String16 script_id, int func_index) {
+ String16 GetFakeScriptId(const String16& script_id, int func_index) {
return String16::concat(script_id, '-', String16::fromInteger(func_index));
}
String16 GetFakeScriptId(const TransLocation* loc) {
diff --git a/deps/v8/src/instruction-stream.cc b/deps/v8/src/instruction-stream.cc
index 4b2a9012d6..20cb4ece16 100644
--- a/deps/v8/src/instruction-stream.cc
+++ b/deps/v8/src/instruction-stream.cc
@@ -51,16 +51,18 @@ void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
uint32_t* size) {
EmbeddedData d = EmbeddedData::FromIsolate(isolate);
- const uint32_t page_size = static_cast<uint32_t>(AllocatePageSize());
+ v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
+ const uint32_t page_size =
+ static_cast<uint32_t>(page_allocator->AllocatePageSize());
const uint32_t allocated_size = RoundUp(d.size(), page_size);
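+ // Map the pages read-write for the copy below, then flip them to
+ // read-execute once the embedded blob is in place.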
uint8_t* allocated_bytes = static_cast<uint8_t*>(
- AllocatePages(GetRandomMmapAddr(), allocated_size, page_size,
- PageAllocator::kReadWrite));
+ AllocatePages(page_allocator, isolate->heap()->GetRandomMmapAddr(),
+ allocated_size, page_size, PageAllocator::kReadWrite));
CHECK_NOT_NULL(allocated_bytes);
std::memcpy(allocated_bytes, d.data(), d.size());
- CHECK(SetPermissions(allocated_bytes, allocated_size,
+ CHECK(SetPermissions(page_allocator, allocated_bytes, allocated_size,
PageAllocator::kReadExecute));
*data = allocated_bytes;
@@ -72,8 +74,10 @@ void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
// static
void InstructionStream::FreeOffHeapInstructionStream(uint8_t* data,
uint32_t size) {
- const uint32_t page_size = static_cast<uint32_t>(AllocatePageSize());
- CHECK(FreePages(data, RoundUp(size, page_size)));
+ v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
+ const uint32_t page_size =
+ static_cast<uint32_t>(page_allocator->AllocatePageSize());
+ CHECK(FreePages(page_allocator, data, RoundUp(size, page_size)));
}
} // namespace internal
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 7438731c20..3eca6f65b4 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -16,6 +16,9 @@ void CallInterfaceDescriptorData::InitializePlatformSpecific(
// InterfaceDescriptor owns a copy of the registers array.
register_params_ = NewArray<Register>(register_parameter_count, no_reg);
for (int i = 0; i < register_parameter_count; i++) {
+ // The value of the root register must be reserved, thus any uses
+ // within the calling convention are disallowed.
+ DCHECK_NE(registers[i], kRootRegister);
register_params_[i] = registers[i];
}
}
@@ -260,13 +263,21 @@ void LoadWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
VectorRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // TODO(jgruber): This DCHECK could be enabled if RegisterBase::ListOf were
+ // to allow no_reg entries.
+ // DCHECK(!AreAliased(ReceiverRegister(), NameRegister(), SlotRegister(),
+ // VectorRegister(), kRootRegister));
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
}
void StoreWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
SlotRegister(), VectorRegister()};
+ // TODO(jgruber): This DCHECK could be enabled if RegisterBase::ListOf were
+ // to allow no_reg entries.
+ // DCHECK(!AreAliased(ReceiverRegister(), NameRegister(), kRootRegister));
int len = arraysize(registers) - kStackArgumentsCount;
data->InitializePlatformSpecific(len, registers);
}
@@ -333,6 +344,11 @@ void WasmGrowMemoryDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void WasmThrowDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
void CloneObjectWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index ee9abac9ea..ae64b05582 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -60,7 +60,7 @@ namespace internal {
V(StringAt) \
V(StringSubstring) \
V(GetProperty) \
- V(ArgumentAdaptor) \
+ V(ArgumentsAdaptor) \
V(ApiCallback) \
V(ApiGetter) \
V(GrowArrayElements) \
@@ -74,6 +74,7 @@ namespace internal {
V(FrameDropperTrampoline) \
V(RunMicrotasks) \
V(WasmGrowMemory) \
+ V(WasmThrow) \
V(CloneObjectWithVector) \
BUILTIN_LIST_TFS(V)
@@ -211,7 +212,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
typedef CallInterfaceDescriptorData::Flags Flags;
CallInterfaceDescriptor() : data_(nullptr) {}
- virtual ~CallInterfaceDescriptor() {}
+ virtual ~CallInterfaceDescriptor() = default;
CallInterfaceDescriptor(CallDescriptors::Key key)
: data_(CallDescriptors::call_descriptor_data(key)) {}
@@ -306,19 +307,32 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
explicit name() : base(key()) {} \
static inline CallDescriptors::Key key();
-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
-// TODO(jgruber,v8:6666): Keep kRootRegister free unconditionally.
+#if defined(V8_TARGET_ARCH_IA32)
+// To support all possible cases, we must limit the number of register args for
+// TFS builtins on ia32 to 3. Out of the 6 allocatable registers, esi is taken
+// as the context register and ebx is the root register. One register must
+// remain available to store the jump/call target. Thus 3 registers remain for
+// arguments. This applies specifically to TFS builtins because they can be
+// used as targets of Torque function pointers (which must have a register
+// available to store the call target).
+// TODO(jgruber): Ideally we should just decrement kMaxBuiltinRegisterParams but
+// that comes with its own set of complications. It's possible, but requires
+// refactoring the calling convention of other existing stubs.
constexpr int kMaxBuiltinRegisterParams = 4;
+constexpr int kMaxTFSBuiltinRegisterParams = 3;
#else
constexpr int kMaxBuiltinRegisterParams = 5;
+constexpr int kMaxTFSBuiltinRegisterParams = kMaxBuiltinRegisterParams;
#endif
+STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
#define DECLARE_DEFAULT_DESCRIPTOR(name, base) \
DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
protected: \
static const int kRegisterParams = \
- kParameterCount > kMaxBuiltinRegisterParams ? kMaxBuiltinRegisterParams \
- : kParameterCount; \
+ kParameterCount > kMaxTFSBuiltinRegisterParams \
+ ? kMaxTFSBuiltinRegisterParams \
+ : kParameterCount; \
static const int kStackParams = kParameterCount - kRegisterParams; \
void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
override { \
@@ -428,7 +442,7 @@ class AllocateDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // result 1
- MachineType::Int32()) // kRequestedSize
+ MachineType::IntPtr()) // kRequestedSize
DECLARE_DESCRIPTOR(AllocateDescriptor, CallInterfaceDescriptor)
};
@@ -601,6 +615,15 @@ class LoadWithVectorDescriptor : public LoadDescriptor {
DECLARE_DESCRIPTOR(LoadWithVectorDescriptor, LoadDescriptor)
static const Register VectorRegister();
+
+#if V8_TARGET_ARCH_IA32
+ static const bool kPassLastArgsOnStack = true;
+#else
+ static const bool kPassLastArgsOnStack = false;
+#endif
+
+ // Pass vector through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
};
class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
@@ -611,9 +634,15 @@ class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
MachineType::AnyTagged()) // kVector
DECLARE_DESCRIPTOR(LoadGlobalWithVectorDescriptor, LoadGlobalDescriptor)
+#if V8_TARGET_ARCH_IA32
+ // On ia32, LoadWithVectorDescriptor passes vector on the stack and thus we
+ // need to choose a new register here.
+ static const Register VectorRegister() { return edx; }
+#else
static const Register VectorRegister() {
return LoadWithVectorDescriptor::VectorRegister();
}
+#endif
};
class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor {
@@ -639,10 +668,9 @@ class FastNewObjectDescriptor : public CallInterfaceDescriptor {
class RecordWriteDescriptor final : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kObject, kSlot, kIsolate, kRememberedSet, kFPMode)
+ DEFINE_PARAMETERS(kObject, kSlot, kRememberedSet, kFPMode)
DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject
MachineType::Pointer(), // kSlot
- MachineType::Pointer(), // kIsolate
MachineType::TaggedSigned(), // kRememberedSet
MachineType::TaggedSigned()) // kFPMode
@@ -690,12 +718,12 @@ class CallTrampolineDescriptor : public CallInterfaceDescriptor {
class CallVarargsDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kArgumentsList,
- kArgumentsLength)
+ DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kArgumentsLength,
+ kArgumentsList)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kActualArgumentsCount
- MachineType::AnyTagged(), // kArgumentsList
- MachineType::Int32()) // kArgumentsLength
+ MachineType::Int32(), // kArgumentsLength
+ MachineType::AnyTagged()) // kArgumentsList
DECLARE_DESCRIPTOR(CallVarargsDescriptor, CallInterfaceDescriptor)
};
@@ -727,9 +755,10 @@ class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
class ConstructVarargsDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_JS_PARAMETERS(kArgumentsList, kArgumentsLength)
- DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged(), // kArgumentsList
- MachineType::Int32()) // kArgumentsLength
+ DEFINE_JS_PARAMETERS(kArgumentsLength, kArgumentsList)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(), // kArgumentsLength
+ MachineType::AnyTagged()) // kArgumentsList
+
DECLARE_DESCRIPTOR(ConstructVarargsDescriptor, CallInterfaceDescriptor)
};
@@ -759,6 +788,7 @@ class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
// TODO(ishell): consider merging this with ArrayConstructorDescriptor
class ConstructStubDescriptor : public CallInterfaceDescriptor {
public:
+ // TODO(jgruber): Remove the unused allocation site parameter.
DEFINE_JS_PARAMETERS(kAllocationSite)
DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged());
@@ -879,11 +909,11 @@ class StringSubstringDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(StringSubstringDescriptor, CallInterfaceDescriptor)
};
-class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
+class ArgumentsAdaptorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_JS_PARAMETERS(kExpectedArgumentsCount)
DEFINE_JS_PARAMETER_TYPES(MachineType::Int32())
- DECLARE_DESCRIPTOR(ArgumentAdaptorDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ArgumentsAdaptorDescriptor, CallInterfaceDescriptor)
};
class CppBuiltinAdaptorDescriptor : public CallInterfaceDescriptor {
@@ -913,6 +943,9 @@ class CEntry1ArgvOnStackDescriptor : public CallInterfaceDescriptor {
class ApiCallbackDescriptor : public CallInterfaceDescriptor {
public:
+ // TODO(jgruber): This could be simplified to pass call data on the stack
+ // since that is what CallApiCallbackStub does anyway. This would free a
+ // register.
DEFINE_PARAMETERS_NO_CONTEXT(kTargetContext, kCallData, kHolder,
kApiFunctionAddress)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTargetContext
@@ -981,15 +1014,24 @@ class InterpreterPushArgsThenCallDescriptor : public CallInterfaceDescriptor {
class InterpreterPushArgsThenConstructDescriptor
: public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kNumberOfArguments, kNewTarget, kConstructor,
- kFeedbackElement, kFirstArgument)
+ DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kConstructor,
+ kNewTarget, kFeedbackElement)
DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kNumberOfArguments
- MachineType::AnyTagged(), // kNewTarget
+ MachineType::Pointer(), // kFirstArgument
MachineType::AnyTagged(), // kConstructor
- MachineType::AnyTagged(), // kFeedbackElement
- MachineType::Pointer()) // kFirstArgument
+ MachineType::AnyTagged(), // kNewTarget
+ MachineType::AnyTagged()) // kFeedbackElement
DECLARE_DESCRIPTOR(InterpreterPushArgsThenConstructDescriptor,
CallInterfaceDescriptor)
+
+#if V8_TARGET_ARCH_IA32
+ static const bool kPassLastArgsOnStack = true;
+#else
+ static const bool kPassLastArgsOnStack = false;
+#endif
+
+ // Pass constructor, new target and feedback element through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
};
class InterpreterCEntry1Descriptor : public CallInterfaceDescriptor {
@@ -1044,6 +1086,14 @@ class WasmGrowMemoryDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmGrowMemoryDescriptor, CallInterfaceDescriptor)
};
+class WasmThrowDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kException)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result 1
+ MachineType::AnyTagged()) // kException
+ DECLARE_DESCRIPTOR(WasmThrowDescriptor, CallInterfaceDescriptor)
+};
+
class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kSource, kFlags, kSlot, kVector)
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 33731599c8..a7c95aae7b 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -23,7 +23,7 @@ class RegisterTransferWriter final
public NON_EXPORTED_BASE(ZoneObject) {
public:
RegisterTransferWriter(BytecodeArrayBuilder* builder) : builder_(builder) {}
- ~RegisterTransferWriter() override {}
+ ~RegisterTransferWriter() override = default;
void EmitLdar(Register input) override { builder_->OutputLdarRaw(input); }
@@ -797,6 +797,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedPropertyNoFeedback(
+ Register object, const AstRawString* name) {
+ size_t name_index = GetConstantPoolEntry(name);
+ OutputLdaNamedPropertyNoFeedback(object, name_index);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
Register object, int feedback_slot) {
OutputLdaKeyedProperty(object, feedback_slot);
@@ -847,6 +854,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
return StoreNamedProperty(object, name_index, feedback_slot, language_mode);
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedPropertyNoFeedback(
+ Register object, const AstRawString* name, LanguageMode language_mode) {
+ size_t name_index = GetConstantPoolEntry(name);
+ OutputStaNamedPropertyNoFeedback(object, name_index,
+ static_cast<uint8_t>(language_mode));
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedOwnProperty(
Register object, const AstRawString* name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
@@ -973,6 +988,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayFromIterable() {
+ OutputCreateArrayFromIterable();
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
size_t constant_properties_entry, int literal_index, int flags,
Register output) {
@@ -1375,6 +1395,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallAnyReceiver(Register callable,
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallNoFeedback(Register callable,
+ RegisterList args) {
+ OutputCallNoFeedback(callable, args, args.register_count());
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CallWithSpread(Register callable,
RegisterList args,
int feedback_slot) {
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 3feda90495..bf5909d8e4 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -120,6 +120,10 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& LoadNamedProperty(Register object,
const AstRawString* name,
int feedback_slot);
+ // Named load property without type feedback.
+ BytecodeArrayBuilder& LoadNamedPropertyNoFeedback(Register object,
+ const AstRawString* name);
+
// Keyed load property. The key should be in the accumulator.
BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
// Named load property of the @@iterator symbol.
@@ -145,6 +149,12 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
const AstRawString* name,
int feedback_slot,
LanguageMode language_mode);
+
+ // Store a property with the given name, without a feedback slot. The value
+ // to be stored should be in the accumulator.
+ BytecodeArrayBuilder& StoreNamedPropertyNoFeedback(
+ Register object, const AstRawString* name, LanguageMode language_mode);
+
// Store a property named by a constant from the constant pool. The value to
// be stored should be in the accumulator.
BytecodeArrayBuilder& StoreNamedProperty(Register object,
@@ -234,6 +244,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& CreateArrayLiteral(size_t constant_elements_entry,
int literal_index, int flags);
BytecodeArrayBuilder& CreateEmptyArrayLiteral(int literal_index);
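+ // Create a new array populated from the iterable in the accumulator.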
+ BytecodeArrayBuilder& CreateArrayFromIterable();
BytecodeArrayBuilder& CreateObjectLiteral(size_t constant_properties_entry,
int literal_index, int flags,
Register output);
@@ -276,6 +287,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& CallAnyReceiver(Register callable, RegisterList args,
int feedback_slot);
+ // Call a JS function with any receiver (possibly, but not necessarily,
+ // undefined), without collecting type feedback. The JSFunction or Callable
+ // to be called should be in |callable|. The arguments should be in |args|,
+ // with the receiver in |args[0]|.
+ BytecodeArrayBuilder& CallNoFeedback(Register callable, RegisterList args);
+
// Tail call into a JS function. The JSFunction or Callable to be called
// should be in |callable|. The arguments should be in |args|, with the
// receiver in |args[0]|. Type feedback is recorded in the |feedback_slot| in
diff --git a/deps/v8/src/interpreter/bytecode-decoder.cc b/deps/v8/src/interpreter/bytecode-decoder.cc
index e684eb410b..fa0ff9e4a8 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.cc
+++ b/deps/v8/src/interpreter/bytecode-decoder.cc
@@ -71,18 +71,8 @@ uint32_t BytecodeDecoder::DecodeUnsignedOperand(Address operand_start,
namespace {
-const char* NameForRuntimeId(uint32_t idx) {
- switch (idx) {
-#define CASE(name, nargs, ressize) \
- case Runtime::k##name: \
- return #name; \
- case Runtime::kInline##name: \
- return "_" #name;
- FOR_EACH_INTRINSIC(CASE)
-#undef CASE
- default:
- UNREACHABLE();
- }
+const char* NameForRuntimeId(Runtime::FunctionId idx) {
+ return Runtime::FunctionForId(idx)->name;
}
const char* NameForNativeContextIndex(uint32_t idx) {
@@ -160,8 +150,9 @@ std::ostream& BytecodeDecoder::Decode(std::ostream& os,
break;
}
case interpreter::OperandType::kRuntimeId:
- os << "[" << NameForRuntimeId(DecodeUnsignedOperand(
- operand_start, op_type, operand_scale))
+ os << "["
+ << NameForRuntimeId(static_cast<Runtime::FunctionId>(
+ DecodeUnsignedOperand(operand_start, op_type, operand_scale)))
<< "]";
break;
case interpreter::OperandType::kImm:
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 11a19443e1..b00d3773cd 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -29,7 +29,7 @@ namespace interpreter {
// Scoped class tracking context objects created by the visitor. Represents
// mutations of the context chain within the function body, allowing pushing and
// popping of the current {context_register} during visitation.
-class BytecodeGenerator::ContextScope BASE_EMBEDDED {
+class BytecodeGenerator::ContextScope {
public:
ContextScope(BytecodeGenerator* generator, Scope* scope)
: generator_(generator),
@@ -94,7 +94,7 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
// Scoped class for tracking control statements entered by the
// visitor. The pattern derives AstGraphBuilder::ControlScope.
-class BytecodeGenerator::ControlScope BASE_EMBEDDED {
+class BytecodeGenerator::ControlScope {
public:
explicit ControlScope(BytecodeGenerator* generator)
: generator_(generator), outer_(generator->execution_control()),
@@ -402,7 +402,7 @@ class BytecodeGenerator::ControlScopeForIteration final
loop_builder_(loop_builder) {
generator->loop_depth_++;
}
- ~ControlScopeForIteration() { generator()->loop_depth_--; }
+ ~ControlScopeForIteration() override { generator()->loop_depth_--; }
protected:
bool Execute(Command command, Statement* statement,
@@ -908,7 +908,7 @@ BytecodeGenerator::BytecodeGenerator(
execution_context_(nullptr),
execution_result_(nullptr),
incoming_new_target_or_generator_(),
- dummy_feedback_slot_(),
+ dummy_feedback_slot_(feedback_spec(), FeedbackSlotKind::kCompareOp),
generator_jump_table_(nullptr),
suspend_count_(0),
loop_depth_(0),
@@ -1820,7 +1820,11 @@ bool BytecodeGenerator::ShouldOptimizeAsOneShot() const {
if (loop_depth_ > 0) return false;
- return info()->literal()->is_top_level() || info()->literal()->is_iife();
+ // A non-top-level iife is likely to be executed multiple times and so
+ // shouldn't be optimized as one-shot.
+ bool is_toplevel_iife = info()->literal()->is_iife() &&
+ current_scope()->outer_scope()->is_script_scope();
+ return info()->literal()->is_toplevel() || is_toplevel_iife;
}
void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
@@ -1859,6 +1863,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
DCHECK_NE(property->kind(), ClassLiteral::Property::PRIVATE_FIELD);
Register key = register_allocator()->GrowRegisterList(&args);
+ builder()->SetExpressionAsStatementPosition(property->key());
BuildLoadPropertyKey(property, key);
if (property->is_static()) {
// The static prototype property is read only. We handle the non
@@ -1968,13 +1973,13 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
}
void BytecodeGenerator::VisitInitializeClassFieldsStatement(
- InitializeClassFieldsStatement* expr) {
+ InitializeClassFieldsStatement* stmt) {
RegisterList args = register_allocator()->NewRegisterList(3);
Register constructor = args[0], key = args[1], value = args[2];
builder()->MoveRegister(builder()->Receiver(), constructor);
- for (int i = 0; i < expr->fields()->length(); i++) {
- ClassLiteral::Property* property = expr->fields()->at(i);
+ for (int i = 0; i < stmt->fields()->length(); i++) {
+ ClassLiteral::Property* property = stmt->fields()->at(i);
if (property->is_computed_name()) {
DCHECK_EQ(property->kind(), ClassLiteral::Property::PUBLIC_FIELD);
@@ -1993,6 +1998,7 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement(
BuildLoadPropertyKey(property, key);
}
+ builder()->SetExpressionAsStatementPosition(property->value());
VisitForRegisterValue(property->value(), value);
VisitSetHomeObject(value, constructor, property);
@@ -2231,7 +2237,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()
->LoadLiteral(Smi::FromEnum(LanguageMode::kSloppy))
.StoreAccumulatorInRegister(args[3])
- .CallRuntime(Runtime::kSetProperty, args);
+ .CallRuntime(Runtime::kSetKeyedProperty, args);
Register value = args[2];
VisitSetHomeObject(value, literal, property);
}
@@ -2364,116 +2370,6 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()->LoadAccumulatorWithRegister(literal);
}
-void BytecodeGenerator::BuildArrayLiteralElementsInsertion(
- Register array, int first_spread_index, ZonePtrList<Expression>* elements,
- bool skip_constants) {
- DCHECK_LT(first_spread_index, elements->length());
-
- Register index = register_allocator()->NewRegister();
- int array_index = 0;
-
- ZonePtrList<Expression>::iterator iter = elements->begin();
- ZonePtrList<Expression>::iterator first_spread_or_end =
- first_spread_index >= 0 ? elements->begin() + first_spread_index
- : elements->end();
-
- // Evaluate subexpressions and store them into the array.
- FeedbackSlot keyed_store_slot;
- for (; iter != first_spread_or_end; ++iter, array_index++) {
- Expression* subexpr = *iter;
- DCHECK(!subexpr->IsSpread());
- if (skip_constants && subexpr->IsCompileTimeValue()) continue;
- if (keyed_store_slot.IsInvalid()) {
- keyed_store_slot = feedback_spec()->AddKeyedStoreICSlot(language_mode());
- }
- builder()
- ->LoadLiteral(Smi::FromInt(array_index))
- .StoreAccumulatorInRegister(index);
- VisitForAccumulatorValue(subexpr);
- builder()->StoreKeyedProperty(
- array, index, feedback_index(keyed_store_slot), language_mode());
- }
- if (iter != elements->end()) {
- builder()->LoadLiteral(array_index).StoreAccumulatorInRegister(index);
-
- // Handle the first spread element and everything that follows.
- FeedbackSlot element_slot = feedback_spec()->AddStoreInArrayLiteralICSlot();
- FeedbackSlot index_slot = feedback_spec()->AddBinaryOpICSlot();
- // TODO(neis): Only create length_slot when there are holes.
- FeedbackSlot length_slot =
- feedback_spec()->AddStoreICSlot(LanguageMode::kStrict);
- for (; iter != elements->end(); ++iter) {
- Expression* subexpr = *iter;
- if (subexpr->IsSpread()) {
- BuildArrayLiteralSpread(subexpr->AsSpread(), array, index, index_slot,
- element_slot);
- } else if (!subexpr->IsTheHoleLiteral()) {
- // literal[index++] = subexpr
- VisitForAccumulatorValue(subexpr);
- builder()
- ->StoreInArrayLiteral(array, index, feedback_index(element_slot))
- .LoadAccumulatorWithRegister(index)
- .UnaryOperation(Token::INC, feedback_index(index_slot))
- .StoreAccumulatorInRegister(index);
- } else {
- // literal.length = ++index
- auto length = ast_string_constants()->length_string();
- builder()
- ->LoadAccumulatorWithRegister(index)
- .UnaryOperation(Token::INC, feedback_index(index_slot))
- .StoreAccumulatorInRegister(index)
- .StoreNamedProperty(array, length, feedback_index(length_slot),
- LanguageMode::kStrict);
- }
- }
- }
- builder()->LoadAccumulatorWithRegister(array);
-}
-
-void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- expr->InitDepthAndFlags();
- uint8_t flags = CreateArrayLiteralFlags::Encode(
- expr->IsFastCloningSupported(), expr->ComputeFlags());
-
- bool is_empty = expr->is_empty();
- bool optimize_as_one_shot = ShouldOptimizeAsOneShot();
- size_t entry;
- if (is_empty && optimize_as_one_shot) {
- entry = builder()->EmptyArrayBoilerplateDescriptionConstantPoolEntry();
- } else if (!is_empty) {
- entry = builder()->AllocateDeferredConstantPoolEntry();
- array_literals_.push_back(std::make_pair(expr, entry));
- }
-
- if (optimize_as_one_shot) {
- // Create array literal without any allocation sites
- RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->LoadConstantPoolEntry(entry)
- .StoreAccumulatorInRegister(args[0])
- .LoadLiteral(Smi::FromInt(flags))
- .StoreAccumulatorInRegister(args[1])
- .CallRuntime(Runtime::kCreateArrayLiteralWithoutAllocationSite, args);
- } else if (is_empty) {
- // Empty array literal fast-path.
- int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
- DCHECK(expr->IsFastCloningSupported());
- builder()->CreateEmptyArrayLiteral(literal_index);
- return;
- } else {
- // Deep-copy the literal boilerplate
- int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
- builder()->CreateArrayLiteral(entry, literal_index, flags);
- }
-
- Register literal = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(literal);
- // Insert all elements except the constant ones, since they are already there.
- BuildArrayLiteralElementsInsertion(literal, expr->first_spread_index(),
- expr->values(), true);
-}
-
void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread, Register array,
Register index,
FeedbackSlot index_slot,
@@ -2513,6 +2409,154 @@ void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread, Register array,
loop_builder.JumpToHeader(loop_depth_);
}
+void BytecodeGenerator::BuildCreateArrayLiteral(
+ ZonePtrList<Expression>* elements, ArrayLiteral* expr) {
+ RegisterAllocationScope register_scope(this);
+ Register index = register_allocator()->NewRegister();
+ Register array = register_allocator()->NewRegister();
+ SharedFeedbackSlot element_slot(feedback_spec(),
+ FeedbackSlotKind::kStoreInArrayLiteral);
+ ZonePtrList<Expression>::iterator current = elements->begin();
+ ZonePtrList<Expression>::iterator end = elements->end();
+ bool is_empty = elements->is_empty();
+
+ if (!is_empty && (*current)->IsSpread()) {
+ // If we have a leading spread, use CreateArrayFromIterable to create
+ // an array from it and then add the remaining components to that array.
+ VisitForAccumulatorValue(*current);
+ builder()->CreateArrayFromIterable().StoreAccumulatorInRegister(array);
+
+ if (++current != end) {
+ // If there are remaining elements, prepare the index register that is
+ // used for adding those elements. The next index is the length of the
+ // newly created array.
+ auto length = ast_string_constants()->length_string();
+ int length_load_slot = feedback_index(feedback_spec()->AddLoadICSlot());
+ builder()
+ ->LoadNamedProperty(array, length, length_load_slot)
+ .StoreAccumulatorInRegister(index);
+ }
+ } else if (expr != nullptr) {
+ // There are some elements before the first (if any) spread, and we can
+ // use a boilerplate when creating the initial array from those elements.
+
+ // First, allocate a constant pool entry for the boilerplate that will
+ // be created during finalization, and will contain all the constant
+ // elements before the first spread. This also handles the empty array case
+ // and one-shot optimization.
+ uint8_t flags = CreateArrayLiteralFlags::Encode(
+ expr->IsFastCloningSupported(), expr->ComputeFlags());
+ bool optimize_as_one_shot = ShouldOptimizeAsOneShot();
+ size_t entry;
+ if (is_empty && optimize_as_one_shot) {
+ entry = builder()->EmptyArrayBoilerplateDescriptionConstantPoolEntry();
+ } else if (!is_empty) {
+ entry = builder()->AllocateDeferredConstantPoolEntry();
+ array_literals_.push_back(std::make_pair(expr, entry));
+ }
+
+ if (optimize_as_one_shot) {
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadConstantPoolEntry(entry)
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(Smi::FromInt(flags))
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kCreateArrayLiteralWithoutAllocationSite, args);
+ } else if (is_empty) {
+ // Empty array literal fast-path.
+ int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
+ DCHECK(expr->IsFastCloningSupported());
+ builder()->CreateEmptyArrayLiteral(literal_index);
+ } else {
+ // Create array literal from boilerplate.
+ int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
+ builder()->CreateArrayLiteral(entry, literal_index, flags);
+ }
+ builder()->StoreAccumulatorInRegister(array);
+
+ // Insert the missing non-constant elements, up until the first spread
+ // index, into the initial array (the remaining elements will be inserted
+ // below).
+ DCHECK_EQ(current, elements->begin());
+ ZonePtrList<Expression>::iterator first_spread_or_end =
+ expr->first_spread_index() >= 0 ? current + expr->first_spread_index()
+ : end;
+ int array_index = 0;
+ for (; current != first_spread_or_end; ++current, array_index++) {
+ Expression* subexpr = *current;
+ DCHECK(!subexpr->IsSpread());
+ // Skip the constants.
+ if (subexpr->IsCompileTimeValue()) continue;
+
+ builder()
+ ->LoadLiteral(Smi::FromInt(array_index))
+ .StoreAccumulatorInRegister(index);
+ VisitForAccumulatorValue(subexpr);
+ builder()->StoreInArrayLiteral(array, index,
+ feedback_index(element_slot.Get()));
+ }
+
+ if (current != end) {
+ // If there are remaining elements, prepare the index register
+ // to store the next element, which comes from the first spread.
+ builder()->LoadLiteral(array_index).StoreAccumulatorInRegister(index);
+ }
+ } else {
+ // In other cases, we prepare an empty array to be filled in below.
+ DCHECK(!elements->is_empty());
+ int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
+ builder()
+ ->CreateEmptyArrayLiteral(literal_index)
+ .StoreAccumulatorInRegister(array);
+ // Prepare the index for the first element.
+ builder()->LoadLiteral(Smi::FromInt(0)).StoreAccumulatorInRegister(index);
+ }
+
+ // Now build insertions for the remaining elements from current to end.
+ SharedFeedbackSlot index_slot(feedback_spec(), FeedbackSlotKind::kBinaryOp);
+ SharedFeedbackSlot length_slot(
+ feedback_spec(), feedback_spec()->GetStoreICSlot(LanguageMode::kStrict));
+ for (; current != end; ++current) {
+ Expression* subexpr = *current;
+ if (subexpr->IsSpread()) {
+ FeedbackSlot real_index_slot = index_slot.Get();
+ BuildArrayLiteralSpread(subexpr->AsSpread(), array, index,
+ real_index_slot, element_slot.Get());
+ } else if (!subexpr->IsTheHoleLiteral()) {
+ // literal[index++] = subexpr
+ VisitForAccumulatorValue(subexpr);
+ builder()
+ ->StoreInArrayLiteral(array, index,
+ feedback_index(element_slot.Get()))
+ .LoadAccumulatorWithRegister(index);
+ // Only increase the index if we are not the last element.
+ if (current + 1 != end) {
+ builder()
+ ->UnaryOperation(Token::INC, feedback_index(index_slot.Get()))
+ .StoreAccumulatorInRegister(index);
+ }
+ } else {
+ // literal.length = ++index
+ // length_slot is only used when there are holes.
+ auto length = ast_string_constants()->length_string();
+ builder()
+ ->LoadAccumulatorWithRegister(index)
+ .UnaryOperation(Token::INC, feedback_index(index_slot.Get()))
+ .StoreAccumulatorInRegister(index)
+ .StoreNamedProperty(array, length, feedback_index(length_slot.Get()),
+ LanguageMode::kStrict);
+ }
+ }
+
+ builder()->LoadAccumulatorWithRegister(array);
+}
+
+void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ expr->InitDepthAndFlags();
+ BuildCreateArrayLiteral(expr->values(), expr);
+}
+
void BytecodeGenerator::VisitStoreInArrayLiteral(StoreInArrayLiteral* expr) {
builder()->SetExpressionAsStatementPosition(expr);
RegisterAllocationScope register_scope(this);
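Taken together, the new BuildCreateArrayLiteral picks one of three ways to seed the array before a common append loop: a leading spread seeds it via CreateArrayFromIterable, a (possibly empty) constant prefix seeds it from a boilerplate or the empty-array fast path, and callers that pass expr == nullptr (VisitCallSuper below) start from a plain empty array. A rough, V8-independent sketch of that control flow, printing the bytecode names used in the hunk; everything else is illustrative:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Element { bool is_spread; int value; };

    void BuildArray(const std::vector<Element>& elements) {
      std::size_t i = 0;
      if (!elements.empty() && elements[0].is_spread) {
        std::puts("CreateArrayFromIterable // leading spread seeds the array");
        ++i;
      } else {
        std::puts("CreateArrayLiteral      // boilerplate seeds the constant prefix");
        while (i < elements.size() && !elements[i].is_spread) ++i;
      }
      for (; i < elements.size(); ++i) {  // append the rest; index kept in a register
        if (elements[i].is_spread) {
          std::puts("BuildArrayLiteralSpread // iterate the spread, appending each value");
        } else {
          std::printf("StoreInArrayLiteral     // array[index++] = %d\n",
                      elements[i].value);
        }
      }
    }

    int main() {
      BuildArray({{false, 1}, {false, 2}, {true, 0}, {false, 3}});  // [1, 2, ...s, 3]
    }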
@@ -2824,13 +2868,7 @@ void BytecodeGenerator::BuildLoadNamedProperty(Property* property,
Register object,
const AstRawString* name) {
if (ShouldOptimizeAsOneShot()) {
- RegisterList args = register_allocator()->NewRegisterList(2);
- size_t name_index = builder()->GetConstantPoolEntry(name);
- builder()
- ->MoveRegister(object, args[0])
- .LoadConstantPoolEntry(name_index)
- .StoreAccumulatorInRegister(args[1])
- .CallRuntime(Runtime::kInlineGetProperty, args);
+ builder()->LoadNamedPropertyNoFeedback(object, name);
} else {
FeedbackSlot slot = GetCachedLoadICSlot(property->obj(), name);
builder()->LoadNamedProperty(object, name, feedback_index(slot));
@@ -2847,16 +2885,7 @@ void BytecodeGenerator::BuildStoreNamedProperty(Property* property,
}
if (ShouldOptimizeAsOneShot()) {
- RegisterList args = register_allocator()->NewRegisterList(4);
- size_t name_index = builder()->GetConstantPoolEntry(name);
- builder()
- ->MoveRegister(object, args[0])
- .StoreAccumulatorInRegister(args[2])
- .LoadConstantPoolEntry(name_index)
- .StoreAccumulatorInRegister(args[1])
- .LoadLiteral(Smi::FromEnum(language_mode()))
- .StoreAccumulatorInRegister(args[3])
- .CallRuntime(Runtime::kSetProperty, args);
+ builder()->StoreNamedPropertyNoFeedback(object, name, language_mode());
} else {
FeedbackSlot slot = GetCachedStoreICSlot(property->obj(), name);
builder()->StoreNamedProperty(object, name, feedback_index(slot),
@@ -3555,6 +3584,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
// When a call contains a spread, a Call AST node is only created if there is
// exactly one spread, and it is the last argument.
bool is_spread_call = expr->only_last_arg_is_spread();
+ bool optimize_as_one_shot = ShouldOptimizeAsOneShot();
// TODO(petermarshall): We have a lot of call bytecodes that are very similar,
// see if we can reduce the number by adding a separate argument which
@@ -3579,7 +3609,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
}
case Call::GLOBAL_CALL: {
// Receiver is undefined for global calls.
- if (!is_spread_call) {
+ if (!is_spread_call && !optimize_as_one_shot) {
implicit_undefined_receiver = true;
} else {
// TODO(leszeks): There's no special bytecode for tail calls or spread
@@ -3615,7 +3645,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
}
case Call::OTHER_CALL: {
// Receiver is undefined for other calls.
- if (!is_spread_call) {
+ if (!is_spread_call && !optimize_as_one_shot) {
implicit_undefined_receiver = true;
} else {
// TODO(leszeks): There's no special bytecode for tail calls or spread
@@ -3679,20 +3709,25 @@ void BytecodeGenerator::VisitCall(Call* expr) {
builder()->SetExpressionPosition(expr);
- int feedback_slot_index = feedback_index(feedback_spec()->AddCallICSlot());
-
if (is_spread_call) {
DCHECK(!implicit_undefined_receiver);
- builder()->CallWithSpread(callee, args, feedback_slot_index);
+ builder()->CallWithSpread(callee, args,
+ feedback_index(feedback_spec()->AddCallICSlot()));
+ } else if (optimize_as_one_shot) {
+ DCHECK(!implicit_undefined_receiver);
+ builder()->CallNoFeedback(callee, args);
} else if (call_type == Call::NAMED_PROPERTY_CALL ||
call_type == Call::KEYED_PROPERTY_CALL ||
call_type == Call::RESOLVED_PROPERTY_CALL) {
DCHECK(!implicit_undefined_receiver);
- builder()->CallProperty(callee, args, feedback_slot_index);
+ builder()->CallProperty(callee, args,
+ feedback_index(feedback_spec()->AddCallICSlot()));
} else if (implicit_undefined_receiver) {
- builder()->CallUndefinedReceiver(callee, args, feedback_slot_index);
+ builder()->CallUndefinedReceiver(
+ callee, args, feedback_index(feedback_spec()->AddCallICSlot()));
} else {
- builder()->CallAnyReceiver(callee, args, feedback_slot_index);
+ builder()->CallAnyReceiver(
+ callee, args, feedback_index(feedback_spec()->AddCallICSlot()));
}
}
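Net effect of the VisitCall changes: one-shot code no longer allocates call feedback slots at all. Combined with the property hunks above, the one-shot lowerings in this patch are roughly:

    named load   (was CallRuntime kInlineGetProperty)  ->  LdaNamedPropertyNoFeedback
    named store  (was CallRuntime kSetProperty)        ->  StaNamedPropertyNoFeedback
    call         (was Call* with a feedback slot)      ->  CallNoFeedback

Because CallNoFeedback dispatches with ConvertReceiverMode::kAny (see the bytecodes.h hunk below), one-shot global and other calls can no longer use the implicit-undefined-receiver encoding, which is why those two cases now also test !optimize_as_one_shot.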
@@ -3722,17 +3757,12 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// mechanism for spreads in array literals.
// First generate the array containing all arguments.
- Register array = register_allocator()->NewRegister();
- int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
- builder()
- ->CreateEmptyArrayLiteral(literal_index)
- .StoreAccumulatorInRegister(array);
- BuildArrayLiteralElementsInsertion(array, first_spread_index, args, false);
+ BuildCreateArrayLiteral(args, nullptr);
// Now pass that array to %reflect_construct.
RegisterList construct_args = register_allocator()->NewRegisterList(3);
+ builder()->StoreAccumulatorInRegister(construct_args[1]);
builder()->MoveRegister(constructor, construct_args[0]);
- builder()->MoveRegister(array, construct_args[1]);
VisitForRegisterValue(super->new_target_var(), construct_args[2]);
builder()->CallJSRuntime(Context::REFLECT_CONSTRUCT_INDEX, construct_args);
} else {
@@ -5175,11 +5205,7 @@ FeedbackSlot BytecodeGenerator::GetCachedCreateClosureSlot(
}
FeedbackSlot BytecodeGenerator::GetDummyCompareICSlot() {
- if (!dummy_feedback_slot_.IsInvalid()) {
- return dummy_feedback_slot_;
- }
- dummy_feedback_slot_ = feedback_spec()->AddCompareICSlot();
- return dummy_feedback_slot_;
+ return dummy_feedback_slot_.Get();
}
Runtime::FunctionId BytecodeGenerator::StoreToSuperRuntimeId() {
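GetDummyCompareICSlot() now just forwards to a SharedFeedbackSlot, which, per its other uses in this patch, allocates its FeedbackSlot on the first Get() and returns the same slot thereafter. A hypothetical minimal equivalent, with int standing in for FeedbackSlot and -1 for FeedbackSlot::Invalid():

    #include <cassert>
    #include <functional>

    class LazySlot {  // sketch only; not the real SharedFeedbackSlot API
     public:
      explicit LazySlot(std::function<int()> allocate)
          : allocate_(std::move(allocate)) {}
      int Get() {
        if (slot_ < 0) slot_ = allocate_();  // first use allocates the slot
        return slot_;
      }
     private:
      std::function<int()> allocate_;
      int slot_ = -1;
    };

    int main() {
      int next_slot = 7;
      LazySlot dummy([&] { return next_slot++; });
      assert(dummy.Get() == 7);
      assert(dummy.Get() == 7);  // cached: no second allocation
    }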
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 47f1f83e12..3150245b0b 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -6,6 +6,7 @@
#define V8_INTERPRETER_BYTECODE_GENERATOR_H_
#include "src/ast/ast.h"
+#include "src/feedback-vector.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register.h"
@@ -182,11 +183,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildArrayLiteralSpread(Spread* spread, Register array, Register index,
FeedbackSlot index_slot,
FeedbackSlot element_slot);
- void BuildArrayLiteralElementsInsertion(Register array,
- int first_spread_index,
- ZonePtrList<Expression>* elements,
- bool skip_constants);
-
+ // Create Array literals. |expr| can be nullptr, but if provided,
+ // a boilerplate will be used to create an initial array for elements
+ // before the first spread.
+ void BuildCreateArrayLiteral(ZonePtrList<Expression>* elements,
+ ArrayLiteral* expr);
void BuildCreateObjectLiteral(Register literal, uint8_t flags, size_t entry);
void AllocateTopLevelRegisters();
void VisitArgumentsObject(Variable* variable);
@@ -373,7 +374,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Dummy feedback slot for compare operations, where we don't care about
// feedback
- FeedbackSlot dummy_feedback_slot_;
+ SharedFeedbackSlot dummy_feedback_slot_;
BytecodeJumpTable* generator_jump_table_;
int suspend_count_;
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index 04d1e35821..a2730cb64c 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -145,6 +145,19 @@ class BytecodeOperands : public AllStatic {
0 OPERAND_SCALE_LIST(OPERAND_SCALE_COUNT);
#undef OPERAND_SCALE_COUNT
+ static int OperandScaleAsIndex(OperandScale operand_scale) {
+ switch (operand_scale) {
+ case OperandScale::kSingle:
+ return 0;
+ case OperandScale::kDouble:
+ return 1;
+ case OperandScale::kQuadruple:
+ return 2;
+ default:
+ UNREACHABLE();
+ }
+ }
+
// Returns true if |accumulator_use| reads the accumulator.
static constexpr bool ReadsAccumulator(AccumulatorUse accumulator_use) {
return accumulator_use == AccumulatorUse::kRead ||
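The new OperandScaleAsIndex() maps the three operand scales onto consecutive indices 0..2; interpreter.cc below uses it as a table stride, both for the dispatch table (kEntriesPerOperandScale = 256, one byte of bytecode values) and for the generated bytecode-to-builtin mapping. A self-contained check of the dispatch-table arithmetic; the enum values mirror the real prefix scales, and the bytecode value 42 is arbitrary:

    #include <cassert>

    enum class OperandScale { kSingle = 1, kDouble = 2, kQuadruple = 4 };

    int OperandScaleAsIndex(OperandScale s) {
      switch (s) {
        case OperandScale::kSingle:    return 0;
        case OperandScale::kDouble:    return 1;
        case OperandScale::kQuadruple: return 2;
      }
      return -1;  // unreachable for valid scales
    }

    int main() {
      const int kEntriesPerOperandScale = 256;
      int bytecode = 42;
      int index = bytecode + OperandScaleAsIndex(OperandScale::kQuadruple) *
                                 kEntriesPerOperandScale;
      assert(index == 554);  // third 256-entry block of the dispatch table
    }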
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index 8509bd43e0..b270e3d38b 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -19,7 +19,7 @@ class BytecodeRegisterAllocator final {
// Enables observation of register allocation and free events.
class Observer {
public:
- virtual ~Observer() {}
+ virtual ~Observer() = default;
virtual void RegisterAllocateEvent(Register reg) = 0;
virtual void RegisterListAllocateEvent(RegisterList reg_list) = 0;
virtual void RegisterListFreeEvent(RegisterList reg_list) = 0;
@@ -29,7 +29,7 @@ class BytecodeRegisterAllocator final {
: next_register_index_(start_index),
max_register_count_(start_index),
observer_(nullptr) {}
- ~BytecodeRegisterAllocator() {}
+ ~BytecodeRegisterAllocator() = default;
// Returns a new register.
Register NewRegister() {
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index 11794274b9..7ba7d3b602 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -23,8 +23,8 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
public:
class BytecodeWriter {
public:
- BytecodeWriter() {}
- virtual ~BytecodeWriter() {}
+ BytecodeWriter() = default;
+ virtual ~BytecodeWriter() = default;
// Called to emit a register transfer bytecode.
virtual void EmitLdar(Register input) = 0;
@@ -39,7 +39,7 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
BytecodeRegisterAllocator* register_allocator,
int fixed_registers_count, int parameter_count,
BytecodeWriter* bytecode_writer);
- virtual ~BytecodeRegisterOptimizer() {}
+ ~BytecodeRegisterOptimizer() override = default;
// Perform explicit register transfer operations.
void DoLdar(Register input) {
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 88cdae6ce5..60f30ee1d9 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -107,14 +107,13 @@ const char* Bytecodes::ToString(Bytecode bytecode) {
}
// static
-std::string Bytecodes::ToString(Bytecode bytecode, OperandScale operand_scale) {
- static const char kSeparator = '.';
-
+std::string Bytecodes::ToString(Bytecode bytecode, OperandScale operand_scale,
+ const char* separator) {
std::string value(ToString(bytecode));
if (operand_scale > OperandScale::kSingle) {
Bytecode prefix_bytecode = OperandScaleToPrefixBytecode(operand_scale);
std::string suffix = ToString(prefix_bytecode);
- return value.append(1, kSeparator).append(suffix);
+ return value.append(separator).append(suffix);
} else {
return value;
}
@@ -284,6 +283,7 @@ bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
case Bytecode::kDec:
case Bytecode::kTypeOf:
case Bytecode::kCallAnyReceiver:
+ case Bytecode::kCallNoFeedback:
case Bytecode::kCallProperty:
case Bytecode::kCallProperty0:
case Bytecode::kCallProperty1:
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 0e543877f7..39f61eb9bd 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -98,6 +98,8 @@ namespace interpreter {
/* Property loads (LoadIC) operations */ \
V(LdaNamedProperty, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
+ V(LdaNamedPropertyNoFeedback, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kIdx) \
V(LdaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx) \
\
@@ -110,6 +112,8 @@ namespace interpreter {
/* Property stores (StoreIC) operations */ \
V(StaNamedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
+ V(StaNamedPropertyNoFeedback, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kFlag8) \
V(StaNamedOwnProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
V(StaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
@@ -194,6 +198,8 @@ namespace interpreter {
OperandType::kReg, OperandType::kIdx) \
V(CallUndefinedReceiver2, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
+ V(CallNoFeedback, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kRegList, OperandType::kRegCount) \
V(CallWithSpread, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
@@ -247,6 +253,7 @@ namespace interpreter {
OperandType::kIdx, OperandType::kFlag8) \
V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
OperandType::kIdx, OperandType::kFlag8) \
+ V(CreateArrayFromIterable, AccumulatorUse::kReadWrite) \
V(CreateEmptyArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx) \
V(CreateObjectLiteral, AccumulatorUse::kNone, OperandType::kIdx, \
OperandType::kIdx, OperandType::kFlag8, OperandType::kRegOut) \
@@ -468,8 +475,10 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// Returns string representation of |bytecode|.
static const char* ToString(Bytecode bytecode);
- // Returns string representation of |bytecode|.
- static std::string ToString(Bytecode bytecode, OperandScale operand_scale);
+ // Returns string representation of |bytecode| combined with |operand_scale|
+ // using the optionally provided |separator|.
+ static std::string ToString(Bytecode bytecode, OperandScale operand_scale,
+ const char* separator = ".");
// Returns byte value of bytecode.
static uint8_t ToByte(Bytecode bytecode) {
@@ -664,6 +673,7 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
bytecode == Bytecode::kCallUndefinedReceiver0 ||
bytecode == Bytecode::kCallUndefinedReceiver1 ||
bytecode == Bytecode::kCallUndefinedReceiver2 ||
+ bytecode == Bytecode::kCallNoFeedback ||
bytecode == Bytecode::kConstruct ||
bytecode == Bytecode::kCallWithSpread ||
bytecode == Bytecode::kConstructWithSpread ||
@@ -684,12 +694,6 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
bytecode == Bytecode::kDebugBreakWide;
}
- // Returns true if the bytecode can be lazily deserialized.
- static constexpr bool IsLazy(Bytecode bytecode) {
- // Currently, all handlers are deserialized lazily.
- return true;
- }
-
// Returns true if the bytecode returns.
static constexpr bool Returns(Bytecode bytecode) {
#define OR_BYTECODE(NAME) || bytecode == Bytecode::k##NAME
@@ -801,6 +805,7 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
case Bytecode::kCallJSRuntime:
return ConvertReceiverMode::kNullOrUndefined;
case Bytecode::kCallAnyReceiver:
+ case Bytecode::kCallNoFeedback:
case Bytecode::kConstruct:
case Bytecode::kCallWithSpread:
case Bytecode::kConstructWithSpread:
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 3f3d38ce6e..f06983abfa 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -36,7 +36,7 @@ namespace interpreter {
// interpreter. Each instance of this class is intended to be used to
// generate exactly one FixedArray of constants via the ToFixedArray
// method.
-class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
public:
// Capacity of the 8-bit operand slice.
static const size_t k8BitCapacity = 1u << kBitsPerByte;
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 405e81bc76..fdc57776a8 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -16,11 +16,11 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class V8_EXPORT_PRIVATE ControlFlowBuilder BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE ControlFlowBuilder {
public:
explicit ControlFlowBuilder(BytecodeArrayBuilder* builder)
: builder_(builder) {}
- virtual ~ControlFlowBuilder() {}
+ virtual ~ControlFlowBuilder() = default;
protected:
BytecodeArrayBuilder* builder() const { return builder_; }
@@ -41,7 +41,7 @@ class V8_EXPORT_PRIVATE BreakableControlFlowBuilder
break_labels_(builder->zone()),
node_(node),
block_coverage_builder_(block_coverage_builder) {}
- virtual ~BreakableControlFlowBuilder();
+ ~BreakableControlFlowBuilder() override;
// This method is called when visiting break statements in the AST.
// Inserts a jump to an unbound label that is patched when the corresponding
@@ -113,7 +113,7 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
node, SourceRangeKind::kBody);
}
}
- ~LoopBuilder();
+ ~LoopBuilder() override;
void LoopHeader();
void LoopBody();
@@ -149,7 +149,7 @@ class V8_EXPORT_PRIVATE SwitchBuilder final
case_sites_(builder->zone()) {
case_sites_.resize(number_of_cases);
}
- ~SwitchBuilder();
+ ~SwitchBuilder() override; // NOLINT (modernize-use-equals-default)
// This method should be called by the SwitchBuilder owner when the case
// statement with |index| is emitted to update the case jump site.
@@ -186,7 +186,7 @@ class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
block_coverage_builder_(block_coverage_builder),
statement_(statement) {}
- ~TryCatchBuilder();
+ ~TryCatchBuilder() override;
void BeginTry(Register context);
void EndTry();
@@ -217,7 +217,7 @@ class V8_EXPORT_PRIVATE TryFinallyBuilder final : public ControlFlowBuilder {
block_coverage_builder_(block_coverage_builder),
statement_(statement) {}
- ~TryFinallyBuilder();
+ ~TryFinallyBuilder() override;
void BeginTry(Register context);
void LeaveTry();
@@ -260,7 +260,7 @@ class V8_EXPORT_PRIVATE ConditionalControlFlowBuilder final
node, SourceRangeKind::kElse);
}
}
- ~ConditionalControlFlowBuilder();
+ ~ConditionalControlFlowBuilder() override;
BytecodeLabels* then_labels() { return &then_labels_; }
BytecodeLabels* else_labels() { return &else_labels_; }
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 021fefad29..029c8dd1a6 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -21,7 +21,7 @@ class Isolate;
namespace interpreter {
// A helper class for constructing exception handler tables for the interpreter.
-class V8_EXPORT_PRIVATE HandlerTableBuilder final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE HandlerTableBuilder final {
public:
explicit HandlerTableBuilder(Zone* zone);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 15e2b1f091..cc8dfb1a30 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -744,12 +744,12 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
feedback,
HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
GotoIf(is_uninitialized, &initialize);
- CSA_ASSERT(this, IsWeakOrClearedHeapObject(feedback));
+ CSA_ASSERT(this, IsWeakOrCleared(feedback));
// If the weak reference is cleared, we have a new chance to become
// monomorphic.
Comment("check if weak reference is cleared");
- Branch(IsClearedWeakHeapObject(feedback), &initialize, &mark_megamorphic);
+ Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
BIND(&initialize);
{
@@ -803,7 +803,7 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
Comment("transition to megamorphic");
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+ DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kmegamorphic_symbol));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
@@ -948,12 +948,12 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
GotoIf(is_megamorphic, &construct);
Comment("check if weak reference");
- GotoIfNot(IsWeakOrClearedHeapObject(feedback), &check_allocation_site);
+ GotoIfNot(IsWeakOrCleared(feedback), &check_allocation_site);
// If the weak reference is cleared, we have a new chance to become
// monomorphic.
Comment("check if weak reference is cleared");
- Branch(IsClearedWeakHeapObject(feedback), &initialize, &mark_megamorphic);
+ Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
BIND(&check_allocation_site);
{
@@ -976,7 +976,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
// Check if it is uninitialized.
Comment("check if uninitialized");
Node* is_uninitialized =
- WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex));
+ WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol));
Branch(is_uninitialized, &initialize, &mark_megamorphic);
}
@@ -1054,7 +1054,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
Comment("transition to megamorphic");
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+ DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kmegamorphic_symbol));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
@@ -1074,8 +1074,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
isolate(), InterpreterPushArgsMode::kArrayFunction);
Node* code_target = HeapConstant(callable.code());
var_result.Bind(CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), new_target, target,
- var_site.value(), args.base_reg_location()));
+ args.reg_count(), args.base_reg_location(), target,
+ new_target, var_site.value()));
Goto(&return_result);
}
@@ -1087,8 +1087,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
isolate(), InterpreterPushArgsMode::kOther);
Node* code_target = HeapConstant(callable.code());
var_result.Bind(CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), new_target, target,
- UndefinedConstant(), args.base_reg_location()));
+ args.reg_count(), args.base_reg_location(), target,
+ new_target, UndefinedConstant()));
Goto(&return_result);
}
@@ -1127,19 +1127,19 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
GotoIf(is_megamorphic, &construct);
Comment("check if weak reference");
- GotoIfNot(IsWeakOrClearedHeapObject(feedback), &check_initialized);
+ GotoIfNot(IsWeakOrCleared(feedback), &check_initialized);
// If the weak reference is cleared, we have a new chance to become
// monomorphic.
Comment("check if weak reference is cleared");
- Branch(IsClearedWeakHeapObject(feedback), &initialize, &mark_megamorphic);
+ Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
BIND(&check_initialized);
{
// Check if it is uninitialized.
Comment("check if uninitialized");
Node* is_uninitialized =
- WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex));
+ WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol));
Branch(is_uninitialized, &initialize, &mark_megamorphic);
}
@@ -1195,7 +1195,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
Comment("transition to megamorphic");
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+ DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kmegamorphic_symbol));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
@@ -1212,8 +1212,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
isolate(), InterpreterPushArgsMode::kWithFinalSpread);
Node* code_target = HeapConstant(callable.code());
return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
- new_target, target, UndefinedConstant(),
- args.base_reg_location());
+ args.base_reg_location(), target, new_target,
+ UndefinedConstant());
}
Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
@@ -1705,7 +1705,7 @@ Node* InterpreterAssembler::ImportRegisterFile(
StoreRegister(value, reg_index);
StoreFixedArrayElement(array, array_index,
- LoadRoot(Heap::kStaleRegisterRootIndex));
+ LoadRoot(RootIndex::kStaleRegister));
var_index = IntPtrAdd(index, IntPtrConstant(1));
Goto(&loop);
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index afca2a8a32..d2dab6d8d8 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -9,6 +9,7 @@
#include "src/builtins/builtins-arguments-gen.h"
#include "src/builtins/builtins-constructor-gen.h"
+#include "src/builtins/builtins-iterator-gen.h"
#include "src/code-events.h"
#include "src/code-factory.h"
#include "src/debug/debug.h"
@@ -517,6 +518,18 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
}
}
+// LdaNamedPropertyNoFeedback <object> <name_index>
+//
+// Calls the GetProperty builtin for <object> and the name at <name_index>.
+IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) {
+ Node* object = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(1);
+ Node* context = GetContext();
+ Node* result = CallBuiltin(Builtins::kGetProperty, context, object, name);
+ SetAccumulator(result);
+ Dispatch();
+}
+
// KeyedLoadIC <object> <slot>
//
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
@@ -582,6 +595,24 @@ IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
StaNamedProperty(ic);
}
+// StaNamedPropertyNoFeedback <object> <name_index>
+//
+// Calls Runtime::kSetNamedProperty for <object> and the name at constant pool
+// entry <name_index> with the value in the accumulator.
+IGNITION_HANDLER(StaNamedPropertyNoFeedback,
+ InterpreterStoreNamedPropertyAssembler) {
+ Node* object = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(1);
+ Node* value = GetAccumulator();
+ Node* language_mode = SmiFromInt32(BytecodeOperandFlag(2));
+ Node* context = GetContext();
+
+ Node* result = CallRuntime(Runtime::kSetNamedProperty, context, object, name,
+ value, language_mode);
+ SetAccumulator(result);
+ Dispatch();
+}
+
// StaKeyedProperty <object> <key> <slot>
//
// Calls the KeyedStoreIC at FeedbackVector slot <slot> for <object> and
@@ -1122,7 +1153,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
OperandScale operand_scale)
: InterpreterAssembler(state, bytecode, operand_scale) {}
- virtual ~UnaryNumericOpAssembler() {}
+ virtual ~UnaryNumericOpAssembler() = default;
// Must return a tagged value.
virtual TNode<Number> SmiOp(TNode<Smi> smi_value, Variable* var_feedback,
@@ -1273,7 +1304,7 @@ IGNITION_HANDLER(Negate, NegateAssemblerImpl) { UnaryOpWithFeedback(); }
IGNITION_HANDLER(ToName, InterpreterAssembler) {
Node* object = GetAccumulator();
Node* context = GetContext();
- Node* result = ToName(context, object);
+ Node* result = CallBuiltin(Builtins::kToName, context, object);
StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
@@ -1501,6 +1532,16 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
CallJSAndDispatch(function, context, args, receiver_mode);
}
+ // Generates code to perform a JS call without collecting feedback.
+ void JSCallNoFeedback(ConvertReceiverMode receiver_mode) {
+ Node* function = LoadRegisterAtOperandIndex(0);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
+ Node* context = GetContext();
+
+ // Call the function and dispatch to the next handler.
+ CallJSAndDispatch(function, context, args, receiver_mode);
+ }
+
// Generates code to perform a JS call with a known number of arguments that
// collects type feedback.
void JSCallN(int arg_count, ConvertReceiverMode receiver_mode) {
@@ -1590,6 +1631,10 @@ IGNITION_HANDLER(CallUndefinedReceiver2, InterpreterJSCallAssembler) {
JSCallN(2, ConvertReceiverMode::kNullOrUndefined);
}
+IGNITION_HANDLER(CallNoFeedback, InterpreterJSCallAssembler) {
+ JSCallNoFeedback(ConvertReceiverMode::kAny);
+}
+
// CallRuntime <function_id> <first_arg> <arg_count>
//
// Call the runtime function |function_id| with the first argument in
@@ -2381,6 +2426,18 @@ IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
Dispatch();
}
+// CreateArrayFromIterable
+//
+// Spread the given iterable from the accumulator into a new JSArray.
+IGNITION_HANDLER(CreateArrayFromIterable, InterpreterAssembler) {
+ Node* iterable = GetAccumulator();
+ Node* context = GetContext();
+ Node* result =
+ CallBuiltin(Builtins::kIterableToListWithSymbolLookup, context, iterable);
+ SetAccumulator(result);
+ Dispatch();
+}
+
// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
// Creates an object literal for literal index <literal_idx> with
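CreateArrayFromIterable delegates to the IterableToListWithSymbolLookup builtin, so a leading spread goes through the full iteration protocol, Symbol.iterator lookup included, to materialize the initial array. Per the BuildCreateArrayLiteral hunk above, a literal such as [...it, 1] is therefore lowered roughly as:

    <evaluate it into the accumulator>
    CreateArrayFromIterable          ; accumulator <- array built by iterating it
    Star array                       ; register assignment is illustrative
    LdaNamedProperty array, length   ; running index starts at the spread's length
    ...                              ; StoreInArrayLiteral for the trailing 1

Only the bytecode names appear in this patch; the exact register moves here are illustrative.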
@@ -3128,7 +3185,8 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
OperandScale operand_scale,
- int builtin_index) {
+ int builtin_index,
+ const AssemblerOptions& options) {
Zone zone(isolate->allocator(), ZONE_NAME);
compiler::CodeAssemblerState state(
isolate, &zone, InterpreterDispatchDescriptor{}, Code::BYTECODE_HANDLER,
@@ -3147,8 +3205,7 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
#undef CALL_GENERATOR
}
- Handle<Code> code = compiler::CodeAssembler::GenerateCode(
- &state, AssemblerOptions::Default(isolate));
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state, options);
PROFILE(isolate, CodeCreateEvent(
CodeEventListener::BYTECODE_HANDLER_TAG,
AbstractCode::cast(*code),
@@ -3195,7 +3252,9 @@ class DeserializeLazyAssembler : public InterpreterAssembler {
} // namespace
Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
- OperandScale operand_scale) {
+ OperandScale operand_scale,
+ int builtin_index,
+ const AssemblerOptions& options) {
Zone zone(isolate->allocator(), ZONE_NAME);
std::string debug_name = std::string("DeserializeLazy");
@@ -3210,11 +3269,11 @@ Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
debug_name.c_str(),
FLAG_untrusted_code_mitigations
? PoisoningMitigationLevel::kPoisonCriticalOnly
- : PoisoningMitigationLevel::kDontPoison);
+ : PoisoningMitigationLevel::kDontPoison,
+ 0, builtin_index);
DeserializeLazyAssembler::Generate(&state, operand_scale);
- Handle<Code> code = compiler::CodeAssembler::GenerateCode(
- &state, AssemblerOptions::Default(isolate));
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state, options);
PROFILE(isolate,
CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
AbstractCode::cast(*code), debug_name.c_str()));
diff --git a/deps/v8/src/interpreter/interpreter-generator.h b/deps/v8/src/interpreter/interpreter-generator.h
index bc3793a45f..a41e89f250 100644
--- a/deps/v8/src/interpreter/interpreter-generator.h
+++ b/deps/v8/src/interpreter/interpreter-generator.h
@@ -10,14 +10,19 @@
namespace v8 {
namespace internal {
+
+struct AssemblerOptions;
+
namespace interpreter {
extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
OperandScale operand_scale,
- int builtin_index);
+ int builtin_index,
+ const AssemblerOptions& options);
-extern Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
- OperandScale operand_scale);
+extern Handle<Code> GenerateDeserializeLazyHandler(
+ Isolate* isolate, OperandScale operand_scale, int builtin_index,
+ const AssemblerOptions& options);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 55e554e2e0..3e261bea9f 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -159,12 +159,6 @@ Node* IntrinsicsGenerator::IsArray(
return IsInstanceType(input, JS_ARRAY_TYPE);
}
-Node* IntrinsicsGenerator::IsJSProxy(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
- return IsInstanceType(input, JS_PROXY_TYPE);
-}
-
Node* IntrinsicsGenerator::IsTypedArray(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
Node* input = __ LoadRegisterFromRegisterList(args, 0);
@@ -212,12 +206,6 @@ Node* IntrinsicsGenerator::HasProperty(
args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
-Node* IntrinsicsGenerator::GetProperty(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kGetProperty));
-}
-
Node* IntrinsicsGenerator::RejectPromise(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
@@ -244,18 +232,6 @@ Node* IntrinsicsGenerator::ToLength(
args, context, Builtins::CallableFor(isolate(), Builtins::kToLength));
}
-Node* IntrinsicsGenerator::ToInteger(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kToInteger));
-}
-
-Node* IntrinsicsGenerator::ToNumber(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kToNumber));
-}
-
Node* IntrinsicsGenerator::ToObject(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
@@ -336,15 +312,6 @@ Node* IntrinsicsGenerator::CreateJSGeneratorObject(
Builtins::kCreateGeneratorObject);
}
-Node* IntrinsicsGenerator::GeneratorGetInputOrDebugPos(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* generator = __ LoadRegisterFromRegisterList(args, 0);
- Node* const value =
- __ LoadObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset);
-
- return value;
-}
-
Node* IntrinsicsGenerator::GeneratorGetResumeMode(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
Node* generator = __ LoadRegisterFromRegisterList(args, 0);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 04f662f0df..608b0afcac 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -19,16 +19,13 @@ namespace interpreter {
V(AsyncGeneratorYield, async_generator_yield, 3) \
V(CreateJSGeneratorObject, create_js_generator_object, 2) \
V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \
- V(GeneratorGetInputOrDebugPos, generator_get_input_or_debug_pos, 1) \
V(GeneratorClose, generator_close, 1) \
V(GetImportMetaObject, get_import_meta_object, 0) \
V(Call, call, -1) \
V(CreateIterResultObject, create_iter_result_object, 2) \
V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
V(HasProperty, has_property, 2) \
- V(GetProperty, get_property, 2) \
V(IsArray, is_array, 1) \
- V(IsJSProxy, is_js_proxy, 1) \
V(IsJSReceiver, is_js_receiver, 1) \
V(IsSmi, is_smi, 1) \
V(IsTypedArray, is_typed_array, 1) \
@@ -36,8 +33,6 @@ namespace interpreter {
V(ResolvePromise, resolve_promise, 2) \
V(ToString, to_string, 1) \
V(ToLength, to_length, 1) \
- V(ToInteger, to_integer, 1) \
- V(ToNumber, to_number, 1) \
V(ToObject, to_object, 1)
class IntrinsicsHelper {
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 0446ed494d..ca53fa674c 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -7,6 +7,7 @@
#include <fstream>
#include <memory>
+#include "builtins-generated/bytecodes-builtins-list.h"
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/compiler.h"
@@ -59,41 +60,47 @@ Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
}
}
+namespace {
+
+int BuiltinIndexFromBytecode(Bytecode bytecode, OperandScale operand_scale) {
+ int index = BytecodeOperands::OperandScaleAsIndex(operand_scale) *
+ kNumberOfBytecodeHandlers +
+ static_cast<int>(bytecode);
+ int offset = kBytecodeToBuiltinsMapping[index];
+ return offset >= 0 ? Builtins::kFirstBytecodeHandler + offset
+ : Builtins::kIllegalHandler;
+}
+
+} // namespace
+
Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler(
Bytecode bytecode, OperandScale operand_scale) {
- Code* code = GetBytecodeHandler(bytecode, operand_scale);
+ int builtin_index = BuiltinIndexFromBytecode(bytecode, operand_scale);
+ Builtins* builtins = isolate_->builtins();
+ Code* code = builtins->builtin(builtin_index);
// Already deserialized? Then just return the handler.
- if (!isolate_->heap()->IsDeserializeLazyHandler(code)) return code;
+ if (!Builtins::IsLazyDeserializer(code)) return code;
- DCHECK(FLAG_lazy_handler_deserialization);
+ DCHECK(FLAG_lazy_deserialization);
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
- code = Snapshot::DeserializeHandler(isolate_, bytecode, operand_scale);
+ code = Snapshot::DeserializeBuiltin(isolate_, builtin_index);
DCHECK(code->IsCode());
DCHECK_EQ(code->kind(), Code::BYTECODE_HANDLER);
- DCHECK(!isolate_->heap()->IsDeserializeLazyHandler(code));
+ DCHECK(!Builtins::IsLazyDeserializer(code));
SetBytecodeHandler(bytecode, operand_scale, code);
return code;
}
-Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale) {
- DCHECK(IsDispatchTableInitialized());
- DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
- size_t index = GetDispatchTableIndex(bytecode, operand_scale);
- Address code_entry = dispatch_table_[index];
- return Code::GetCodeFromTargetAddress(code_entry);
-}
-
void Interpreter::SetBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale,
Code* handler) {
DCHECK(handler->kind() == Code::BYTECODE_HANDLER);
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
- dispatch_table_[index] = handler->entry();
+ dispatch_table_[index] = handler->InstructionStart();
}
// static
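The new BuiltinIndexFromBytecode() helper resolves a (bytecode, scale) pair through the build-time-generated kBytecodeToBuiltinsMapping table, with negative entries denoting bytecodes that have no handler at that scale and therefore fall back to kIllegalHandler. A toy version with hand-made constants; the real table and counts come from bytecodes-builtins-list.h:

    #include <cassert>

    // Hypothetical stand-ins for the generated constants.
    constexpr int kNumberOfBytecodeHandlers = 3;
    constexpr int kFirstBytecodeHandler = 100;
    constexpr int kIllegalHandler = 99;
    constexpr int kBytecodeToBuiltinsMapping[] = {0, 1, -1,    // single-width scale
                                                  2, -1, -1};  // double-width scale

    int BuiltinIndexFromBytecode(int bytecode, int scale_index) {
      int index = scale_index * kNumberOfBytecodeHandlers + bytecode;
      int offset = kBytecodeToBuiltinsMapping[index];
      return offset >= 0 ? kFirstBytecodeHandler + offset : kIllegalHandler;
    }

    int main() {
      assert(BuiltinIndexFromBytecode(1, 0) == 101);  // handler exists
      assert(BuiltinIndexFromBytecode(2, 0) == 99);   // no handler -> illegal
      assert(BuiltinIndexFromBytecode(0, 1) == 102);  // wide variant
    }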
@@ -101,20 +108,17 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
OperandScale operand_scale) {
static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
size_t index = static_cast<size_t>(bytecode);
- switch (operand_scale) {
- case OperandScale::kSingle:
- return index;
- case OperandScale::kDouble:
- return index + kEntriesPerOperandScale;
- case OperandScale::kQuadruple:
- return index + 2 * kEntriesPerOperandScale;
- }
- UNREACHABLE();
+ return index + BytecodeOperands::OperandScaleAsIndex(operand_scale) *
+ kEntriesPerOperandScale;
}
void Interpreter::IterateDispatchTable(RootVisitor* v) {
for (int i = 0; i < kDispatchTableSize; i++) {
Address code_entry = dispatch_table_[i];
+
+ // If the handler is embedded, it is immovable.
+ if (InstructionStream::PcIsOffHeap(isolate_, code_entry)) continue;
+
Object* code = code_entry == kNullAddress
? nullptr
: Code::GetCodeFromTargetAddress(code_entry);
@@ -229,6 +233,52 @@ UnoptimizedCompilationJob* Interpreter::NewCompilationJob(
eager_inner_literals);
}
+void Interpreter::ForEachBytecode(
+ const std::function<void(Bytecode, OperandScale)>& f) {
+ constexpr OperandScale kOperandScales[] = {
+#define VALUE(Name, _) OperandScale::k##Name,
+ OPERAND_SCALE_LIST(VALUE)
+#undef VALUE
+ };
+
+ for (OperandScale operand_scale : kOperandScales) {
+ for (int i = 0; i < Bytecodes::kBytecodeCount; i++) {
+ f(Bytecodes::FromByte(i), operand_scale);
+ }
+ }
+}
+
+void Interpreter::InitializeDispatchTable() {
+ Builtins* builtins = isolate_->builtins();
+ Code* illegal = builtins->builtin(Builtins::kIllegalHandler);
+ int builtin_id = Builtins::kFirstBytecodeHandler;
+ ForEachBytecode([=, &builtin_id](Bytecode bytecode,
+ OperandScale operand_scale) {
+ Code* handler = illegal;
+ if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
+#ifdef DEBUG
+ std::string builtin_name(Builtins::name(builtin_id));
+ std::string expected_name =
+ Bytecodes::ToString(bytecode, operand_scale, "") + "Handler";
+ DCHECK_EQ(expected_name, builtin_name);
+#endif
+ handler = builtins->builtin(builtin_id++);
+ }
+ SetBytecodeHandler(bytecode, operand_scale, handler);
+ });
+ DCHECK(builtin_id == Builtins::builtin_count);
+ DCHECK(IsDispatchTableInitialized());
+
+#if defined(V8_USE_SNAPSHOT) && !defined(V8_USE_SNAPSHOT_WITH_UNWINDING_INFO)
+ if (!isolate_->serializer_enabled() && FLAG_perf_prof_unwinding_info) {
+ StdoutStream{}
+ << "Warning: The --perf-prof-unwinding-info flag can be passed at "
+ "mksnapshot time to get better results."
+ << std::endl;
+ }
+#endif
+}
+
bool Interpreter::IsDispatchTableInitialized() const {
return dispatch_table_[0] != kNullAddress;
}
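InitializeDispatchTable() relies on the bytecode handlers having been registered as builtins in exactly ForEachBytecode order, and the DEBUG block cross-checks that by reconstructing each expected builtin name from the bytecode itself; the new separator argument to Bytecodes::ToString() (see the bytecodes.cc hunk above) exists so the scale suffix can be appended without the usual dot. Assuming LdaSmi keeps its wide variants, the expected names come out as:

    ToString(kLdaSmi, kSingle,    "") + "Handler"  ->  LdaSmiHandler
    ToString(kLdaSmi, kDouble,    "") + "Handler"  ->  LdaSmiWideHandler
    ToString(kLdaSmi, kQuadruple, "") + "Handler"  ->  LdaSmiExtraWideHandler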
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 5ded893798..5023b0ef00 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -36,7 +36,7 @@ class InterpreterAssembler;
class Interpreter {
public:
explicit Interpreter(Isolate* isolate);
- virtual ~Interpreter() {}
+ virtual ~Interpreter() = default;
// Returns the interrupt budget which should be used for the profiler counter.
static int InterruptBudget();
@@ -54,9 +54,6 @@ class Interpreter {
Code* GetAndMaybeDeserializeBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale);
- // Return bytecode handler for |bytecode| and |operand_scale|.
- Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
-
// Set the bytecode handler for |bytecode| and |operand_scale|.
void SetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale,
Code* handler);
@@ -69,6 +66,10 @@ class Interpreter {
V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
+ void ForEachBytecode(const std::function<void(Bytecode, OperandScale)>& f);
+
+ void InitializeDispatchTable();
+
bool IsDispatchTableInitialized() const;
Address dispatch_table_address() {
diff --git a/deps/v8/src/interpreter/setup-interpreter-internal.cc b/deps/v8/src/interpreter/setup-interpreter-internal.cc
deleted file mode 100644
index 8f2b565c00..0000000000
--- a/deps/v8/src/interpreter/setup-interpreter-internal.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/interpreter/setup-interpreter.h"
-
-#include "src/handles-inl.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/interpreter/interpreter-generator.h"
-#include "src/interpreter/interpreter.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-namespace {
-void PrintBuiltinSize(Bytecode bytecode, OperandScale operand_scale,
- Handle<Code> code) {
- PrintF(stdout, "Ignition Handler, %s, %d\n",
- Bytecodes::ToString(bytecode, operand_scale).c_str(),
- code->InstructionSize());
-}
-} // namespace
-
-// static
-void SetupInterpreter::InstallBytecodeHandlers(Interpreter* interpreter) {
- DCHECK(!interpreter->IsDispatchTableInitialized());
- HandleScope scope(interpreter->isolate_);
- // Canonicalize handles, so that we can share constant pool entries pointing
- // to code targets without dereferencing their handles.
- CanonicalHandleScope canonical(interpreter->isolate_);
- Address* dispatch_table = interpreter->dispatch_table_;
-
- // Generate bytecode handlers for all bytecodes and scales.
- const OperandScale kOperandScales[] = {
-#define VALUE(Name, _) OperandScale::k##Name,
- OPERAND_SCALE_LIST(VALUE)
-#undef VALUE
- };
-
- for (OperandScale operand_scale : kOperandScales) {
-#define GENERATE_CODE(Name, ...) \
- InstallBytecodeHandler(interpreter->isolate_, dispatch_table, \
- Bytecode::k##Name, operand_scale);
- BYTECODE_LIST(GENERATE_CODE)
-#undef GENERATE_CODE
- }
-
- // Fill unused entries with the illegal bytecode handler.
- size_t illegal_index = Interpreter::GetDispatchTableIndex(
- Bytecode::kIllegal, OperandScale::kSingle);
- for (size_t index = 0; index < Interpreter::kDispatchTableSize; ++index) {
- if (dispatch_table[index] == kNullAddress) {
- dispatch_table[index] = dispatch_table[illegal_index];
- }
- }
-
- // Generate the DeserializeLazy handlers, one for each operand scale.
- Heap* heap = interpreter->isolate_->heap();
- DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler());
- heap->SetDeserializeLazyHandler(*GenerateDeserializeLazyHandler(
- interpreter->isolate_, OperandScale::kSingle));
- DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler_wide());
- heap->SetDeserializeLazyHandlerWide(*GenerateDeserializeLazyHandler(
- interpreter->isolate_, OperandScale::kDouble));
- DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler_extra_wide());
- heap->SetDeserializeLazyHandlerExtraWide(*GenerateDeserializeLazyHandler(
- interpreter->isolate_, OperandScale::kQuadruple));
-
- // Initialization should have been successful.
- DCHECK(interpreter->IsDispatchTableInitialized());
-}
-
-// static
-void SetupInterpreter::InstallBytecodeHandler(Isolate* isolate,
- Address* dispatch_table,
- Bytecode bytecode,
- OperandScale operand_scale) {
- if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
-
- size_t index = Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
- // Here we explicitly set the bytecode handler to not be a builtin with an
- // index of kNoBuiltinId.
- // TODO(delphick): Use builtins version instead.
- Handle<Code> code = GenerateBytecodeHandler(isolate, bytecode, operand_scale,
- Builtins::kNoBuiltinId);
- dispatch_table[index] = code->entry();
-
- if (FLAG_print_builtin_size) PrintBuiltinSize(bytecode, operand_scale, code);
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- std::string name = Bytecodes::ToString(bytecode, operand_scale);
- code->PrintBuiltinCode(isolate, name.c_str());
- }
-#endif // ENABLE_DISASSEMBLER
-}
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/interpreter/setup-interpreter.h b/deps/v8/src/interpreter/setup-interpreter.h
deleted file mode 100644
index 19b03f7f7f..0000000000
--- a/deps/v8/src/interpreter/setup-interpreter.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INTERPRETER_SETUP_INTERPRETER_H_
-#define V8_INTERPRETER_SETUP_INTERPRETER_H_
-
-#include "src/interpreter/bytecode-operands.h"
-#include "src/interpreter/bytecodes.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-class Interpreter;
-
-class SetupInterpreter {
- public:
- static void InstallBytecodeHandlers(Interpreter* interpreter);
-
- private:
- // Generates handler for given |bytecode| and |operand_scale|
- // and installs it into the |dispatch_table|.
- static void InstallBytecodeHandler(Isolate* isolate, Address* dispatch_table,
- Bytecode bytecode,
- OperandScale operand_scale);
-};
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
-
-#endif // V8_INTERPRETER_SETUP_INTERPRETER_H_
diff --git a/deps/v8/src/intl.h b/deps/v8/src/intl.h
index 5ec5381f40..a2b393bdaa 100644
--- a/deps/v8/src/intl.h
+++ b/deps/v8/src/intl.h
@@ -23,15 +23,15 @@ class TimeZone;
namespace v8 {
namespace internal {
-enum class IcuService {
+enum class ICUService {
kBreakIterator,
kCollator,
kDateFormat,
kNumberFormat,
kPluralRules,
- kResourceBundle,
kRelativeDateTimeFormatter,
- kListFormatter
+ kListFormatter,
+ kSegmenter
};
const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 017032c320..dcbe5bea23 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -11,6 +11,12 @@
namespace v8 {
namespace internal {
+base::AddressRegion Isolate::root_register_addressable_region() {
+ Address start = reinterpret_cast<Address>(this);
+ Address end = heap_.root_register_addressable_end();
+ return base::AddressRegion(start, end - start);
+}
+
bool Isolate::FromWritableHeapObject(HeapObject* obj, Isolate** isolate) {
i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(obj);
if (chunk->owner()->identity() == i::RO_SPACE) {
@@ -58,17 +64,6 @@ bool Isolate::has_pending_exception() {
return !thread_local_top_.pending_exception_->IsTheHole(this);
}
-Object* Isolate::get_wasm_caught_exception() {
- return thread_local_top_.wasm_caught_exception_;
-}
-
-void Isolate::set_wasm_caught_exception(Object* exception) {
- thread_local_top_.wasm_caught_exception_ = exception;
-}
-
-void Isolate::clear_wasm_caught_exception() {
- thread_local_top_.wasm_caught_exception_ = nullptr;
-}
void Isolate::clear_pending_message() {
thread_local_top_.pending_message_obj_ = ReadOnlyRoots(this).the_hole_value();
@@ -190,6 +185,11 @@ bool Isolate::IsArrayIteratorLookupChainIntact() {
return array_iterator_cell->value() == Smi::FromInt(kProtectorValid);
}
+bool Isolate::IsStringIteratorLookupChainIntact() {
+ PropertyCell* string_iterator_cell = heap()->string_iterator_protector();
+ return string_iterator_cell->value() == Smi::FromInt(kProtectorValid);
+}
+
} // namespace internal
} // namespace v8
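
The new IsStringIteratorLookupChainIntact() follows the protector-cell pattern used by the other Is*Intact() predicates here: a PropertyCell starts out holding kProtectorValid, fast paths read it, and the first user modification of String.prototype[Symbol.iterator] (or of its next method) flips it to kProtectorInvalid permanently. A minimal sketch, with a plain struct standing in for the heap-allocated PropertyCell and the constants mirroring those in isolate.h:

    #include <cstdio>

    constexpr int kProtectorValid = 1;
    constexpr int kProtectorInvalid = 0;

    struct ProtectorCell { int value = kProtectorValid; };

    ProtectorCell string_iterator_protector;  // stands in for the heap cell

    bool IsStringIteratorLookupChainIntact() {
      return string_iterator_protector.value == kProtectorValid;
    }

    void InvalidateStringIteratorProtector() {
      string_iterator_protector.value = kProtectorInvalid;
    }

    int main() {
      // The fast path for iterating a primitive string may be taken only
      // while the protector still holds.
      std::printf("intact: %d\n", IsStringIteratorLookupChainIntact());  // 1
      InvalidateStringIteratorProtector();  // user patched Symbol.iterator
      std::printf("intact: %d\n", IsStringIteratorLookupChainIntact());  // 0
    }
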
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index eed52d9c19..94033f446b 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -15,6 +15,7 @@
#include "src/assembler-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/context-slot-cache.h"
+#include "src/ast/scopes.h"
#include "src/base/adapters.h"
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
@@ -28,6 +29,7 @@
#include "src/compilation-statistics.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/debug/debug-frames.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
@@ -42,8 +44,10 @@
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/profiler/tracing-cpu-profiler.h"
#include "src/prototype.h"
#include "src/regexp/regexp-stack.h"
@@ -161,7 +165,6 @@ void ThreadLocalTop::Initialize(Isolate* isolate) {
}
void ThreadLocalTop::Free() {
- wasm_caught_exception_ = nullptr;
// Match unmatched PopPromise calls.
while (promise_on_stack_) isolate_->PopPromise();
}
@@ -252,7 +255,6 @@ void Isolate::IterateThread(ThreadVisitor* v, char* t) {
void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
// Visit the roots from the top for a given thread.
v->VisitRootPointer(Root::kTop, nullptr, &thread->pending_exception_);
- v->VisitRootPointer(Root::kTop, nullptr, &thread->wasm_caught_exception_);
v->VisitRootPointer(Root::kTop, nullptr, &thread->pending_message_obj_);
v->VisitRootPointer(Root::kTop, nullptr,
bit_cast<Object**>(&(thread->context_)));
@@ -409,82 +411,87 @@ class FrameArrayBuilder {
elements_ = isolate->factory()->NewFrameArray(Min(limit, 10));
}
- void AppendStandardFrame(StandardFrame* frame) {
- std::vector<FrameSummary> frames;
- frame->Summarize(&frames);
- // A standard frame may include many summarized frames (due to inlining).
- for (size_t i = frames.size(); i != 0 && !full(); i--) {
- const auto& summ = frames[i - 1];
- if (summ.IsJavaScript()) {
- //====================================================================
- // Handle a JavaScript frame.
- //====================================================================
- const auto& summary = summ.AsJavaScript();
-
- // Filter out internal frames that we do not want to show.
- if (!IsVisibleInStackTrace(summary.function())) continue;
-
- Handle<AbstractCode> abstract_code = summary.abstract_code();
- const int offset = summary.code_offset();
-
- bool is_constructor = summary.is_constructor();
- // Help CallSite::IsConstructor correctly detect hand-written
- // construct stubs.
- if (abstract_code->IsCode() &&
- Code::cast(*abstract_code)->is_construct_stub()) {
- is_constructor = true;
- }
+ void AppendAsyncFrame(Handle<JSGeneratorObject> generator_object) {
+ if (full()) return;
+ Handle<JSFunction> function(generator_object->function(), isolate_);
+ if (!IsVisibleInStackTrace(function)) return;
+ int flags = FrameArray::kIsAsync;
+ if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
- int flags = 0;
- Handle<JSFunction> function = summary.function();
- if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
- if (is_constructor) flags |= FrameArray::kIsConstructor;
-
- elements_ = FrameArray::AppendJSFrame(
- elements_, TheHoleToUndefined(isolate_, summary.receiver()),
- function, abstract_code, offset, flags);
- } else if (summ.IsWasmCompiled()) {
- //====================================================================
- // Handle a WASM compiled frame.
- //====================================================================
- const auto& summary = summ.AsWasmCompiled();
- if (summary.code()->kind() != wasm::WasmCode::kFunction) {
- continue;
- }
- Handle<WasmInstanceObject> instance = summary.wasm_instance();
- int flags = 0;
- if (instance->module_object()->is_asm_js()) {
- flags |= FrameArray::kIsAsmJsWasmFrame;
- if (WasmCompiledFrame::cast(frame)->at_to_number_conversion()) {
- flags |= FrameArray::kAsmJsAtNumberConversion;
- }
- } else {
- flags |= FrameArray::kIsWasmFrame;
- }
+ Handle<Object> receiver(generator_object->receiver(), isolate_);
+ Handle<AbstractCode> code(
+ AbstractCode::cast(function->shared()->GetBytecodeArray()), isolate_);
+ int offset = Smi::ToInt(generator_object->input_or_debug_pos());
+ // The stored bytecode offset is relative to a different base than what
+ // is used in the source position table, hence the subtraction.
+ offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
+ elements_ = FrameArray::AppendJSFrame(elements_, receiver, function, code,
+ offset, flags);
+ }
+
+ bool AppendJavaScriptFrame(
+ FrameSummary::JavaScriptFrameSummary const& summary) {
+ // Filter out internal frames that we do not want to show.
+ if (!IsVisibleInStackTrace(summary.function())) return false;
+
+ Handle<AbstractCode> abstract_code = summary.abstract_code();
+ const int offset = summary.code_offset();
+
+ bool is_constructor = summary.is_constructor();
+ // Help CallSite::IsConstructor correctly detect hand-written
+ // construct stubs.
+ if (abstract_code->IsCode() &&
+ Code::cast(*abstract_code)->is_construct_stub()) {
+ is_constructor = true;
+ }
- elements_ = FrameArray::AppendWasmFrame(
- elements_, instance, summary.function_index(), summary.code(),
- summary.code_offset(), flags);
- } else if (summ.IsWasmInterpreted()) {
- //====================================================================
- // Handle a WASM interpreted frame.
- //====================================================================
- const auto& summary = summ.AsWasmInterpreted();
- Handle<WasmInstanceObject> instance = summary.wasm_instance();
- int flags = FrameArray::kIsWasmInterpretedFrame;
- DCHECK(!instance->module_object()->is_asm_js());
- elements_ = FrameArray::AppendWasmFrame(elements_, instance,
- summary.function_index(), {},
- summary.byte_offset(), flags);
+ int flags = 0;
+ Handle<JSFunction> function = summary.function();
+ if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
+ if (is_constructor) flags |= FrameArray::kIsConstructor;
+
+ elements_ = FrameArray::AppendJSFrame(
+ elements_, TheHoleToUndefined(isolate_, summary.receiver()), function,
+ abstract_code, offset, flags);
+ return true;
+ }
+
+ bool AppendWasmCompiledFrame(
+ FrameSummary::WasmCompiledFrameSummary const& summary) {
+ if (summary.code()->kind() != wasm::WasmCode::kFunction) return false;
+ Handle<WasmInstanceObject> instance = summary.wasm_instance();
+ int flags = 0;
+ if (instance->module_object()->is_asm_js()) {
+ flags |= FrameArray::kIsAsmJsWasmFrame;
+ if (summary.at_to_number_conversion()) {
+ flags |= FrameArray::kAsmJsAtNumberConversion;
}
+ } else {
+ flags |= FrameArray::kIsWasmFrame;
}
+
+ elements_ = FrameArray::AppendWasmFrame(
+ elements_, instance, summary.function_index(), summary.code(),
+ summary.code_offset(), flags);
+ return true;
}
- void AppendBuiltinExitFrame(BuiltinExitFrame* exit_frame) {
+ bool AppendWasmInterpretedFrame(
+ FrameSummary::WasmInterpretedFrameSummary const& summary) {
+ Handle<WasmInstanceObject> instance = summary.wasm_instance();
+ int flags = FrameArray::kIsWasmInterpretedFrame;
+ DCHECK(!instance->module_object()->is_asm_js());
+ elements_ = FrameArray::AppendWasmFrame(elements_, instance,
+ summary.function_index(), {},
+ summary.byte_offset(), flags);
+ return true;
+ }
+
+ bool AppendBuiltinExitFrame(BuiltinExitFrame* exit_frame) {
Handle<JSFunction> function = handle(exit_frame->function(), isolate_);
// Filter out internal frames that we do not want to show.
- if (!IsVisibleInStackTrace(function)) return;
+ if (!IsVisibleInStackTrace(function)) return false;
Handle<Object> receiver(exit_frame->receiver(), isolate_);
Handle<Code> code(exit_frame->LookupCode(), isolate_);
@@ -498,6 +505,8 @@ class FrameArrayBuilder {
elements_ = FrameArray::AppendJSFrame(elements_, receiver, function,
Handle<AbstractCode>::cast(code),
offset, flags);
+
+ return true;
}
bool full() { return elements_->FrameCount() >= limit_; }
@@ -600,6 +609,89 @@ bool GetStackTraceLimit(Isolate* isolate, int* result) {
}
bool NoExtension(const v8::FunctionCallbackInfo<v8::Value>&) { return false; }
+
+bool IsBuiltinFunction(Isolate* isolate, HeapObject* object,
+ Builtins::Name builtin_index) {
+ if (!object->IsJSFunction()) return false;
+ JSFunction* const function = JSFunction::cast(object);
+ return function->code() == isolate->builtins()->builtin(builtin_index);
+}
+
+void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
+ FrameArrayBuilder* builder) {
+ CHECK_EQ(Promise::kPending, promise->status());
+
+ while (!builder->full()) {
+ // Check that we have exactly one PromiseReaction on the {promise}.
+ if (!promise->reactions()->IsPromiseReaction()) return;
+ Handle<PromiseReaction> reaction(
+ PromiseReaction::cast(promise->reactions()), isolate);
+ if (!reaction->next()->IsSmi()) return;
+
+ // Check if the {reaction} has one of the known async function or
+ // async generator continuations as its fulfill handler.
+ if (IsBuiltinFunction(isolate, reaction->fulfill_handler(),
+ Builtins::kAsyncFunctionAwaitResolveClosure) ||
+ IsBuiltinFunction(isolate, reaction->fulfill_handler(),
+ Builtins::kAsyncGeneratorAwaitResolveClosure) ||
+ IsBuiltinFunction(isolate, reaction->fulfill_handler(),
+ Builtins::kAsyncGeneratorYieldResolveClosure)) {
+ // Now peek into the handlers' AwaitContext to get to
+ // the JSGeneratorObject for the async function.
+ Handle<Context> context(
+ JSFunction::cast(reaction->fulfill_handler())->context(), isolate);
+ Handle<JSGeneratorObject> generator_object(
+ JSGeneratorObject::cast(context->extension()), isolate);
+ CHECK(generator_object->is_suspended());
+
+ // Append async frame corresponding to the {generator_object}.
+ builder->AppendAsyncFrame(generator_object);
+
+ // Try to continue from here.
+ Handle<JSFunction> function(generator_object->function(), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ if (IsAsyncGeneratorFunction(shared->kind())) {
+ Handle<Object> dot_generator_object(
+ generator_object->parameters_and_registers()->get(
+ DeclarationScope::kGeneratorObjectVarIndex +
+ shared->scope_info()->ParameterCount()),
+ isolate);
+ if (!dot_generator_object->IsJSAsyncGeneratorObject()) return;
+ Handle<JSAsyncGeneratorObject> async_generator_object =
+ Handle<JSAsyncGeneratorObject>::cast(dot_generator_object);
+ Handle<AsyncGeneratorRequest> async_generator_request(
+ AsyncGeneratorRequest::cast(async_generator_object->queue()),
+ isolate);
+ promise = handle(JSPromise::cast(async_generator_request->promise()),
+ isolate);
+ } else {
+ CHECK(IsAsyncFunction(shared->kind()));
+ Handle<Object> dot_promise(
+ generator_object->parameters_and_registers()->get(
+ DeclarationScope::kPromiseVarIndex +
+ shared->scope_info()->ParameterCount()),
+ isolate);
+ if (!dot_promise->IsJSPromise()) return;
+ promise = Handle<JSPromise>::cast(dot_promise);
+ }
+ } else {
+ // We have some generic promise chain here, so try to
+ // continue with the chained promise on the reaction
+ // (only works for native promise chains).
+ Handle<HeapObject> promise_or_capability(
+ reaction->promise_or_capability(), isolate);
+ if (promise_or_capability->IsJSPromise()) {
+ promise = Handle<JSPromise>::cast(promise_or_capability);
+ } else {
+ Handle<PromiseCapability> capability =
+ Handle<PromiseCapability>::cast(promise_or_capability);
+ if (!capability->promise()->IsJSPromise()) return;
+ promise = handle(JSPromise::cast(capability->promise()), isolate);
+ }
+ }
+ }
+}
+
} // namespace
Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
@@ -612,28 +704,72 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
FrameArrayBuilder builder(this, mode, limit, caller);
- for (StackFrameIterator iter(this); !iter.done() && !builder.full();
- iter.Advance()) {
- StackFrame* frame = iter.frame();
-
+ // Build the regular stack trace, and remember the last relevant
+ // frame ID and inlined index (for the async stack trace handling
+ // below, which starts from this last frame).
+ int last_frame_index = 0;
+ StackFrame::Id last_frame_id = StackFrame::NO_ID;
+ for (StackFrameIterator it(this); !it.done() && !builder.full();
+ it.Advance()) {
+ StackFrame* const frame = it.frame();
switch (frame->type()) {
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION:
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
case StackFrame::OPTIMIZED:
case StackFrame::INTERPRETED:
case StackFrame::BUILTIN:
- builder.AppendStandardFrame(JavaScriptFrame::cast(frame));
+ case StackFrame::WASM_COMPILED:
+ case StackFrame::WASM_INTERPRETER_ENTRY: {
+ // A standard frame may include many summarized frames (due to
+ // inlining).
+ std::vector<FrameSummary> frames;
+ StandardFrame::cast(frame)->Summarize(&frames);
+ for (size_t i = frames.size(); i-- != 0 && !builder.full();) {
+ const auto& summary = frames[i];
+ if (summary.IsJavaScript()) {
+ //====================================================================
+ // Handle a JavaScript frame.
+ //====================================================================
+ auto const& java_script = summary.AsJavaScript();
+ if (builder.AppendJavaScriptFrame(java_script)) {
+ if (IsAsyncFunction(java_script.function()->shared()->kind())) {
+ last_frame_id = frame->id();
+ last_frame_index = static_cast<int>(i);
+ } else {
+ last_frame_id = StackFrame::NO_ID;
+ last_frame_index = 0;
+ }
+ }
+ } else if (summary.IsWasmCompiled()) {
+ //====================================================================
+ // Handle a WASM compiled frame.
+ //====================================================================
+ auto const& wasm_compiled = summary.AsWasmCompiled();
+ if (builder.AppendWasmCompiledFrame(wasm_compiled)) {
+ last_frame_id = StackFrame::NO_ID;
+ last_frame_index = 0;
+ }
+ } else if (summary.IsWasmInterpreted()) {
+ //====================================================================
+ // Handle a WASM interpreted frame.
+ //====================================================================
+ auto const& wasm_interpreted = summary.AsWasmInterpreted();
+ if (builder.AppendWasmInterpretedFrame(wasm_interpreted)) {
+ last_frame_id = StackFrame::NO_ID;
+ last_frame_index = 0;
+ }
+ }
+ }
break;
+ }
+
case StackFrame::BUILTIN_EXIT:
// BuiltinExitFrames are not standard frames, so they do not have
// Summarize(). However, they may have one JS frame worth showing.
- builder.AppendBuiltinExitFrame(BuiltinExitFrame::cast(frame));
- break;
- case StackFrame::WASM_COMPILED:
- builder.AppendStandardFrame(WasmCompiledFrame::cast(frame));
- break;
- case StackFrame::WASM_INTERPRETER_ENTRY:
- builder.AppendStandardFrame(WasmInterpreterEntryFrame::cast(frame));
+ if (builder.AppendBuiltinExitFrame(BuiltinExitFrame::cast(frame))) {
+ last_frame_id = StackFrame::NO_ID;
+ last_frame_index = 0;
+ }
break;
default:
@@ -641,6 +777,55 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
}
}
+ // If --async-stack-traces is enabled, and we ended on a regular JavaScript
+ // frame above, we can enrich the stack trace with async frames (if this
+ // last frame corresponds to an async function).
+ if (FLAG_async_stack_traces && last_frame_id != StackFrame::NO_ID) {
+ StackFrameIterator it(this);
+ while (it.frame()->id() != last_frame_id) it.Advance();
+ FrameInspector inspector(StandardFrame::cast(it.frame()), last_frame_index,
+ this);
+ FunctionKind const kind = inspector.GetFunction()->shared()->kind();
+ if (IsAsyncGeneratorFunction(kind)) {
+ Handle<Object> const dot_generator_object =
+ inspector.GetExpression(DeclarationScope::kGeneratorObjectVarIndex);
+ if (dot_generator_object->IsUndefined(this)) {
+ // The .generator_object was not yet initialized (i.e. we see a
+ // really early exception in the setup of the async generator).
+ } else {
+ // Check if there's a pending async request on the generator object.
+ Handle<JSAsyncGeneratorObject> async_generator_object =
+ Handle<JSAsyncGeneratorObject>::cast(dot_generator_object);
+ if (!async_generator_object->queue()->IsUndefined(this)) {
+ // Take the promise from the first async generator request.
+ Handle<AsyncGeneratorRequest> request(
+ AsyncGeneratorRequest::cast(async_generator_object->queue()),
+ this);
+
+ // We can start collecting an async stack trace from the
+ // promise on the {request}.
+ Handle<JSPromise> promise(JSPromise::cast(request->promise()), this);
+ CaptureAsyncStackTrace(this, promise, &builder);
+ }
+ }
+ } else {
+ DCHECK(IsAsyncFunction(kind));
+ Handle<Object> const dot_promise =
+ inspector.GetExpression(DeclarationScope::kPromiseVarIndex);
+ if (dot_promise->IsJSPromise()) {
+ // We can start collecting an async stack trace from .promise here.
+ CaptureAsyncStackTrace(this, Handle<JSPromise>::cast(dot_promise),
+ &builder);
+ } else {
+ // If .promise was not yet initialized (i.e. we see a really
+ // early exception in the setup of the function), it holds
+ // the value undefined. Sanity check here to make sure that
+ // we're not peeking into the completely wrong stack slot.
+ CHECK(dot_promise->IsUndefined(this));
+ }
+ }
+ }
+
// TODO(yangguo): Queue this structured stack trace for preprocessing on GC.
return factory()->NewJSArrayWithElements(builder.GetElements());
}
@@ -1124,19 +1309,6 @@ void ReportBootstrappingException(Handle<Object> exception,
#endif
}
-bool Isolate::is_catchable_by_wasm(Object* exception) {
- // TODO(titzer): thread WASM features here, or just remove this check?
- if (!FLAG_experimental_wasm_eh) return false;
- if (!is_catchable_by_javascript(exception) || !exception->IsJSError())
- return false;
- HandleScope scope(this);
- Handle<Object> exception_handle(exception, this);
- return JSReceiver::HasProperty(Handle<JSReceiver>::cast(exception_handle),
- factory()->InternalizeUtf8String(
- wasm::WasmException::kRuntimeIdStr))
- .IsJust();
-}
-
Object* Isolate::Throw(Object* raw_exception, MessageLocation* location) {
DCHECK(!has_pending_exception());
@@ -1310,11 +1482,10 @@ Object* Isolate::UnwindAndFindHandler() {
trap_handler::ClearThreadInWasm();
}
- if (!is_catchable_by_wasm(exception)) {
- break;
- }
- int stack_slots = 0; // Will contain stack slot count of frame.
+ // For WebAssembly frames we perform a lookup in the handler table.
+ if (!catchable_by_js) break;
WasmCompiledFrame* wasm_frame = static_cast<WasmCompiledFrame*>(frame);
+ int stack_slots = 0; // Will contain stack slot count of frame.
int offset = wasm_frame->LookupExceptionHandlerInTable(&stack_slots);
if (offset < 0) break;
// Compute the stack pointer from the frame pointer. This ensures that
@@ -1324,10 +1495,10 @@ Object* Isolate::UnwindAndFindHandler() {
stack_slots * kPointerSize;
// This is going to be handled by Wasm, so we need to set the TLS flag
- // again.
+ // again. It was cleared above assuming the frame would be unwound.
trap_handler::SetThreadInWasm();
- set_wasm_caught_exception(exception);
+ // Gather information from the frame.
wasm::WasmCode* wasm_code =
wasm_engine()->code_manager()->LookupCode(frame->pc());
return FoundHandler(nullptr, wasm_code->instruction_start(), offset,
@@ -2229,9 +2400,16 @@ Handle<Context> Isolate::GetIncumbentContext() {
// 1st candidate: most-recently-entered author function's context
// if it's newer than the last Context::BackupIncumbentScope entry.
- if (!it.done() &&
- static_cast<const void*>(it.frame()) >
- static_cast<const void*>(top_backup_incumbent_scope())) {
+ //
+ // NOTE: This code assumes that the stack grows downward.
+ // This code doesn't work with ASAN because ASAN seems to allocate
+ // separate stacks for native C++ code and compiled JS code, so the
+ // following comparison doesn't make sense under ASAN.
+ // TODO(yukishiino): Make the implementation of BackupIncumbentScope more
+ // robust.
+ if (!it.done() && (!top_backup_incumbent_scope() ||
+ it.frame()->sp() < reinterpret_cast<Address>(
+ top_backup_incumbent_scope()))) {
Context* context = Context::cast(it.frame()->context());
return Handle<Context>(context->native_context(), this);
}
@@ -2274,10 +2452,6 @@ char* Isolate::RestoreThread(char* from) {
return from + sizeof(ThreadLocalTop);
}
-Isolate::ThreadDataTable::ThreadDataTable() : table_() {}
-
-Isolate::ThreadDataTable::~ThreadDataTable() {}
-
void Isolate::ReleaseSharedPtrs() {
while (managed_ptr_destructors_head_) {
ManagedPtrDestructor* l = managed_ptr_destructors_head_;
@@ -2313,6 +2487,14 @@ void Isolate::UnregisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
destructor->next_ = nullptr;
}
+void Isolate::SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine) {
+ DCHECK_NULL(wasm_engine_); // Only call once before {Init}.
+ wasm_engine_ = std::move(engine);
+ wasm_engine_->AddIsolate(this);
+ wasm::WasmCodeManager::InstallSamplingGCCallback(this);
+}
+
+// NOLINTNEXTLINE
Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
#if defined(USE_SIMULATOR)
delete simulator_;
@@ -2483,9 +2665,11 @@ Isolate::Isolate()
host_initialize_import_meta_object_callback_(nullptr),
load_start_time_ms_(0),
#ifdef V8_INTL_SUPPORT
+#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
language_singleton_regexp_matcher_(nullptr),
language_tag_regexp_matcher_(nullptr),
language_variant_regexp_matcher_(nullptr),
+#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
default_locale_(""),
#endif // V8_INTL_SUPPORT
serializer_enabled_(false),
@@ -2543,9 +2727,7 @@ Isolate::Isolate()
InitializeLoggingAndCounters();
debug_ = new Debug(this);
- tracing_cpu_profiler_.reset(new TracingCpuProfilerImpl(this));
-
- init_memcopy_functions(this);
+ init_memcopy_functions();
if (FLAG_embedded_builtins) {
#ifdef V8_MULTI_SNAPSHOTS
@@ -2602,20 +2784,27 @@ void Isolate::ClearSerializerData() {
external_reference_map_ = nullptr;
}
+bool Isolate::LogObjectRelocation() {
+ return FLAG_verify_predictable || logger()->is_logging() || is_profiling() ||
+ heap()->isolate()->logger()->is_listening_to_code_events() ||
+ (heap_profiler() != nullptr &&
+ heap_profiler()->is_tracking_object_moves()) ||
+ heap()->has_heap_object_allocation_tracker();
+}
void Isolate::Deinit() {
TRACE_ISOLATE(deinit);
debug()->Unload();
+ wasm_engine()->DeleteCompileJobsOnIsolate(this);
+
if (concurrent_recompilation_enabled()) {
optimizing_compile_dispatcher_->Stop();
delete optimizing_compile_dispatcher_;
optimizing_compile_dispatcher_ = nullptr;
}
- wasm_engine()->DeleteCompileJobsOnIsolate(this);
-
heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted();
@@ -2661,7 +2850,10 @@ void Isolate::Deinit() {
heap_.TearDown();
logger_->TearDown();
- wasm_engine_.reset();
+ if (wasm_engine_) {
+ wasm_engine_->RemoveIsolate(this);
+ wasm_engine_.reset();
+ }
if (FLAG_embedded_builtins) {
if (DefaultEmbeddedBlob() == nullptr && embedded_blob() != nullptr) {
@@ -2683,6 +2875,10 @@ void Isolate::Deinit() {
delete root_index_map_;
root_index_map_ = nullptr;
+ delete compiler_zone_;
+ compiler_zone_ = nullptr;
+ compiler_cache_ = nullptr;
+
ClearSerializerData();
}
@@ -2710,6 +2906,7 @@ Isolate::~Isolate() {
date_cache_ = nullptr;
#ifdef V8_INTL_SUPPORT
+#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
delete language_singleton_regexp_matcher_;
language_singleton_regexp_matcher_ = nullptr;
@@ -2718,6 +2915,7 @@ Isolate::~Isolate() {
delete language_variant_regexp_matcher_;
language_variant_regexp_matcher_ = nullptr;
+#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
#endif // V8_INTL_SUPPORT
delete regexp_stack_;
@@ -2958,6 +3156,7 @@ bool Isolate::Init(StartupDeserializer* des) {
date_cache_ = new DateCache();
heap_profiler_ = new HeapProfiler(heap());
interpreter_ = new interpreter::Interpreter(this);
+
compiler_dispatcher_ =
new CompilerDispatcher(this, V8::GetCurrentPlatform(), FLAG_stack_size);
@@ -2978,9 +3177,9 @@ bool Isolate::Init(StartupDeserializer* des) {
// Setup the wasm engine.
if (wasm_engine_ == nullptr) {
- wasm_engine_ = wasm::WasmEngine::GetWasmEngine();
- wasm::WasmCodeManager::InstallSamplingGCCallback(this);
+ SetWasmEngine(wasm::WasmEngine::GetWasmEngine());
}
+ DCHECK_NOT_NULL(wasm_engine_);
deoptimizer_data_ = new DeoptimizerData(heap());
@@ -3001,6 +3200,10 @@ bool Isolate::Init(StartupDeserializer* des) {
InitializeThreadLocal();
+ // Profiler has to be created after ThreadLocal is initialized
+ // because it makes use of interrupts.
+ tracing_cpu_profiler_.reset(new TracingCpuProfilerImpl(this));
+
bootstrapper_->Initialize(create_heap_objects);
if (FLAG_embedded_builtins) {
@@ -3042,8 +3245,7 @@ bool Isolate::Init(StartupDeserializer* des) {
if (!create_heap_objects) des->DeserializeInto(this);
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
- setup_delegate_->SetupInterpreter(interpreter_);
-
+ interpreter_->InitializeDispatchTable();
heap_.NotifyDeserializationComplete();
}
delete setup_delegate_;
@@ -3069,7 +3271,7 @@ bool Isolate::Init(StartupDeserializer* des) {
if (FLAG_trace_turbo) {
// Create an empty file.
- std::ofstream(GetTurboCfgFileName().c_str(), std::ios_base::trunc);
+ std::ofstream(GetTurboCfgFileName(this).c_str(), std::ios_base::trunc);
}
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
@@ -3227,6 +3429,8 @@ void Isolate::DumpAndResetStats() {
}
if (V8_UNLIKELY(FLAG_runtime_stats ==
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
+ counters()->worker_thread_runtime_call_stats()->AddToMainTable(
+ counters()->runtime_call_stats());
counters()->runtime_call_stats()->Print();
counters()->runtime_call_stats()->Reset();
}
@@ -3257,8 +3461,7 @@ bool Isolate::use_optimizer() {
}
bool Isolate::NeedsDetailedOptimizedCodeLineInfo() const {
- return NeedsSourcePositionsForProfiling() ||
- detailed_source_positions_for_profiling();
+ return NeedsSourcePositionsForProfiling() || FLAG_detailed_line_info;
}
bool Isolate::NeedsSourcePositionsForProfiling() const {
@@ -3552,6 +3755,15 @@ void Isolate::InvalidateArrayIteratorProtector() {
DCHECK(!IsArrayIteratorLookupChainIntact());
}
+void Isolate::InvalidateStringIteratorProtector() {
+ DCHECK(factory()->string_iterator_protector()->value()->IsSmi());
+ DCHECK(IsStringIteratorLookupChainIntact());
+ PropertyCell::SetValueWithInvalidation(
+ this, factory()->string_iterator_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
+ DCHECK(!IsStringIteratorLookupChainIntact());
+}
+
void Isolate::InvalidateArrayBufferNeuteringProtector() {
DCHECK(factory()->array_buffer_neutering_protector()->value()->IsSmi());
DCHECK(IsArrayBufferNeuteringIntact());
@@ -3643,7 +3855,7 @@ ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif
-Handle<Symbol> Isolate::SymbolFor(Heap::RootListIndex dictionary_index,
+Handle<Symbol> Isolate::SymbolFor(RootIndex dictionary_index,
Handle<String> name, bool private_symbol) {
Handle<String> key = factory()->InternalizeString(name);
Handle<NameDictionary> dictionary =
@@ -3657,14 +3869,14 @@ Handle<Symbol> Isolate::SymbolFor(Heap::RootListIndex dictionary_index,
dictionary = NameDictionary::Add(this, dictionary, key, symbol,
PropertyDetails::Empty(), &entry);
switch (dictionary_index) {
- case Heap::kPublicSymbolTableRootIndex:
+ case RootIndex::kPublicSymbolTable:
symbol->set_is_public(true);
heap()->set_public_symbol_table(*dictionary);
break;
- case Heap::kApiSymbolTableRootIndex:
+ case RootIndex::kApiSymbolTable:
heap()->set_api_symbol_table(*dictionary);
break;
- case Heap::kApiPrivateSymbolTableRootIndex:
+ case RootIndex::kApiPrivateSymbolTable:
heap()->set_api_private_symbol_table(*dictionary);
break;
default:
@@ -3725,7 +3937,7 @@ void Isolate::FireCallCompletedCallback() {
if (!handle_scope_implementer()->CallDepthIsZero()) return;
bool run_microtasks =
- pending_microtask_count() &&
+ heap()->default_microtask_queue()->pending_microtask_count() &&
!handle_scope_implementer()->HasMicrotasksSuppressions() &&
handle_scope_implementer()->microtasks_policy() ==
v8::MicrotasksPolicy::kAuto;
@@ -3829,6 +4041,29 @@ void Isolate::SetHostInitializeImportMetaObjectCallback(
host_initialize_import_meta_object_callback_ = callback;
}
+MaybeHandle<Object> Isolate::RunPrepareStackTraceCallback(
+ Handle<Context> context, Handle<JSObject> error) {
+ v8::Local<v8::Context> api_context = Utils::ToLocal(context);
+
+ v8::Local<StackTrace> trace =
+ Utils::StackTraceToLocal(GetDetailedStackTrace(error));
+
+ v8::Local<v8::Value> stack;
+ ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
+ this, stack,
+ prepare_stack_trace_callback_(api_context, Utils::ToLocal(error), trace),
+ MaybeHandle<Object>());
+ return Utils::OpenHandle(*stack);
+}
+
+void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) {
+ prepare_stack_trace_callback_ = callback;
+}
+
+bool Isolate::HasPrepareStackTraceCallback() const {
+ return prepare_stack_trace_callback_ != nullptr;
+}
+
void Isolate::SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
void* data) {
atomics_wait_callback_ = callback;
@@ -3945,18 +4180,9 @@ void Isolate::ReportPromiseReject(Handle<JSPromise> promise,
}
void Isolate::EnqueueMicrotask(Handle<Microtask> microtask) {
- Handle<FixedArray> queue(heap()->microtask_queue(), this);
- int num_tasks = pending_microtask_count();
- DCHECK_LE(num_tasks, queue->length());
- if (num_tasks == queue->length()) {
- queue = factory()->CopyFixedArrayAndGrow(queue, std::max(num_tasks, 8));
- heap()->set_microtask_queue(*queue);
- }
- DCHECK_LE(8, queue->length());
- DCHECK_LT(num_tasks, queue->length());
- DCHECK(queue->get(num_tasks)->IsUndefined(this));
- queue->set(num_tasks, *microtask);
- set_pending_microtask_count(num_tasks + 1);
+ Handle<MicrotaskQueue> microtask_queue(heap()->default_microtask_queue(),
+ this);
+ MicrotaskQueue::EnqueueMicrotask(this, microtask_queue, microtask);
}
@@ -3964,25 +4190,27 @@ void Isolate::RunMicrotasks() {
// Increase call depth to prevent recursive callbacks.
v8::Isolate::SuppressMicrotaskExecutionScope suppress(
reinterpret_cast<v8::Isolate*>(this));
- if (pending_microtask_count()) {
+ HandleScope scope(this);
+ Handle<MicrotaskQueue> microtask_queue(heap()->default_microtask_queue(),
+ this);
+ if (microtask_queue->pending_microtask_count()) {
is_running_microtasks_ = true;
TRACE_EVENT0("v8.execute", "RunMicrotasks");
TRACE_EVENT_CALL_STATS_SCOPED(this, "v8", "V8.RunMicrotasks");
- HandleScope scope(this);
MaybeHandle<Object> maybe_exception;
MaybeHandle<Object> maybe_result = Execution::RunMicrotasks(
this, Execution::MessageHandling::kReport, &maybe_exception);
// If execution is terminating, bail out, clean up, and propagate to
// TryCatch scope.
if (maybe_result.is_null() && maybe_exception.is_null()) {
- heap()->set_microtask_queue(ReadOnlyRoots(heap()).empty_fixed_array());
- set_pending_microtask_count(0);
+ microtask_queue->set_queue(ReadOnlyRoots(heap()).empty_fixed_array());
+ microtask_queue->set_pending_microtask_count(0);
handle_scope_implementer()->LeaveMicrotaskContext();
SetTerminationOnExternalTryCatch();
}
- CHECK_EQ(0, pending_microtask_count());
- CHECK_EQ(0, heap()->microtask_queue()->length());
+ CHECK_EQ(0, microtask_queue->pending_microtask_count());
+ CHECK_EQ(0, microtask_queue->queue()->length());
is_running_microtasks_ = false;
}
FireMicrotasksCompletedCallback();
@@ -4007,10 +4235,17 @@ void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
}
}
-std::string Isolate::GetTurboCfgFileName() {
+// static
+std::string Isolate::GetTurboCfgFileName(Isolate* isolate) {
if (FLAG_trace_turbo_cfg_file == nullptr) {
std::ostringstream os;
- os << "turbo-" << base::OS::GetCurrentProcessId() << "-" << id() << ".cfg";
+ os << "turbo-" << base::OS::GetCurrentProcessId() << "-";
+ if (isolate != nullptr) {
+ os << isolate->id();
+ } else {
+ os << "any";
+ }
+ os << ".cfg";
return os.str();
} else {
return FLAG_trace_turbo_cfg_file;
@@ -4037,10 +4272,10 @@ void Isolate::CheckDetachedContextsAfterGC() {
if (length == 0) return;
int new_length = 0;
for (int i = 0; i < length; i += 2) {
- int mark_sweeps = Smi::ToInt(detached_contexts->Get(i)->ToSmi());
+ int mark_sweeps = Smi::ToInt(detached_contexts->Get(i)->cast<Smi>());
MaybeObject* context = detached_contexts->Get(i + 1);
- DCHECK(context->IsWeakHeapObject() || context->IsClearedWeakHeapObject());
- if (!context->IsClearedWeakHeapObject()) {
+ DCHECK(context->IsWeakOrCleared());
+ if (!context->IsCleared()) {
detached_contexts->Set(
new_length, MaybeObject::FromSmi(Smi::FromInt(mark_sweeps + 1)));
detached_contexts->Set(new_length + 1, context);
@@ -4057,9 +4292,9 @@ void Isolate::CheckDetachedContextsAfterGC() {
PrintF("%d detached contexts are collected out of %d\n",
length - new_length, length);
for (int i = 0; i < new_length; i += 2) {
- int mark_sweeps = Smi::ToInt(detached_contexts->Get(i)->ToSmi());
+ int mark_sweeps = Smi::ToInt(detached_contexts->Get(i)->cast<Smi>());
MaybeObject* context = detached_contexts->Get(i + 1);
- DCHECK(context->IsWeakHeapObject() || context->IsClearedWeakHeapObject());
+ DCHECK(context->IsWeakOrCleared());
if (mark_sweeps > 3) {
PrintF("detached context %p\n survived %d GCs (leak?)\n",
static_cast<void*>(context), mark_sweeps);
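
The centerpiece of this isolate.cc change is CaptureAsyncStackTrace: starting from the promise of the innermost suspended await, it repeatedly inspects a promise's single PromiseReaction. If the fulfill handler is one of the known await/yield resolve closures, the suspended generator contributes an async frame and the walk continues through that async function's own outer promise; otherwise it follows the reaction's chained promise, which only works for native promise chains. A self-contained model of that loop — every type here is a simplified stand-in for the real heap objects:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Promise;

    struct Reaction {
      const char* suspended_async_fn;  // non-null: a known await closure
      Promise* outer_promise;          // continuation for async functions
      Promise* chained_promise;        // generic native promise chain
    };

    // reaction == nullptr models "zero or more than one reaction",
    // where the real code gives up.
    struct Promise { Reaction* reaction = nullptr; };

    void CaptureAsyncStackTrace(Promise* promise,
                                std::vector<std::string>* out) {
      while (promise != nullptr && promise->reaction != nullptr) {
        Reaction* r = promise->reaction;
        if (r->suspended_async_fn != nullptr) {
          out->push_back(std::string("async ") + r->suspended_async_fn);
          promise = r->outer_promise;    // continue from the awaiting frame
        } else {
          promise = r->chained_promise;  // generic .then() link
        }
      }
    }

    int main() {
      Promise inner, middle, outer;
      Reaction r1{"innerFn", &middle, nullptr};
      Reaction r2{"outerFn", &outer, nullptr};
      inner.reaction = &r1;
      middle.reaction = &r2;
      std::vector<std::string> frames;
      CaptureAsyncStackTrace(&inner, &frames);
      for (const auto& f : frames) std::puts(f.c_str());
      // prints: async innerFn / async outerFn
    }
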
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index efd479c41e..ad124586cc 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -12,6 +12,7 @@
#include <vector>
#include "include/v8-inspector.h"
+#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/atomicops.h"
@@ -58,42 +59,32 @@ namespace heap {
class HeapTester;
} // namespace heap
-class AccessCompilerData;
class AddressToIndexHashMap;
class AstStringConstants;
class Bootstrapper;
class BuiltinsConstantsTableBuilder;
class CancelableTaskManager;
class CodeEventDispatcher;
-class ExternalCodeEventListener;
-class CodeGenerator;
-class CodeRange;
-class CodeStubDescriptor;
class CodeTracer;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
class ContextSlotCache;
class Counters;
-class CpuFeatures;
class Debug;
class DeoptimizerData;
class DescriptorLookupCache;
-class EmptyStatement;
class EternalHandles;
class ExternalCallbackScope;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
-class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
-class InstructionStream;
class Logger;
class MaterializedObjectStore;
class Microtask;
class OptimizingCompileDispatcher;
class PromiseOnStack;
-class Redirection;
class RegExpStack;
class RootVisitor;
class RuntimeProfiler;
@@ -102,10 +93,7 @@ class SetupIsolateDelegate;
class Simulator;
class StartupDeserializer;
class StandardFrame;
-class StatsTable;
-class StringTracker;
class StubCache;
-class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
@@ -119,6 +107,10 @@ namespace interpreter {
class Interpreter;
}
+namespace compiler {
+class PerIsolateCompilerCache;
+}
+
namespace wasm {
class WasmEngine;
}
@@ -395,8 +387,7 @@ class ThreadId {
inline void set_##name(type v) { name##_ = v; } \
inline type name() const { return name##_; }
-
-class ThreadLocalTop BASE_EMBEDDED {
+class ThreadLocalTop {
public:
// Does early low-level initialization that does not depend on the
// isolate being present.
@@ -435,9 +426,6 @@ class ThreadLocalTop BASE_EMBEDDED {
Context* context_ = nullptr;
ThreadId thread_id_ = ThreadId::Invalid();
Object* pending_exception_ = nullptr;
- // TODO(kschimpf): Change this to a stack of caught exceptions (rather than
- // just innermost catching try block).
- Object* wasm_caught_exception_ = nullptr;
// Communication channel between Isolate::FindHandler and the CEntry.
Context* pending_handler_context_ = nullptr;
@@ -531,7 +519,6 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(const intptr_t*, api_external_references, nullptr) \
V(AddressToIndexHashMap*, external_reference_map, nullptr) \
V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \
- V(int, pending_microtask_count, 0) \
V(CompilationStatistics*, turbo_statistics, nullptr) \
V(CodeTracer*, code_tracer, nullptr) \
V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
@@ -553,8 +540,7 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(int, last_console_context_id, 0) \
V(v8_inspector::V8Inspector*, inspector, nullptr) \
V(bool, next_v8_call_is_safe_for_termination, false) \
- V(bool, only_terminate_in_safe_scope, false) \
- V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info)
+ V(bool, only_terminate_in_safe_scope, false)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
@@ -671,6 +657,8 @@ class Isolate : private HiddenFactory {
void ClearSerializerData();
+ bool LogObjectRelocation();
+
// Find the PerThread for this particular (isolate, thread) combination
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThisThread();
@@ -717,11 +705,6 @@ class Isolate : private HiddenFactory {
inline void set_pending_exception(Object* exception_obj);
inline void clear_pending_exception();
- // Interface to wasm caught exception.
- inline Object* get_wasm_caught_exception();
- inline void set_wasm_caught_exception(Object* exception);
- inline void clear_wasm_caught_exception();
-
bool AreWasmThreadsEnabled(Handle<Context> context);
THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)
@@ -758,7 +741,6 @@ class Isolate : private HiddenFactory {
bool IsExternalHandlerOnTop(Object* exception);
inline bool is_catchable_by_javascript(Object* exception);
- bool is_catchable_by_wasm(Object* exception);
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
@@ -1016,6 +998,12 @@ class Isolate : private HiddenFactory {
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
+
+ // kRootRegister may be used to address any location that falls into this
+ // region. Fields outside this region are not guaranteed to live at a static
+ // offset from kRootRegister.
+ inline base::AddressRegion root_register_addressable_region();
+
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
@@ -1103,6 +1091,7 @@ class Isolate : private HiddenFactory {
v8::internal::Factory* factory() {
// Upcast to the privately inherited base-class using c-style casts to avoid
// undefined behavior (as static_cast cannot cast across private bases).
+ // NOLINTNEXTLINE (google-readability-casting)
return (v8::internal::Factory*)this; // NOLINT(readability/casting)
}
@@ -1193,6 +1182,7 @@ class Isolate : private HiddenFactory {
}
#ifdef V8_INTL_SUPPORT
+#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
icu::RegexMatcher* language_singleton_regexp_matcher() {
return language_singleton_regexp_matcher_;
}
@@ -1204,6 +1194,7 @@ class Isolate : private HiddenFactory {
icu::RegexMatcher* language_variant_regexp_matcher() {
return language_variant_regexp_matcher_;
}
+#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
const std::string& default_locale() { return default_locale_; }
@@ -1212,6 +1203,7 @@ class Isolate : private HiddenFactory {
default_locale_ = locale;
}
+#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
void set_language_tag_regexp_matchers(
icu::RegexMatcher* language_singleton_regexp_matcher,
icu::RegexMatcher* language_tag_regexp_matcher,
@@ -1223,6 +1215,7 @@ class Isolate : private HiddenFactory {
language_tag_regexp_matcher_ = language_tag_regexp_matcher;
language_variant_regexp_matcher_ = language_variant_regexp_matcher;
}
+#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
#endif // V8_INTL_SUPPORT
static const int kProtectorValid = 1;
@@ -1244,6 +1237,18 @@ class Isolate : private HiddenFactory {
inline bool IsStringLengthOverflowIntact();
inline bool IsArrayIteratorLookupChainIntact();
+ // The StringIteratorProtector protects the original string iterating behavior
+ // for primitive strings. As long as the StringIteratorProtector is valid,
+ // iterating over a primitive string is guaranteed to be unobservable from
+ // user code and can thus be cut short. More specifically, the protector gets
+ // invalidated as soon as either String.prototype[Symbol.iterator] or
+ // String.prototype[Symbol.iterator]().next is modified. This guarantee does
+ // not apply to string objects (as opposed to primitives), since they could
+ // define their own Symbol.iterator.
+ // String.prototype itself does not need to be protected, since it is
+ // non-configurable and non-writable.
+ inline bool IsStringIteratorLookupChainIntact();
+
// Make sure we do check for neutered array buffers.
inline bool IsArrayBufferNeuteringIntact();
@@ -1284,6 +1289,7 @@ class Isolate : private HiddenFactory {
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
void InvalidateArrayIteratorProtector();
+ void InvalidateStringIteratorProtector();
void InvalidateArrayBufferNeuteringProtector();
V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
void InvalidatePromiseResolveProtector();
@@ -1332,6 +1338,10 @@ class Isolate : private HiddenFactory {
bool force_slow_path() const { return force_slow_path_; }
bool* force_slow_path_address() { return &force_slow_path_; }
+ DebugInfo::ExecutionMode* debug_execution_mode_address() {
+ return &debug_execution_mode_;
+ }
+
V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();
V8_EXPORT_PRIVATE base::RandomNumberGenerator* fuzzer_rng();
@@ -1374,13 +1384,13 @@ class Isolate : private HiddenFactory {
void RunMicrotasks();
bool IsRunningMicrotasks() const { return is_running_microtasks_; }
- Handle<Symbol> SymbolFor(Heap::RootListIndex dictionary_index,
- Handle<String> name, bool private_symbol);
+ Handle<Symbol> SymbolFor(RootIndex dictionary_index, Handle<String> name,
+ bool private_symbol);
void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
void CountUsage(v8::Isolate::UseCounterFeature feature);
- std::string GetTurboCfgFileName();
+ static std::string GetTurboCfgFileName(Isolate* isolate);
#if V8_SFI_HAS_UNIQUE_ID
int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
@@ -1398,10 +1408,6 @@ class Isolate : private HiddenFactory {
return reinterpret_cast<Address>(&promise_hook_or_async_event_delegate_);
}
- Address pending_microtask_count_address() {
- return reinterpret_cast<Address>(&pending_microtask_count_);
- }
-
Address handle_scope_implementer_address() {
return reinterpret_cast<Address>(&handle_scope_implementer_);
}
@@ -1472,6 +1478,15 @@ class Isolate : private HiddenFactory {
interpreter::Interpreter* interpreter() const { return interpreter_; }
+ compiler::PerIsolateCompilerCache* compiler_cache() const {
+ return compiler_cache_;
+ }
+ void set_compiler_utils(compiler::PerIsolateCompilerCache* cache,
+ Zone* zone) {
+ compiler_cache_ = cache;
+ compiler_zone_ = zone;
+ }
+
AccountingAllocator* allocator() { return allocator_; }
CompilerDispatcher* compiler_dispatcher() const {
@@ -1490,6 +1505,11 @@ class Isolate : private HiddenFactory {
Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
Handle<Module> module);
+ void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
+ MaybeHandle<Object> RunPrepareStackTraceCallback(Handle<Context>,
+ Handle<JSObject> Error);
+ bool HasPrepareStackTraceCallback() const;
+
void SetRAILMode(RAILMode rail_mode);
RAILMode rail_mode() { return rail_mode_.Value(); }
@@ -1525,10 +1545,7 @@ class Isolate : private HiddenFactory {
}
wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
- void set_wasm_engine(std::shared_ptr<wasm::WasmEngine> engine) {
- DCHECK_NULL(wasm_engine_); // Only call once before {Init}.
- wasm_engine_ = std::move(engine);
- }
+ void SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine);
const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
return top_backup_incumbent_scope_;
@@ -1556,8 +1573,7 @@ class Isolate : private HiddenFactory {
class ThreadDataTable {
public:
- ThreadDataTable();
- ~ThreadDataTable();
+ ThreadDataTable() = default;
PerIsolateThreadData* Lookup(ThreadId thread_id);
void Insert(PerIsolateThreadData* data);
@@ -1718,9 +1734,11 @@ class Isolate : private HiddenFactory {
double load_start_time_ms_;
#ifdef V8_INTL_SUPPORT
+#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
icu::RegexMatcher* language_singleton_regexp_matcher_;
icu::RegexMatcher* language_tag_regexp_matcher_;
icu::RegexMatcher* language_variant_regexp_matcher_;
+#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
std::string default_locale_;
#endif // V8_INTL_SUPPORT
@@ -1762,6 +1780,9 @@ class Isolate : private HiddenFactory {
interpreter::Interpreter* interpreter_;
+ compiler::PerIsolateCompilerCache* compiler_cache_ = nullptr;
+ Zone* compiler_zone_ = nullptr;
+
CompilerDispatcher* compiler_dispatcher_;
typedef std::pair<InterruptCallback, void*> InterruptEntry;
@@ -1856,6 +1877,8 @@ class Isolate : private HiddenFactory {
const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
nullptr;
+ PrepareStackTraceCallback prepare_stack_trace_callback_ = nullptr;
+
// TODO(kenton@cloudflare.com): This mutex can be removed if
// thread_data_table_ is always accessed under the isolate lock. I do not
// know if this is the case, so I'm preserving it for now.
@@ -1901,7 +1924,7 @@ class PromiseOnStack {
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
-class SaveContext BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE SaveContext {
public:
explicit SaveContext(Isolate* isolate);
~SaveContext();
@@ -1919,8 +1942,7 @@ class SaveContext BASE_EMBEDDED {
Address c_entry_fp_;
};
-
-class AssertNoContextChange BASE_EMBEDDED {
+class AssertNoContextChange {
#ifdef DEBUG
public:
explicit AssertNoContextChange(Isolate* isolate);
@@ -1937,8 +1959,7 @@ class AssertNoContextChange BASE_EMBEDDED {
#endif
};
-
-class ExecutionAccess BASE_EMBEDDED {
+class ExecutionAccess {
public:
explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
Lock(isolate);
@@ -1958,7 +1979,7 @@ class ExecutionAccess BASE_EMBEDDED {
// Support for checking for stack-overflows.
-class StackLimitCheck BASE_EMBEDDED {
+class StackLimitCheck {
public:
explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
@@ -2034,7 +2055,7 @@ class PostponeInterruptsScope : public InterruptsScope {
int intercept_mask = StackGuard::ALL_INTERRUPTS)
: InterruptsScope(isolate, intercept_mask,
InterruptsScope::kPostponeInterrupts) {}
- virtual ~PostponeInterruptsScope() = default;
+ ~PostponeInterruptsScope() override = default;
};
// Support for overriding PostponeInterruptsScope. Interrupt is not ignored if
@@ -2046,7 +2067,7 @@ class SafeForInterruptsScope : public InterruptsScope {
int intercept_mask = StackGuard::ALL_INTERRUPTS)
: InterruptsScope(isolate, intercept_mask,
InterruptsScope::kRunInterrupts) {}
- virtual ~SafeForInterruptsScope() = default;
+ ~SafeForInterruptsScope() override = default;
};
class StackTraceFailureMessage {
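
With pending_microtask_count gone from the isolate (above) and Isolate::EnqueueMicrotask rewritten in isolate.cc, the queue state now lives on a heap-allocated MicrotaskQueue object. The growth policy the old code spelled out — when the backing FixedArray is full, grow it by max(pending, 8) slots, i.e. doubling once the queue holds at least eight tasks — reads as follows in a stand-alone sketch, with a vector in place of the FixedArray and Microtask as a placeholder type:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    using Microtask = int;  // placeholder for the real Microtask object

    struct MicrotaskQueue {
      std::vector<Microtask> queue;  // stands in for the FixedArray store
      std::size_t pending = 0;       // stands in for pending_microtask_count

      void Enqueue(Microtask task) {
        if (pending == queue.size()) {
          // Mirrors CopyFixedArrayAndGrow(queue, std::max(num_tasks, 8)).
          queue.resize(queue.size() + std::max(pending, std::size_t{8}));
        }
        queue[pending++] = task;
      }
    };

    int main() {
      MicrotaskQueue q;
      for (int i = 0; i < 20; ++i) q.Enqueue(i);
      // Capacity grew 0 -> 8 -> 16 -> 32 while 20 tasks became pending.
      std::printf("pending=%zu capacity=%zu\n", q.pending, q.queue.size());
    }
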
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index fe02afceea..16b140bb38 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -13,19 +13,12 @@
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
-var MathMax = global.Math.max;
-var MathMin = global.Math.min;
-var ObjectHasOwnProperty = global.Object.prototype.hasOwnProperty;
var ObjectToString = global.Object.prototype.toString;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
// -------------------------------------------------------------------
-macro IS_PROXY(arg)
-(%_IsJSProxy(arg))
-endmacro
-
macro INVERT_NEG_ZERO(arg)
((arg) + 0)
endmacro
@@ -201,140 +194,6 @@ function ConvertToString(use_locale, x, locales, options) {
}
-// This function implements the optimized splice implementation that can use
-// special array operations to handle sparse arrays in a sensible fashion.
-function SparseSlice(array, start_i, del_count, len, deleted_elements) {
- // Move deleted elements to a new array (the return value from splice).
- var indices = %GetArrayKeys(array, start_i + del_count);
- if (IS_NUMBER(indices)) {
- var limit = indices;
- for (var i = start_i; i < limit; ++i) {
- var current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
- %CreateDataProperty(deleted_elements, i - start_i, current);
- }
- }
- } else {
- var length = indices.length;
- for (var k = 0; k < length; ++k) {
- var key = indices[k];
- if (key >= start_i) {
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- %CreateDataProperty(deleted_elements, key - start_i, current);
- }
- }
- }
- }
-}
-
-
-// This function implements the optimized splice implementation that can use
-// special array operations to handle sparse arrays in a sensible fashion.
-function SparseMove(array, start_i, del_count, len, num_additional_args) {
- // Bail out if no moving is necessary.
- if (num_additional_args === del_count) return;
- // Move data to new array.
- var new_array = new InternalArray(
- // Clamp array length to 2^32-1 to avoid early RangeError.
- MathMin(len - del_count + num_additional_args, 0xffffffff));
- var big_indices;
- var indices = %GetArrayKeys(array, len);
- if (IS_NUMBER(indices)) {
- var limit = indices;
- for (var i = 0; i < start_i && i < limit; ++i) {
- var current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
- new_array[i] = current;
- }
- }
- for (var i = start_i + del_count; i < limit; ++i) {
- var current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
- new_array[i - del_count + num_additional_args] = current;
- }
- }
- } else {
- var length = indices.length;
- for (var k = 0; k < length; ++k) {
- var key = indices[k];
- if (key < start_i) {
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- new_array[key] = current;
- }
- } else if (key >= start_i + del_count) {
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- var new_key = key - del_count + num_additional_args;
- new_array[new_key] = current;
- if (new_key > 0xfffffffe) {
- big_indices = big_indices || new InternalArray();
- big_indices.push(new_key);
- }
- }
- }
- }
- }
- // Move contents of new_array into this array
- %MoveArrayContents(new_array, array);
- // Add any moved values that aren't elements anymore.
- if (!IS_UNDEFINED(big_indices)) {
- var length = big_indices.length;
- for (var i = 0; i < length; ++i) {
- var key = big_indices[i];
- array[key] = new_array[key];
- }
- }
-}
-
-
-// This is part of the old simple-minded splice. We are using it either
-// because the receiver is not an array (so we have no choice) or because we
-// know we are not deleting or moving a lot of elements.
-function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
- for (var i = 0; i < del_count; i++) {
- var index = start_i + i;
- if (index in array) {
- var current = array[index];
- %CreateDataProperty(deleted_elements, i, current);
- }
- }
-}
-
-
-function SimpleMove(array, start_i, del_count, len, num_additional_args) {
- if (num_additional_args !== del_count) {
- // Move the existing elements after the elements to be deleted
- // to the right position in the resulting array.
- if (num_additional_args > del_count) {
- for (var i = len - del_count; i > start_i; i--) {
- var from_index = i + del_count - 1;
- var to_index = i + num_additional_args - 1;
- if (from_index in array) {
- array[to_index] = array[from_index];
- } else {
- delete array[to_index];
- }
- }
- } else {
- for (var i = start_i; i < len - del_count; i++) {
- var from_index = i + del_count;
- var to_index = i + num_additional_args;
- if (from_index in array) {
- array[to_index] = array[from_index];
- } else {
- delete array[to_index];
- }
- }
- for (var i = len; i > len - del_count + num_additional_args; i--) {
- delete array[i - 1];
- }
- }
- }
-}
-
-
// -------------------------------------------------------------------
var ArrayJoin;
@@ -408,55 +267,6 @@ DEFINE_METHOD(
);
-function ArrayShiftFallback() {
- var array = TO_OBJECT(this);
- var len = TO_LENGTH(array.length);
-
- if (len === 0) {
- array.length = 0;
- return;
- }
-
- var first = array[0];
-
- if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
- SparseMove(array, 0, 1, len, 0);
- } else {
- SimpleMove(array, 0, 1, len, 0);
- }
-
- array.length = len - 1;
-
- return first;
-}
-
-
-function ArrayUnshiftFallback(arg1) { // length == 1
- var array = TO_OBJECT(this);
- var len = TO_LENGTH(array.length);
- var num_arguments = arguments.length;
-
- const new_len = len + num_arguments;
- if (num_arguments > 0) {
- if (new_len >= 2**53) throw %make_type_error(kInvalidArrayLength);
-
- if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
- !%object_is_sealed(array)) {
- SparseMove(array, 0, 0, len, num_arguments);
- } else {
- SimpleMove(array, 0, 0, len, num_arguments);
- }
-
- for (var i = 0; i < num_arguments; i++) {
- array[i] = arguments[i];
- }
- }
-
- array.length = new_len;
- return new_len;
-}
-
-
// Oh the humanity... don't remove the following function because js2c for some
// reason gets symbol minification wrong if it's not there. Instead of spending
// the time fixing js2c (which will go away when all of the internal .js runtime
@@ -465,83 +275,6 @@ function ArraySliceFallback(start, end) {
return null;
}
-function ComputeSpliceStartIndex(start_i, len) {
- if (start_i < 0) {
- start_i += len;
- return start_i < 0 ? 0 : start_i;
- }
-
- return start_i > len ? len : start_i;
-}
-
-
-function ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i) {
- // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
- // given as a request to delete all the elements from the start.
- // And it differs from the case of undefined delete count.
- // This does not follow ECMA-262, but we do the same for
- // compatibility.
- var del_count = 0;
- if (num_arguments == 1)
- return len - start_i;
-
- del_count = TO_INTEGER(delete_count);
- if (del_count < 0)
- return 0;
-
- if (del_count > len - start_i)
- return len - start_i;
-
- return del_count;
-}
-
-
-function ArraySpliceFallback(start, delete_count) {
- var num_arguments = arguments.length;
- var array = TO_OBJECT(this);
- var len = TO_LENGTH(array.length);
- var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
- var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
- start_i);
- var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
-
- const new_len = len - del_count + num_elements_to_add;
- if (new_len >= 2**53) throw %make_type_error(kInvalidArrayLength);
-
- var deleted_elements = ArraySpeciesCreate(array, del_count);
- deleted_elements.length = del_count;
-
- var changed_elements = del_count;
- if (num_elements_to_add != del_count) {
- // If the splice actually needs to move elements after the insertion
- // point, then include those in the estimate of changed elements.
- changed_elements += len - start_i - del_count;
- }
- if (UseSparseVariant(array, len, IS_ARRAY(array), changed_elements)) {
- %NormalizeElements(array);
- if (IS_ARRAY(deleted_elements)) %NormalizeElements(deleted_elements);
- SparseSlice(array, start_i, del_count, len, deleted_elements);
- SparseMove(array, start_i, del_count, len, num_elements_to_add);
- } else {
- SimpleSlice(array, start_i, del_count, len, deleted_elements);
- SimpleMove(array, start_i, del_count, len, num_elements_to_add);
- }
-
- // Insert the arguments into the resulting array in
- // place of the deleted elements.
- var i = start_i;
- var arguments_index = 2;
- var arguments_length = arguments.length;
- while (arguments_index < arguments_length) {
- array[i++] = arguments[arguments_index++];
- }
- array.length = new_len;
-
- // Return the deleted elements.
- return deleted_elements;
-}
-
-
function InnerArraySort(array, length, comparefn) {
// In-place QuickSort algorithm.
// For short (length <= 10) arrays, insertion sort is used for efficiency.
@@ -700,56 +433,6 @@ function InnerArraySort(array, length, comparefn) {
}
-DEFINE_METHOD_LEN(
- GlobalArray.prototype,
- lastIndexOf(element, index) {
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(this.length);
-
- if (length == 0) return -1;
- if (arguments.length < 2) {
- index = length - 1;
- } else {
- index = INVERT_NEG_ZERO(TO_INTEGER(index));
- // If index is negative, index from end of the array.
- if (index < 0) index += length;
- // If index is still negative, do not search the array.
- if (index < 0) return -1;
- else if (index >= length) index = length - 1;
- }
- var min = 0;
- var max = index;
- if (UseSparseVariant(array, length, IS_ARRAY(array), index)) {
- %NormalizeElements(array);
- var indices = %GetArrayKeys(array, index + 1);
- if (IS_NUMBER(indices)) {
- // It's an interval.
- max = indices; // Capped by index already.
- // Fall through to loop below.
- } else {
- if (indices.length == 0) return -1;
- // Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(array, indices);
- var i = sortedKeys.length - 1;
- while (i >= 0) {
- var key = sortedKeys[i];
- if (array[key] === element) return key;
- i--;
- }
- return -1;
- }
- }
- // Lookup through the array.
- for (var i = max; i >= min; i--) {
- if (i in array && array[i] === element) return i;
- }
- return -1;
- },
- 1 /* Set function length */
-);
-
-
-
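The removed lastIndexOf fallback follows the standard fromIndex rules, plus a sparse-array fast path over %GetArrayKeys. The index handling it implements:

const a = [1, 2, 1, 2];
a.lastIndexOf(2);       // 3: search starts from the end by default
a.lastIndexOf(2, -2);   // 1: negative fromIndex is offset by the length
a.lastIndexOf(2, -5);   // -1: still negative after the offset, so no search
Array.prototype.lastIndexOf.length;  // 1, as DEFINE_METHOD_LEN sets it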
// Set up unscopable properties on the Array.prototype object.
var unscopables = {
__proto__: null,
@@ -827,10 +510,6 @@ utils.Export(function(to) {
"array_for_each_iterator", ArrayForEach,
"array_keys_iterator", ArrayKeys,
"array_values_iterator", ArrayValues,
- // Fallback implementations of Array builtins.
- "array_shift", ArrayShiftFallback,
- "array_splice", ArraySpliceFallback,
- "array_unshift", ArrayUnshiftFallback,
]);
});
diff --git a/deps/v8/src/js/intl.js b/deps/v8/src/js/intl.js
index db4d45c563..43119a490d 100644
--- a/deps/v8/src/js/intl.js
+++ b/deps/v8/src/js/intl.js
@@ -34,7 +34,6 @@ var InternalArray = utils.InternalArray;
var MathMax = global.Math.max;
var ObjectHasOwnProperty = global.Object.prototype.hasOwnProperty;
var ObjectKeys = global.Object.keys;
-var patternSymbol = utils.ImportNow("intl_pattern_symbol");
var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
var StringSubstr = GlobalString.prototype.substr;
var StringSubstring = GlobalString.prototype.substring;
@@ -44,77 +43,6 @@ utils.Import(function(from) {
ArrayPush = from.ArrayPush;
});
-// Utilities for definitions
-
-macro NUMBER_IS_NAN(arg)
-(%IS_VAR(arg) !== arg)
-endmacro
-
-// To avoid ES2015 Function name inference.
-
-macro ANONYMOUS_FUNCTION(fn)
-(0, (fn))
-endmacro
-
-/**
- * Adds a bound method to the prototype of the given object.
- */
-function AddBoundMethod(obj, methodName, implementation, length, type,
- compat) {
- %CheckIsBootstrapping();
- var internalName = %CreatePrivateSymbol(methodName);
-
- DEFINE_METHOD(
- obj.prototype,
- get [methodName]() {
- if(!IS_RECEIVER(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver, methodName, this);
- }
- var receiver = %IntlUnwrapReceiver(this, type, obj, methodName, compat);
- if (IS_UNDEFINED(receiver[internalName])) {
- var boundMethod;
- if (IS_UNDEFINED(length) || length === 2) {
- boundMethod =
- ANONYMOUS_FUNCTION((fst, snd) => implementation(receiver, fst, snd));
- } else if (length === 1) {
- boundMethod = ANONYMOUS_FUNCTION(fst => implementation(receiver, fst));
- } else {
- boundMethod = ANONYMOUS_FUNCTION((...args) => {
- // DateTimeFormat.format needs to be a 0-arg method, but can still
- // receive an optional dateValue param. If one was provided, pass it
- // along.
- if (args.length > 0) {
- return implementation(receiver, args[0]);
- } else {
- return implementation(receiver);
- }
- });
- }
- %SetNativeFlag(boundMethod);
- receiver[internalName] = boundMethod;
- }
- return receiver[internalName];
- }
- );
-}
-
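AddBoundMethod installs a getter that lazily builds a closure over the unwrapped receiver and caches it under a private symbol, so Intl methods keep working when detached. The observable contract, which any replacement must preserve:

const nf = new Intl.NumberFormat('en');
const format = nf.format;   // getter hands back a bound closure
format(12345.6);            // "12,345.6": no receiver needed
nf.format === nf.format;    // true: created once, then cached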
-function IntlConstruct(receiver, constructor, create, newTarget, args,
- compat) {
- var locales = args[0];
- var options = args[1];
-
- var instance = create(locales, options);
-
- if (compat && IS_UNDEFINED(newTarget) && receiver instanceof constructor) {
- %object_define_property(receiver, IntlFallbackSymbol, { value: instance });
- return receiver;
- }
-
- return instance;
-}
-
-
-
// -------------------------------------------------------------------
/**
@@ -129,6 +57,7 @@ var AVAILABLE_LOCALES = {
'pluralrules': UNDEFINED,
'relativetimeformat': UNDEFINED,
'listformat': UNDEFINED,
+ 'segmenter': UNDEFINED,
};
/**
@@ -156,18 +85,6 @@ function GetAnyExtensionRE() {
}
/**
- * Matches quoted text (a single quote, anything but the quote, then the
- * quote again).
- */
-var QUOTED_STRING_RE = UNDEFINED;
-
-function GetQuotedStringRE() {
- if (IS_UNDEFINED(QUOTED_STRING_RE)) {
- QUOTED_STRING_RE = new GlobalRegExp("'[^']+'", 'g');
- }
- return QUOTED_STRING_RE;
-}
-
-/**
* Matches valid service name.
*/
var SERVICE_RE = UNDEFINED;
@@ -181,42 +98,6 @@ function GetServiceRE() {
}
/**
- * Matches valid IANA time zone names.
- */
-var TIMEZONE_NAME_CHECK_RE = UNDEFINED;
-var GMT_OFFSET_TIMEZONE_NAME_CHECK_RE = UNDEFINED;
-
-function GetTimezoneNameCheckRE() {
- if (IS_UNDEFINED(TIMEZONE_NAME_CHECK_RE)) {
- TIMEZONE_NAME_CHECK_RE = new GlobalRegExp(
- '^([A-Za-z]+)/([A-Za-z_-]+)((?:\/[A-Za-z_-]+)+)*$');
- }
- return TIMEZONE_NAME_CHECK_RE;
-}
-
-function GetGMTOffsetTimezoneNameCheckRE() {
- if (IS_UNDEFINED(GMT_OFFSET_TIMEZONE_NAME_CHECK_RE)) {
- GMT_OFFSET_TIMEZONE_NAME_CHECK_RE = new GlobalRegExp(
- '^(?:ETC/GMT)(?<offset>0|[+-](?:[0-9]|1[0-4]))$');
- }
- return GMT_OFFSET_TIMEZONE_NAME_CHECK_RE;
-}
-
-/**
- * Matches valid location parts of IANA time zone names.
- */
-var TIMEZONE_NAME_LOCATION_PART_RE = UNDEFINED;
-
-function GetTimezoneNameLocationPartRE() {
- if (IS_UNDEFINED(TIMEZONE_NAME_LOCATION_PART_RE)) {
- TIMEZONE_NAME_LOCATION_PART_RE =
- new GlobalRegExp('^([A-Za-z]+)((?:[_-][A-Za-z]+)+)*$');
- }
- return TIMEZONE_NAME_LOCATION_PART_RE;
-}
-
-
-/**
* Returns a getOption function that extracts property value for given
* options object. If property is missing it returns defaultValue. If value
* is out of range for that property it throws RangeError.
@@ -391,69 +272,6 @@ function bestFitMatcher(service, requestedLocales) {
}
/**
- * Populates the internalOptions object with boolean key-value pairs
- * from extensionMap and options.
- * Returns the filtered extension (number and date format constructors use
- * Unicode extensions for passing parameters to ICU).
- * It's used for extension-option pairs only, e.g. kn-normalization, but not
- * for 'sensitivity' since it doesn't have an extension equivalent.
- * Extensions like nu and ca don't have an options equivalent, so we place
- * undefined in map.property to denote that.
- */
-function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
- var extension = '';
-
- var updateExtension = function updateExtension(key, value) {
- return '-' + key + '-' + TO_STRING(value);
- }
-
- var updateProperty = function updateProperty(property, type, value) {
- if (type === 'boolean' && (typeof value === 'string')) {
- value = (value === 'true') ? true : false;
- }
-
- if (!IS_UNDEFINED(property)) {
- %DefineWEProperty(outOptions, property, value);
- }
- }
-
- for (var key in keyValues) {
- if (HAS_OWN_PROPERTY(keyValues, key)) {
- var value = UNDEFINED;
- var map = keyValues[key];
- if (!IS_UNDEFINED(map.property)) {
- // This may return true if the user specifies numeric: 'false', since
- // Boolean('nonempty') === true.
- value = getOption(map.property, map.type, map.values);
- }
- if (!IS_UNDEFINED(value)) {
- updateProperty(map.property, map.type, value);
- extension += updateExtension(key, value);
- continue;
- }
- // User options didn't have it, check Unicode extension.
- // Here we want to convert strings 'true', 'false' into proper Boolean
- // values (not a user error).
- if (HAS_OWN_PROPERTY(extensionMap, key)) {
- value = extensionMap[key];
- if (!IS_UNDEFINED(value)) {
- updateProperty(map.property, map.type, value);
- extension += updateExtension(key, value);
- } else if (map.type === 'boolean') {
- // Boolean keys are allowed not to have values in Unicode extension.
- // Those default to true.
- updateProperty(map.property, map.type, true);
- extension += updateExtension(key, true);
- }
- }
- }
- }
-
- return extension === ''? '' : '-u' + extension;
-}
-
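setOptions merged explicit option properties with -u- Unicode extension keywords into one ICU request. The effect is still visible through resolvedOptions, e.g. for the nu (numbering system) key:

const nf = new Intl.NumberFormat('ar-EG-u-nu-latn');
nf.resolvedOptions().numberingSystem;  // "latn", taken from the extension
nf.format(123);                        // "123" instead of Arabic-Indic digits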
-
-/**
* Given an array-like, outputs an Array with the numbered
* properties copied over and defined
* configurable: false, writable: false, enumerable: true.
@@ -513,66 +331,6 @@ function getAvailableLocalesOf(service) {
return available;
}
-
-/**
- * Defines a property and sets writable, enumerable and configurable to true.
- */
-function defineWECProperty(object, property, value) {
- %object_define_property(object, property, {value: value,
- writable: true,
- enumerable: true,
- configurable: true});
-}
-
-
-/**
- * Adds property to an object if the value is not undefined.
- * Sets all descriptors to true.
- */
-function addWECPropertyIfDefined(object, property, value) {
- if (!IS_UNDEFINED(value)) {
- defineWECProperty(object, property, value);
- }
-}
-
-
-/**
- * Returns titlecased word, aMeRricA -> America.
- */
-function toTitleCaseWord(word) {
- return %StringToUpperCaseIntl(%_Call(StringSubstr, word, 0, 1)) +
- %StringToLowerCaseIntl(%_Call(StringSubstr, word, 1));
-}
-
-/**
- * Returns titlecased location, bueNos_airES -> Buenos_Aires
- * or ho_cHi_minH -> Ho_Chi_Minh. It is locale-agnostic and deals
- * only with ASCII characters.
- * 'of', 'au' and 'es' are special-cased and lowercased.
- */
-function toTitleCaseTimezoneLocation(location) {
- var match = %regexp_internal_match(GetTimezoneNameLocationPartRE(), location)
- if (IS_NULL(match)) throw %make_range_error(kExpectedLocation, location);
-
- var result = toTitleCaseWord(match[1]);
- if (!IS_UNDEFINED(match[2]) && 2 < match.length) {
- // The first character is a separator, '_' or '-'.
- // No IANA zone name has both '_' and '-'.
- var separator = %_Call(StringSubstring, match[2], 0, 1);
- var parts = %StringSplit(match[2], separator, kMaxUint32);
- for (var i = 1; i < parts.length; i++) {
- var part = parts[i]
- var lowercasedPart = %StringToLowerCaseIntl(part);
- result = result + separator +
- ((lowercasedPart !== 'es' &&
- lowercasedPart !== 'of' && lowercasedPart !== 'au') ?
- toTitleCaseWord(part) : lowercasedPart);
- }
- }
- return result;
-}
-
-
/**
* Returns an InternalArray where all locales are canonicalized and duplicates
* removed.
@@ -627,797 +385,6 @@ DEFINE_METHOD(
}
);
-/**
- * Collator resolvedOptions method.
- */
-DEFINE_METHOD(
- GlobalIntlCollator.prototype,
- resolvedOptions() {
- return %CollatorResolvedOptions(this);
- }
-);
-
-
-/**
- * Returns the subset of the given locale list for which this locale list
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * Options are optional parameter.
- */
-DEFINE_METHOD(
- GlobalIntlCollator,
- supportedLocalesOf(locales) {
- return %SupportedLocalesOf('collator', locales, arguments[1]);
- }
-);
-
-
-DEFINE_METHOD(
- GlobalIntlPluralRules.prototype,
- resolvedOptions() {
- return %PluralRulesResolvedOptions(this);
- }
-);
-
-DEFINE_METHOD(
- GlobalIntlPluralRules,
- supportedLocalesOf(locales) {
- return %SupportedLocalesOf('pluralrules', locales, arguments[1]);
- }
-);
-
-DEFINE_METHOD(
- GlobalIntlPluralRules.prototype,
- select(value) {
- return %PluralRulesSelect(this, TO_NUMBER(value) + 0);
- }
-);
-
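The removed PluralRules wrappers were thin forwards to the runtime; the behavior is the standard one:

const pr = new Intl.PluralRules('en');
pr.select(1);   // "one"
pr.select(2);   // "other"
Intl.PluralRules.supportedLocalesOf(['en', 'xx']);  // ["en"] on a typical build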
-// ECMA 402 #sec-setnfdigitoptions
-// SetNumberFormatDigitOptions ( intlObj, options, mnfdDefault, mxfdDefault )
-function SetNumberFormatDigitOptions(internalOptions, options,
- mnfdDefault, mxfdDefault) {
- // Digit ranges.
- var mnid = %GetNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
- %DefineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
-
- var mnfd = %GetNumberOption(options, 'minimumFractionDigits', 0, 20,
- mnfdDefault);
- %DefineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
-
- var mxfdActualDefault = MathMax(mnfd, mxfdDefault);
-
- var mxfd = %GetNumberOption(options, 'maximumFractionDigits', mnfd, 20,
- mxfdActualDefault);
-
- %DefineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
-
- var mnsd = options['minimumSignificantDigits'];
- var mxsd = options['maximumSignificantDigits'];
- if (!IS_UNDEFINED(mnsd) || !IS_UNDEFINED(mxsd)) {
- mnsd = %DefaultNumberOption(mnsd, 1, 21, 1, 'minimumSignificantDigits');
- %DefineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
-
- mxsd = %DefaultNumberOption(mxsd, mnsd, 21, 21, 'maximumSignificantDigits');
- %DefineWEProperty(internalOptions, 'maximumSignificantDigits', mxsd);
- }
-}
-
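This is ECMA-402's SetNumberFormatDigitOptions: fraction-digit bounds clamp against each other, and significant-digit options, when present at all, win during formatting:

const sig = new Intl.NumberFormat('en', { maximumSignificantDigits: 3 });
sig.format(123.456);     // "123"
sig.format(0.00123456);  // "0.00123"

// mxfdActualDefault = MathMax(mnfd, mxfdDefault), so the maximum rises:
const frac = new Intl.NumberFormat('en', { minimumFractionDigits: 5 });
frac.resolvedOptions().maximumFractionDigits;  // 5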
-/**
- * Initializes the given object so it's a valid NumberFormat instance.
- * Useful for subclassing.
- */
-function CreateNumberFormat(locales, options) {
- if (IS_UNDEFINED(options)) {
- options = {__proto__: null};
- } else {
- options = TO_OBJECT(options);
- }
-
- var getOption = getGetOption(options, 'numberformat');
-
- var locale = resolveLocale('numberformat', locales, options);
-
- var internalOptions = {__proto__: null};
- %DefineWEProperty(internalOptions, 'style', getOption(
- 'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
-
- var currency = getOption('currency', 'string');
- if (!IS_UNDEFINED(currency) && !%IsWellFormedCurrencyCode(currency)) {
- throw %make_range_error(kInvalidCurrencyCode, currency);
- }
-
- if (internalOptions.style === 'currency' && IS_UNDEFINED(currency)) {
- throw %make_type_error(kCurrencyCode);
- }
-
- var mnfdDefault, mxfdDefault;
-
- var currencyDisplay = getOption(
- 'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
- if (internalOptions.style === 'currency') {
- %DefineWEProperty(internalOptions, 'currency', %StringToUpperCaseIntl(currency));
- %DefineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
-
- mnfdDefault = mxfdDefault = %CurrencyDigits(internalOptions.currency);
- } else {
- mnfdDefault = 0;
- mxfdDefault = internalOptions.style === 'percent' ? 0 : 3;
- }
-
- SetNumberFormatDigitOptions(internalOptions, options, mnfdDefault,
- mxfdDefault);
-
- // Grouping.
- %DefineWEProperty(internalOptions, 'useGrouping', getOption(
- 'useGrouping', 'boolean', UNDEFINED, true));
-
- // ICU prefers options to be passed using -u- extension key/values for
- // number format, so we need to build that.
- var extensionMap = %ParseExtension(locale.extension);
-
- /**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a number format.
- */
- var NUMBER_FORMAT_KEY_MAP = {
- __proto__: null,
- 'nu': {__proto__: null, 'property': UNDEFINED, 'type': 'string'}
- };
-
- var extension = setOptions(options, extensionMap, NUMBER_FORMAT_KEY_MAP,
- getOption, internalOptions);
-
- var requestedLocale = locale.locale + extension;
- var resolved = %object_define_properties({__proto__: null}, {
- currency: {writable: true},
- currencyDisplay: {writable: true},
- locale: {writable: true},
- maximumFractionDigits: {writable: true},
- minimumFractionDigits: {writable: true},
- minimumIntegerDigits: {writable: true},
- numberingSystem: {writable: true},
- requestedLocale: {value: requestedLocale, writable: true},
- style: {value: internalOptions.style, writable: true},
- useGrouping: {writable: true}
- });
- if (HAS_OWN_PROPERTY(internalOptions, 'minimumSignificantDigits')) {
- %DefineWEProperty(resolved, 'minimumSignificantDigits', UNDEFINED);
- }
- if (HAS_OWN_PROPERTY(internalOptions, 'maximumSignificantDigits')) {
- %DefineWEProperty(resolved, 'maximumSignificantDigits', UNDEFINED);
- }
- var numberFormat = %CreateNumberFormat(requestedLocale, internalOptions,
- resolved);
-
- if (internalOptions.style === 'currency') {
- %object_define_property(resolved, 'currencyDisplay',
- {value: currencyDisplay, writable: true});
- }
-
- %MarkAsInitializedIntlObjectOfType(numberFormat, NUMBER_FORMAT_TYPE);
- numberFormat[resolvedSymbol] = resolved;
-
- return numberFormat;
-}
-
-
-/**
- * Constructs Intl.NumberFormat object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-function NumberFormatConstructor() {
- return IntlConstruct(this, GlobalIntlNumberFormat, CreateNumberFormat,
- new.target, arguments, true);
-}
-%SetCode(GlobalIntlNumberFormat, NumberFormatConstructor);
-
-
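Two details of CreateNumberFormat worth noting, both spec-mandated and preserved by the replacement:

new Intl.NumberFormat('en', { style: 'currency' });
// TypeError: the currency option is required with style: 'currency'

const usd = new Intl.NumberFormat('en', { style: 'currency', currency: 'usd' });
usd.resolvedOptions().currency;  // "USD": uppercased on the way in
usd.format(9.5);                 // "$9.50": 2 digits via CurrencyDigits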
-/**
- * NumberFormat resolvedOptions method.
- */
-DEFINE_METHOD(
- GlobalIntlNumberFormat.prototype,
- resolvedOptions() {
- var methodName = 'resolvedOptions';
- if(!IS_RECEIVER(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver, methodName, this);
- }
- var format = %IntlUnwrapReceiver(this, NUMBER_FORMAT_TYPE,
- GlobalIntlNumberFormat,
- methodName, true);
- var result = {
- locale: format[resolvedSymbol].locale,
- numberingSystem: format[resolvedSymbol].numberingSystem,
- style: format[resolvedSymbol].style,
- useGrouping: format[resolvedSymbol].useGrouping,
- minimumIntegerDigits: format[resolvedSymbol].minimumIntegerDigits,
- minimumFractionDigits: format[resolvedSymbol].minimumFractionDigits,
- maximumFractionDigits: format[resolvedSymbol].maximumFractionDigits,
- };
-
- if (result.style === 'currency') {
- defineWECProperty(result, 'currency', format[resolvedSymbol].currency);
- defineWECProperty(result, 'currencyDisplay',
- format[resolvedSymbol].currencyDisplay);
- }
-
- if (HAS_OWN_PROPERTY(format[resolvedSymbol], 'minimumSignificantDigits')) {
- defineWECProperty(result, 'minimumSignificantDigits',
- format[resolvedSymbol].minimumSignificantDigits);
- }
-
- if (HAS_OWN_PROPERTY(format[resolvedSymbol], 'maximumSignificantDigits')) {
- defineWECProperty(result, 'maximumSignificantDigits',
- format[resolvedSymbol].maximumSignificantDigits);
- }
-
- return result;
- }
-);
-
-
-/**
- * Returns the subset of the given locale list for which this locale list
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * Options is an optional parameter.
- */
-DEFINE_METHOD(
- GlobalIntlNumberFormat,
- supportedLocalesOf(locales) {
- return %SupportedLocalesOf('numberformat', locales, arguments[1]);
- }
-);
-
-/**
- * Returns a string that matches LDML representation of the options object.
- */
-function toLDMLString(options) {
- var getOption = getGetOption(options, 'dateformat');
-
- var ldmlString = '';
-
- var option = getOption('weekday', 'string', ['narrow', 'short', 'long']);
- ldmlString += appendToLDMLString(
- option, {narrow: 'EEEEE', short: 'EEE', long: 'EEEE'});
-
- option = getOption('era', 'string', ['narrow', 'short', 'long']);
- ldmlString += appendToLDMLString(
- option, {narrow: 'GGGGG', short: 'GGG', long: 'GGGG'});
-
- option = getOption('year', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'yy', 'numeric': 'y'});
-
- option = getOption('month', 'string',
- ['2-digit', 'numeric', 'narrow', 'short', 'long']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'MM', 'numeric': 'M',
- 'narrow': 'MMMMM', 'short': 'MMM', 'long': 'MMMM'});
-
- option = getOption('day', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(
- option, {'2-digit': 'dd', 'numeric': 'd'});
-
- var hr12 = getOption('hour12', 'boolean');
- option = getOption('hour', 'string', ['2-digit', 'numeric']);
- if (IS_UNDEFINED(hr12)) {
- ldmlString += appendToLDMLString(option, {'2-digit': 'jj', 'numeric': 'j'});
- } else if (hr12 === true) {
- ldmlString += appendToLDMLString(option, {'2-digit': 'hh', 'numeric': 'h'});
- } else {
- ldmlString += appendToLDMLString(option, {'2-digit': 'HH', 'numeric': 'H'});
- }
-
- option = getOption('minute', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'mm', 'numeric': 'm'});
-
- option = getOption('second', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'});
-
- option = getOption('timeZoneName', 'string', ['short', 'long']);
- ldmlString += appendToLDMLString(option, {short: 'z', long: 'zzzz'});
-
- return ldmlString;
-}
-
-
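toLDMLString concatenates one LDML pattern run per option, in the fixed order weekday, era, year, month, day, hour, minute, second, timeZoneName. Tracing the tables above (a sketch of the mapping, not the formatter output):

// { year: 'numeric', month: 'long', day: 'numeric' }        -> "yMMMMd"
// { weekday: 'short', hour: '2-digit', minute: '2-digit',
//   hour12: true }                                          -> "EEEhhmm"
// With hour12 omitted, hour maps to the locale-sensitive 'j' skeleton.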
-/**
- * Returns either LDML equivalent of the current option or empty string.
- */
-function appendToLDMLString(option, pairs) {
- if (!IS_UNDEFINED(option)) {
- return pairs[option];
- } else {
- return '';
- }
-}
-
-
-/**
- * Returns an object that matches the LDML representation of the date.
- */
-function fromLDMLString(ldmlString) {
- // First remove ''-quoted text, so we lose strings like 'Uhr'.
- ldmlString = %RegExpInternalReplace(GetQuotedStringRE(), ldmlString, '');
-
- var options = {__proto__: null};
- var match = %regexp_internal_match(/E{3,5}/, ldmlString);
- options = appendToDateTimeObject(
- options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
-
- match = %regexp_internal_match(/G{3,5}/, ldmlString);
- options = appendToDateTimeObject(
- options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
-
- match = %regexp_internal_match(/y{1,2}/, ldmlString);
- options = appendToDateTimeObject(
- options, 'year', match, {y: 'numeric', yy: '2-digit'});
-
- match = %regexp_internal_match(/M{1,5}/, ldmlString);
- options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
- M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
-
- // Sometimes we get L instead of M for month (the standalone name).
- match = %regexp_internal_match(/L{1,5}/, ldmlString);
- options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
- L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
-
- match = %regexp_internal_match(/d{1,2}/, ldmlString);
- options = appendToDateTimeObject(
- options, 'day', match, {d: 'numeric', dd: '2-digit'});
-
- match = %regexp_internal_match(/h{1,2}/, ldmlString);
- if (match !== null) {
- options['hour12'] = true;
- }
- options = appendToDateTimeObject(
- options, 'hour', match, {h: 'numeric', hh: '2-digit'});
-
- match = %regexp_internal_match(/H{1,2}/, ldmlString);
- if (match !== null) {
- options['hour12'] = false;
- }
- options = appendToDateTimeObject(
- options, 'hour', match, {H: 'numeric', HH: '2-digit'});
-
- match = %regexp_internal_match(/m{1,2}/, ldmlString);
- options = appendToDateTimeObject(
- options, 'minute', match, {m: 'numeric', mm: '2-digit'});
-
- match = %regexp_internal_match(/s{1,2}/, ldmlString);
- options = appendToDateTimeObject(
- options, 'second', match, {s: 'numeric', ss: '2-digit'});
-
- match = %regexp_internal_match(/z|zzzz/, ldmlString);
- options = appendToDateTimeObject(
- options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
-
- return options;
-}
-
-
-function appendToDateTimeObject(options, option, match, pairs) {
- if (IS_NULL(match)) {
- if (!HAS_OWN_PROPERTY(options, option)) {
- %DefineWEProperty(options, option, UNDEFINED);
- }
- return options;
- }
-
- var property = match[0];
- %DefineWEProperty(options, option, pairs[property]);
-
- return options;
-}
-
-
-/**
- * Returns options with at least default values in it.
- */
-function toDateTimeOptions(options, required, defaults) {
- if (IS_UNDEFINED(options)) {
- options = {__proto__: null};
- } else {
- options = TO_OBJECT(options);
- }
-
- options = %object_create(options);
-
- var needsDefault = true;
- if ((required === 'date' || required === 'any') &&
- (!IS_UNDEFINED(options.weekday) || !IS_UNDEFINED(options.year) ||
- !IS_UNDEFINED(options.month) || !IS_UNDEFINED(options.day))) {
- needsDefault = false;
- }
-
- if ((required === 'time' || required === 'any') &&
- (!IS_UNDEFINED(options.hour) || !IS_UNDEFINED(options.minute) ||
- !IS_UNDEFINED(options.second))) {
- needsDefault = false;
- }
-
- if (needsDefault && (defaults === 'date' || defaults === 'all')) {
- %object_define_property(options, 'year', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- %object_define_property(options, 'month', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- %object_define_property(options, 'day', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- }
-
- if (needsDefault && (defaults === 'time' || defaults === 'all')) {
- %object_define_property(options, 'hour', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- %object_define_property(options, 'minute', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- %object_define_property(options, 'second', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- }
-
- return options;
-}
-
-
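toDateTimeOptions is why a bare Intl.DateTimeFormat formats a full date: CreateDateTimeFormat below calls it with required 'any' and defaults 'date', so an empty options bag gains numeric year, month and day, while time fields stay untouched:

const opts = new Intl.DateTimeFormat('en').resolvedOptions();
[opts.year, opts.month, opts.day];  // ["numeric", "numeric", "numeric"]
opts.hour;                          // undefined: no time default was added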
-/**
- * Initializes the given object so it's a valid DateTimeFormat instance.
- * Useful for subclassing.
- */
-function CreateDateTimeFormat(locales, options) {
- if (IS_UNDEFINED(options)) {
- options = {__proto__: null};
- }
-
- var locale = resolveLocale('dateformat', locales, options);
-
- options = %ToDateTimeOptions(options, 'any', 'date');
-
- var getOption = getGetOption(options, 'dateformat');
-
- // We implement only the best fit algorithm, but still need to check
- // that the formatMatcher values are in range.
- var matcher = getOption('formatMatcher', 'string',
- ['basic', 'best fit'], 'best fit');
-
- // Build LDML string for the skeleton that we pass to the formatter.
- var ldmlString = toLDMLString(options);
-
- // Filter out supported extension keys so we know what to put in resolved
- // section later on.
- // We need to pass calendar and number system to the method.
- var tz = canonicalizeTimeZoneID(options.timeZone);
-
- // ICU prefers options to be passed using -u- extension key/values, so
- // we need to build that.
- var internalOptions = {__proto__: null};
- var extensionMap = %ParseExtension(locale.extension);
-
- /**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a date/time format.
- */
- var DATETIME_FORMAT_KEY_MAP = {
- __proto__: null,
- 'ca': {__proto__: null, 'property': UNDEFINED, 'type': 'string'},
- 'nu': {__proto__: null, 'property': UNDEFINED, 'type': 'string'}
- };
-
- var extension = setOptions(options, extensionMap, DATETIME_FORMAT_KEY_MAP,
- getOption, internalOptions);
-
- var requestedLocale = locale.locale + extension;
- var resolved = %object_define_properties({__proto__: null}, {
- calendar: {writable: true},
- day: {writable: true},
- era: {writable: true},
- hour12: {writable: true},
- hour: {writable: true},
- locale: {writable: true},
- minute: {writable: true},
- month: {writable: true},
- numberingSystem: {writable: true},
- [patternSymbol]: {writable: true},
- requestedLocale: {value: requestedLocale, writable: true},
- second: {writable: true},
- timeZone: {writable: true},
- timeZoneName: {writable: true},
- tz: {value: tz, writable: true},
- weekday: {writable: true},
- year: {writable: true}
- });
-
- var dateFormat = %CreateDateTimeFormat(
- requestedLocale,
- {__proto__: null, skeleton: ldmlString, timeZone: tz}, resolved);
-
- if (resolved.timeZone === "Etc/Unknown") {
- throw %make_range_error(kInvalidTimeZone, tz);
- }
-
- %MarkAsInitializedIntlObjectOfType(dateFormat, DATE_TIME_FORMAT_TYPE);
- dateFormat[resolvedSymbol] = resolved;
-
- return dateFormat;
-}
-
-
-/**
- * Constructs Intl.DateTimeFormat object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-function DateTimeFormatConstructor() {
- return IntlConstruct(this, GlobalIntlDateTimeFormat, CreateDateTimeFormat,
- new.target, arguments, true);
-}
-%SetCode(GlobalIntlDateTimeFormat, DateTimeFormatConstructor);
-
-
-/**
- * DateTimeFormat resolvedOptions method.
- */
-DEFINE_METHOD(
- GlobalIntlDateTimeFormat.prototype,
- resolvedOptions() {
- var methodName = 'resolvedOptions';
- if(!IS_RECEIVER(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver, methodName, this);
- }
- var format = %IntlUnwrapReceiver(this, DATE_TIME_FORMAT_TYPE,
- GlobalIntlDateTimeFormat,
- methodName, true);
-
- /**
- * Maps ICU calendar names to LDML/BCP47 types for key 'ca'.
- * See typeMap section in third_party/icu/source/data/misc/keyTypeData.txt
- * and
- * http://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
- */
- var ICU_CALENDAR_MAP = {
- __proto__: null,
- 'gregorian': 'gregory',
- 'ethiopic-amete-alem': 'ethioaa'
- };
-
- var fromPattern = fromLDMLString(format[resolvedSymbol][patternSymbol]);
- var userCalendar = ICU_CALENDAR_MAP[format[resolvedSymbol].calendar];
- if (IS_UNDEFINED(userCalendar)) {
- // No match means that ICU's legacy name is identical to the LDML/BCP type.
- userCalendar = format[resolvedSymbol].calendar;
- }
-
- var result = {
- locale: format[resolvedSymbol].locale,
- numberingSystem: format[resolvedSymbol].numberingSystem,
- calendar: userCalendar,
- timeZone: format[resolvedSymbol].timeZone
- };
-
- addWECPropertyIfDefined(result, 'timeZoneName', fromPattern.timeZoneName);
- addWECPropertyIfDefined(result, 'era', fromPattern.era);
- addWECPropertyIfDefined(result, 'year', fromPattern.year);
- addWECPropertyIfDefined(result, 'month', fromPattern.month);
- addWECPropertyIfDefined(result, 'day', fromPattern.day);
- addWECPropertyIfDefined(result, 'weekday', fromPattern.weekday);
- addWECPropertyIfDefined(result, 'hour12', fromPattern.hour12);
- addWECPropertyIfDefined(result, 'hour', fromPattern.hour);
- addWECPropertyIfDefined(result, 'minute', fromPattern.minute);
- addWECPropertyIfDefined(result, 'second', fromPattern.second);
-
- return result;
- }
-);
-
-
-/**
- * Returns the subset of the given locale list for which this locale list
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * Options is an optional parameter.
- */
-DEFINE_METHOD(
- GlobalIntlDateTimeFormat,
- supportedLocalesOf(locales) {
- return %SupportedLocalesOf('dateformat', locales, arguments[1]);
- }
-);
-
-
-/**
- * Returns the canonical Area/Location(/Location) name, or throws an exception
- * if the zone name is an invalid IANA name.
- */
-function canonicalizeTimeZoneID(tzID) {
- // Skip undefined zones.
- if (IS_UNDEFINED(tzID)) {
- return tzID;
- }
-
- // Convert zone name to string.
- tzID = TO_STRING(tzID);
-
- // Special case handling (UTC, GMT).
- var upperID = %StringToUpperCaseIntl(tzID);
- if (upperID === 'UTC' || upperID === 'GMT' ||
- upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
- return 'UTC';
- }
-
- // We expect only _, '-' and / besides ASCII letters.
- // All inputs should conform to Area/Location(/Location)*, or Etc/GMT* .
- // TODO(jshin): 1. Support 'GB-Eire', 'EST5EDT', 'ROK', 'US/*', 'NZ' and many
- // other aliases/linked names when moving timezone validation code to C++.
- // See crbug.com/364374 and crbug.com/v8/8007 .
- // 2. Resolve the difference between CLDR/ICU and the IANA time zone db.
- // See http://unicode.org/cldr/trac/ticket/9892 and crbug.com/645807 .
- let match = %regexp_internal_match(GetTimezoneNameCheckRE(), tzID);
- if (IS_NULL(match)) {
- let match =
- %regexp_internal_match(GetGMTOffsetTimezoneNameCheckRE(), upperID);
- if (!IS_NULL(match) && match.length == 2)
- return "Etc/GMT" + match.groups.offset;
- else
- throw %make_range_error(kInvalidTimeZone, tzID);
- }
-
- let result = toTitleCaseTimezoneLocation(match[1]) + '/' +
- toTitleCaseTimezoneLocation(match[2]);
-
- if (!IS_UNDEFINED(match[3]) && 3 < match.length) {
- let locations = %StringSplit(match[3], '/', kMaxUint32);
- // The 1st element is empty. Start with i = 1.
- for (var i = 1; i < locations.length; i++) {
- result = result + '/' + toTitleCaseTimezoneLocation(locations[i]);
- }
- }
-
- return result;
-}
-
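canonicalizeTimeZoneID accepts any casing of an IANA Area/Location name, title-cases it part by part, folds the UTC aliases together, and rejects everything else with a RangeError. Observable through DateTimeFormat:

const tz = id =>
    new Intl.DateTimeFormat('en', { timeZone: id }).resolvedOptions().timeZone;
tz('america/new_york');  // "America/New_York"
tz('etc/utc');           // "UTC"
tz('Not/AZone');         // throws RangeError: invalid time zone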
-/**
- * Initializes the given object so it's a valid BreakIterator instance.
- * Useful for subclassing.
- */
-function CreateBreakIterator(locales, options) {
- if (IS_UNDEFINED(options)) {
- options = {__proto__: null};
- }
-
- var getOption = getGetOption(options, 'breakiterator');
-
- var internalOptions = {__proto__: null};
-
- %DefineWEProperty(internalOptions, 'type', getOption(
- 'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
-
- var locale = resolveLocale('breakiterator', locales, options);
- var resolved = %object_define_properties({__proto__: null}, {
- requestedLocale: {value: locale.locale, writable: true},
- type: {value: internalOptions.type, writable: true},
- locale: {writable: true}
- });
-
- var iterator = %CreateBreakIterator(locale.locale, internalOptions, resolved);
-
- %MarkAsInitializedIntlObjectOfType(iterator, BREAK_ITERATOR_TYPE);
- iterator[resolvedSymbol] = resolved;
-
- return iterator;
-}
-
-
-/**
- * Constructs Intl.v8BreakIterator object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-function v8BreakIteratorConstructor() {
- return IntlConstruct(this, GlobalIntlv8BreakIterator, CreateBreakIterator,
- new.target, arguments);
-}
-%SetCode(GlobalIntlv8BreakIterator, v8BreakIteratorConstructor);
-
-
-/**
- * BreakIterator resolvedOptions method.
- */
-DEFINE_METHOD(
- GlobalIntlv8BreakIterator.prototype,
- resolvedOptions() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
- var methodName = 'resolvedOptions';
- if(!IS_RECEIVER(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver, methodName, this);
- }
- var segmenter = %IntlUnwrapReceiver(this, BREAK_ITERATOR_TYPE,
- GlobalIntlv8BreakIterator, methodName,
- false);
-
- return {
- locale: segmenter[resolvedSymbol].locale,
- type: segmenter[resolvedSymbol].type
- };
- }
-);
-
-
-/**
- * Returns the subset of the given locale list for which this locale list
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * Options are optional parameter.
- */
-DEFINE_METHOD(
- GlobalIntlv8BreakIterator,
- supportedLocalesOf(locales) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
- return %SupportedLocalesOf('breakiterator', locales, arguments[1]);
- }
-);
-
-
-/**
- * Returns the index of the first break in the string and moves the
- * current pointer.
- */
-function first(iterator) {
- return %BreakIteratorFirst(iterator);
-}
-
-
-/**
- * Returns the index of the next break and moves the pointer.
- */
-function next(iterator) {
- return %BreakIteratorNext(iterator);
-}
-
-
-/**
- * Returns the index of the current break.
- */
-function current(iterator) {
- return %BreakIteratorCurrent(iterator);
-}
-
-
-/**
- * Returns the type of the current break.
- */
-function breakType(iterator) {
- return %BreakIteratorBreakType(iterator);
-}
-
-
-AddBoundMethod(GlobalIntlv8BreakIterator, 'first', first, 0,
- BREAK_ITERATOR_TYPE, false);
-AddBoundMethod(GlobalIntlv8BreakIterator, 'next', next, 0,
- BREAK_ITERATOR_TYPE, false);
-AddBoundMethod(GlobalIntlv8BreakIterator, 'current', current, 0,
- BREAK_ITERATOR_TYPE, false);
-AddBoundMethod(GlobalIntlv8BreakIterator, 'breakType', breakType, 0,
- BREAK_ITERATOR_TYPE, false);
-
// Save references to Intl objects and methods we use, for added security.
var savedObjects = {
__proto__: null,
@@ -1480,51 +447,4 @@ function cachedOrNewService(service, locales, options, defaults) {
"cached_or_new_service", cachedOrNewService
]);
-/**
- * Formats a Date object (this) using locale and options values.
- * If locale or options are omitted, defaults are used - both date and time are
- * present in the output.
- */
-DEFINE_METHOD(
- GlobalDate.prototype,
- toLocaleString() {
- var locales = arguments[0];
- var options = arguments[1];
- return %ToLocaleDateTime(
- this, locales, options, 'any', 'all', 'dateformatall');
- }
-);
-
-
-/**
- * Formats a Date object (this) using locale and options values.
- * If locale or options are omitted, defaults are used - only date is present
- * in the output.
- */
-DEFINE_METHOD(
- GlobalDate.prototype,
- toLocaleDateString() {
- var locales = arguments[0];
- var options = arguments[1];
- return %ToLocaleDateTime(
- this, locales, options, 'date', 'date', 'dateformatdate');
- }
-);
-
-
-/**
- * Formats a Date object (this) using locale and options values.
- * If locale or options are omitted, defaults are used - only time is present
- * in the output.
- */
-DEFINE_METHOD(
- GlobalDate.prototype,
- toLocaleTimeString() {
- var locales = arguments[0];
- var options = arguments[1];
- return %ToLocaleDateTime(
- this, locales, options, 'time', 'time', 'dateformattime');
- }
-);
-
})
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index 8e533c38bc..4eaf990a58 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -62,10 +62,8 @@ macro REQUIRE_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) ||
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro TO_BOOLEAN(arg) = (!!(arg));
-macro TO_INTEGER(arg) = (%_ToInteger(arg));
macro TO_LENGTH(arg) = (%_ToLength(arg));
macro TO_STRING(arg) = (%_ToString(arg));
-macro TO_NUMBER(arg) = (%_ToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index ac1442b414..67ad58c206 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -15,7 +15,7 @@ namespace internal {
enum ParseElementResult { kElementFound, kElementNotFound, kNullHandle };
-class JsonParseInternalizer BASE_EMBEDDED {
+class JsonParseInternalizer {
public:
static MaybeHandle<Object> Internalize(Isolate* isolate,
Handle<Object> object,
@@ -36,7 +36,7 @@ class JsonParseInternalizer BASE_EMBEDDED {
// A simple json parser.
template <bool seq_one_byte>
-class JsonParser BASE_EMBEDDED {
+class JsonParser {
public:
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Parse(
Isolate* isolate, Handle<String> source, Handle<Object> reviver) {
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index 91a1b7201b..fe9a1468b1 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -16,7 +16,7 @@
namespace v8 {
namespace internal {
-class JsonStringifier BASE_EMBEDDED {
+class JsonStringifier {
public:
explicit JsonStringifier(Isolate* isolate);
@@ -755,11 +755,49 @@ void JsonStringifier::SerializeStringUnchecked_(
// Assert that uc16 character is not truncated down to 8 bit.
// The <uc16, char> version of this method must not be called.
DCHECK(sizeof(DestChar) >= sizeof(SrcChar));
-
for (int i = 0; i < src.length(); i++) {
SrcChar c = src[i];
if (DoNotEscape(c)) {
dest->Append(c);
+ } else if (FLAG_harmony_json_stringify && c >= 0xD800 && c <= 0xDFFF) {
+ // The current character is a surrogate.
+ if (c <= 0xDBFF) {
+ // The current character is a leading surrogate.
+ if (i + 1 < src.length()) {
+ // There is a next character.
+ SrcChar next = src[i + 1];
+ if (next >= 0xDC00 && next <= 0xDFFF) {
+ // The next character is a trailing surrogate, meaning this is a
+ // surrogate pair.
+ dest->Append(c);
+ dest->Append(next);
+ i++;
+ } else {
+ // The next character is not a trailing surrogate. Thus, the
+ // current character is a lone leading surrogate.
+ dest->AppendCString("\\u");
+ char* const hex = DoubleToRadixCString(c, 16);
+ dest->AppendCString(hex);
+ DeleteArray(hex);
+ }
+ } else {
+ // There is no next character. Thus, the current character is a lone
+ // leading surrogate.
+ dest->AppendCString("\\u");
+ char* const hex = DoubleToRadixCString(c, 16);
+ dest->AppendCString(hex);
+ DeleteArray(hex);
+ }
+ } else {
+ // The current character is a lone trailing surrogate. (If it had been
+ // preceded by a leading surrogate, we would've ended up in the other
+ // branch earlier on, and the current character would've been handled
+ // as part of the surrogate pair already.)
+ dest->AppendCString("\\u");
+ char* const hex = DoubleToRadixCString(c, 16);
+ dest->AppendCString(hex);
+ DeleteArray(hex);
+ }
} else {
dest->AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
}
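Both serializer paths (the unchecked fast path above and SerializeString_ below) gain the same logic behind FLAG_harmony_json_stringify: well-matched surrogate pairs pass through, while lone surrogates are written as \u escapes, per the Well-formed JSON.stringify proposal. Observable from JS with the flag on:

JSON.stringify('\uD834\uDF06');  // '"𝌆"': a valid pair passes through
JSON.stringify('\uD834');        // '"\\ud834"': lone lead surrogate escaped
JSON.stringify('\uDF06\uD834');  // '"\\udf06\\ud834"': both lone, both escaped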
@@ -784,6 +822,45 @@ void JsonStringifier::SerializeString_(Handle<String> string) {
SrcChar c = reader.Get<SrcChar>(i);
if (DoNotEscape(c)) {
builder_.Append<SrcChar, DestChar>(c);
+ } else if (FLAG_harmony_json_stringify && c >= 0xD800 && c <= 0xDFFF) {
+ // The current character is a surrogate.
+ if (c <= 0xDBFF) {
+ // The current character is a leading surrogate.
+ if (i + 1 < reader.length()) {
+ // There is a next character.
+ SrcChar next = reader.Get<SrcChar>(i + 1);
+ if (next >= 0xDC00 && next <= 0xDFFF) {
+ // The next character is a trailing surrogate, meaning this is a
+ // surrogate pair.
+ builder_.Append<SrcChar, DestChar>(c);
+ builder_.Append<SrcChar, DestChar>(next);
+ i++;
+ } else {
+ // The next character is not a trailing surrogate. Thus, the
+ // current character is a lone leading surrogate.
+ builder_.AppendCString("\\u");
+ char* const hex = DoubleToRadixCString(c, 16);
+ builder_.AppendCString(hex);
+ DeleteArray(hex);
+ }
+ } else {
+ // There is no next character. Thus, the current character is a
+ // lone leading surrogate.
+ builder_.AppendCString("\\u");
+ char* const hex = DoubleToRadixCString(c, 16);
+ builder_.AppendCString(hex);
+ DeleteArray(hex);
+ }
+ } else {
+ // The current character is a lone trailing surrogate. (If it had
+ // been preceded by a leading surrogate, we would've ended up in the
+ // other branch earlier on, and the current character would've been
+ // handled as part of the surrogate pair already.)
+ builder_.AppendCString("\\u");
+ char* const hex = DoubleToRadixCString(c, 16);
+ builder_.AppendCString(hex);
+ DeleteArray(hex);
+ }
} else {
builder_.AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
}
@@ -794,12 +871,15 @@ void JsonStringifier::SerializeString_(Handle<String> string) {
template <>
bool JsonStringifier::DoNotEscape(uint8_t c) {
- return c >= '#' && c <= '~' && c != '\\';
+ // https://tc39.github.io/ecma262/#table-json-single-character-escapes
+ return c >= 0x23 && c <= 0x7E && c != 0x5C;
}
template <>
bool JsonStringifier::DoNotEscape(uint16_t c) {
- return c >= '#' && c != '\\' && c != 0x7F;
+ // https://tc39.github.io/ecma262/#table-json-single-character-escapes
+ return c >= 0x23 && c != 0x5C && c != 0x7F &&
+ (!FLAG_harmony_json_stringify || (c < 0xD800 || c > 0xDFFF));
}
void JsonStringifier::NewLine() {
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index e92902cfb5..ab893d5df9 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -20,9 +20,6 @@
namespace v8 {
namespace internal {
-KeyAccumulator::~KeyAccumulator() {
-}
-
namespace {
static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
@@ -634,10 +631,10 @@ Handle<FixedArray> GetOwnEnumPropertyDictionaryKeys(Isolate* isolate,
Handle<JSObject> object,
T* raw_dictionary) {
Handle<T> dictionary(raw_dictionary, isolate);
- int length = dictionary->NumberOfEnumerableProperties();
- if (length == 0) {
+ if (dictionary->NumberOfElements() == 0) {
return isolate->factory()->empty_fixed_array();
}
+ int length = dictionary->NumberOfEnumerableProperties();
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
T::CopyEnumKeysTo(isolate, dictionary, storage, mode, accumulator);
return storage;
diff --git a/deps/v8/src/keys.h b/deps/v8/src/keys.h
index c8db24a217..7ec7127aa5 100644
--- a/deps/v8/src/keys.h
+++ b/deps/v8/src/keys.h
@@ -32,12 +32,12 @@ enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX };
// Only unique keys are kept by the KeyAccumulator, strings are stored in a
// HashSet for inexpensive lookups. Integer keys are kept in sorted lists which
// are more compact and allow for reasonably fast includes check.
-class KeyAccumulator final BASE_EMBEDDED {
+class KeyAccumulator final {
public:
KeyAccumulator(Isolate* isolate, KeyCollectionMode mode,
PropertyFilter filter)
: isolate_(isolate), mode_(mode), filter_(filter) {}
- ~KeyAccumulator();
+ ~KeyAccumulator() = default;
static MaybeHandle<FixedArray> GetKeys(
Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 3c29bd7eaa..c23616116e 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -140,11 +140,9 @@ bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate,
std::shared_ptr<DefaultForegroundTaskRunner> task_runner;
{
base::LockGuard<base::Mutex> guard(&lock_);
- if (foreground_task_runner_map_.find(isolate) ==
- foreground_task_runner_map_.end()) {
- return failed_result;
- }
- task_runner = foreground_task_runner_map_[isolate];
+ auto it = foreground_task_runner_map_.find(isolate);
+ if (it == foreground_task_runner_map_.end()) return failed_result;
+ task_runner = it->second;
}
std::unique_ptr<Task> task = task_runner->PopTaskFromQueue(wait_for_work);
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 1c844f0f09..40a945fd8a 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -35,7 +35,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
std::unique_ptr<v8::TracingController> tracing_controller = {});
- virtual ~DefaultPlatform();
+ ~DefaultPlatform() override;
void SetThreadPoolSize(int thread_pool_size);
diff --git a/deps/v8/src/libplatform/default-worker-threads-task-runner.cc b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
index 4251d8076e..f3338acfe3 100644
--- a/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
+++ b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
@@ -17,6 +17,7 @@ DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(
}
}
+// NOLINTNEXTLINE
DefaultWorkerThreadsTaskRunner::~DefaultWorkerThreadsTaskRunner() {
// This destructor is needed because we have unique_ptr to the WorkerThreads,
// and the {WorkerThread} class is forward declared in the header file.
diff --git a/deps/v8/src/libplatform/default-worker-threads-task-runner.h b/deps/v8/src/libplatform/default-worker-threads-task-runner.h
index 5e46e3dd41..0b146a509c 100644
--- a/deps/v8/src/libplatform/default-worker-threads-task-runner.h
+++ b/deps/v8/src/libplatform/default-worker-threads-task-runner.h
@@ -19,7 +19,7 @@ class V8_PLATFORM_EXPORT DefaultWorkerThreadsTaskRunner
public:
DefaultWorkerThreadsTaskRunner(uint32_t thread_pool_size);
- ~DefaultWorkerThreadsTaskRunner();
+ ~DefaultWorkerThreadsTaskRunner() override;
void Terminate();
diff --git a/deps/v8/src/libplatform/tracing/trace-buffer.cc b/deps/v8/src/libplatform/tracing/trace-buffer.cc
index c7142ea520..8bec153440 100644
--- a/deps/v8/src/libplatform/tracing/trace-buffer.cc
+++ b/deps/v8/src/libplatform/tracing/trace-buffer.cc
@@ -15,8 +15,6 @@ TraceBufferRingBuffer::TraceBufferRingBuffer(size_t max_chunks,
chunks_.resize(max_chunks);
}
-TraceBufferRingBuffer::~TraceBufferRingBuffer() {}
-
TraceObject* TraceBufferRingBuffer::AddTraceEvent(uint64_t* handle) {
base::LockGuard<base::Mutex> guard(&mutex_);
if (is_empty_ || chunks_[chunk_index_]->IsFull()) {
diff --git a/deps/v8/src/libplatform/tracing/trace-buffer.h b/deps/v8/src/libplatform/tracing/trace-buffer.h
index 3c756b7a69..95b9313338 100644
--- a/deps/v8/src/libplatform/tracing/trace-buffer.h
+++ b/deps/v8/src/libplatform/tracing/trace-buffer.h
@@ -18,7 +18,7 @@ namespace tracing {
class TraceBufferRingBuffer : public TraceBuffer {
public:
TraceBufferRingBuffer(size_t max_chunks, TraceWriter* trace_writer);
- ~TraceBufferRingBuffer();
+ ~TraceBufferRingBuffer() override = default;
TraceObject* AddTraceEvent(uint64_t* handle) override;
TraceObject* GetEventByHandle(uint64_t handle) override;
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.h b/deps/v8/src/libplatform/tracing/trace-writer.h
index d811351389..df48c5a377 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.h
+++ b/deps/v8/src/libplatform/tracing/trace-writer.h
@@ -15,7 +15,7 @@ class JSONTraceWriter : public TraceWriter {
public:
explicit JSONTraceWriter(std::ostream& stream);
JSONTraceWriter(std::ostream& stream, const std::string& tag);
- ~JSONTraceWriter();
+ ~JSONTraceWriter() override;
void AppendTraceEvent(TraceObject* trace_event) override;
void Flush() override;
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index aa8789fa07..3d02347216 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -40,7 +40,7 @@ const int g_num_builtin_categories = 3;
// Skip default categories.
v8::base::AtomicWord g_category_index = g_num_builtin_categories;
-TracingController::TracingController() {}
+TracingController::TracingController() = default;
TracingController::~TracingController() {
StopTracing();
diff --git a/deps/v8/src/libplatform/worker-thread.h b/deps/v8/src/libplatform/worker-thread.h
index 22b0626024..abf0383e03 100644
--- a/deps/v8/src/libplatform/worker-thread.h
+++ b/deps/v8/src/libplatform/worker-thread.h
@@ -21,7 +21,7 @@ class TaskQueue;
class V8_PLATFORM_EXPORT WorkerThread : public NON_EXPORTED_BASE(base::Thread) {
public:
explicit WorkerThread(TaskQueue* queue);
- virtual ~WorkerThread();
+ ~WorkerThread() override;
// Thread implementation.
void Run() override;
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index f374ccddaf..464b4de32a 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -21,20 +21,12 @@
#include <mach/mach.h>
// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
// and is a typedef for struct sigcontext. There is no uc_mcontext.
-#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && !V8_OS_OPENBSD
+#elif !V8_OS_OPENBSD
#include <ucontext.h>
#endif
#include <unistd.h>
-// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
-// Old versions of the C library <signal.h> didn't define the type.
-#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- (defined(__arm__) || defined(__aarch64__)) && \
- !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
-#include <asm/sigcontext.h> // NOLINT
-#endif
-
#elif V8_OS_WIN || V8_OS_CYGWIN
#include "src/base/win32-headers.h"
@@ -423,7 +415,7 @@ class SignalHandler {
static void Restore() {
if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
+ sigaction(SIGPROF, &old_signal_handler_, nullptr);
signal_handler_installed_ = false;
}
}
diff --git a/deps/v8/src/locked-queue.h b/deps/v8/src/locked-queue.h
index 1667917329..5bcab57b0c 100644
--- a/deps/v8/src/locked-queue.h
+++ b/deps/v8/src/locked-queue.h
@@ -17,7 +17,7 @@ namespace internal {
// See:
// https://www.cs.rochester.edu/research/synchronization/pseudocode/queues.html
template <typename Record>
-class LockedQueue final BASE_EMBEDDED {
+class LockedQueue final {
public:
inline LockedQueue();
inline ~LockedQueue();
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index bd56aaf418..e30b32b875 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -58,12 +58,12 @@ class Log {
// Utility class for formatting log messages. It escapes the given messages
// and then appends them to the static buffer in Log.
- class MessageBuilder BASE_EMBEDDED {
+ class MessageBuilder {
public:
// Create a message builder starting from position 0.
// This acquires the mutex in the log as well.
explicit MessageBuilder(Log* log);
- ~MessageBuilder() { }
+ ~MessageBuilder() = default;
void AppendString(String* str,
base::Optional<int> length_limit = base::nullopt);
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index a3f484fd59..d78625a46a 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -349,7 +349,6 @@ void ExternalCodeEventListener::LogExistingCode() {
HandleScope scope(isolate_);
ExistingCodeLogger logger(isolate_, this);
logger.LogCodeObjects();
- logger.LogBytecodeHandlers();
logger.LogCompiledFunctions();
}
@@ -655,7 +654,7 @@ void JitLogger::LogRecordedBuffer(AbstractCode* code,
SharedFunctionInfo* shared, const char* name,
int length) {
JitCodeEvent event;
- memset(&event, 0, sizeof(event));
+ memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADDED;
event.code_start = reinterpret_cast<void*>(code->InstructionStart());
event.code_type =
@@ -676,7 +675,7 @@ void JitLogger::LogRecordedBuffer(AbstractCode* code,
void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) {
JitCodeEvent event;
- memset(&event, 0, sizeof(event));
+ memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADDED;
event.code_type = JitCodeEvent::JIT_CODE;
event.code_start = code->instructions().start();
@@ -708,7 +707,7 @@ void JitLogger::AddCodeLinePosInfoEvent(
int position,
JitCodeEvent::PositionType position_type) {
JitCodeEvent event;
- memset(&event, 0, sizeof(event));
+ memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADD_LINE_POS_INFO;
event.user_data = jit_handler_data;
event.line_info.offset = pc_offset;
@@ -722,7 +721,7 @@ void JitLogger::AddCodeLinePosInfoEvent(
void* JitLogger::StartCodePosInfoEvent() {
JitCodeEvent event;
- memset(&event, 0, sizeof(event));
+ memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_START_LINE_INFO_RECORDING;
event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
@@ -733,7 +732,7 @@ void* JitLogger::StartCodePosInfoEvent() {
void JitLogger::EndCodePosInfoEvent(Address start_address,
void* jit_handler_data) {
JitCodeEvent event;
- memset(&event, 0, sizeof(event));
+ memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
event.code_start = reinterpret_cast<void*>(start_address);
event.user_data = jit_handler_data;
@@ -794,7 +793,7 @@ class Profiler: public base::Thread {
}
}
- virtual void Run();
+ void Run() override;
// Pause and Resume TickSample data collection.
void Pause() { paused_ = true; }
@@ -848,7 +847,7 @@ class Ticker: public sampler::Sampler {
profiler_(nullptr),
sampling_thread_(new SamplingThread(this, interval_microseconds)) {}
- ~Ticker() {
+ ~Ticker() override {
if (IsActive()) Stop();
delete sampling_thread_;
}
@@ -1323,7 +1322,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
// <script-offset> is the position within the script
// <inlining-id> is the offset in the <inlining> table
// <inlining> table is a sequence of strings of the form
- // F<function-id>O<script-offset>[I<inlining-id>
+ // F<function-id>O<script-offset>[I<inlining-id>]
// where
// <function-id> is an index into the <fns> function table
// <fns> is the function table encoded as a sequence of strings
@@ -1335,12 +1334,8 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
<< shared->EndPosition() << kNext;
SourcePositionTableIterator iterator(code->source_position_table());
- bool is_first = true;
bool hasInlined = false;
for (; !iterator.done(); iterator.Advance()) {
- if (is_first) {
- is_first = false;
- }
SourcePosition pos = iterator.source_position();
msg << "C" << iterator.code_offset() << "O" << pos.ScriptOffset();
if (pos.isInlined()) {
@@ -1604,7 +1599,7 @@ bool Logger::EnsureLogScriptSource(Script* script) {
// Make sure the script is written to the log file.
int script_id = script->id();
if (logged_source_code_.find(script_id) != logged_source_code_.end()) {
- return false;
+ return true;
}
// This script has not been logged yet.
logged_source_code_.insert(script_id);
@@ -1838,16 +1833,6 @@ void Logger::LogCodeObject(Object* object) {
void Logger::LogCodeObjects() { existing_code_logger_.LogCodeObjects(); }
-void Logger::LogBytecodeHandler(interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale,
- Code* code) {
- existing_code_logger_.LogBytecodeHandler(bytecode, operand_scale, code);
-}
-
-void Logger::LogBytecodeHandlers() {
- existing_code_logger_.LogBytecodeHandlers();
-}
-
void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<AbstractCode> code) {
existing_code_logger_.LogExistingFunction(shared, code);
@@ -2162,37 +2147,6 @@ void ExistingCodeLogger::LogCompiledFunctions() {
}
}
-void ExistingCodeLogger::LogBytecodeHandler(
- interpreter::Bytecode bytecode, interpreter::OperandScale operand_scale,
- Code* code) {
- std::string bytecode_name =
- interpreter::Bytecodes::ToString(bytecode, operand_scale);
- CALL_CODE_EVENT_HANDLER(
- CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
- AbstractCode::cast(code), bytecode_name.c_str()))
-}
-
-void ExistingCodeLogger::LogBytecodeHandlers() {
- const interpreter::OperandScale kOperandScales[] = {
-#define VALUE(Name, _) interpreter::OperandScale::k##Name,
- OPERAND_SCALE_LIST(VALUE)
-#undef VALUE
- };
-
- const int last_index = static_cast<int>(interpreter::Bytecode::kLast);
- interpreter::Interpreter* interpreter = isolate_->interpreter();
- for (auto operand_scale : kOperandScales) {
- for (int index = 0; index <= last_index; ++index) {
- interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(index);
- if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
- Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
- if (isolate_->heap()->IsDeserializeLazyHandler(code)) continue;
- LogBytecodeHandler(bytecode, operand_scale, code);
- }
- }
- }
-}
-
void ExistingCodeLogger::LogExistingFunction(
Handle<SharedFunctionInfo> shared, Handle<AbstractCode> code,
CodeEventListener::LogEventsAndTags tag) {
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 5ce7418364..8ec63a9b46 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -75,15 +75,6 @@ class Profiler;
class RuntimeCallTimer;
class Ticker;
-namespace interpreter {
-enum class Bytecode : uint8_t;
-enum class OperandScale : uint8_t;
-} // namespace interpreter
-
-namespace wasm {
-class WasmCode;
-}
-
#undef LOG
#define LOG(isolate, Call) \
do { \
@@ -104,7 +95,6 @@ class ExistingCodeLogger {
: isolate_(isolate), listener_(listener) {}
void LogCodeObjects();
- void LogBytecodeHandlers();
void LogCompiledFunctions();
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
@@ -112,8 +102,6 @@ class ExistingCodeLogger {
CodeEventListener::LogEventsAndTags tag =
CodeEventListener::LAZY_COMPILE_TAG);
void LogCodeObject(Object* object);
- void LogBytecodeHandler(interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale, Code* code);
private:
Isolate* isolate_;
@@ -202,41 +190,43 @@ class Logger : public CodeEventListener {
void RemoveCodeEventListener(CodeEventListener* listener);
// Emits a code event for a callback function.
- void CallbackEvent(Name* name, Address entry_point);
- void GetterCallbackEvent(Name* name, Address entry_point);
- void SetterCallbackEvent(Name* name, Address entry_point);
+ void CallbackEvent(Name* name, Address entry_point) override;
+ void GetterCallbackEvent(Name* name, Address entry_point) override;
+ void SetterCallbackEvent(Name* name, Address entry_point) override;
// Emits a code create event.
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, const char* source);
+ AbstractCode* code, const char* source) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, Name* name);
+ AbstractCode* code, Name* name) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, SharedFunctionInfo* shared,
- Name* name);
+ Name* name) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, SharedFunctionInfo* shared,
- Name* source, int line, int column);
+ Name* source, int line, int column) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- const wasm::WasmCode* code, wasm::WasmName name);
+ const wasm::WasmCode* code,
+ wasm::WasmName name) override;
// Emits a code deoptimization event.
- void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared);
- void CodeMovingGCEvent();
+ void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) override;
+ void CodeMovingGCEvent() override;
// Emits a code create event for a RegExp.
- void RegExpCodeCreateEvent(AbstractCode* code, String* source);
+ void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
// Emits a code move event.
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to);
+ void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
// Emits a code line info record event.
void CodeLinePosInfoRecordEvent(Address code_start,
ByteArray* source_position_table);
void CodeLinePosInfoRecordEvent(Address code_start,
Vector<const byte> source_position_table);
- void SharedFunctionInfoMoveEvent(Address from, Address to);
+ void SharedFunctionInfoMoveEvent(Address from, Address to) override;
void CodeNameEvent(Address addr, int pos, const char* code_name);
void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
- int fp_to_sp_delta);
+ int fp_to_sp_delta) override;
void ICEvent(const char* type, bool keyed, Map* map, Object* key,
char old_state, char new_state, const char* modifier,
@@ -268,7 +258,7 @@ class Logger : public CodeEventListener {
return is_logging_;
}
- bool is_listening_to_code_events() {
+ bool is_listening_to_code_events() override {
return is_logging() || jit_logger_ != nullptr;
}
@@ -284,10 +274,6 @@ class Logger : public CodeEventListener {
void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
void LogCodeObjects();
- // Used for logging bytecode handlers found in the snapshot.
- void LogBytecodeHandlers();
- void LogBytecodeHandler(interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale, Code* code);
// Logs all Maps found in the heap.
void LogMaps();
@@ -303,7 +289,7 @@ class Logger : public CodeEventListener {
private:
explicit Logger(Isolate* isolate);
- ~Logger();
+ ~Logger() override;
// Emits the profiler's first message.
void ProfilerBeginEvent();
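
Marking the Logger methods `override`, as the log.h hunks above do, turns any drift between CodeEventListener's virtual signatures and Logger's into a compile error. A reduced illustration (names invented here):

    struct ListenerLike {
      virtual ~ListenerLike() = default;
      virtual void CodeMovingGCEvent() = 0;
    };

    struct LoggerLike : ListenerLike {
      // Without 'override', a typo such as CodeMovingGcEvent() would silently
      // declare a brand-new virtual; with it, the mismatch fails to compile.
      void CodeMovingGCEvent() override {}
    };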
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index fb3d1263d7..c6cc06eeae 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -323,12 +323,19 @@ void LookupIterator::InternalUpdateProtector() {
}
}
} else if (*name_ == roots.next_string()) {
- if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
- // Setting the next property of %ArrayIteratorPrototype% also needs to
- // invalidate the array iterator protector.
if (isolate_->IsInAnyContext(
*holder_, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)) {
+ // Setting the next property of %ArrayIteratorPrototype% also needs to
+ // invalidate the array iterator protector.
+ if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
isolate_->InvalidateArrayIteratorProtector();
+ } else if (isolate_->IsInAnyContext(
+ *receiver_,
+ Context::INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX)) {
+ // Setting the next property of %StringIteratorPrototype% invalidates the
+ // string iterator protector.
+ if (!isolate_->IsStringIteratorLookupChainIntact()) return;
+ isolate_->InvalidateStringIteratorProtector();
}
} else if (*name_ == roots.species_symbol()) {
if (!isolate_->IsArraySpeciesLookupChainIntact() &&
@@ -354,9 +361,17 @@ void LookupIterator::InternalUpdateProtector() {
if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
isolate_->InvalidateIsConcatSpreadableProtector();
} else if (*name_ == roots.iterator_symbol()) {
- if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
if (holder_->IsJSArray()) {
+ if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
isolate_->InvalidateArrayIteratorProtector();
+ } else if (isolate_->IsInAnyContext(
+ *receiver_, Context::INITIAL_STRING_PROTOTYPE_INDEX)) {
+ // Setting the Symbol.iterator property of String.prototype invalidates
+ // the string iterator protector. Symbol.iterator can also be set on a
+ // String wrapper, but not on a primitive string. We only support the
+ // protector for primitive strings.
+ if (!isolate_->IsStringIteratorLookupChainIntact()) return;
+ isolate_->InvalidateStringIteratorProtector();
}
} else if (*name_ == roots.resolve_string()) {
if (!isolate_->IsPromiseResolveLookupChainIntact()) return;
@@ -535,7 +550,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
// via a trap. Adding properties to primitive values is not observable.
void LookupIterator::PrepareTransitionToDataProperty(
Handle<JSReceiver> receiver, Handle<Object> value,
- PropertyAttributes attributes, Object::StoreFromKeyed store_mode) {
+ PropertyAttributes attributes, StoreOrigin store_origin) {
DCHECK_IMPLIES(receiver->IsJSProxy(), name()->IsPrivate());
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
if (state_ == TRANSITION) return;
@@ -589,7 +604,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
Handle<Map> transition =
Map::TransitionToDataProperty(isolate_, map, name_, value, attributes,
- kDefaultFieldConstness, store_mode);
+ kDefaultFieldConstness, store_origin);
state_ = TRANSITION;
transition_ = transition;
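
The lookup.cc changes scope each protector check to the object actually being mutated and introduce a string iterator protector. A protector is a one-way flag guarding a fast path; a conceptual sketch of that shape (not V8's real API):

    class ProtectorLike {
     public:
      // Callers branch on IsIntact(): true selects the inlined default
      // iteration, false forces a full Symbol.iterator lookup.
      bool IsIntact() const { return intact_; }
      // Invalidation is one-way; once a watched property such as
      // String.prototype[Symbol.iterator] is overridden, the fast path
      // stays disabled for the lifetime of the context.
      void Invalidate() { intact_ = false; }
     private:
      bool intact_ = true;
    };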
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index c1d4dd4460..1c55d2769c 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE LookupIterator final {
public:
enum Configuration {
// Configuration bits.
@@ -160,7 +160,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
void PrepareTransitionToDataProperty(Handle<JSReceiver> receiver,
Handle<Object> value,
PropertyAttributes attributes,
- Object::StoreFromKeyed store_mode);
+ StoreOrigin store_origin);
inline bool IsCacheableTransition();
void ApplyTransitionToDataProperty(Handle<JSReceiver> receiver);
void ReconfigureDataProperty(Handle<Object> value,
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index 68db01a4ef..37c2623f89 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -298,6 +298,12 @@ V8_EXPORT_PRIVATE inline int ElementSizeInBytes(MachineRepresentation rep) {
return 1 << ElementSizeLog2Of(rep);
}
+// Converts representation to bit for representation masks.
+V8_EXPORT_PRIVATE inline constexpr int RepresentationBit(
+ MachineRepresentation rep) {
+ return 1 << static_cast<int>(rep);
+}
+
} // namespace internal
} // namespace v8
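
RepresentationBit turns an enum value into a single-bit flag so a set of representations can live in one integer mask. A sketch with made-up enumerators:

    enum class Rep : int { kWord8, kWord16, kWord32, kWord64 };
    constexpr int Bit(Rep r) { return 1 << static_cast<int>(r); }
    constexpr int kWordMask = Bit(Rep::kWord32) | Bit(Rep::kWord64);
    static_assert((kWordMask & Bit(Rep::kWord64)) != 0, "membership via mask");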
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index af5c60536b..33ca113da0 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -209,7 +209,7 @@ class NoRootArrayScope {
// either registers or immediate values. Used to make sure that the
// caller provides exactly the expected number of parameters to the
// callee.
-class ParameterCount BASE_EMBEDDED {
+class ParameterCount {
public:
explicit ParameterCount(Register reg) : reg_(reg), immediate_(0) {}
explicit ParameterCount(uint16_t imm) : reg_(no_reg), immediate_(imm) {}
diff --git a/deps/v8/src/math-random.cc b/deps/v8/src/math-random.cc
new file mode 100644
index 0000000000..932d4b9d2a
--- /dev/null
+++ b/deps/v8/src/math-random.cc
@@ -0,0 +1,70 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/math-random.h"
+
+#include "src/assert-scope.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/contexts-inl.h"
+#include "src/isolate.h"
+#include "src/objects/fixed-array.h"
+
+namespace v8 {
+namespace internal {
+
+void MathRandom::InitializeContext(Isolate* isolate,
+ Handle<Context> native_context) {
+ Handle<FixedDoubleArray> cache = Handle<FixedDoubleArray>::cast(
+ isolate->factory()->NewFixedDoubleArray(kCacheSize, TENURED));
+ for (int i = 0; i < kCacheSize; i++) cache->set(i, 0);
+ native_context->set_math_random_cache(*cache);
+ Handle<PodArray<State>> pod = PodArray<State>::New(isolate, 1, TENURED);
+ native_context->set_math_random_state(*pod);
+ ResetContext(*native_context);
+}
+
+void MathRandom::ResetContext(Context* native_context) {
+ native_context->set_math_random_index(Smi::kZero);
+ State state = {0, 0};
+ PodArray<State>::cast(native_context->math_random_state())->set(0, state);
+}
+
+Smi* MathRandom::RefillCache(Isolate* isolate, Context* native_context) {
+ DisallowHeapAllocation no_gc;
+ PodArray<State>* pod =
+ PodArray<State>::cast(native_context->math_random_state());
+ State state = pod->get(0);
+ // Initialize state if not yet initialized. If a fixed random seed was
+ // requested, use it to reset our state the first time a script asks for
+ // random numbers in this context. This ensures the script sees a consistent
+ // sequence.
+ if (state.s0 == 0 && state.s1 == 0) {
+ uint64_t seed;
+ if (FLAG_random_seed != 0) {
+ seed = FLAG_random_seed;
+ } else {
+ isolate->random_number_generator()->NextBytes(&seed, sizeof(seed));
+ }
+ state.s0 = base::RandomNumberGenerator::MurmurHash3(seed);
+ state.s1 = base::RandomNumberGenerator::MurmurHash3(~seed);
+ CHECK(state.s0 != 0 || state.s1 != 0);
+ }
+
+ FixedDoubleArray* cache =
+ FixedDoubleArray::cast(native_context->math_random_cache());
+ // Create random numbers.
+ for (int i = 0; i < kCacheSize; i++) {
+ // Generate random numbers using xorshift128+.
+ base::RandomNumberGenerator::XorShift128(&state.s0, &state.s1);
+ cache->set(i, base::RandomNumberGenerator::ToDouble(state.s0));
+ }
+ pod->set(0, state);
+
+ Smi* new_index = Smi::FromInt(kCacheSize);
+ native_context->set_math_random_index(new_index);
+ return new_index;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/math-random.h b/deps/v8/src/math-random.h
new file mode 100644
index 0000000000..a720c75757
--- /dev/null
+++ b/deps/v8/src/math-random.h
@@ -0,0 +1,33 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MATH_RANDOM_H_
+#define V8_MATH_RANDOM_H_
+
+#include "src/contexts.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class MathRandom : public AllStatic {
+ public:
+ static void InitializeContext(Isolate* isolate,
+ Handle<Context> native_context);
+
+ static void ResetContext(Context* native_context);
+ static Smi* RefillCache(Isolate* isolate, Context* native_context);
+
+ static const int kCacheSize = 64;
+ static const int kStateSize = 2 * kInt64Size;
+
+ struct State {
+ uint64_t s0;
+ uint64_t s1;
+ };
+};
+
+} // namespace internal
+} // namespace v8
+#endif // V8_MATH_RANDOM_H_
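
RefillCache fills the per-context cache with 64 doubles per trip using xorshift128+. A standalone sketch of the generator and the bits-to-[0,1) mapping, assuming the usual 23/17/26 shift constants; this mirrors the shape of V8's base helpers but is not the production code:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static void XorShift128(uint64_t* s0, uint64_t* s1) {
      uint64_t a = *s0;
      uint64_t b = *s1;
      *s0 = b;
      a ^= a << 23;
      a ^= a >> 17;
      a ^= b;
      a ^= b >> 26;
      *s1 = a;
    }

    static double ToDouble(uint64_t state0) {
      // Set the exponent for [1.0, 2.0), fill the mantissa with random bits,
      // then subtract 1.0 to land in [0.0, 1.0).
      uint64_t bits = (state0 >> 12) | 0x3FF0000000000000ULL;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - 1.0;
    }

    int main() {
      uint64_t s0 = 0x853c49e6748fea9bULL;  // arbitrary nonzero seed
      uint64_t s1 = 0xda3e39cb94b95bdbULL;
      for (int i = 0; i < 4; i++) {
        XorShift128(&s0, &s1);
        std::printf("%f\n", ToDouble(s0));
      }
    }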
diff --git a/deps/v8/src/maybe-handles-inl.h b/deps/v8/src/maybe-handles-inl.h
index c9c8c88700..1743af41a4 100644
--- a/deps/v8/src/maybe-handles-inl.h
+++ b/deps/v8/src/maybe-handles-inl.h
@@ -22,12 +22,12 @@ MaybeObjectHandle::MaybeObjectHandle()
MaybeObjectHandle::MaybeObjectHandle(MaybeObject* object, Isolate* isolate) {
HeapObject* heap_object;
- DCHECK(!object->IsClearedWeakHeapObject());
- if (object->ToWeakHeapObject(&heap_object)) {
+ DCHECK(!object->IsCleared());
+ if (object->GetHeapObjectIfWeak(&heap_object)) {
handle_ = handle(heap_object, isolate);
reference_type_ = HeapObjectReferenceType::WEAK;
} else {
- handle_ = handle(object->ToObject(), isolate);
+ handle_ = handle(object->cast<Object>(), isolate);
reference_type_ = HeapObjectReferenceType::STRONG;
}
}
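
The renamed MaybeObject helpers (IsCleared, GetHeapObjectIfWeak, cast<Object>()) all revolve around tagging a pointer's low bits to distinguish strong, weak, and cleared references. An illustrative tagging scheme, not V8's actual layout:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kWeakTag = 0x2;       // illustrative low-bit tag
    constexpr uintptr_t kClearedValue = 0x3;  // sentinel for a dead weak ref

    inline bool IsCleared(uintptr_t ref) { return ref == kClearedValue; }

    inline bool GetHeapObjectIfWeak(uintptr_t ref, uintptr_t* out) {
      if (IsCleared(ref) || (ref & kWeakTag) == 0) return false;
      *out = ref & ~kWeakTag;  // strip the tag to recover the address
      return true;
    }

    int main() {
      uintptr_t obj = 0x1000;               // pretend (aligned) heap address
      uintptr_t weak_ref = obj | kWeakTag;  // a weak reference to it
      uintptr_t target;
      assert(GetHeapObjectIfWeak(weak_ref, &target) && target == obj);
      assert(!GetHeapObjectIfWeak(kClearedValue, &target));
    }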
diff --git a/deps/v8/src/maybe-handles.h b/deps/v8/src/maybe-handles.h
index d4b639e18e..231e4d78fb 100644
--- a/deps/v8/src/maybe-handles.h
+++ b/deps/v8/src/maybe-handles.h
@@ -23,7 +23,7 @@ namespace internal {
template <typename T>
class MaybeHandle final {
public:
- V8_INLINE MaybeHandle() {}
+ V8_INLINE MaybeHandle() = default;
// Constructor for handling automatic up casting from Handle.
// Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected.
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index a1c228ec59..3d98da4e63 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -305,10 +305,9 @@ void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
const int flags = array->Flags(frame_ix)->value();
is_constructor_ = (flags & FrameArray::kIsConstructor) != 0;
is_strict_ = (flags & FrameArray::kIsStrict) != 0;
+ is_async_ = (flags & FrameArray::kIsAsync) != 0;
}
-JSStackFrame::JSStackFrame() {}
-
JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
Handle<JSFunction> function,
Handle<AbstractCode> code, int offset)
@@ -317,6 +316,7 @@ JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
function_(function),
code_(code),
offset_(offset),
+ is_async_(false),
is_constructor_(false),
is_strict_(false) {}
@@ -386,6 +386,13 @@ Handle<Object> JSStackFrame::GetMethodName() {
}
Handle<String> name(function_->shared()->Name(), isolate_);
+
+ // The static initializer function is not a method, so don't add a
+ // class name, just return the function name.
+ if (name->IsUtf8EqualTo(CStrVector("<static_fields_initializer>"), true)) {
+ return name;
+ }
+
// ES2015 gives getters and setters name prefixes which must
// be stripped to find the property name.
if (name->IsUtf8EqualTo(CStrVector("get "), true) ||
@@ -599,9 +606,13 @@ MaybeHandle<String> JSStackFrame::ToString() {
Handle<Object> function_name = GetFunctionName();
const bool is_toplevel = IsToplevel();
+ const bool is_async = IsAsync();
const bool is_constructor = IsConstructor();
const bool is_method_call = !(is_toplevel || is_constructor);
+ if (is_async) {
+ builder.AppendCString("async ");
+ }
if (is_method_call) {
AppendMethodCall(isolate_, this, &builder);
} else if (is_constructor) {
@@ -635,8 +646,6 @@ Handle<Script> JSStackFrame::GetScript() const {
return handle(Script::cast(function_->shared()->script()), isolate_);
}
-WasmStackFrame::WasmStackFrame() {}
-
void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
int frame_ix) {
// This function is called for compiled and interpreted wasm frames, and for
@@ -728,8 +737,6 @@ Handle<Script> WasmStackFrame::GetScript() const {
return handle(wasm_instance_->module_object()->script(), isolate_);
}
-AsmJsWasmStackFrame::AsmJsWasmStackFrame() {}
-
void AsmJsWasmStackFrame::FromFrameArray(Isolate* isolate,
Handle<FrameArray> array,
int frame_ix) {
@@ -851,8 +858,9 @@ MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
handle(isolate->native_context()->callsite_function(), isolate);
Handle<JSObject> obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, JSObject::New(target, target),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, obj,
+ JSObject::New(target, target, Handle<AllocationSite>::null()), Object);
Handle<Symbol> key = isolate->factory()->call_site_frame_array_symbol();
RETURN_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
@@ -947,39 +955,55 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<FrameArray> elems(FrameArray::cast(raw_stack_array->elements()),
isolate);
- // If there's a user-specified "prepareStackFrames" function, call it on the
- // frames and use its result.
+ const bool in_recursion = isolate->formatting_stack_trace();
+ if (!in_recursion) {
+ if (isolate->HasPrepareStackTraceCallback()) {
+ Handle<Context> error_context = error->GetCreationContext();
+ DCHECK(!error_context.is_null() && error_context->IsNativeContext());
+ PrepareStackTraceScope scope(isolate);
- Handle<JSFunction> global_error = isolate->error_function();
- Handle<Object> prepare_stack_trace;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, prepare_stack_trace,
- JSFunction::GetProperty(isolate, global_error, "prepareStackTrace"),
- Object);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ isolate->RunPrepareStackTraceCallback(error_context, error), Object);
+ return result;
+ } else {
+ Handle<JSFunction> global_error = isolate->error_function();
- const bool in_recursion = isolate->formatting_stack_trace();
- if (prepare_stack_trace->IsJSFunction() && !in_recursion) {
- PrepareStackTraceScope scope(isolate);
+ // If there's a user-specified "prepareStackTrace" function, call it on
+ // the frames and use its result.
- isolate->CountUsage(v8::Isolate::kErrorPrepareStackTrace);
+ Handle<Object> prepare_stack_trace;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, prepare_stack_trace,
+ JSFunction::GetProperty(isolate, global_error, "prepareStackTrace"),
+ Object);
- Handle<JSArray> sites;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, sites, GetStackFrames(isolate, elems),
- Object);
+ if (prepare_stack_trace->IsJSFunction()) {
+ PrepareStackTraceScope scope(isolate);
- const int argc = 2;
- ScopedVector<Handle<Object>> argv(argc);
+ isolate->CountUsage(v8::Isolate::kErrorPrepareStackTrace);
- argv[0] = error;
- argv[1] = sites;
+ Handle<JSArray> sites;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, sites,
+ GetStackFrames(isolate, elems), Object);
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, prepare_stack_trace,
- global_error, argc, argv.start()),
- Object);
+ const int argc = 2;
+ ScopedVector<Handle<Object>> argv(argc);
+ argv[0] = error;
+ argv[1] = sites;
+
+ Handle<Object> result;
- return result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, prepare_stack_trace, global_error, argc,
+ argv.start()),
+ Object);
+
+ return result;
+ }
+ }
}
// Otherwise, run our internal formatting logic.
@@ -1107,8 +1131,10 @@ MaybeHandle<Object> ErrorUtils::Construct(
// 2. Let O be ? OrdinaryCreateFromConstructor(newTarget, "%ErrorPrototype%",
// « [[ErrorData]] »).
Handle<JSObject> err;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, err,
- JSObject::New(target, new_target_recv), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, err,
+ JSObject::New(target, new_target_recv, Handle<AllocationSite>::null()),
+ Object);
// 3. If message is not undefined, then
// a. Let msg be ? ToString(message).
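
The reworked FormatStackTrace gives an embedder callback registered through the C++ API priority over a script-installed Error.prepareStackTrace, falls back to the built-in formatter when neither is present, and never re-enters either hook while already formatting. The control flow, reduced to a sketch with invented names:

    #include <functional>
    #include <string>

    using Formatter = std::function<std::string()>;

    std::string FormatStackTrace(bool in_recursion,
                                 const Formatter& embedder_callback,
                                 const Formatter& js_prepare_stack_trace,
                                 const Formatter& default_format) {
      if (!in_recursion) {
        if (embedder_callback) return embedder_callback();  // highest priority
        if (js_prepare_stack_trace) return js_prepare_stack_trace();
      }
      return default_format();  // internal textual formatting
    }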
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 030fc0b926..05d287faae 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -50,7 +50,7 @@ class MessageLocation {
class StackFrameBase {
public:
- virtual ~StackFrameBase() {}
+ virtual ~StackFrameBase() = default;
virtual Handle<Object> GetReceiver() const = 0;
virtual Handle<Object> GetFunction() const = 0;
@@ -71,13 +71,14 @@ class StackFrameBase {
virtual bool IsNative() = 0;
virtual bool IsToplevel() = 0;
virtual bool IsEval();
+ virtual bool IsAsync() const = 0;
virtual bool IsConstructor() = 0;
virtual bool IsStrict() const = 0;
virtual MaybeHandle<String> ToString() = 0;
protected:
- StackFrameBase() {}
+ StackFrameBase() = default;
explicit StackFrameBase(Isolate* isolate) : isolate_(isolate) {}
Isolate* isolate_;
@@ -91,7 +92,7 @@ class JSStackFrame : public StackFrameBase {
JSStackFrame(Isolate* isolate, Handle<Object> receiver,
Handle<JSFunction> function, Handle<AbstractCode> code,
int offset);
- virtual ~JSStackFrame() {}
+ ~JSStackFrame() override = default;
Handle<Object> GetReceiver() const override { return receiver_; }
Handle<Object> GetFunction() const override;
@@ -108,13 +109,14 @@ class JSStackFrame : public StackFrameBase {
bool IsNative() override;
bool IsToplevel() override;
+ bool IsAsync() const override { return is_async_; }
bool IsConstructor() override { return is_constructor_; }
bool IsStrict() const override { return is_strict_; }
MaybeHandle<String> ToString() override;
private:
- JSStackFrame();
+ JSStackFrame() = default;
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
bool HasScript() const override;
@@ -125,15 +127,16 @@ class JSStackFrame : public StackFrameBase {
Handle<AbstractCode> code_;
int offset_;
- bool is_constructor_;
- bool is_strict_;
+ bool is_async_ : 1;
+ bool is_constructor_ : 1;
+ bool is_strict_ : 1;
friend class FrameArrayIterator;
};
class WasmStackFrame : public StackFrameBase {
public:
- virtual ~WasmStackFrame() {}
+ ~WasmStackFrame() override = default;
Handle<Object> GetReceiver() const override;
Handle<Object> GetFunction() const override;
@@ -150,6 +153,7 @@ class WasmStackFrame : public StackFrameBase {
bool IsNative() override { return false; }
bool IsToplevel() override { return false; }
+ bool IsAsync() const override { return false; }
bool IsConstructor() override { return false; }
bool IsStrict() const override { return false; }
bool IsInterpreted() const { return code_ == nullptr; }
@@ -168,7 +172,7 @@ class WasmStackFrame : public StackFrameBase {
int offset_;
private:
- WasmStackFrame();
+ WasmStackFrame() = default;
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
friend class FrameArrayIterator;
@@ -177,7 +181,7 @@ class WasmStackFrame : public StackFrameBase {
class AsmJsWasmStackFrame : public WasmStackFrame {
public:
- virtual ~AsmJsWasmStackFrame() {}
+ ~AsmJsWasmStackFrame() override = default;
Handle<Object> GetReceiver() const override;
Handle<Object> GetFunction() const override;
@@ -193,7 +197,7 @@ class AsmJsWasmStackFrame : public WasmStackFrame {
private:
friend class FrameArrayIterator;
- AsmJsWasmStackFrame();
+ AsmJsWasmStackFrame() = default;
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
bool is_at_number_conversion_;
@@ -377,6 +381,7 @@ class ErrorUtils : public AllStatic {
"% is not a function or its return value is not async iterable") \
T(NotFiniteNumber, "Value need to be finite number for %()") \
T(NotIterable, "% is not iterable") \
+ T(NotIterableNoSymbolLoad, "% is not iterable (cannot read property %)") \
T(NotAsyncIterable, "% is not async iterable") \
T(NotPropertyName, "% is not a valid property name") \
T(NotTypedArray, "this is not a typed array.") \
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 2c04430509..ee39c524f6 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -41,6 +41,7 @@
#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/mips/assembler-mips-inl.h"
+#include "src/string-constants.h"
namespace v8 {
namespace internal {
@@ -248,6 +249,13 @@ Operand Operand::EmbeddedCode(CodeStub* stub) {
return result;
}
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(str);
+ return result;
+}
+
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
}
@@ -259,6 +267,7 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
@@ -270,6 +279,11 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
+ case HeapObjectRequest::kStringConstant:
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ object = str->AllocateStringConstant(isolate);
+ break;
}
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
set_target_value_at(pc, reinterpret_cast<uint32_t>(object.location()));
@@ -2321,6 +2335,16 @@ void Assembler::sc(Register rd, const MemOperand& rs) {
}
}
+void Assembler::llwp(Register rd, Register rt, Register base) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL3, base, rt, rd, 1, LL_R6);
+}
+
+void Assembler::scwp(Register rd, Register rt, Register base) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL3, base, rt, rd, 1, SC_R6);
+}
+
void Assembler::lui(Register rd, int32_t j) {
DCHECK(is_uint16(j) || is_int16(j));
GenInstrImmediate(LUI, zero_reg, rd, j);
@@ -3873,17 +3897,11 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
- if (!RelocInfo::IsNone(rinfo.rmode())) {
- if (options().disable_reloc_info_for_patching) return;
- if (RelocInfo::IsOnlyForSerializer(rmode) &&
- !options().record_reloc_info_for_serialization && !emit_debug_code()) {
- return;
- }
- DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
- reloc_info_writer.Write(&rinfo);
- }
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
+ reloc_info_writer.Write(&rinfo);
}
void Assembler::BlockTrampolinePoolFor(int instructions) {
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 5a51522940..d535f1e923 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -304,7 +304,6 @@ typedef FPURegister DoubleRegister;
DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
#undef DECLARE_DOUBLE_REGISTER
-constexpr DoubleRegister no_freg = DoubleRegister::no_reg();
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
// SIMD registers.
@@ -385,7 +384,7 @@ constexpr MSAControlRegister MSACSR = {kMSACSRRegister};
// Machine instruction Operands.
// Class Operand represents a shifter operand in data processing instructions.
-class Operand BASE_EMBEDDED {
+class Operand {
public:
// Immediate.
V8_INLINE explicit Operand(int32_t immediate,
@@ -408,6 +407,7 @@ class Operand BASE_EMBEDDED {
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
+ static Operand EmbeddedStringConstant(const StringConstantBase* str);
// Register.
V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
@@ -883,6 +883,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void ll(Register rd, const MemOperand& rs);
void sc(Register rd, const MemOperand& rs);
+ void llwp(Register rd, Register rt, Register base);
+ void scwp(Register rd, Register rt, Register base);
// ---------PC-Relative-instructions-----------
@@ -2212,8 +2214,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
friend class EnsureSpace;
};
-
-class EnsureSpace BASE_EMBEDDED {
+class EnsureSpace {
public:
explicit inline EnsureSpace(Assembler* assembler);
};
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 3da00d4748..1650458d19 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -119,7 +119,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ li(t0, ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, isolate));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
- __ LoadRoot(v0, Heap::kExceptionRootIndex);
+ __ LoadRoot(v0, RootIndex::kException);
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
@@ -415,7 +415,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
stack_space_offset != kInvalidStackOffset);
// Check if the function scheduled an exception.
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(t0, RootIndex::kTheHoleValue);
__ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
__ lw(t1, MemOperand(kScratchReg));
__ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
@@ -466,13 +466,13 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
// new target
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// call data.
__ Push(call_data);
Register scratch = call_data;
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
// Push return value and default return value.
__ Push(scratch, scratch);
__ li(scratch, ExternalReference::isolate_address(masm->isolate()));
@@ -543,7 +543,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
__ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
__ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
kPointerSize));
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index d6b47990f8..86546668db 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -7,7 +7,6 @@
#include <memory>
#include "src/codegen.h"
-#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/mips/simulator-mips.h"
@@ -18,18 +17,18 @@ namespace internal {
#if defined(V8_HOST_ARCH_MIPS)
-MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
- MemCopyUint8Function stub) {
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
defined(_MIPS_ARCH_MIPS32RX)
return stub;
#else
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
@@ -541,26 +540,28 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ nop();
}
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif
-UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
return nullptr;
#else
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
__ MovFromFloatParameter(f12);
__ sqrt_d(f0, f12);
@@ -568,12 +569,13 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
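
Both codegen helpers now take the platform PageAllocator, assemble into a fresh writable page, and flip it to read+execute before handing out the function pointer. The same write-then-protect dance with POSIX primitives, assuming an x86-64 host where 0xC3 encodes ret:

    #include <sys/mman.h>
    #include <cassert>
    #include <cstring>

    int main() {
      size_t size = 4096;
      // Map a writable, non-executable page (W^X: never writable+executable).
      void* buf = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(buf != MAP_FAILED);
      unsigned char code[] = {0xC3};  // x86-64 'ret' (host-specific assumption)
      std::memcpy(buf, code, sizeof(code));
      // Flip to read+execute, mirroring SetPermissions(..., kReadExecute).
      assert(mprotect(buf, size, PROT_READ | PROT_EXEC) == 0);
      reinterpret_cast<void (*)()>(buf)();  // call the generated stub
      munmap(buf, size);
    }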
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 0049f9fa91..e7ec95b7ac 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1546,6 +1546,16 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
break;
}
+ case LL_R6: {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Format(instr, "llwp 'rd, 'rt, 0('rs)");
+ break;
+ }
+ case SC_R6: {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Format(instr, "scwp 'rd, 'rt, 0('rs)");
+ break;
+ }
default: {
sa >>= kBp2Bits;
switch (sa) {
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 31b5f82895..1ece4812a3 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -88,9 +88,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments (on the stack, not including receiver)
// a1 : the target to call
- // a2 : arguments list (FixedArray)
// t0 : arguments list length (untagged)
- Register registers[] = {a1, a0, a2, t0};
+ // a2 : arguments list (FixedArray)
+ Register registers[] = {a1, a0, t0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -125,9 +125,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific(
// a0 : number of arguments (on the stack, not including receiver)
// a1 : the target to call
// a3 : the new target
- // a2 : arguments list (FixedArray)
// t0 : arguments list length (untagged)
- Register registers[] = {a1, a3, a0, a2, t0};
+ // a2 : arguments list (FixedArray)
+ Register registers[] = {a1, a3, a0, t0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -194,7 +194,7 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a1, // JSFunction
@@ -238,10 +238,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // argument count (not including receiver)
- a3, // new target
+ t4, // address of the first argument
a1, // constructor to call
- a2, // allocation site feedback if available, undefined otherwise.
- t4 // address of the first argument
+ a3, // new target
+ a2, // allocation site feedback if available, undefined otherwise
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 35a9959ddb..c10602df48 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -127,11 +127,11 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
lw(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
}
-void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond, Register src1,
const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
@@ -273,8 +273,6 @@ void TurboAssembler::CallRecordWriteStub(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register isolate_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
@@ -286,7 +284,6 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter);
Pop(object_parameter);
- li(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -1347,6 +1344,11 @@ void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
li(dst, Operand(value), mode);
}
+void TurboAssembler::li(Register dst, const StringConstantBase* string,
+ LiFlags mode) {
+ li(dst, Operand::EmbeddedStringConstant(string), mode);
+}
+
void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1461,6 +1463,26 @@ void TurboAssembler::SubPair(Register dst_low, Register dst_high,
Move(dst_low, scratch1);
}
+void TurboAssembler::AndPair(Register dst_low, Register dst_high,
+ Register left_low, Register left_high,
+ Register right_low, Register right_high) {
+ And(dst_low, left_low, right_low);
+ And(dst_high, left_high, right_high);
+}
+
+void TurboAssembler::OrPair(Register dst_low, Register dst_high,
+ Register left_low, Register left_high,
+ Register right_low, Register right_high) {
+ Or(dst_low, left_low, right_low);
+ Or(dst_high, left_high, right_high);
+}
+void TurboAssembler::XorPair(Register dst_low, Register dst_high,
+ Register left_low, Register left_high,
+ Register right_low, Register right_high) {
+ Xor(dst_low, left_low, right_low);
+ Xor(dst_high, left_high, right_high);
+}
+
void TurboAssembler::MulPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high,
@@ -2814,7 +2836,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
}
void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
- Heap::RootListIndex index, BranchDelaySlot bdslot) {
+ RootIndex index, BranchDelaySlot bdslot) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
@@ -3623,8 +3645,8 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- Heap::kBuiltinsConstantsTableRootIndex));
- LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
+ RootIndex::kBuiltinsConstantsTable));
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
lw(destination,
FieldMemOperand(destination,
FixedArray::kHeaderSize + constant_index * kPointerSize));
@@ -4326,7 +4348,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
- LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ LoadRoot(a3, RootIndex::kUndefinedValue);
}
Label done;
@@ -5032,7 +5054,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
if (emit_debug_code()) {
Label done_checking;
AssertNotSmi(object);
- LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ LoadRoot(scratch, RootIndex::kUndefinedValue);
Branch(&done_checking, eq, object, Operand(scratch));
GetObjectType(object, scratch, scratch);
Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
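
AndPair/OrPair/XorPair need no scratch registers because, unlike AddPair/SubPair, bitwise operations never carry between the two 32-bit halves of a 64-bit value. Checking that property in plain C++:

    #include <cassert>
    #include <cstdint>

    struct Pair { uint32_t lo, hi; };  // a 64-bit value on a 32-bit target

    Pair XorPair(Pair a, Pair b) { return {a.lo ^ b.lo, a.hi ^ b.hi}; }

    int main() {
      uint64_t x = 0x0123456789abcdefULL, y = 0xfedcba9876543210ULL;
      Pair a{uint32_t(x), uint32_t(x >> 32)};
      Pair b{uint32_t(y), uint32_t(y >> 32)};
      Pair r = XorPair(a, b);  // halves computed independently, no carries
      assert(((uint64_t{r.hi} << 32) | r.lo) == (x ^ y));
    }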
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index f6c371923f..ae3138f85f 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -118,6 +118,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
+ TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
+ : TurboAssemblerBase(options, buffer, buffer_size) {}
+
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
@@ -223,7 +226,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
- void Branch(Label* L, Condition cond, Register rs, Heap::RootListIndex index,
+ void Branch(Label* L, Condition cond, Register rs, RootIndex index,
BranchDelaySlot bdslot = PROTECT);
// Load int32 in the rd register.
@@ -233,6 +236,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, const StringConstantBase* string,
+ LiFlags mode = OPTIMIZE_SIZE);
void LoadFromConstantsTable(Register destination,
int constant_index) override;
@@ -577,6 +582,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register left_high, Register right_low, Register right_high,
Register scratch1, Register scratch2);
+ void AndPair(Register dst_low, Register dst_high, Register left_low,
+ Register left_high, Register right_low, Register right_high);
+
+ void OrPair(Register dst_low, Register dst_high, Register left_low,
+ Register left_high, Register right_low, Register right_high);
+
+ void XorPair(Register dst_low, Register dst_high, Register left_low,
+ Register left_high, Register right_low, Register right_high);
+
void MulPair(Register dst_low, Register dst_high, Register left_low,
Register left_high, Register right_low, Register right_high,
Register scratch1, Register scratch2);
@@ -793,8 +807,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Func GetLabelFunction);
// Load an object from the root table.
- void LoadRoot(Register destination, Heap::RootListIndex index) override;
- void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond,
+ void LoadRoot(Register destination, RootIndex index) override;
+ void LoadRoot(Register destination, RootIndex index, Condition cond,
Register src1, const Operand& src2);
// If the value is a NaN, canonicalize the value else, do nothing.
@@ -901,10 +915,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
+ MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
+ : TurboAssembler(options, buffer, size) {}
+
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
+
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
@@ -912,7 +930,7 @@ class MacroAssembler : public TurboAssembler {
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
- void PushRoot(Heap::RootListIndex index) {
+ void PushRoot(RootIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
@@ -920,7 +938,7 @@ class MacroAssembler : public TurboAssembler {
}
// Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
@@ -928,8 +946,7 @@ class MacroAssembler : public TurboAssembler {
}
// Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(Register with, Heap::RootListIndex index,
- Label* if_not_equal) {
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 13f5f38f0d..b759176db3 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -4240,6 +4240,21 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
SetResult(rd_reg(), alu_out);
break;
}
+ case LL_R6: {
+ // LLWP/SCWP sequence cannot be simulated properly
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ set_register(rd_reg(), ReadW(rs() + 4, instr_.instr()));
+ set_register(rt(), ReadW(rs(), instr_.instr()));
+ break;
+ }
+ case SC_R6: {
+ // LLWP/SCWP sequence cannot be simulated properly
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ WriteW(rs() + 4, rd_reg(), instr_.instr());
+ WriteW(rs(), rt(), instr_.instr());
+ set_register(rt(), 1);
+ break;
+ }
default:
UNREACHABLE();
}
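
The simulator treats LLWP/SCWP as plain loads and stores with SC always reporting success, since no faithful link-register model is available there. On real hardware these instructions form a retry loop; the closest portable analogue is compare-and-swap, sketched here with std::atomic:

    #include <atomic>
    #include <cstdint>

    // Atomically add 'delta' the way an ll/sc (or llwp/scwp) loop would:
    // load-linked, compute, store-conditional, retry on interference.
    uint32_t AtomicAdd(std::atomic<uint32_t>& cell, uint32_t delta) {
      uint32_t old = cell.load(std::memory_order_relaxed);   // ~ll
      while (!cell.compare_exchange_weak(old, old + delta,   // ~sc
                                         std::memory_order_acq_rel)) {
        // 'old' was refreshed by compare_exchange_weak; try again.
      }
      return old;
    }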
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 4abd272a5e..3449537626 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -40,6 +40,7 @@
#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/mips64/assembler-mips64-inl.h"
+#include "src/string-constants.h"
namespace v8 {
namespace internal {
@@ -226,6 +227,13 @@ Operand Operand::EmbeddedCode(CodeStub* stub) {
return result;
}
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(str);
+ return result;
+}
+
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
}
@@ -238,6 +246,7 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
@@ -249,6 +258,11 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
+ case HeapObjectRequest::kStringConstant:
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ object = str->AllocateStringConstant(isolate);
+ break;
}
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
@@ -1819,35 +1833,25 @@ void Assembler::bnezc(Register rs, int32_t offset) {
void Assembler::j(int64_t target) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
- BlockTrampolinePoolFor(1); // For associated delay slot.
+ // Deprecated. Use PC-relative jumps instead.
+ UNREACHABLE();
}
void Assembler::j(Label* target) {
- uint64_t imm = jump_offset(target);
- if (target->is_bound()) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrJump(static_cast<Opcode>(kJRawMark),
- static_cast<uint32_t>(imm >> 2) & kImm26Mask);
- BlockTrampolinePoolFor(1); // For associated delay slot.
- } else {
- j(imm);
- }
+ // Deprecated. Use PC-relative jumps instead.
+ UNREACHABLE();
}
void Assembler::jal(Label* target) {
- uint64_t imm = jump_offset(target);
- if (target->is_bound()) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrJump(static_cast<Opcode>(kJalRawMark),
- static_cast<uint32_t>(imm >> 2) & kImm26Mask);
- BlockTrampolinePoolFor(1); // For associated delay slot.
- } else {
- jal(imm);
- }
+ // Deprecated. Use PC-relative jumps instead.
+ UNREACHABLE();
+}
+
+void Assembler::jal(int64_t target) {
+ // Deprecated. Use PC-relative jumps instead.
+ UNREACHABLE();
}
@@ -1862,13 +1866,6 @@ void Assembler::jr(Register rs) {
}
-void Assembler::jal(int64_t target) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-
void Assembler::jalr(Register rs, Register rd) {
DCHECK(rs.code() != rd.code());
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4218,18 +4215,11 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
- if (!RelocInfo::IsNone(rinfo.rmode())) {
- if (options().disable_reloc_info_for_patching) return;
- // Don't record external references unless the heap will be serialized.
- if (RelocInfo::IsOnlyForSerializer(rmode) &&
- !options().record_reloc_info_for_serialization && !emit_debug_code()) {
- return;
- }
- DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
- reloc_info_writer.Write(&rinfo);
- }
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
+ reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 868882eb4a..814f3eacba 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -309,7 +309,6 @@ typedef FPURegister DoubleRegister;
DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
#undef DECLARE_DOUBLE_REGISTER
-constexpr DoubleRegister no_freg = DoubleRegister::no_reg();
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
// SIMD registers.
@@ -392,7 +391,7 @@ constexpr MSAControlRegister MSACSR = {kMSACSRRegister};
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
// Class Operand represents a shifter operand in data processing instructions.
-class Operand BASE_EMBEDDED {
+class Operand {
public:
// Immediate.
V8_INLINE explicit Operand(int64_t immediate,
@@ -415,6 +414,7 @@ class Operand BASE_EMBEDDED {
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
+ static Operand EmbeddedStringConstant(const StringConstantBase* str);
// Register.
V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
@@ -820,16 +820,17 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Never use the int16_t b(l)cond version with a branch offset
// instead of using the Label* version.
- // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
- void j(int64_t target);
- void jal(int64_t target);
- void j(Label* target);
- void jal(Label* target);
void jalr(Register rs, Register rd = ra);
void jr(Register target);
void jic(Register rt, int16_t offset);
void jialc(Register rt, int16_t offset);
+ // The following instructions are deprecated and require 256 MB
+ // code alignment. Use PC-relative instructions instead.
+ void j(int64_t target);
+ void jal(int64_t target);
+ void j(Label* target);
+ void jal(Label* target);
// -------Data-processing-instructions---------
@@ -2279,8 +2280,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
friend class EnsureSpace;
};
-
-class EnsureSpace BASE_EMBEDDED {
+class EnsureSpace {
public:
explicit inline EnsureSpace(Assembler* assembler);
};
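
The deprecation note above is about reach: j/jal carry a 26-bit word offset and inherit the PC's top bits, so the target must sit in the same 256 MB-aligned region, while PC-relative branches carry no such alignment constraint. The arithmetic:

    #include <cstdint>

    // A 26-bit instruction-word index, shifted left by 2 for byte addresses.
    constexpr uint64_t kJRange = (uint64_t{1} << 26) * 4;
    static_assert(kJRange == 256 * 1024 * 1024, "j/jal reach exactly 256 MB");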
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index cd02bea0f1..bb51ac7cf3 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -118,7 +118,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ li(a4, ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, isolate));
__ Sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
- __ LoadRoot(v0, Heap::kExceptionRootIndex);
+ __ LoadRoot(v0, RootIndex::kException);
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
@@ -418,7 +418,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
stack_space_offset != kInvalidStackOffset);
// Check if the function scheduled an exception.
- __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
__ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
__ Ld(a5, MemOperand(kScratchReg));
__ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
@@ -469,13 +469,13 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
// new target
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// call data.
__ Push(call_data);
Register scratch = call_data;
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
// Push return value and default return value.
__ Push(scratch, scratch);
__ li(scratch, ExternalReference::isolate_address(masm->isolate()));
@@ -548,7 +548,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
__ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
__ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
kPointerSize));
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 81a6cd4342..ac143dd3e5 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -7,7 +7,6 @@
#include <memory>
#include "src/codegen.h"
-#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/mips64/simulator-mips64.h"
@@ -18,18 +17,17 @@ namespace internal {
#if defined(V8_HOST_ARCH_MIPS)
-MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
- MemCopyUint8Function stub) {
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
-
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
// This code assumes that cache lines are 32 bytes; if the cache line is
// larger, it will not work correctly.
@@ -542,26 +540,28 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ nop();
}
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif
-UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
return nullptr;
#else
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
__ MovFromFloatParameter(f12);
__ sqrt_d(f0, f12);
@@ -569,12 +569,13 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
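
Both hunks in this file converge on the same isolate-free pattern for generating standalone stubs. Condensed for reference, using only names that appear in the diff; the surrounding scaffolding is assumed, so treat this as a sketch rather than a drop-in implementation:

    // Condensed sketch of the new stub-generation pattern.
    v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
    size_t allocated = 0;
    byte* buffer = AllocatePage(page_allocator,
                                page_allocator->GetRandomMmapAddr(), &allocated);
    if (buffer == nullptr) return nullptr;  // Fail (or fall back) gracefully.
    MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
    // ... emit machine code through masm ...
    CodeDesc desc;
    masm.GetCode(nullptr, &desc);  // No Isolate is required any more.
    Assembler::FlushICache(buffer, allocated);
    CHECK(SetPermissions(page_allocator, buffer, allocated,
                         PageAllocator::kReadExecute));
    // FUNCTION_CAST<...>(buffer) then yields a callable function pointer.
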
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 8f4fdcc905..a6c7bfa4ba 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -88,9 +88,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments (on the stack, not including receiver)
// a1 : the target to call
- // a2 : arguments list (FixedArray)
// a4 : arguments list length (untagged)
- Register registers[] = {a1, a0, a2, a4};
+ // a2 : arguments list (FixedArray)
+ Register registers[] = {a1, a0, a4, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -125,9 +125,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific(
// a0 : number of arguments (on the stack, not including receiver)
// a1 : the target to call
// a3 : the new target
- // a2 : arguments list (FixedArray)
// a4 : arguments list length (untagged)
- Register registers[] = {a1, a3, a0, a2, a4};
+ // a2 : arguments list (FixedArray)
+ Register registers[] = {a1, a3, a0, a4, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -194,7 +194,7 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a1, // JSFunction
@@ -238,10 +238,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // argument count (not including receiver)
- a3, // new target
+ a4, // address of the first argument
a1, // constructor to call
- a2, // allocation site feedback if available, undefined otherwise.
- a4 // address of the first argument
+ a3, // new target
+ a2, // allocation site feedback if available, undefined otherwise
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
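
Every hunk above follows one convention: the array position defines the logical parameter index, so reordering entries reassigns which register carries which argument. A hypothetical descriptor (MyExampleDescriptor is invented; it does not exist in V8) makes the shape explicit:

    // Hypothetical example: parameter i of the call lands in registers[i].
    void MyExampleDescriptor::InitializePlatformSpecific(
        CallInterfaceDescriptorData* data) {
      Register registers[] = {
          a1,  // parameter 0: the target to call
          a0,  // parameter 1: number of arguments
          a4,  // parameter 2: arguments list length (untagged)
          a2,  // parameter 3: arguments list (FixedArray)
      };
      data->InitializePlatformSpecific(arraysize(registers), registers);
    }
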
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index b55b47a2ed..dd3b51eba5 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -127,11 +127,11 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
Ld(destination, MemOperand(s6, RootRegisterOffset(index)));
}
-void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond, Register src1,
const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
@@ -273,8 +273,6 @@ void TurboAssembler::CallRecordWriteStub(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register isolate_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
@@ -286,7 +284,6 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter);
Pop(object_parameter);
- li(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -1573,6 +1570,11 @@ void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
li(dst, Operand(value), mode);
}
+void TurboAssembler::li(Register dst, const StringConstantBase* string,
+ LiFlags mode) {
+ li(dst, Operand::EmbeddedStringConstant(string), mode);
+}
+
static inline int InstrCountForLiLower32Bit(int64_t value) {
if (!is_int16(static_cast<int32_t>(value)) && (value & kUpper16MaskOf64) &&
(value & kImm16Mask)) {
@@ -3311,7 +3313,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
}
void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
- Heap::RootListIndex index, BranchDelaySlot bdslot) {
+ RootIndex index, BranchDelaySlot bdslot) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
@@ -4124,8 +4126,8 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- Heap::kBuiltinsConstantsTableRootIndex));
- LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
+ RootIndex::kBuiltinsConstantsTable));
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
Ld(destination,
FieldMemOperand(destination,
FixedArray::kHeaderSize + constant_index * kPointerSize));
@@ -4708,7 +4710,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
- LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ LoadRoot(a3, RootIndex::kUndefinedValue);
}
Label done;
@@ -5434,7 +5436,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
if (emit_debug_code()) {
Label done_checking;
AssertNotSmi(object);
- LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ LoadRoot(scratch, RootIndex::kUndefinedValue);
Branch(&done_checking, eq, object, Operand(scratch));
GetObjectType(object, scratch, scratch);
Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 7dd5761571..9160b26e01 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -135,6 +135,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
+ TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
+ : TurboAssemblerBase(options, buffer, buffer_size) {}
+
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
@@ -240,7 +243,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
- void Branch(Label* L, Condition cond, Register rs, Heap::RootListIndex index,
+ void Branch(Label* L, Condition cond, Register rs, RootIndex index,
BranchDelaySlot bdslot = PROTECT);
static int InstrCountForLi64Bit(int64_t value);
@@ -251,8 +254,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
li(rd, Operand(j), mode);
}
+ // inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ // li(rd, Operand(static_cast<int64_t>(j)), mode);
+ // }
void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, const StringConstantBase* string,
+ LiFlags mode = OPTIMIZE_SIZE);
void LoadFromConstantsTable(Register destination,
int constant_index) override;
@@ -763,8 +771,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Func GetLabelFunction);
// Load an object from the root table.
- void LoadRoot(Register destination, Heap::RootListIndex index) override;
- void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond,
+ void LoadRoot(Register destination, RootIndex index) override;
+ void LoadRoot(Register destination, RootIndex index, Condition cond,
Register src1, const Operand& src2);
// If the value is a NaN, canonicalize the value else, do nothing.
@@ -914,10 +922,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
+ MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
+ : TurboAssembler(options, buffer, size) {}
+
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
+
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
@@ -927,7 +939,7 @@ class MacroAssembler : public TurboAssembler {
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
- void PushRoot(Heap::RootListIndex index) {
+ void PushRoot(RootIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
@@ -935,7 +947,7 @@ class MacroAssembler : public TurboAssembler {
}
// Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
@@ -943,8 +955,7 @@ class MacroAssembler : public TurboAssembler {
}
// Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(Register with, Heap::RootListIndex index,
- Label* if_not_equal) {
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
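
Call sites migrate in the same direction: roots are now named through the RootIndex enum rather than Heap::k...RootIndex constants. An assumed caller, shown only to illustrate the new API surface:

    // Illustrative caller code, not part of this patch.
    Label is_hole, not_exception;
    masm.PushRoot(RootIndex::kUndefinedValue);
    masm.JumpIfRoot(a0, RootIndex::kTheHoleValue, &is_hole);
    masm.JumpIfNotRoot(a0, RootIndex::kException, &not_exception);
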
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index 122cdde5bf..e91de2bac3 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -86,6 +86,19 @@ void BodyDescriptorBase::IterateMaybeWeakPointer(HeapObject* obj, int offset,
v->VisitPointer(obj, HeapObject::RawMaybeWeakField(obj, offset));
}
+template <typename ObjectVisitor>
+DISABLE_CFI_PERF void BodyDescriptorBase::IterateCustomWeakPointers(
+ HeapObject* obj, int start_offset, int end_offset, ObjectVisitor* v) {
+ v->VisitCustomWeakPointers(obj, HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
+}
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IterateCustomWeakPointer(HeapObject* obj, int offset,
+ ObjectVisitor* v) {
+ v->VisitCustomWeakPointer(obj, HeapObject::RawField(obj, offset));
+}
+
class JSObject::BodyDescriptor final : public BodyDescriptorBase {
public:
static const int kStartOffset = JSReceiver::kPropertiesOrHashOffset;
@@ -149,8 +162,7 @@ class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
}
};
-template <bool includeWeakNext>
-class AllocationSite::BodyDescriptorImpl final : public BodyDescriptorBase {
+class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
public:
STATIC_ASSERT(AllocationSite::kCommonPointerFieldEndOffset ==
AllocationSite::kPretenureDataOffset);
@@ -165,8 +177,7 @@ class AllocationSite::BodyDescriptorImpl final : public BodyDescriptorBase {
return true;
}
// check for weak_next offset
- if (includeWeakNext &&
- map->instance_size() == AllocationSite::kSizeWithWeakNext &&
+ if (map->instance_size() == AllocationSite::kSizeWithWeakNext &&
offset == AllocationSite::kWeakNextOffset) {
return true;
}
@@ -179,12 +190,12 @@ class AllocationSite::BodyDescriptorImpl final : public BodyDescriptorBase {
// Iterate over all the common pointer fields
IteratePointers(obj, AllocationSite::kStartOffset,
AllocationSite::kCommonPointerFieldEndOffset, v);
- // Skip PretenureDataOffset and PretenureCreateCount which are Int32 fields
- // Visit weak_next only for full body descriptor and if it has weak_next
- // field
- if (includeWeakNext && object_size == AllocationSite::kSizeWithWeakNext)
- IteratePointers(obj, AllocationSite::kWeakNextOffset,
- AllocationSite::kSizeWithWeakNext, v);
+ // Skip PretenureDataOffset and PretenureCreateCount which are Int32 fields.
+ // Visit weak_next only if it has weak_next field.
+ if (object_size == AllocationSite::kSizeWithWeakNext) {
+ IterateCustomWeakPointers(obj, AllocationSite::kWeakNextOffset,
+ AllocationSite::kSizeWithWeakNext, v);
+ }
}
static inline int SizeOf(Map* map, HeapObject* object) {
@@ -207,10 +218,8 @@ class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
- // Array buffers contain raw pointers that the GC does not know about. These
- // are stored at kBackingStoreOffset and later, so we do not iterate over
- // those.
- IteratePointers(obj, kPropertiesOrHashOffset, kBackingStoreOffset, v);
+ // JSArrayBuffer instances contain raw data that the GC does not know about.
+ IteratePointers(obj, kPropertiesOrHashOffset, kByteLengthOffset, v);
IterateBodyImpl(map, obj, kSize, object_size, v);
}
@@ -219,6 +228,31 @@ class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class JSArrayBufferView::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kBufferOffset + kPointerSize == kByteOffsetOffset);
+ STATIC_ASSERT(kByteOffsetOffset + kUIntptrSize == kByteLengthOffset);
+ STATIC_ASSERT(kByteLengthOffset + kUIntptrSize == kHeaderSize);
+
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ if (offset < kByteOffsetOffset) return true;
+ if (offset < kHeaderSize) return false;
+ return IsValidSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ // JSArrayBufferView contains raw data that the GC does not know about.
+ IteratePointers(obj, kPropertiesOrHashOffset, kByteOffsetOffset, v);
+ IterateBodyImpl(map, obj, kHeaderSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+};
+
template <typename Derived>
class SmallOrderedHashTable<Derived>::BodyDescriptor final
: public BodyDescriptorBase {
@@ -624,6 +658,49 @@ class DataHandler::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class Context::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return offset >= Context::kHeaderSize && offset < Context::kSize;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, Context::kHeaderSize,
+ Context::kHeaderSize + FIRST_WEAK_SLOT * kPointerSize, v);
+ IterateCustomWeakPointers(
+ obj, Context::kHeaderSize + FIRST_WEAK_SLOT * kPointerSize,
+ Context::kSize, v);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return Context::kSize;
+ }
+};
+
+class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return offset >= CodeDataContainer::kHeaderSize &&
+ offset < CodeDataContainer::kSize;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, CodeDataContainer::kHeaderSize,
+ CodeDataContainer::kPointerFieldsStrongEndOffset, v);
+ IterateCustomWeakPointers(
+ obj, CodeDataContainer::kPointerFieldsStrongEndOffset,
+ CodeDataContainer::kPointerFieldsWeakEndOffset, v);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return CodeDataContainer::kSize;
+ }
+};
+
template <typename Op, typename ReturnType, typename T1, typename T2,
typename T3, typename T4>
ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
@@ -663,6 +740,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case EPHEMERON_HASH_TABLE_TYPE:
case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
+ case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -707,8 +785,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_ARRAY_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
@@ -726,12 +802,17 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_MESSAGE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE:
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
case JS_INTL_LIST_FORMAT_TYPE:
case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_NUMBER_FORMAT_TYPE:
case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENTER_TYPE:
#endif // V8_INTL_SUPPORT
+ case WASM_EXCEPTION_TYPE:
case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
@@ -746,6 +827,10 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
p4);
case JS_ARRAY_BUFFER_TYPE:
return Op::template apply<JSArrayBuffer::BodyDescriptor>(p1, p2, p3, p4);
+ case JS_DATA_VIEW_TYPE:
+ return Op::template apply<JSDataView::BodyDescriptor>(p1, p2, p3, p4);
+ case JS_TYPED_ARRAY_TYPE:
+ return Op::template apply<JSTypedArray::BodyDescriptor>(p1, p2, p3, p4);
case JS_FUNCTION_TYPE:
return Op::template apply<JSFunction::BodyDescriptor>(p1, p2, p3, p4);
case ODDBALL_TYPE:
@@ -808,7 +893,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case ALLOCATION_SITE_TYPE:
return Op::template apply<AllocationSite::BodyDescriptor>(p1, p2, p3, p4);
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+#define MAKE_STRUCT_CASE(TYPE, Name, name) case TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
if (type == PROTOTYPE_INFO_TYPE) {
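
BodyDescriptorApply dispatches on the instance type and applies the given Op functor with the matching BodyDescriptor as a template argument. A sketch of the functor shape it expects, inferred from the template signature above rather than taken from this patch:

    // Sketch: an Op exposing static apply<BodyDescriptor>(p1, p2, p3, p4).
    struct CallIterateBody {
      template <typename BodyDescriptor>
      static void apply(Map* map, HeapObject* obj, int object_size,
                        ObjectVisitor* v) {
        BodyDescriptor::IterateBody(map, obj, object_size, v);
      }
    };
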
diff --git a/deps/v8/src/objects-body-descriptors.h b/deps/v8/src/objects-body-descriptors.h
index 6277f9d8bd..a1dc0f7ffa 100644
--- a/deps/v8/src/objects-body-descriptors.h
+++ b/deps/v8/src/objects-body-descriptors.h
@@ -26,7 +26,7 @@ namespace internal {
// template <typename ObjectVisitor>
// static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
// ObjectVisitor* v);
-class BodyDescriptorBase BASE_EMBEDDED {
+class BodyDescriptorBase {
public:
template <typename ObjectVisitor>
static inline void IteratePointers(HeapObject* obj, int start_offset,
@@ -37,6 +37,15 @@ class BodyDescriptorBase BASE_EMBEDDED {
ObjectVisitor* v);
template <typename ObjectVisitor>
+ static inline void IterateCustomWeakPointers(HeapObject* obj,
+ int start_offset, int end_offset,
+ ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ static inline void IterateCustomWeakPointer(HeapObject* obj, int offset,
+ ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
static inline void IterateMaybeWeakPointers(HeapObject* obj, int start_offset,
int end_offset, ObjectVisitor* v);
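
The new Context and CodeDataContainer descriptors in the previous file show the intended use of these helpers: strong pointer fields first, then a weak tail handed to IterateCustomWeakPointers. A minimal descriptor following that contract, where ExampleObject and its offsets are invented for illustration:

    // Invented example type; the offsets are placeholders.
    class ExampleObject::BodyDescriptor final : public BodyDescriptorBase {
     public:
      static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
        return offset >= kHeaderSize && offset < kSize;
      }

      template <typename ObjectVisitor>
      static inline void IterateBody(Map* map, HeapObject* obj,
                                     int object_size, ObjectVisitor* v) {
        IteratePointers(obj, kHeaderSize, kStrongEndOffset, v);
        IterateCustomWeakPointers(obj, kStrongEndOffset, kSize, v);
      }

      static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
    };
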
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 3ce26f95c9..b4e50843a1 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -15,30 +15,38 @@
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-collator-inl.h"
-#endif // V8_INTL_SUPPORT
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-break-iterator-inl.h"
+#include "src/objects/js-collator-inl.h"
+#endif // V8_INTL_SUPPORT
#include "src/objects/js-collection-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-date-time-format-inl.h"
+#endif // V8_INTL_SUPPORT
#include "src/objects/js-generator-inl.h"
-#include "src/objects/literal-objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-list-format-inl.h"
#include "src/objects/js-locale-inl.h"
+#include "src/objects/js-number-format-inl.h"
+#include "src/objects/js-plural-rules-inl.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator-inl.h"
#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-plural-rules-inl.h"
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/js-segmenter-inl.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/literal-objects-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/microtask-inl.h"
+#include "src/objects/microtask-queue-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions.h"
@@ -90,10 +98,10 @@ void Object::VerifyPointer(Isolate* isolate, Object* p) {
void MaybeObject::VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject* p) {
HeapObject* heap_object;
- if (p->ToStrongOrWeakHeapObject(&heap_object)) {
+ if (p->GetHeapObject(&heap_object)) {
HeapObject::VerifyHeapPointer(isolate, heap_object);
} else {
- CHECK(p->IsSmi() || p->IsClearedWeakHeapObject());
+ CHECK(p->IsSmi() || p->IsCleared());
}
}
@@ -115,7 +123,7 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
CHECK(map()->IsMap());
switch (map()->instance_type()) {
-#define STRING_TYPE_CASE(TYPE, size, name, camel_name) case TYPE:
+#define STRING_TYPE_CASE(TYPE, size, name, CamelName) case TYPE:
STRING_TYPE_LIST(STRING_TYPE_CASE)
#undef STRING_TYPE_CASE
String::cast(this)->StringVerify(isolate);
@@ -155,6 +163,7 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case FIXED_ARRAY_TYPE:
case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
+ case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -222,6 +231,7 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case WASM_EXCEPTION_TYPE:
case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
case WASM_TABLE_TYPE:
@@ -356,25 +366,37 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
CodeDataContainer::cast(this)->CodeDataContainerVerify(isolate);
break;
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+ JSV8BreakIterator::cast(this)->JSV8BreakIteratorVerify(isolate);
+ break;
case JS_INTL_COLLATOR_TYPE:
JSCollator::cast(this)->JSCollatorVerify(isolate);
break;
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
+ JSDateTimeFormat::cast(this)->JSDateTimeFormatVerify(isolate);
+ break;
case JS_INTL_LIST_FORMAT_TYPE:
JSListFormat::cast(this)->JSListFormatVerify(isolate);
break;
case JS_INTL_LOCALE_TYPE:
JSLocale::cast(this)->JSLocaleVerify(isolate);
break;
+ case JS_INTL_NUMBER_FORMAT_TYPE:
+ JSNumberFormat::cast(this)->JSNumberFormatVerify(isolate);
+ break;
case JS_INTL_PLURAL_RULES_TYPE:
JSPluralRules::cast(this)->JSPluralRulesVerify(isolate);
break;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
JSRelativeTimeFormat::cast(this)->JSRelativeTimeFormatVerify(isolate);
break;
+ case JS_INTL_SEGMENTER_TYPE:
+ JSSegmenter::cast(this)->JSSegmenterVerify(isolate);
+ break;
#endif // V8_INTL_SUPPORT
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
Name::cast(this)->Name##Verify(isolate); \
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
@@ -434,8 +456,7 @@ void FeedbackVector::FeedbackVectorVerify(Isolate* isolate) {
CHECK(IsFeedbackVector());
MaybeObject* code = optimized_code_weak_or_smi();
MaybeObject::VerifyMaybeObjectPointer(isolate, code);
- CHECK(code->IsSmi() || code->IsClearedWeakHeapObject() ||
- code->IsWeakHeapObject());
+ CHECK(code->IsSmi() || code->IsWeakOrCleared());
}
template <class Traits>
@@ -703,7 +724,7 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
// Check that properties with private symbols names are non-enumerable.
for (int descriptor = 0; descriptor < nof_descriptors; descriptor++) {
- Object* key = get(ToKeyIndex(descriptor))->ToObject();
+ Object* key = get(ToKeyIndex(descriptor))->cast<Object>();
// number_of_descriptors() may be out of sync with the actual descriptors
// written during descriptor array construction.
if (key->IsUndefined(isolate)) continue;
@@ -714,13 +735,14 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
MaybeObject* value = get(ToValueIndex(descriptor));
HeapObject* heap_object;
if (details.location() == kField) {
- CHECK(value == MaybeObject::FromObject(FieldType::None()) ||
- value == MaybeObject::FromObject(FieldType::Any()) ||
- value->IsClearedWeakHeapObject() ||
- (value->ToWeakHeapObject(&heap_object) && heap_object->IsMap()));
+ CHECK(
+ value == MaybeObject::FromObject(FieldType::None()) ||
+ value == MaybeObject::FromObject(FieldType::Any()) ||
+ value->IsCleared() ||
+ (value->GetHeapObjectIfWeak(&heap_object) && heap_object->IsMap()));
} else {
- CHECK(!value->IsWeakOrClearedHeapObject());
- CHECK(!value->ToObject()->IsMap());
+ CHECK(!value->IsWeakOrCleared());
+ CHECK(!value->cast<Object>()->IsMap());
}
}
}
@@ -1269,6 +1291,13 @@ void PromiseReactionJobTask::PromiseReactionJobTaskVerify(Isolate* isolate) {
promise_or_capability()->IsPromiseCapability());
}
+void MicrotaskQueue::MicrotaskQueueVerify(Isolate* isolate) {
+ CHECK(IsMicrotaskQueue());
+ VerifyHeapPointer(isolate, queue());
+ VerifySmiField(kPendingMicrotaskCountOffset);
+ CHECK_LE(pending_microtask_count(), queue()->length());
+}
+
void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskVerify(
Isolate* isolate) {
CHECK(IsPromiseFulfillReactionJobTask());
@@ -1444,9 +1473,6 @@ void JSProxy::JSProxyVerify(Isolate* isolate) {
void JSArrayBuffer::JSArrayBufferVerify(Isolate* isolate) {
CHECK(IsJSArrayBuffer());
JSObjectVerify(isolate);
- VerifyPointer(isolate, byte_length());
- CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber() ||
- byte_length()->IsUndefined(isolate));
}
void JSArrayBufferView::JSArrayBufferViewVerify(Isolate* isolate) {
@@ -1455,14 +1481,8 @@ void JSArrayBufferView::JSArrayBufferViewVerify(Isolate* isolate) {
VerifyPointer(isolate, buffer());
CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined(isolate) ||
buffer() == Smi::kZero);
-
- VerifyPointer(isolate, raw_byte_offset());
- CHECK(raw_byte_offset()->IsSmi() || raw_byte_offset()->IsHeapNumber() ||
- raw_byte_offset()->IsUndefined(isolate));
-
- VerifyPointer(isolate, raw_byte_length());
- CHECK(raw_byte_length()->IsSmi() || raw_byte_length()->IsHeapNumber() ||
- raw_byte_length()->IsUndefined(isolate));
+ CHECK_LE(byte_length(), JSArrayBuffer::kMaxByteLength);
+ CHECK_LE(byte_offset(), JSArrayBuffer::kMaxByteLength);
}
void JSTypedArray::JSTypedArrayVerify(Isolate* isolate) {
@@ -1575,7 +1595,7 @@ void PrototypeUsers::Verify(WeakArrayList* array) {
while (empty_slot != kNoEmptySlotsMarker) {
CHECK_GT(empty_slot, 0);
CHECK_LT(empty_slot, array->length());
- empty_slot = Smi::ToInt(array->Get(empty_slot)->ToSmi());
+ empty_slot = Smi::ToInt(array->Get(empty_slot)->cast<Smi>());
++empty_slots_count;
}
@@ -1585,8 +1605,8 @@ void PrototypeUsers::Verify(WeakArrayList* array) {
for (int i = kFirstIndex; i < array->length(); ++i) {
HeapObject* heap_object;
MaybeObject* object = array->Get(i);
- if ((object->ToWeakHeapObject(&heap_object) && heap_object->IsMap()) ||
- object->IsClearedWeakHeapObject()) {
+ if ((object->GetHeapObjectIfWeak(&heap_object) && heap_object->IsMap()) ||
+ object->IsCleared()) {
++weak_maps_count;
} else {
CHECK(object->IsSmi());
@@ -1800,9 +1820,8 @@ void Script::ScriptVerify(Isolate* isolate) {
for (int i = 0; i < shared_function_infos()->length(); ++i) {
MaybeObject* maybe_object = shared_function_infos()->Get(i);
HeapObject* heap_object;
- CHECK(maybe_object->IsWeakHeapObject() ||
- maybe_object->IsClearedWeakHeapObject() ||
- (maybe_object->ToStrongHeapObject(&heap_object) &&
+ CHECK(maybe_object->IsWeak() || maybe_object->IsCleared() ||
+ (maybe_object->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsUndefined(isolate)));
}
}
@@ -1813,12 +1832,11 @@ void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) {
for (int i = 0; i < length(); i++) {
MaybeObject* e = WeakFixedArray::Get(i);
HeapObject* heap_object;
- if (e->ToWeakHeapObject(&heap_object)) {
+ if (e->GetHeapObjectIfWeak(&heap_object)) {
Map::cast(heap_object)->DictionaryMapVerify(isolate);
} else {
- CHECK(e->IsClearedWeakHeapObject() ||
- (e->ToStrongHeapObject(&heap_object) &&
- heap_object->IsUndefined(isolate)));
+ CHECK(e->IsCleared() || (e->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object->IsUndefined(isolate)));
}
}
}
@@ -1871,18 +1889,37 @@ void InterpreterData::InterpreterDataVerify(Isolate* isolate) {
}
#ifdef V8_INTL_SUPPORT
+void JSV8BreakIterator::JSV8BreakIteratorVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
+ VerifyObjectField(isolate, kLocaleOffset);
+ VerifyObjectField(isolate, kTypeOffset);
+ VerifyObjectField(isolate, kBreakIteratorOffset);
+ VerifyObjectField(isolate, kUnicodeStringOffset);
+ VerifyObjectField(isolate, kBoundAdoptTextOffset);
+ VerifyObjectField(isolate, kBoundFirstOffset);
+ VerifyObjectField(isolate, kBoundNextOffset);
+ VerifyObjectField(isolate, kBoundCurrentOffset);
+ VerifyObjectField(isolate, kBoundBreakTypeOffset);
+}
+
void JSCollator::JSCollatorVerify(Isolate* isolate) {
CHECK(IsJSCollator());
JSObjectVerify(isolate);
VerifyObjectField(isolate, kICUCollatorOffset);
- VerifyObjectField(isolate, kFlagsOffset);
VerifyObjectField(isolate, kBoundCompareOffset);
}
+void JSDateTimeFormat::JSDateTimeFormatVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
+ VerifyObjectField(isolate, kICULocaleOffset);
+ VerifyObjectField(isolate, kICUSimpleDateFormatOffset);
+ VerifyObjectField(isolate, kBoundFormatOffset);
+}
+
void JSListFormat::JSListFormatVerify(Isolate* isolate) {
JSObjectVerify(isolate);
VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kFormatterOffset);
+ VerifyObjectField(isolate, kICUFormatterOffset);
VerifyObjectField(isolate, kFlagsOffset);
}
@@ -1894,14 +1931,21 @@ void JSLocale::JSLocaleVerify(Isolate* isolate) {
VerifyObjectField(isolate, kBaseNameOffset);
VerifyObjectField(isolate, kLocaleOffset);
// Unicode extension fields.
+ VerifyObjectField(isolate, kFlagsOffset);
VerifyObjectField(isolate, kCalendarOffset);
- VerifyObjectField(isolate, kCaseFirstOffset);
VerifyObjectField(isolate, kCollationOffset);
- VerifyObjectField(isolate, kHourCycleOffset);
- VerifyObjectField(isolate, kNumericOffset);
VerifyObjectField(isolate, kNumberingSystemOffset);
}
+void JSNumberFormat::JSNumberFormatVerify(Isolate* isolate) {
+ CHECK(IsJSNumberFormat());
+ JSObjectVerify(isolate);
+ VerifyObjectField(isolate, kLocaleOffset);
+ VerifyObjectField(isolate, kICUNumberFormatOffset);
+ VerifyObjectField(isolate, kBoundFormatOffset);
+ VerifyObjectField(isolate, kFlagsOffset);
+}
+
void JSPluralRules::JSPluralRulesVerify(Isolate* isolate) {
CHECK(IsJSPluralRules());
JSObjectVerify(isolate);
@@ -1914,7 +1958,14 @@ void JSPluralRules::JSPluralRulesVerify(Isolate* isolate) {
void JSRelativeTimeFormat::JSRelativeTimeFormatVerify(Isolate* isolate) {
JSObjectVerify(isolate);
VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kFormatterOffset);
+ VerifyObjectField(isolate, kICUFormatterOffset);
+ VerifyObjectField(isolate, kFlagsOffset);
+}
+
+void JSSegmenter::JSSegmenterVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
+ VerifyObjectField(isolate, kLocaleOffset);
+ VerifyObjectField(isolate, kICUBreakIteratorOffset);
VerifyObjectField(isolate, kFlagsOffset);
}
#endif // V8_INTL_SUPPORT
@@ -2119,8 +2170,8 @@ bool CanLeak(Object* obj, Heap* heap) {
if (obj->IsContext()) return true;
if (obj->IsMap()) {
Map* map = Map::cast(obj);
- for (int i = 0; i < Heap::kStrongRootListLength; i++) {
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
+ for (RootIndex root_index = RootIndex::kFirstStrongRoot;
+ root_index <= RootIndex::kLastStrongRoot; ++root_index) {
if (map == heap->root(root_index)) return false;
}
return true;
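
The rewritten loop relies on RootIndex supporting prefix increment. A typical definition for an enum class looks like the following; the actual definition lives with RootIndex and is not part of this hunk, so this is an assumption:

    // Assumed helper; the real one accompanies the RootIndex definition.
    inline RootIndex& operator++(RootIndex& index) {
      index = static_cast<RootIndex>(static_cast<int>(index) + 1);
      return index;
    }
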
diff --git a/deps/v8/src/objects-definitions.h b/deps/v8/src/objects-definitions.h
index 3b0a379632..8b8d50f2a7 100644
--- a/deps/v8/src/objects-definitions.h
+++ b/deps/v8/src/objects-definitions.h
@@ -38,173 +38,177 @@ namespace internal {
// NOTE: List had to be split into two, because of conditional item(s) from
// INTL namespace. They can't just be appended to the end, because of the
// checks we do in tests (expecting JS_FUNCTION_TYPE to be last).
-#define INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
- V(INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(STRING_TYPE) \
- V(CONS_STRING_TYPE) \
- V(EXTERNAL_STRING_TYPE) \
- V(SLICED_STRING_TYPE) \
- V(THIN_STRING_TYPE) \
- V(ONE_BYTE_STRING_TYPE) \
- V(CONS_ONE_BYTE_STRING_TYPE) \
- V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
- V(SLICED_ONE_BYTE_STRING_TYPE) \
- V(THIN_ONE_BYTE_STRING_TYPE) \
- V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(SHORT_EXTERNAL_STRING_TYPE) \
- V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE) \
- V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- \
- V(SYMBOL_TYPE) \
- V(HEAP_NUMBER_TYPE) \
- V(BIGINT_TYPE) \
- V(ODDBALL_TYPE) \
- \
- V(MAP_TYPE) \
- V(CODE_TYPE) \
- V(MUTABLE_HEAP_NUMBER_TYPE) \
- V(FOREIGN_TYPE) \
- V(BYTE_ARRAY_TYPE) \
- V(BYTECODE_ARRAY_TYPE) \
- V(FREE_SPACE_TYPE) \
- \
- V(FIXED_INT8_ARRAY_TYPE) \
- V(FIXED_UINT8_ARRAY_TYPE) \
- V(FIXED_INT16_ARRAY_TYPE) \
- V(FIXED_UINT16_ARRAY_TYPE) \
- V(FIXED_INT32_ARRAY_TYPE) \
- V(FIXED_UINT32_ARRAY_TYPE) \
- V(FIXED_FLOAT32_ARRAY_TYPE) \
- V(FIXED_FLOAT64_ARRAY_TYPE) \
- V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
- V(FIXED_BIGINT64_ARRAY_TYPE) \
- V(FIXED_BIGUINT64_ARRAY_TYPE) \
- \
- V(FIXED_DOUBLE_ARRAY_TYPE) \
- V(FEEDBACK_METADATA_TYPE) \
- V(FILLER_TYPE) \
- \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(ACCESSOR_INFO_TYPE) \
- V(ACCESSOR_PAIR_TYPE) \
- V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
- V(ALLOCATION_MEMENTO_TYPE) \
- V(ASYNC_GENERATOR_REQUEST_TYPE) \
- V(DEBUG_INFO_TYPE) \
- V(FUNCTION_TEMPLATE_INFO_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
- V(INTERPRETER_DATA_TYPE) \
- V(MODULE_INFO_ENTRY_TYPE) \
- V(MODULE_TYPE) \
- V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(PROMISE_CAPABILITY_TYPE) \
- V(PROMISE_REACTION_TYPE) \
- V(PROTOTYPE_INFO_TYPE) \
- V(SCRIPT_TYPE) \
- V(STACK_FRAME_INFO_TYPE) \
- V(TUPLE2_TYPE) \
- V(TUPLE3_TYPE) \
- V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
- V(WASM_DEBUG_INFO_TYPE) \
- V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \
- \
- V(CALLABLE_TASK_TYPE) \
- V(CALLBACK_TASK_TYPE) \
- V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \
- V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \
- V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
- \
- V(ALLOCATION_SITE_TYPE) \
- \
- V(FIXED_ARRAY_TYPE) \
- V(OBJECT_BOILERPLATE_DESCRIPTION_TYPE) \
- V(HASH_TABLE_TYPE) \
- V(ORDERED_HASH_MAP_TYPE) \
- V(ORDERED_HASH_SET_TYPE) \
- V(NAME_DICTIONARY_TYPE) \
- V(GLOBAL_DICTIONARY_TYPE) \
- V(NUMBER_DICTIONARY_TYPE) \
- V(SIMPLE_NUMBER_DICTIONARY_TYPE) \
- V(STRING_TABLE_TYPE) \
- V(EPHEMERON_HASH_TABLE_TYPE) \
- V(SCOPE_INFO_TYPE) \
- V(SCRIPT_CONTEXT_TABLE_TYPE) \
- \
- V(BLOCK_CONTEXT_TYPE) \
- V(CATCH_CONTEXT_TYPE) \
- V(DEBUG_EVALUATE_CONTEXT_TYPE) \
- V(EVAL_CONTEXT_TYPE) \
- V(FUNCTION_CONTEXT_TYPE) \
- V(MODULE_CONTEXT_TYPE) \
- V(NATIVE_CONTEXT_TYPE) \
- V(SCRIPT_CONTEXT_TYPE) \
- V(WITH_CONTEXT_TYPE) \
- \
- V(WEAK_FIXED_ARRAY_TYPE) \
- V(DESCRIPTOR_ARRAY_TYPE) \
- V(TRANSITION_ARRAY_TYPE) \
- \
- V(CALL_HANDLER_INFO_TYPE) \
- V(CELL_TYPE) \
- V(CODE_DATA_CONTAINER_TYPE) \
- V(FEEDBACK_CELL_TYPE) \
- V(FEEDBACK_VECTOR_TYPE) \
- V(LOAD_HANDLER_TYPE) \
- V(PRE_PARSED_SCOPE_DATA_TYPE) \
- V(PROPERTY_ARRAY_TYPE) \
- V(PROPERTY_CELL_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- V(SMALL_ORDERED_HASH_MAP_TYPE) \
- V(SMALL_ORDERED_HASH_SET_TYPE) \
- V(STORE_HANDLER_TYPE) \
- V(UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE) \
- V(UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE) \
- V(WEAK_ARRAY_LIST_TYPE) \
- \
- V(JS_PROXY_TYPE) \
- V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_GLOBAL_PROXY_TYPE) \
- V(JS_MODULE_NAMESPACE_TYPE) \
- V(JS_SPECIAL_API_OBJECT_TYPE) \
- V(JS_VALUE_TYPE) \
- V(JS_API_OBJECT_TYPE) \
- V(JS_OBJECT_TYPE) \
- \
- V(JS_ARGUMENTS_TYPE) \
- V(JS_ARRAY_BUFFER_TYPE) \
- V(JS_ARRAY_ITERATOR_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
- V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
- V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
- V(JS_DATE_TYPE) \
- V(JS_ERROR_TYPE) \
- V(JS_GENERATOR_OBJECT_TYPE) \
- V(JS_MAP_TYPE) \
- V(JS_MAP_KEY_ITERATOR_TYPE) \
- V(JS_MAP_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_MAP_VALUE_ITERATOR_TYPE) \
- V(JS_MESSAGE_OBJECT_TYPE) \
- V(JS_PROMISE_TYPE) \
- V(JS_REGEXP_TYPE) \
- V(JS_REGEXP_STRING_ITERATOR_TYPE) \
- V(JS_SET_TYPE) \
- V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_SET_VALUE_ITERATOR_TYPE) \
- V(JS_STRING_ITERATOR_TYPE) \
- V(JS_WEAK_MAP_TYPE) \
- V(JS_WEAK_SET_TYPE) \
- V(JS_TYPED_ARRAY_TYPE) \
+#define INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
+ V(INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(STRING_TYPE) \
+ V(CONS_STRING_TYPE) \
+ V(EXTERNAL_STRING_TYPE) \
+ V(SLICED_STRING_TYPE) \
+ V(THIN_STRING_TYPE) \
+ V(ONE_BYTE_STRING_TYPE) \
+ V(CONS_ONE_BYTE_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(SLICED_ONE_BYTE_STRING_TYPE) \
+ V(THIN_ONE_BYTE_STRING_TYPE) \
+ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(UNCACHED_EXTERNAL_STRING_TYPE) \
+ V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ \
+ V(SYMBOL_TYPE) \
+ V(HEAP_NUMBER_TYPE) \
+ V(BIGINT_TYPE) \
+ V(ODDBALL_TYPE) \
+ \
+ V(MAP_TYPE) \
+ V(CODE_TYPE) \
+ V(MUTABLE_HEAP_NUMBER_TYPE) \
+ V(FOREIGN_TYPE) \
+ V(BYTE_ARRAY_TYPE) \
+ V(BYTECODE_ARRAY_TYPE) \
+ V(FREE_SPACE_TYPE) \
+ \
+ V(FIXED_INT8_ARRAY_TYPE) \
+ V(FIXED_UINT8_ARRAY_TYPE) \
+ V(FIXED_INT16_ARRAY_TYPE) \
+ V(FIXED_UINT16_ARRAY_TYPE) \
+ V(FIXED_INT32_ARRAY_TYPE) \
+ V(FIXED_UINT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT64_ARRAY_TYPE) \
+ V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
+ V(FIXED_BIGINT64_ARRAY_TYPE) \
+ V(FIXED_BIGUINT64_ARRAY_TYPE) \
+ \
+ V(FIXED_DOUBLE_ARRAY_TYPE) \
+ V(FEEDBACK_METADATA_TYPE) \
+ V(FILLER_TYPE) \
+ \
+ V(ACCESS_CHECK_INFO_TYPE) \
+ V(ACCESSOR_INFO_TYPE) \
+ V(ACCESSOR_PAIR_TYPE) \
+ V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
+ V(ALLOCATION_MEMENTO_TYPE) \
+ V(ASYNC_GENERATOR_REQUEST_TYPE) \
+ V(DEBUG_INFO_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_TYPE) \
+ V(INTERCEPTOR_INFO_TYPE) \
+ V(INTERPRETER_DATA_TYPE) \
+ V(MODULE_INFO_ENTRY_TYPE) \
+ V(MODULE_TYPE) \
+ V(OBJECT_TEMPLATE_INFO_TYPE) \
+ V(PROMISE_CAPABILITY_TYPE) \
+ V(PROMISE_REACTION_TYPE) \
+ V(PROTOTYPE_INFO_TYPE) \
+ V(SCRIPT_TYPE) \
+ V(STACK_FRAME_INFO_TYPE) \
+ V(TUPLE2_TYPE) \
+ V(TUPLE3_TYPE) \
+ V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
+ V(WASM_DEBUG_INFO_TYPE) \
+ V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \
+ \
+ V(CALLABLE_TASK_TYPE) \
+ V(CALLBACK_TASK_TYPE) \
+ V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \
+ V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
+ \
+ V(MICROTASK_QUEUE_TYPE) \
+ \
+ V(ALLOCATION_SITE_TYPE) \
+ \
+ V(FIXED_ARRAY_TYPE) \
+ V(OBJECT_BOILERPLATE_DESCRIPTION_TYPE) \
+ V(HASH_TABLE_TYPE) \
+ V(ORDERED_HASH_MAP_TYPE) \
+ V(ORDERED_HASH_SET_TYPE) \
+ V(NAME_DICTIONARY_TYPE) \
+ V(GLOBAL_DICTIONARY_TYPE) \
+ V(NUMBER_DICTIONARY_TYPE) \
+ V(SIMPLE_NUMBER_DICTIONARY_TYPE) \
+ V(STRING_TABLE_TYPE) \
+ V(EPHEMERON_HASH_TABLE_TYPE) \
+ V(SCOPE_INFO_TYPE) \
+ V(SCRIPT_CONTEXT_TABLE_TYPE) \
+ \
+ V(AWAIT_CONTEXT_TYPE) \
+ V(BLOCK_CONTEXT_TYPE) \
+ V(CATCH_CONTEXT_TYPE) \
+ V(DEBUG_EVALUATE_CONTEXT_TYPE) \
+ V(EVAL_CONTEXT_TYPE) \
+ V(FUNCTION_CONTEXT_TYPE) \
+ V(MODULE_CONTEXT_TYPE) \
+ V(NATIVE_CONTEXT_TYPE) \
+ V(SCRIPT_CONTEXT_TYPE) \
+ V(WITH_CONTEXT_TYPE) \
+ \
+ V(WEAK_FIXED_ARRAY_TYPE) \
+ V(DESCRIPTOR_ARRAY_TYPE) \
+ V(TRANSITION_ARRAY_TYPE) \
+ \
+ V(CALL_HANDLER_INFO_TYPE) \
+ V(CELL_TYPE) \
+ V(CODE_DATA_CONTAINER_TYPE) \
+ V(FEEDBACK_CELL_TYPE) \
+ V(FEEDBACK_VECTOR_TYPE) \
+ V(LOAD_HANDLER_TYPE) \
+ V(PRE_PARSED_SCOPE_DATA_TYPE) \
+ V(PROPERTY_ARRAY_TYPE) \
+ V(PROPERTY_CELL_TYPE) \
+ V(SHARED_FUNCTION_INFO_TYPE) \
+ V(SMALL_ORDERED_HASH_MAP_TYPE) \
+ V(SMALL_ORDERED_HASH_SET_TYPE) \
+ V(STORE_HANDLER_TYPE) \
+ V(UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE) \
+ V(UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE) \
+ V(WEAK_ARRAY_LIST_TYPE) \
+ \
+ V(JS_PROXY_TYPE) \
+ V(JS_GLOBAL_OBJECT_TYPE) \
+ V(JS_GLOBAL_PROXY_TYPE) \
+ V(JS_MODULE_NAMESPACE_TYPE) \
+ V(JS_SPECIAL_API_OBJECT_TYPE) \
+ V(JS_VALUE_TYPE) \
+ V(JS_API_OBJECT_TYPE) \
+ V(JS_OBJECT_TYPE) \
+ \
+ V(JS_ARGUMENTS_TYPE) \
+ V(JS_ARRAY_BUFFER_TYPE) \
+ V(JS_ARRAY_ITERATOR_TYPE) \
+ V(JS_ARRAY_TYPE) \
+ V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
+ V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_DATE_TYPE) \
+ V(JS_ERROR_TYPE) \
+ V(JS_GENERATOR_OBJECT_TYPE) \
+ V(JS_MAP_TYPE) \
+ V(JS_MAP_KEY_ITERATOR_TYPE) \
+ V(JS_MAP_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_MAP_VALUE_ITERATOR_TYPE) \
+ V(JS_MESSAGE_OBJECT_TYPE) \
+ V(JS_PROMISE_TYPE) \
+ V(JS_REGEXP_TYPE) \
+ V(JS_REGEXP_STRING_ITERATOR_TYPE) \
+ V(JS_SET_TYPE) \
+ V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_SET_VALUE_ITERATOR_TYPE) \
+ V(JS_STRING_ITERATOR_TYPE) \
+ V(JS_WEAK_MAP_TYPE) \
+ V(JS_WEAK_SET_TYPE) \
+ V(JS_TYPED_ARRAY_TYPE) \
V(JS_DATA_VIEW_TYPE)
#define INSTANCE_TYPE_LIST_AFTER_INTL(V) \
+ V(WASM_EXCEPTION_TYPE) \
V(WASM_GLOBAL_TYPE) \
V(WASM_INSTANCE_TYPE) \
V(WASM_MEMORY_TYPE) \
@@ -216,11 +220,15 @@ namespace internal {
#ifdef V8_INTL_SUPPORT
#define INSTANCE_TYPE_LIST(V) \
INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
+ V(JS_INTL_V8_BREAK_ITERATOR_TYPE) \
V(JS_INTL_COLLATOR_TYPE) \
+ V(JS_INTL_DATE_TIME_FORMAT_TYPE) \
V(JS_INTL_LIST_FORMAT_TYPE) \
V(JS_INTL_LOCALE_TYPE) \
+ V(JS_INTL_NUMBER_FORMAT_TYPE) \
V(JS_INTL_PLURAL_RULES_TYPE) \
V(JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
+ V(JS_INTL_SEGMENTER_TYPE) \
INSTANCE_TYPE_LIST_AFTER_INTL(V)
#else
#define INSTANCE_TYPE_LIST(V) \
@@ -230,56 +238,57 @@ namespace internal {
// Since string types are not consecutive, this macro is used to
// iterate over them.
-#define STRING_TYPE_LIST(V) \
- V(STRING_TYPE, kVariableSizeSentinel, string, String) \
- V(ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, one_byte_string, \
- OneByteString) \
- V(CONS_STRING_TYPE, ConsString::kSize, cons_string, ConsString) \
- V(CONS_ONE_BYTE_STRING_TYPE, ConsString::kSize, cons_one_byte_string, \
- ConsOneByteString) \
- V(SLICED_STRING_TYPE, SlicedString::kSize, sliced_string, SlicedString) \
- V(SLICED_ONE_BYTE_STRING_TYPE, SlicedString::kSize, sliced_one_byte_string, \
- SlicedOneByteString) \
- V(EXTERNAL_STRING_TYPE, ExternalTwoByteString::kSize, external_string, \
- ExternalString) \
- V(EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize, \
- external_one_byte_string, ExternalOneByteString) \
- V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, ExternalTwoByteString::kSize, \
- external_string_with_one_byte_data, ExternalStringWithOneByteData) \
- V(SHORT_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kShortSize, \
- short_external_string, ShortExternalString) \
- V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kShortSize, \
- short_external_one_byte_string, ShortExternalOneByteString) \
- V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_string_with_one_byte_data, \
- ShortExternalStringWithOneByteData) \
- \
- V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string, \
- InternalizedString) \
- V(ONE_BYTE_INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, \
- one_byte_internalized_string, OneByteInternalizedString) \
- V(EXTERNAL_INTERNALIZED_STRING_TYPE, ExternalTwoByteString::kSize, \
- external_internalized_string, ExternalInternalizedString) \
- V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \
- external_one_byte_internalized_string, ExternalOneByteInternalizedString) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_internalized_string_with_one_byte_data, \
- ExternalInternalizedStringWithOneByteData) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \
- ExternalTwoByteString::kShortSize, short_external_internalized_string, \
- ShortExternalInternalizedString) \
- V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, \
- ExternalOneByteString::kShortSize, \
- short_external_one_byte_internalized_string, \
- ShortExternalOneByteInternalizedString) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_internalized_string_with_one_byte_data, \
- ShortExternalInternalizedStringWithOneByteData) \
- V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
- V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
+#define STRING_TYPE_LIST(V) \
+ V(STRING_TYPE, kVariableSizeSentinel, string, String) \
+ V(ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, one_byte_string, \
+ OneByteString) \
+ V(CONS_STRING_TYPE, ConsString::kSize, cons_string, ConsString) \
+ V(CONS_ONE_BYTE_STRING_TYPE, ConsString::kSize, cons_one_byte_string, \
+ ConsOneByteString) \
+ V(SLICED_STRING_TYPE, SlicedString::kSize, sliced_string, SlicedString) \
+ V(SLICED_ONE_BYTE_STRING_TYPE, SlicedString::kSize, sliced_one_byte_string, \
+ SlicedOneByteString) \
+ V(EXTERNAL_STRING_TYPE, ExternalTwoByteString::kSize, external_string, \
+ ExternalString) \
+ V(EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize, \
+ external_one_byte_string, ExternalOneByteString) \
+ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, ExternalTwoByteString::kSize, \
+ external_string_with_one_byte_data, ExternalStringWithOneByteData) \
+ V(UNCACHED_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kUncachedSize, \
+ uncached_external_string, UncachedExternalString) \
+ V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE, \
+ ExternalOneByteString::kUncachedSize, uncached_external_one_byte_string, \
+ UncachedExternalOneByteString) \
+ V(UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kUncachedSize, \
+ uncached_external_string_with_one_byte_data, \
+ UncachedExternalStringWithOneByteData) \
+ \
+ V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string, \
+ InternalizedString) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, \
+ one_byte_internalized_string, OneByteInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE, ExternalTwoByteString::kSize, \
+ external_internalized_string, ExternalInternalizedString) \
+ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \
+ external_one_byte_internalized_string, ExternalOneByteInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_internalized_string_with_one_byte_data, \
+ ExternalInternalizedStringWithOneByteData) \
+ V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE, \
+ ExternalTwoByteString::kUncachedSize, \
+ uncached_external_internalized_string, UncachedExternalInternalizedString) \
+ V(UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, \
+ ExternalOneByteString::kUncachedSize, \
+ uncached_external_one_byte_internalized_string, \
+ UncachedExternalOneByteInternalizedString) \
+ V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
+ ExternalTwoByteString::kUncachedSize, \
+ uncached_external_internalized_string_with_one_byte_data, \
+ UncachedExternalInternalizedStringWithOneByteData) \
+ V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
+ V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
ThinOneByteString)
// A struct is a simple object with a set of object-valued fields. Including an
@@ -291,54 +300,95 @@ namespace internal {
// Note that for subtle reasons related to the ordering or numerical values of
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
-#define STRUCT_LIST(V) \
- V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
- V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
- V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
- V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
- V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
- V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request) \
- V(DEBUG_INFO, DebugInfo, debug_info) \
- V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
- V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
- V(INTERPRETER_DATA, InterpreterData, interpreter_data) \
- V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
- V(MODULE, Module, module) \
- V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(PROMISE_CAPABILITY, PromiseCapability, promise_capability) \
- V(PROMISE_REACTION, PromiseReaction, promise_reaction) \
- V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
- V(SCRIPT, Script, script) \
- V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
- V(TUPLE2, Tuple2, tuple2) \
- V(TUPLE3, Tuple3, tuple3) \
- V(ARRAY_BOILERPLATE_DESCRIPTION, ArrayBoilerplateDescription, \
- array_boilerplate_description) \
- V(WASM_DEBUG_INFO, WasmDebugInfo, wasm_debug_info) \
- V(WASM_EXPORTED_FUNCTION_DATA, WasmExportedFunctionData, \
- wasm_exported_function_data) \
- V(CALLABLE_TASK, CallableTask, callable_task) \
- V(CALLBACK_TASK, CallbackTask, callback_task) \
- V(PROMISE_FULFILL_REACTION_JOB_TASK, PromiseFulfillReactionJobTask, \
- promise_fulfill_reaction_job_task) \
- V(PROMISE_REJECT_REACTION_JOB_TASK, PromiseRejectReactionJobTask, \
- promise_reject_reaction_job_task) \
- V(PROMISE_RESOLVE_THENABLE_JOB_TASK, PromiseResolveThenableJobTask, \
- promise_resolve_thenable_job_task)
+#define STRUCT_LIST_GENERATOR(V, _) \
+ V(_, ACCESS_CHECK_INFO_TYPE, AccessCheckInfo, access_check_info) \
+ V(_, ACCESSOR_INFO_TYPE, AccessorInfo, accessor_info) \
+ V(_, ACCESSOR_PAIR_TYPE, AccessorPair, accessor_pair) \
+ V(_, ALIASED_ARGUMENTS_ENTRY_TYPE, AliasedArgumentsEntry, \
+ aliased_arguments_entry) \
+ V(_, ALLOCATION_MEMENTO_TYPE, AllocationMemento, allocation_memento) \
+ V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
+ async_generator_request) \
+ V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \
+ V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \
+ function_template_info) \
+ V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
+ V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
+ V(_, MODULE_INFO_ENTRY_TYPE, ModuleInfoEntry, module_info_entry) \
+ V(_, MODULE_TYPE, Module, module) \
+ V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \
+ V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
+ V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
+ V(_, PROTOTYPE_INFO_TYPE, PrototypeInfo, prototype_info) \
+ V(_, SCRIPT_TYPE, Script, script) \
+ V(_, STACK_FRAME_INFO_TYPE, StackFrameInfo, stack_frame_info) \
+ V(_, TUPLE2_TYPE, Tuple2, tuple2) \
+ V(_, TUPLE3_TYPE, Tuple3, tuple3) \
+ V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
+ array_boilerplate_description) \
+ V(_, WASM_DEBUG_INFO_TYPE, WasmDebugInfo, wasm_debug_info) \
+ V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
+ wasm_exported_function_data) \
+ V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
+ V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \
+ V(_, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, PromiseFulfillReactionJobTask, \
+ promise_fulfill_reaction_job_task) \
+ V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask, \
+ promise_reject_reaction_job_task) \
+ V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \
+ promise_resolve_thenable_job_task) \
+ V(_, MICROTASK_QUEUE_TYPE, MicrotaskQueue, microtask_queue)
-#define ALLOCATION_SITE_LIST(V) \
- V(ALLOCATION_SITE, AllocationSite, WithWeakNext, allocation_site) \
- V(ALLOCATION_SITE, AllocationSite, WithoutWeakNext, \
+// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
+#define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
+
+// Produces (NAME, Name, name) entries.
+#define STRUCT_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V)
+
+// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_MAPS_LIST entry
+#define STRUCT_MAPS_LIST_ADAPTER(V, NAME, Name, name) \
+ V(Map, name##_map, Name##Map)
+
+// Produces (Map, struct_name_map, StructNameMap) entries
+#define STRUCT_MAPS_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V)
+
+//
+// The following macros define the list of allocation site objects and the
+// list of their maps.
+//
+#define ALLOCATION_SITE_LIST(V, _) \
+ V(_, ALLOCATION_SITE_TYPE, AllocationSite, WithWeakNext, allocation_site) \
+ V(_, ALLOCATION_SITE_TYPE, AllocationSite, WithoutWeakNext, \
allocation_site_without_weaknext)
-#define DATA_HANDLER_LIST(V) \
- V(LOAD_HANDLER, LoadHandler, 1, load_handler1) \
- V(LOAD_HANDLER, LoadHandler, 2, load_handler2) \
- V(LOAD_HANDLER, LoadHandler, 3, load_handler3) \
- V(STORE_HANDLER, StoreHandler, 0, store_handler0) \
- V(STORE_HANDLER, StoreHandler, 1, store_handler1) \
- V(STORE_HANDLER, StoreHandler, 2, store_handler2) \
- V(STORE_HANDLER, StoreHandler, 3, store_handler3)
+// Adapts one ALLOCATION_SITE_LIST entry to the ALLOCATION_SITE_MAPS_LIST entry
+#define ALLOCATION_SITE_MAPS_LIST_ADAPTER(V, TYPE, Name, Size, name_size) \
+ V(Map, name_size##_map, Name##Size##Map)
+
+// Produces (Map, allocation_site_name_map, AllocationSiteNameMap) entries
+#define ALLOCATION_SITE_MAPS_LIST(V) \
+ ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAPS_LIST_ADAPTER, V)
+
+//
+// The following macros define the list of data handler objects and the list
+// of their maps.
+//
+#define DATA_HANDLER_LIST(V, _) \
+ V(_, LOAD_HANDLER_TYPE, LoadHandler, 1, load_handler1) \
+ V(_, LOAD_HANDLER_TYPE, LoadHandler, 2, load_handler2) \
+ V(_, LOAD_HANDLER_TYPE, LoadHandler, 3, load_handler3) \
+ V(_, STORE_HANDLER_TYPE, StoreHandler, 0, store_handler0) \
+ V(_, STORE_HANDLER_TYPE, StoreHandler, 1, store_handler1) \
+ V(_, STORE_HANDLER_TYPE, StoreHandler, 2, store_handler2) \
+ V(_, STORE_HANDLER_TYPE, StoreHandler, 3, store_handler3)
+
+// Adapts one DATA_HANDLER_LIST entry to the DATA_HANDLER_MAPS_LIST entry.
+#define DATA_HANDLER_MAPS_LIST_ADAPTER(V, TYPE, Name, Size, name_size) \
+ V(Map, name_size##_map, Name##Size##Map)
+
+// Produces (Map, handler_name_map, HandlerNameMap) entries
+#define DATA_HANDLER_MAPS_LIST(V) \
+ DATA_HANDLER_LIST(DATA_HANDLER_MAPS_LIST_ADAPTER, V)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index ed3e15ed9e..5c66d5f60a 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -83,8 +83,8 @@ INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER);
TYPED_ARRAYS(TYPED_ARRAY_INSTANCE_TYPE_CHECKER)
#undef TYPED_ARRAY_INSTANCE_TYPE_CHECKER
-#define STRUCT_INSTANCE_TYPE_CHECKER(NAME, Name, name) \
- INSTANCE_TYPE_CHECKER(Name, NAME##_TYPE)
+#define STRUCT_INSTANCE_TYPE_CHECKER(TYPE, Name, name) \
+ INSTANCE_TYPE_CHECKER(Name, TYPE)
STRUCT_LIST(STRUCT_INSTANCE_TYPE_CHECKER)
#undef STRUCT_INSTANCE_TYPE_CHECKER
@@ -116,6 +116,11 @@ V8_INLINE bool IsJSObject(InstanceType instance_type) {
return instance_type >= FIRST_JS_OBJECT_TYPE;
}
+V8_INLINE bool IsJSReceiver(InstanceType instance_type) {
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ return instance_type >= FIRST_JS_RECEIVER_TYPE;
+}
+
} // namespace InstanceTypeChecker
// TODO(v8:7786): For instance types that have a single map instance on the
@@ -286,13 +291,6 @@ bool HeapObject::IsFiller() const {
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
-bool HeapObject::IsJSReceiver() const {
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- return map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
-}
-
-bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
-
bool HeapObject::IsJSWeakCollection() const {
return IsJSWeakMap() || IsJSWeakSet();
}
@@ -434,8 +432,8 @@ bool HeapObject::IsAccessCheckNeeded() const {
bool HeapObject::IsStruct() const {
switch (map()->instance_type()) {
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
return true;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
@@ -472,8 +470,6 @@ bool Object::IsMinusZero() const {
// Cast operations
CAST_ACCESSOR(AccessorPair)
-CAST_ACCESSOR(AllocationMemento)
-CAST_ACCESSOR(AllocationSite)
CAST_ACCESSOR(AsyncGeneratorRequest)
CAST_ACCESSOR(BigInt)
CAST_ACCESSOR(ObjectBoilerplateDescription)
@@ -487,18 +483,6 @@ CAST_ACCESSOR(FeedbackCell)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(JSAsyncFromSyncIterator)
-CAST_ACCESSOR(JSBoundFunction)
-CAST_ACCESSOR(JSDataView)
-CAST_ACCESSOR(JSDate)
-CAST_ACCESSOR(JSFunction)
-CAST_ACCESSOR(JSGlobalObject)
-CAST_ACCESSOR(JSGlobalProxy)
-CAST_ACCESSOR(JSMessageObject)
-CAST_ACCESSOR(JSObject)
-CAST_ACCESSOR(JSReceiver)
-CAST_ACCESSOR(JSStringIterator)
-CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(LayoutDescriptor)
CAST_ACCESSOR(MutableHeapNumber)
@@ -511,7 +495,6 @@ CAST_ACCESSOR(ObjectHashTable)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(OrderedHashMap)
CAST_ACCESSOR(OrderedHashSet)
-CAST_ACCESSOR(PropertyArray)
CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
@@ -519,8 +502,6 @@ CAST_ACCESSOR(SimpleNumberDictionary)
CAST_ACCESSOR(SmallOrderedHashMap)
CAST_ACCESSOR(SmallOrderedHashSet)
CAST_ACCESSOR(Smi)
-CAST_ACCESSOR(SourcePositionTableWithFrameCache)
-CAST_ACCESSOR(StackFrameInfo)
CAST_ACCESSOR(StringSet)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
@@ -727,14 +708,6 @@ MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
return GetProperty(&it);
}
-MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<Name> name) {
- LookupIterator it(isolate, receiver, name, receiver);
- if (!it.IsFound()) return it.factory()->undefined_value();
- return Object::GetProperty(&it);
-}
-
MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
uint32_t index) {
LookupIterator it(isolate, object, index);
@@ -742,79 +715,15 @@ MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
return GetProperty(&it);
}
-MaybeHandle<Object> JSReceiver::GetElement(Isolate* isolate,
- Handle<JSReceiver> receiver,
- uint32_t index) {
- LookupIterator it(isolate, receiver, index, receiver);
- if (!it.IsFound()) return it.factory()->undefined_value();
- return Object::GetProperty(&it);
-}
-
-Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
- Handle<Name> name) {
- LookupIterator it(object, name, object,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- if (!it.IsFound()) return it.factory()->undefined_value();
- return GetDataProperty(&it);
-}
-
MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
uint32_t index, Handle<Object> value,
LanguageMode language_mode) {
LookupIterator it(isolate, object, index);
MAYBE_RETURN_NULL(
- SetProperty(&it, value, language_mode, MAY_BE_STORE_FROM_KEYED));
+ SetProperty(&it, value, language_mode, StoreOrigin::kMaybeKeyed));
return value;
}
-MaybeHandle<Object> JSReceiver::GetPrototype(Isolate* isolate,
- Handle<JSReceiver> receiver) {
- // We don't expect access checks to be needed on JSProxy objects.
- DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
- PrototypeIterator iter(isolate, receiver, kStartAtReceiver,
- PrototypeIterator::END_AT_NON_HIDDEN);
- do {
- if (!iter.AdvanceFollowingProxies()) return MaybeHandle<Object>();
- } while (!iter.IsAtEnd());
- return PrototypeIterator::GetCurrent(iter);
-}
-
-MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
- Handle<JSReceiver> receiver,
- const char* name) {
- Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
- return GetProperty(isolate, receiver, str);
-}
-
-// static
-V8_WARN_UNUSED_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
- Handle<JSReceiver> object) {
- return KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
- ALL_PROPERTIES,
- GetKeysConversion::kConvertToString);
-}
-
-bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
- DisallowHeapAllocation no_gc;
- HeapObject* prototype = HeapObject::cast(object->map()->prototype());
- ReadOnlyRoots roots(isolate);
- HeapObject* null = roots.null_value();
- HeapObject* empty_fixed_array = roots.empty_fixed_array();
- HeapObject* empty_slow_element_dictionary =
- roots.empty_slow_element_dictionary();
- while (prototype != null) {
- Map* map = prototype->map();
- if (map->IsCustomElementsReceiverMap()) return false;
- HeapObject* elements = JSObject::cast(prototype)->elements();
- if (elements != empty_fixed_array &&
- elements != empty_slow_element_dictionary) {
- return false;
- }
- prototype = HeapObject::cast(map->prototype());
- }
- return true;
-}
-
Object** HeapObject::RawField(const HeapObject* obj, int byte_offset) {
return reinterpret_cast<Object**>(FIELD_ADDR(obj, byte_offset));
}
@@ -995,282 +904,6 @@ int HeapNumberBase::get_sign() {
return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}
-ACCESSORS(JSReceiver, raw_properties_or_hash, Object, kPropertiesOrHashOffset)
-
-FixedArrayBase* JSObject::elements() const {
- Object* array = READ_FIELD(this, kElementsOffset);
- return static_cast<FixedArrayBase*>(array);
-}
-
-bool AllocationSite::HasWeakNext() const {
- return map() == GetReadOnlyRoots().allocation_site_map();
-}
-
-void AllocationSite::Initialize() {
- set_transition_info_or_boilerplate(Smi::kZero);
- SetElementsKind(GetInitialFastElementsKind());
- set_nested_site(Smi::kZero);
- set_pretenure_data(0);
- set_pretenure_create_count(0);
- set_dependent_code(
- DependentCode::cast(GetReadOnlyRoots().empty_weak_fixed_array()),
- SKIP_WRITE_BARRIER);
-}
-
-bool AllocationSite::IsZombie() const {
- return pretenure_decision() == kZombie;
-}
-
-bool AllocationSite::IsMaybeTenure() const {
- return pretenure_decision() == kMaybeTenure;
-}
-
-bool AllocationSite::PretenuringDecisionMade() const {
- return pretenure_decision() != kUndecided;
-}
-
-
-void AllocationSite::MarkZombie() {
- DCHECK(!IsZombie());
- Initialize();
- set_pretenure_decision(kZombie);
-}
-
-ElementsKind AllocationSite::GetElementsKind() const {
- return ElementsKindBits::decode(transition_info());
-}
-
-
-void AllocationSite::SetElementsKind(ElementsKind kind) {
- set_transition_info(ElementsKindBits::update(transition_info(), kind));
-}
-
-bool AllocationSite::CanInlineCall() const {
- return DoNotInlineBit::decode(transition_info()) == 0;
-}
-
-
-void AllocationSite::SetDoNotInlineCall() {
- set_transition_info(DoNotInlineBit::update(transition_info(), true));
-}
-
-bool AllocationSite::PointsToLiteral() const {
- Object* raw_value = transition_info_or_boilerplate();
- DCHECK_EQ(!raw_value->IsSmi(),
- raw_value->IsJSArray() || raw_value->IsJSObject());
- return !raw_value->IsSmi();
-}
-
-
-// Heuristic: We only need to create allocation site info if the boilerplate
-// elements kind is the initial elements kind.
-bool AllocationSite::ShouldTrack(ElementsKind boilerplate_elements_kind) {
- return IsSmiElementsKind(boilerplate_elements_kind);
-}
-
-inline bool AllocationSite::CanTrack(InstanceType type) {
- if (FLAG_allocation_site_pretenuring) {
- // TurboFan doesn't care at all about String pretenuring feedback,
- // so don't bother even trying to track that.
- return type == JS_ARRAY_TYPE || type == JS_OBJECT_TYPE;
- }
- return type == JS_ARRAY_TYPE;
-}
-
-AllocationSite::PretenureDecision AllocationSite::pretenure_decision() const {
- return PretenureDecisionBits::decode(pretenure_data());
-}
-
-void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
- int32_t value = pretenure_data();
- set_pretenure_data(PretenureDecisionBits::update(value, decision));
-}
-
-bool AllocationSite::deopt_dependent_code() const {
- return DeoptDependentCodeBit::decode(pretenure_data());
-}
-
-void AllocationSite::set_deopt_dependent_code(bool deopt) {
- int32_t value = pretenure_data();
- set_pretenure_data(DeoptDependentCodeBit::update(value, deopt));
-}
-
-int AllocationSite::memento_found_count() const {
- return MementoFoundCountBits::decode(pretenure_data());
-}
-
-inline void AllocationSite::set_memento_found_count(int count) {
- int32_t value = pretenure_data();
- // Verify that we can count more mementos than we can possibly find in one
- // new space collection.
- DCHECK((GetHeap()->MaxSemiSpaceSize() /
- (Heap::kMinObjectSizeInWords * kPointerSize +
- AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
- DCHECK_LT(count, MementoFoundCountBits::kMax);
- set_pretenure_data(MementoFoundCountBits::update(value, count));
-}
-
-int AllocationSite::memento_create_count() const {
- return pretenure_create_count();
-}
-
-void AllocationSite::set_memento_create_count(int count) {
- set_pretenure_create_count(count);
-}
-
-bool AllocationSite::IncrementMementoFoundCount(int increment) {
- if (IsZombie()) return false;
-
- int value = memento_found_count();
- set_memento_found_count(value + increment);
- return memento_found_count() >= kPretenureMinimumCreated;
-}
-
-
-inline void AllocationSite::IncrementMementoCreateCount() {
- DCHECK(FLAG_allocation_site_pretenuring);
- int value = memento_create_count();
- set_memento_create_count(value + 1);
-}
-
-bool AllocationMemento::IsValid() const {
- return allocation_site()->IsAllocationSite() &&
- !AllocationSite::cast(allocation_site())->IsZombie();
-}
-
-AllocationSite* AllocationMemento::GetAllocationSite() const {
- DCHECK(IsValid());
- return AllocationSite::cast(allocation_site());
-}
-
-Address AllocationMemento::GetAllocationSiteUnchecked() const {
- return reinterpret_cast<Address>(allocation_site());
-}
-
-void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
- JSObject::ValidateElements(*object);
- ElementsKind elements_kind = object->map()->elements_kind();
- if (!IsObjectElementsKind(elements_kind)) {
- if (IsHoleyElementsKind(elements_kind)) {
- TransitionElementsKind(object, HOLEY_ELEMENTS);
- } else {
- TransitionElementsKind(object, PACKED_ELEMENTS);
- }
- }
-}
-
-
-void JSObject::EnsureCanContainElements(Handle<JSObject> object,
- Object** objects,
- uint32_t count,
- EnsureElementsMode mode) {
- ElementsKind current_kind = object->GetElementsKind();
- ElementsKind target_kind = current_kind;
- {
- DisallowHeapAllocation no_allocation;
- DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
- bool is_holey = IsHoleyElementsKind(current_kind);
- if (current_kind == HOLEY_ELEMENTS) return;
- Object* the_hole = object->GetReadOnlyRoots().the_hole_value();
- for (uint32_t i = 0; i < count; ++i) {
- Object* current = *objects++;
- if (current == the_hole) {
- is_holey = true;
- target_kind = GetHoleyElementsKind(target_kind);
- } else if (!current->IsSmi()) {
- if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
- if (IsSmiElementsKind(target_kind)) {
- if (is_holey) {
- target_kind = HOLEY_DOUBLE_ELEMENTS;
- } else {
- target_kind = PACKED_DOUBLE_ELEMENTS;
- }
- }
- } else if (is_holey) {
- target_kind = HOLEY_ELEMENTS;
- break;
- } else {
- target_kind = PACKED_ELEMENTS;
- }
- }
- }
- }
- if (target_kind != current_kind) {
- TransitionElementsKind(object, target_kind);
- }
-}
-
-
-void JSObject::EnsureCanContainElements(Handle<JSObject> object,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode) {
- ReadOnlyRoots roots = object->GetReadOnlyRoots();
- if (elements->map() != roots.fixed_double_array_map()) {
- DCHECK(elements->map() == roots.fixed_array_map() ||
- elements->map() == roots.fixed_cow_array_map());
- if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
- mode = DONT_ALLOW_DOUBLE_ELEMENTS;
- }
- Object** objects =
- Handle<FixedArray>::cast(elements)->GetFirstElementAddress();
- EnsureCanContainElements(object, objects, length, mode);
- return;
- }
-
- DCHECK(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (object->GetElementsKind() == HOLEY_SMI_ELEMENTS) {
- TransitionElementsKind(object, HOLEY_DOUBLE_ELEMENTS);
- } else if (object->GetElementsKind() == PACKED_SMI_ELEMENTS) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (uint32_t i = 0; i < length; ++i) {
- if (double_array->is_the_hole(i)) {
- TransitionElementsKind(object, HOLEY_DOUBLE_ELEMENTS);
- return;
- }
- }
- TransitionElementsKind(object, PACKED_DOUBLE_ELEMENTS);
- }
-}
-
-
-void JSObject::SetMapAndElements(Handle<JSObject> object,
- Handle<Map> new_map,
- Handle<FixedArrayBase> value) {
- JSObject::MigrateToMap(object, new_map);
- DCHECK((object->map()->has_fast_smi_or_object_elements() ||
- (*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
- object->map()->has_fast_string_wrapper_elements()) ==
- (value->map() == object->GetReadOnlyRoots().fixed_array_map() ||
- value->map() == object->GetReadOnlyRoots().fixed_cow_array_map()));
- DCHECK((*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
- (object->map()->has_fast_double_elements() ==
- value->IsFixedDoubleArray()));
- object->set_elements(*value);
-}
-
-
-void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, value, mode);
-}
-
-
-void JSObject::initialize_elements() {
- FixedArrayBase* elements = map()->GetInitialElements();
- WRITE_FIELD(this, kElementsOffset, elements);
-}
-
-
-InterceptorInfo* JSObject::GetIndexedInterceptor() {
- return map()->GetIndexedInterceptor();
-}
-
-InterceptorInfo* JSObject::GetNamedInterceptor() {
- return map()->GetNamedInterceptor();
-}
-
double Oddball::to_number_raw() const {
return READ_DOUBLE_FIELD(this, kToNumberRawOffset);
}
@@ -1316,18 +949,6 @@ void PropertyCell::set_property_details(PropertyDetails details) {
set_property_details_raw(details.AsSmi());
}
-int JSObject::GetHeaderSize() const { return GetHeaderSize(map()); }
-
-int JSObject::GetHeaderSize(const Map* map) {
- // Check for the most common kind of JavaScript object before
- // falling into the generic switch. This speeds up the internal
- // field operations considerably on average.
- InstanceType instance_type = map->instance_type();
- return instance_type == JS_OBJECT_TYPE
- ? JSObject::kHeaderSize
- : GetHeaderSize(instance_type, map->has_prototype_slot());
-}
-
inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
return instance_type <= LAST_SPECIAL_RECEIVER_TYPE;
}
@@ -1351,187 +972,6 @@ bool Map::IsCustomElementsReceiverMap() const {
return IsCustomElementsReceiverInstanceType(instance_type());
}
-// static
-int JSObject::GetEmbedderFieldCount(const Map* map) {
- int instance_size = map->instance_size();
- if (instance_size == kVariableSizeSentinel) return 0;
- return ((instance_size - GetHeaderSize(map)) >> kPointerSizeLog2) -
- map->GetInObjectProperties();
-}
-
-int JSObject::GetEmbedderFieldCount() const {
- return GetEmbedderFieldCount(map());
-}
-
-int JSObject::GetEmbedderFieldOffset(int index) {
- DCHECK(index < GetEmbedderFieldCount() && index >= 0);
- return GetHeaderSize() + (kPointerSize * index);
-}
-
-Object* JSObject::GetEmbedderField(int index) {
- DCHECK(index < GetEmbedderFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
-}
-
-void JSObject::SetEmbedderField(int index, Object* value) {
- DCHECK(index < GetEmbedderFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- int offset = GetHeaderSize() + (kPointerSize * index);
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset, value);
-}
-
-void JSObject::SetEmbedderField(int index, Smi* value) {
- DCHECK(index < GetEmbedderFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- int offset = GetHeaderSize() + (kPointerSize * index);
- WRITE_FIELD(this, offset, value);
-}
-
-
-bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
- if (!FLAG_unbox_double_fields) return false;
- return map()->IsUnboxedDoubleField(index);
-}
-
-// Access fast-case object properties at index. The use of these routines
-// is needed to correctly distinguish between properties stored in-object and
-// properties stored in the properties array.
-Object* JSObject::RawFastPropertyAt(FieldIndex index) {
- DCHECK(!IsUnboxedDoubleField(index));
- if (index.is_inobject()) {
- return READ_FIELD(this, index.offset());
- } else {
- return property_array()->get(index.outobject_array_index());
- }
-}
-
-
-double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
- DCHECK(IsUnboxedDoubleField(index));
- return READ_DOUBLE_FIELD(this, index.offset());
-}
-
-uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) {
- DCHECK(IsUnboxedDoubleField(index));
- return READ_UINT64_FIELD(this, index.offset());
-}
-
-void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
- if (index.is_inobject()) {
- int offset = index.offset();
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset, value);
- } else {
- property_array()->set(index.outobject_array_index(), value);
- }
-}
-
-void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
- uint64_t bits) {
- // Double unboxing is enabled only on 64-bit platforms.
- DCHECK_EQ(kDoubleSize, kPointerSize);
- Address field_addr = FIELD_ADDR(this, index.offset());
- base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(field_addr),
- static_cast<base::AtomicWord>(bits));
-}
-
-void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
- if (IsUnboxedDoubleField(index)) {
- DCHECK(value->IsMutableHeapNumber());
- // Ensure that all bits of the double value are preserved.
- RawFastDoublePropertyAsBitsAtPut(
- index, MutableHeapNumber::cast(value)->value_as_bits());
- } else {
- RawFastPropertyAtPut(index, value);
- }
-}
-
-void JSObject::WriteToField(int descriptor, PropertyDetails details,
- Object* value) {
- DCHECK_EQ(kField, details.location());
- DCHECK_EQ(kData, details.kind());
- DisallowHeapAllocation no_gc;
- FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
- if (details.representation().IsDouble()) {
- // Nothing more to be done.
- if (value->IsUninitialized()) {
- return;
- }
- // Manipulating the signaling NaN used for the hole and uninitialized
- // double field sentinel in C++, e.g. with bit_cast or value()/set_value(),
- // will change its value on ia32 (the x87 stack is used to return values
- // and stores to the stack silently clear the signalling bit).
- uint64_t bits;
- if (value->IsSmi()) {
- bits = bit_cast<uint64_t>(static_cast<double>(Smi::ToInt(value)));
- } else {
- DCHECK(value->IsHeapNumber());
- bits = HeapNumber::cast(value)->value_as_bits();
- }
- if (IsUnboxedDoubleField(index)) {
- RawFastDoublePropertyAsBitsAtPut(index, bits);
- } else {
- auto box = MutableHeapNumber::cast(RawFastPropertyAt(index));
- box->set_value_as_bits(bits);
- }
- } else {
- RawFastPropertyAtPut(index, value);
- }
-}
-
-int JSObject::GetInObjectPropertyOffset(int index) {
- return map()->GetInObjectPropertyOffset(index);
-}
-
-
-Object* JSObject::InObjectPropertyAt(int index) {
- int offset = GetInObjectPropertyOffset(index);
- return READ_FIELD(this, offset);
-}
-
-
-Object* JSObject::InObjectPropertyAtPut(int index,
- Object* value,
- WriteBarrierMode mode) {
- // Adjust for the number of properties stored in the object.
- int offset = GetInObjectPropertyOffset(index);
- WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
- return value;
-}
-
-
-void JSObject::InitializeBody(Map* map, int start_offset,
- Object* pre_allocated_value,
- Object* filler_value) {
- DCHECK(!filler_value->IsHeapObject() || !Heap::InNewSpace(filler_value));
- DCHECK(!pre_allocated_value->IsHeapObject() ||
- !Heap::InNewSpace(pre_allocated_value));
- int size = map->instance_size();
- int offset = start_offset;
- if (filler_value != pre_allocated_value) {
- int end_of_pre_allocated_offset =
- size - (map->UnusedPropertyFields() * kPointerSize);
- DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
- while (offset < end_of_pre_allocated_offset) {
- WRITE_FIELD(this, offset, pre_allocated_value);
- offset += kPointerSize;
- }
- }
- while (offset < size) {
- WRITE_FIELD(this, offset, filler_value);
- offset += kPointerSize;
- }
-}
-
void Struct::InitializeBody(int object_size) {
Object* value = GetReadOnlyRoots().undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
@@ -1559,21 +999,6 @@ void Object::VerifyApiCallResultType() {
#endif // DEBUG
}
-Object* PropertyArray::get(int index) const {
- DCHECK_GE(index, 0);
- DCHECK_LE(index, this->length());
- return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
-}
-
-void PropertyArray::set(int index, Object* value) {
- DCHECK(IsPropertyArray());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset, value);
-}
-
int RegExpMatchInfo::NumberOfCaptureRegisters() {
DCHECK_GE(length(), kLastMatchOverhead);
Object* obj = get(kNumberOfCapturesIndex);
@@ -1665,23 +1090,11 @@ Address HeapObject::GetFieldAddress(int field_offset) const {
return FIELD_ADDR(this, field_offset);
}
-void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
-}
-
-Object** PropertyArray::data_start() {
- return HeapObject::RawField(this, kHeaderSize);
-}
-
ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
int DescriptorArray::number_of_descriptors() const {
- return Smi::ToInt(get(kDescriptorLengthIndex)->ToSmi());
+ return Smi::ToInt(get(kDescriptorLengthIndex)->cast<Smi>());
}
int DescriptorArray::number_of_descriptors_storage() const {
@@ -1707,7 +1120,7 @@ void DescriptorArray::CopyEnumCacheFrom(DescriptorArray* array) {
}
EnumCache* DescriptorArray::GetEnumCache() {
- return EnumCache::cast(get(kEnumCacheIndex)->ToStrongHeapObject());
+ return EnumCache::cast(get(kEnumCacheIndex)->GetHeapObjectAssumeStrong());
}
// Perform a binary search in a fixed array.
@@ -1861,7 +1274,8 @@ MaybeObject** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) {
Name* DescriptorArray::GetKey(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
- return Name::cast(get(ToKeyIndex(descriptor_number))->ToStrongHeapObject());
+ return Name::cast(
+ get(ToKeyIndex(descriptor_number))->GetHeapObjectAssumeStrong());
}
@@ -1893,7 +1307,7 @@ int DescriptorArray::GetValueOffset(int descriptor_number) {
Object* DescriptorArray::GetStrongValue(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
- return get(ToValueIndex(descriptor_number))->ToObject();
+ return get(ToValueIndex(descriptor_number))->cast<Object>();
}
@@ -1909,7 +1323,7 @@ MaybeObject* DescriptorArray::GetValue(int descriptor_number) {
PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
MaybeObject* details = get(ToDetailsIndex(descriptor_number));
- return PropertyDetails(details->ToSmi());
+ return PropertyDetails(details->cast<Smi>());
}
int DescriptorArray::GetFieldIndex(int descriptor_number) {
@@ -2004,8 +1418,8 @@ uint32_t StringTableShape::HashForObject(Isolate* isolate, Object* object) {
return String::cast(object)->Hash();
}
-int StringTableShape::GetMapRootIndex() {
- return Heap::kStringTableMapRootIndex;
+RootIndex StringTableShape::GetMapRootIndex() {
+ return RootIndex::kStringTableMap;
}
bool NumberDictionary::requires_slow_elements() {
@@ -2038,37 +1452,6 @@ DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
-int PropertyArray::length() const {
- Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
- return LengthField::decode(value);
-}
-
-void PropertyArray::initialize_length(int len) {
- SLOW_DCHECK(len >= 0);
- SLOW_DCHECK(len < LengthField::kMax);
- WRITE_FIELD(this, kLengthAndHashOffset, Smi::FromInt(len));
-}
-
-int PropertyArray::synchronized_length() const {
- Object* value_obj = ACQUIRE_READ_FIELD(this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
- return LengthField::decode(value);
-}
-
-int PropertyArray::Hash() const {
- Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
- return HashField::decode(value);
-}
-
-void PropertyArray::SetHash(int hash) {
- Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
- value = HashField::update(value, hash);
- WRITE_FIELD(this, kLengthAndHashOffset, Smi::FromInt(value));
-}
-
SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
@@ -2078,7 +1461,7 @@ int FreeSpace::Size() { return size(); }
FreeSpace* FreeSpace::next() {
DCHECK(map() == Heap::FromWritableHeapObject(this)->root(
- Heap::kFreeSpaceMapRootIndex) ||
+ RootIndex::kFreeSpaceMap) ||
(!Heap::FromWritableHeapObject(this)->deserialization_complete() &&
map() == nullptr));
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
@@ -2088,7 +1471,7 @@ FreeSpace* FreeSpace::next() {
void FreeSpace::set_next(FreeSpace* next) {
DCHECK(map() == Heap::FromWritableHeapObject(this)->root(
- Heap::kFreeSpaceMapRootIndex) ||
+ RootIndex::kFreeSpaceMap) ||
(!Heap::FromWritableHeapObject(this)->deserialization_complete() &&
map() == nullptr));
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
@@ -2189,22 +1572,6 @@ int HeapObject::SizeFromMap(Map* map) const {
return reinterpret_cast<const Code*>(this)->CodeSize();
}
-Object* JSBoundFunction::raw_bound_target_function() const {
- return READ_FIELD(this, kBoundTargetFunctionOffset);
-}
-
-ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
- kBoundTargetFunctionOffset)
-ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
-ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
-
-ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, feedback_cell, FeedbackCell, kFeedbackCellOffset)
-
-ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
-ACCESSORS(JSGlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
-
-ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
ACCESSORS(AsyncGeneratorRequest, next, Object, kNextOffset)
SMI_ACCESSORS(AsyncGeneratorRequest, resume_mode, kResumeModeOffset)
@@ -2222,256 +1589,6 @@ ACCESSORS(TemplateObjectDescription, cooked_strings, FixedArray,
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
-ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
- kTransitionInfoOrBoilerplateOffset)
-
-JSObject* AllocationSite::boilerplate() const {
- DCHECK(PointsToLiteral());
- return JSObject::cast(transition_info_or_boilerplate());
-}
-
-void AllocationSite::set_boilerplate(JSObject* object, WriteBarrierMode mode) {
- set_transition_info_or_boilerplate(object, mode);
-}
-
-int AllocationSite::transition_info() const {
- DCHECK(!PointsToLiteral());
- return Smi::cast(transition_info_or_boilerplate())->value();
-}
-
-void AllocationSite::set_transition_info(int value) {
- DCHECK(!PointsToLiteral());
- set_transition_info_or_boilerplate(Smi::FromInt(value), SKIP_WRITE_BARRIER);
-}
-
-ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
-INT32_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
-INT32_ACCESSORS(AllocationSite, pretenure_create_count,
- kPretenureCreateCountOffset)
-ACCESSORS(AllocationSite, dependent_code, DependentCode,
- kDependentCodeOffset)
-ACCESSORS_CHECKED(AllocationSite, weak_next, Object, kWeakNextOffset,
- HasWeakNext())
-ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
-
-SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberIndex)
-SMI_ACCESSORS(StackFrameInfo, column_number, kColumnNumberIndex)
-SMI_ACCESSORS(StackFrameInfo, script_id, kScriptIdIndex)
-ACCESSORS(StackFrameInfo, script_name, Object, kScriptNameIndex)
-ACCESSORS(StackFrameInfo, script_name_or_source_url, Object,
- kScriptNameOrSourceUrlIndex)
-ACCESSORS(StackFrameInfo, function_name, Object, kFunctionNameIndex)
-SMI_ACCESSORS(StackFrameInfo, flag, kFlagIndex)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, kIsEvalBit)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, kIsConstructorBit)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, kIsWasmBit)
-SMI_ACCESSORS(StackFrameInfo, id, kIdIndex)
-
-ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
- kSourcePositionTableIndex)
-ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
- SimpleNumberDictionary, kStackFrameCacheIndex)
-
-
-FeedbackVector* JSFunction::feedback_vector() const {
- DCHECK(has_feedback_vector());
- return FeedbackVector::cast(feedback_cell()->value());
-}
-
-// Code objects that are marked for deoptimization are not considered to be
-// optimized. This is because the JSFunction might have been already
-// deoptimized but its code() still needs to be unlinked, which will happen on
-// its next activation.
-// TODO(jupvfranco): rename this function. Maybe RunOptimizedCode,
-// or IsValidOptimizedCode.
-bool JSFunction::IsOptimized() {
- return code()->kind() == Code::OPTIMIZED_FUNCTION &&
- !code()->marked_for_deoptimization();
-}
-
-bool JSFunction::HasOptimizedCode() {
- return IsOptimized() ||
- (has_feedback_vector() && feedback_vector()->has_optimized_code() &&
- !feedback_vector()->optimized_code()->marked_for_deoptimization());
-}
-
-bool JSFunction::HasOptimizationMarker() {
- return has_feedback_vector() && feedback_vector()->has_optimization_marker();
-}
-
-void JSFunction::ClearOptimizationMarker() {
- DCHECK(has_feedback_vector());
- feedback_vector()->ClearOptimizationMarker();
-}
-
-// Optimized code marked for deoptimization will tier back down to running
-// interpreted on its next activation, and already doesn't count as IsOptimized.
-bool JSFunction::IsInterpreted() {
- return code()->is_interpreter_trampoline_builtin() ||
- (code()->kind() == Code::OPTIMIZED_FUNCTION &&
- code()->marked_for_deoptimization());
-}
-
-bool JSFunction::ChecksOptimizationMarker() {
- return code()->checks_optimization_marker();
-}
-
-bool JSFunction::IsMarkedForOptimization() {
- return has_feedback_vector() && feedback_vector()->optimization_marker() ==
- OptimizationMarker::kCompileOptimized;
-}
-
-
-bool JSFunction::IsMarkedForConcurrentOptimization() {
- return has_feedback_vector() &&
- feedback_vector()->optimization_marker() ==
- OptimizationMarker::kCompileOptimizedConcurrent;
-}
-
-
-bool JSFunction::IsInOptimizationQueue() {
- return has_feedback_vector() && feedback_vector()->optimization_marker() ==
- OptimizationMarker::kInOptimizationQueue;
-}
-
-
-void JSFunction::CompleteInobjectSlackTrackingIfActive() {
- if (!has_prototype_slot()) return;
- if (has_initial_map() && initial_map()->IsInobjectSlackTrackingInProgress()) {
- initial_map()->CompleteInobjectSlackTracking(GetIsolate());
- }
-}
-
-AbstractCode* JSFunction::abstract_code() {
- if (IsInterpreted()) {
- return AbstractCode::cast(shared()->GetBytecodeArray());
- } else {
- return AbstractCode::cast(code());
- }
-}
-
-Code* JSFunction::code() { return Code::cast(READ_FIELD(this, kCodeOffset)); }
-
-void JSFunction::set_code(Code* value) {
- DCHECK(!Heap::InNewSpace(value));
- WRITE_FIELD(this, kCodeOffset, value);
- MarkingBarrier(this, HeapObject::RawField(this, kCodeOffset), value);
-}
-
-
-void JSFunction::set_code_no_write_barrier(Code* value) {
- DCHECK(!Heap::InNewSpace(value));
- WRITE_FIELD(this, kCodeOffset, value);
-}
-
-void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
- if (has_feedback_vector() && feedback_vector()->has_optimized_code()) {
- if (FLAG_trace_opt) {
- PrintF("[evicting entry from optimizing code feedback slot (%s) for ",
- reason);
- ShortPrint();
- PrintF("]\n");
- }
- feedback_vector()->ClearOptimizedCode();
- }
-}
-
-void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
- DCHECK(has_feedback_vector());
- DCHECK(ChecksOptimizationMarker());
- DCHECK(!HasOptimizedCode());
-
- feedback_vector()->SetOptimizationMarker(marker);
-}
-
-bool JSFunction::has_feedback_vector() const {
- return !feedback_cell()->value()->IsUndefined();
-}
-
-Context* JSFunction::context() {
- return Context::cast(READ_FIELD(this, kContextOffset));
-}
-
-bool JSFunction::has_context() const {
- return READ_FIELD(this, kContextOffset)->IsContext();
-}
-
-JSGlobalProxy* JSFunction::global_proxy() { return context()->global_proxy(); }
-
-Context* JSFunction::native_context() { return context()->native_context(); }
-
-
-void JSFunction::set_context(Object* value) {
- DCHECK(value->IsUndefined() || value->IsContext());
- WRITE_FIELD(this, kContextOffset, value);
- WRITE_BARRIER(this, kContextOffset, value);
-}
-
-ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
- kPrototypeOrInitialMapOffset, map()->has_prototype_slot())
-
-bool JSFunction::has_prototype_slot() const {
- return map()->has_prototype_slot();
-}
-
-Map* JSFunction::initial_map() {
- return Map::cast(prototype_or_initial_map());
-}
-
-
-bool JSFunction::has_initial_map() {
- DCHECK(has_prototype_slot());
- return prototype_or_initial_map()->IsMap();
-}
-
-
-bool JSFunction::has_instance_prototype() {
- DCHECK(has_prototype_slot());
- return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
-}
-
-bool JSFunction::has_prototype() {
- DCHECK(has_prototype_slot());
- return map()->has_non_instance_prototype() || has_instance_prototype();
-}
-
-bool JSFunction::has_prototype_property() {
- return (has_prototype_slot() && IsConstructor()) ||
- IsGeneratorFunction(shared()->kind());
-}
-
-bool JSFunction::PrototypeRequiresRuntimeLookup() {
- return !has_prototype_property() || map()->has_non_instance_prototype();
-}
-
-Object* JSFunction::instance_prototype() {
- DCHECK(has_instance_prototype());
- if (has_initial_map()) return initial_map()->prototype();
- // When there is no initial map and the prototype is a JSReceiver, the
- // initial map field is used for the prototype field.
- return prototype_or_initial_map();
-}
-
-
-Object* JSFunction::prototype() {
- DCHECK(has_prototype());
- // If the function's prototype property has been set to a non-JSReceiver
- // value, that value is stored in the constructor field of the map.
- if (map()->has_non_instance_prototype()) {
- Object* prototype = map()->GetConstructor();
- // The map must have a prototype in that field, not a back pointer.
- DCHECK(!prototype->IsMap());
- DCHECK(!prototype->IsFunctionTemplateInfo());
- return prototype;
- }
- return instance_prototype();
-}
-
-
-bool JSFunction::is_compiled() {
- return code()->builtin_index() != Builtins::kCompileLazy;
-}
-
// static
bool Foreign::IsNormalized(Object* value) {
if (value == Smi::kZero) return true;
@@ -2479,11 +1596,11 @@ bool Foreign::IsNormalized(Object* value) {
}
Address Foreign::foreign_address() {
- return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
+ return READ_UINTPTR_FIELD(this, kForeignAddressOffset);
}
void Foreign::set_foreign_address(Address value) {
- WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
+ WRITE_UINTPTR_FIELD(this, kForeignAddressOffset, value);
}
template <class Derived>
@@ -2494,158 +1611,6 @@ void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
WRITE_BARRIER(this, static_cast<int>(entry_offset), value);
}
-ACCESSORS(JSValue, value, Object, kValueOffset)
-
-
-ACCESSORS(JSDate, value, Object, kValueOffset)
-ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
-ACCESSORS(JSDate, year, Object, kYearOffset)
-ACCESSORS(JSDate, month, Object, kMonthOffset)
-ACCESSORS(JSDate, day, Object, kDayOffset)
-ACCESSORS(JSDate, weekday, Object, kWeekdayOffset)
-ACCESSORS(JSDate, hour, Object, kHourOffset)
-ACCESSORS(JSDate, min, Object, kMinOffset)
-ACCESSORS(JSDate, sec, Object, kSecOffset)
-
-
-SMI_ACCESSORS(JSMessageObject, type, kTypeOffset)
-ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
-ACCESSORS(JSMessageObject, script, Script, kScriptOffset)
-ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
-SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
-SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
-SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
-
-ElementsKind JSObject::GetElementsKind() const {
- ElementsKind kind = map()->elements_kind();
-#if VERIFY_HEAP && DEBUG
- FixedArrayBase* fixed_array =
- reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
-
- // If a GC was caused while constructing this object, the elements
- // pointer may point to a one pointer filler map.
- if (ElementsAreSafeToExamine()) {
- Map* map = fixed_array->map();
- if (IsSmiOrObjectElementsKind(kind)) {
- DCHECK(map == GetReadOnlyRoots().fixed_array_map() ||
- map == GetReadOnlyRoots().fixed_cow_array_map());
- } else if (IsDoubleElementsKind(kind)) {
- DCHECK(fixed_array->IsFixedDoubleArray() ||
- fixed_array == GetReadOnlyRoots().empty_fixed_array());
- } else if (kind == DICTIONARY_ELEMENTS) {
- DCHECK(fixed_array->IsFixedArray());
- DCHECK(fixed_array->IsDictionary());
- } else {
- DCHECK(kind > DICTIONARY_ELEMENTS);
- }
- DCHECK(!IsSloppyArgumentsElementsKind(kind) ||
- (elements()->IsFixedArray() && elements()->length() >= 2));
- }
-#endif
- return kind;
-}
-
-bool JSObject::HasObjectElements() {
- return IsObjectElementsKind(GetElementsKind());
-}
-
-bool JSObject::HasSmiElements() { return IsSmiElementsKind(GetElementsKind()); }
-
-bool JSObject::HasSmiOrObjectElements() {
- return IsSmiOrObjectElementsKind(GetElementsKind());
-}
-
-bool JSObject::HasDoubleElements() {
- return IsDoubleElementsKind(GetElementsKind());
-}
-
-bool JSObject::HasHoleyElements() {
- return IsHoleyElementsKind(GetElementsKind());
-}
-
-
-bool JSObject::HasFastElements() {
- return IsFastElementsKind(GetElementsKind());
-}
-
-bool JSObject::HasFastPackedElements() {
- return IsFastPackedElementsKind(GetElementsKind());
-}
-
-bool JSObject::HasDictionaryElements() {
- return GetElementsKind() == DICTIONARY_ELEMENTS;
-}
-
-
-bool JSObject::HasFastArgumentsElements() {
- return GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
-}
-
-
-bool JSObject::HasSlowArgumentsElements() {
- return GetElementsKind() == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
-}
-
-
-bool JSObject::HasSloppyArgumentsElements() {
- return IsSloppyArgumentsElementsKind(GetElementsKind());
-}
-
-bool JSObject::HasStringWrapperElements() {
- return IsStringWrapperElementsKind(GetElementsKind());
-}
-
-bool JSObject::HasFastStringWrapperElements() {
- return GetElementsKind() == FAST_STRING_WRAPPER_ELEMENTS;
-}
-
-bool JSObject::HasSlowStringWrapperElements() {
- return GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS;
-}
-
-bool JSObject::HasFixedTypedArrayElements() {
- DCHECK_NOT_NULL(elements());
- return map()->has_fixed_typed_array_elements();
-}
-
-#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
- bool JSObject::HasFixed##Type##Elements() { \
- HeapObject* array = elements(); \
- DCHECK_NOT_NULL(array); \
- if (!array->IsHeapObject()) return false; \
- return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
- }
-
-TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
-
-#undef FIXED_TYPED_ELEMENTS_CHECK
-
-
-bool JSObject::HasNamedInterceptor() {
- return map()->has_named_interceptor();
-}
-
-
-bool JSObject::HasIndexedInterceptor() {
- return map()->has_indexed_interceptor();
-}
-
-void JSGlobalObject::set_global_dictionary(GlobalDictionary* dictionary) {
- DCHECK(IsJSGlobalObject());
- set_raw_properties_or_hash(dictionary);
-}
-
-GlobalDictionary* JSGlobalObject::global_dictionary() {
- DCHECK(!HasFastProperties());
- DCHECK(IsJSGlobalObject());
- return GlobalDictionary::cast(raw_properties_or_hash());
-}
-
-NumberDictionary* JSObject::element_dictionary() {
- DCHECK(HasDictionaryElements() || HasSlowStringWrapperElements());
- return NumberDictionary::cast(elements());
-}
-
// static
Maybe<bool> Object::GreaterThan(Isolate* isolate, Handle<Object> x,
Handle<Object> y) {
@@ -2729,9 +1694,9 @@ MaybeHandle<Object> Object::SetPropertyOrElement(Isolate* isolate,
Handle<Name> name,
Handle<Object> value,
LanguageMode language_mode,
- StoreFromKeyed store_mode) {
+ StoreOrigin store_origin) {
LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
- MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_mode));
+ MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_origin));
return value;
}
@@ -2744,134 +1709,6 @@ MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
}
-void JSReceiver::initialize_properties() {
- Heap* heap = GetHeap();
- ReadOnlyRoots roots(heap);
- DCHECK(!Heap::InNewSpace(roots.empty_fixed_array()));
- DCHECK(!Heap::InNewSpace(heap->empty_property_dictionary()));
- if (map()->is_dictionary_map()) {
- WRITE_FIELD(this, kPropertiesOrHashOffset,
- heap->empty_property_dictionary());
- } else {
- WRITE_FIELD(this, kPropertiesOrHashOffset, roots.empty_fixed_array());
- }
-}
-
-bool JSReceiver::HasFastProperties() const {
- DCHECK(
- raw_properties_or_hash()->IsSmi() ||
- (raw_properties_or_hash()->IsDictionary() == map()->is_dictionary_map()));
- return !map()->is_dictionary_map();
-}
-
-NameDictionary* JSReceiver::property_dictionary() const {
- DCHECK(!IsJSGlobalObject());
- DCHECK(!HasFastProperties());
-
- Object* prop = raw_properties_or_hash();
- if (prop->IsSmi()) {
- return GetHeap()->empty_property_dictionary();
- }
-
- return NameDictionary::cast(prop);
-}
-
-// TODO(gsathya): Pass isolate directly to this function and access
-// the heap from this.
-PropertyArray* JSReceiver::property_array() const {
- DCHECK(HasFastProperties());
-
- Object* prop = raw_properties_or_hash();
- if (prop->IsSmi() || prop == GetReadOnlyRoots().empty_fixed_array()) {
- return GetReadOnlyRoots().empty_property_array();
- }
-
- return PropertyArray::cast(prop);
-}
-
-Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
- Handle<Name> name) {
- LookupIterator it = LookupIterator::PropertyOrElement(object->GetIsolate(),
- object, name, object);
- return HasProperty(&it);
-}
-
-
-Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
- uint32_t index) {
- if (object->IsJSModuleNamespace()) return Just(false);
-
- if (object->IsJSObject()) { // Shortcut.
- LookupIterator it(object->GetIsolate(), object, index, object,
- LookupIterator::OWN);
- return HasProperty(&it);
- }
-
- Maybe<PropertyAttributes> attributes =
- JSReceiver::GetOwnPropertyAttributes(object, index);
- MAYBE_RETURN(attributes, Nothing<bool>());
- return Just(attributes.FromJust() != ABSENT);
-}
-
-Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
- Handle<JSReceiver> object, Handle<Name> name) {
- LookupIterator it = LookupIterator::PropertyOrElement(object->GetIsolate(),
- object, name, object);
- return GetPropertyAttributes(&it);
-}
-
-
-Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
- Handle<JSReceiver> object, Handle<Name> name) {
- LookupIterator it = LookupIterator::PropertyOrElement(
- object->GetIsolate(), object, name, object, LookupIterator::OWN);
- return GetPropertyAttributes(&it);
-}
-
-Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
- Handle<JSReceiver> object, uint32_t index) {
- LookupIterator it(object->GetIsolate(), object, index, object,
- LookupIterator::OWN);
- return GetPropertyAttributes(&it);
-}
-
-Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
- LookupIterator it(object->GetIsolate(), object, index, object);
- return HasProperty(&it);
-}
-
-
-Maybe<PropertyAttributes> JSReceiver::GetElementAttributes(
- Handle<JSReceiver> object, uint32_t index) {
- Isolate* isolate = object->GetIsolate();
- LookupIterator it(isolate, object, index, object);
- return GetPropertyAttributes(&it);
-}
-
-
-Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttributes(
- Handle<JSReceiver> object, uint32_t index) {
- Isolate* isolate = object->GetIsolate();
- LookupIterator it(isolate, object, index, object, LookupIterator::OWN);
- return GetPropertyAttributes(&it);
-}
-
-
-bool JSGlobalObject::IsDetached() {
- return JSGlobalProxy::cast(global_proxy())->IsDetachedFrom(this);
-}
-
-
-bool JSGlobalProxy::IsDetachedFrom(JSGlobalObject* global) const {
- const PrototypeIterator iter(this->GetIsolate(),
- const_cast<JSGlobalProxy*>(this));
- return iter.GetCurrent() != global;
-}
-
-inline int JSGlobalProxy::SizeWithEmbedderFields(int embedder_field_count) {
- DCHECK_GE(embedder_field_count, 0);
- return kSize + embedder_field_count * kPointerSize;
-}
Object* AccessorPair::get(AccessorComponent component) {
return component == ACCESSOR_GETTER ? getter() : setter();
@@ -2936,14 +1773,14 @@ Object* GlobalDictionaryShape::Unwrap(Object* object) {
return PropertyCell::cast(object)->name();
}
-int GlobalDictionaryShape::GetMapRootIndex() {
- return Heap::kGlobalDictionaryMapRootIndex;
+RootIndex GlobalDictionaryShape::GetMapRootIndex() {
+ return RootIndex::kGlobalDictionaryMap;
}
Name* NameDictionary::NameAt(int entry) { return Name::cast(KeyAt(entry)); }
-int NameDictionaryShape::GetMapRootIndex() {
- return Heap::kNameDictionaryMapRootIndex;
+RootIndex NameDictionaryShape::GetMapRootIndex() {
+ return RootIndex::kNameDictionaryMap;
}
PropertyCell* GlobalDictionary::CellAt(int entry) {
@@ -2995,12 +1832,12 @@ Handle<Object> NumberDictionaryBaseShape::AsHandle(Isolate* isolate,
return isolate->factory()->NewNumberFromUint(key);
}
-int NumberDictionaryShape::GetMapRootIndex() {
- return Heap::kNumberDictionaryMapRootIndex;
+RootIndex NumberDictionaryShape::GetMapRootIndex() {
+ return RootIndex::kNumberDictionaryMap;
}
-int SimpleNumberDictionaryShape::GetMapRootIndex() {
- return Heap::kSimpleNumberDictionaryMapRootIndex;
+RootIndex SimpleNumberDictionaryShape::GetMapRootIndex() {
+ return RootIndex::kSimpleNumberDictionaryMap;
}
bool NameDictionaryShape::IsMatch(Handle<Name> key, Object* other) {
@@ -3169,15 +2006,6 @@ static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Object> key,
PACKED_ELEMENTS, 2);
}
-ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
-ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
-
-ACCESSORS(JSAsyncFromSyncIterator, sync_iterator, JSReceiver,
- kSyncIteratorOffset)
-ACCESSORS(JSAsyncFromSyncIterator, next, Object, kNextOffset)
-
-ACCESSORS(JSStringIterator, string, String, kStringOffset)
-SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
bool ScopeInfo::IsAsmModule() const { return AsmModuleField::decode(Flags()); }
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index d76c036ba9..38dd9d1c52 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -14,30 +14,38 @@
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-collator-inl.h"
-#endif // V8_INTL_SUPPORT
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-break-iterator-inl.h"
+#include "src/objects/js-collator-inl.h"
+#endif // V8_INTL_SUPPORT
#include "src/objects/js-collection-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-date-time-format-inl.h"
+#endif // V8_INTL_SUPPORT
#include "src/objects/js-generator-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-list-format-inl.h"
#include "src/objects/js-locale-inl.h"
+#include "src/objects/js-number-format-inl.h"
+#include "src/objects/js-plural-rules-inl.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator-inl.h"
#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-plural-rules-inl.h"
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/js-segmenter-inl.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
+#include "src/objects/microtask-queue-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions-inl.h"
@@ -112,6 +120,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os);
break;
case FIXED_ARRAY_TYPE:
+ case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -187,6 +196,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
// TODO(titzer): debug printing for more wasm objects
+ case WASM_EXCEPTION_TYPE:
case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
case WASM_TABLE_TYPE:
@@ -309,27 +319,39 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
JSDataView::cast(this)->JSDataViewPrint(os);
break;
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+ JSV8BreakIterator::cast(this)->JSV8BreakIteratorPrint(os);
+ break;
case JS_INTL_COLLATOR_TYPE:
JSCollator::cast(this)->JSCollatorPrint(os);
break;
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
+ JSDateTimeFormat::cast(this)->JSDateTimeFormatPrint(os);
+ break;
case JS_INTL_LIST_FORMAT_TYPE:
JSListFormat::cast(this)->JSListFormatPrint(os);
break;
case JS_INTL_LOCALE_TYPE:
JSLocale::cast(this)->JSLocalePrint(os);
break;
+ case JS_INTL_NUMBER_FORMAT_TYPE:
+ JSNumberFormat::cast(this)->JSNumberFormatPrint(os);
+ break;
case JS_INTL_PLURAL_RULES_TYPE:
JSPluralRules::cast(this)->JSPluralRulesPrint(os);
break;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
JSRelativeTimeFormat::cast(this)->JSRelativeTimeFormatPrint(os);
break;
+ case JS_INTL_SEGMENTER_TYPE:
+ JSSegmenter::cast(this)->JSSegmenterPrint(os);
+ break;
#endif // V8_INTL_SUPPORT
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
Name::cast(this)->Name##Print(os); \
break;
- STRUCT_LIST(MAKE_STRUCT_CASE)
+ STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
case ALLOCATION_SITE_TYPE:
@@ -358,9 +380,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case STRING_TYPE:
case CONS_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
@@ -372,9 +394,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case SLICED_ONE_BYTE_STRING_TYPE:
case THIN_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE:
- case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case UNCACHED_EXTERNAL_STRING_TYPE:
+ case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
@@ -832,7 +854,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
Smi* smi;
if (raw_transitions()->ToSmi(&smi)) {
os << Brief(smi);
- } else if (raw_transitions()->ToStrongOrWeakHeapObject(&heap_object)) {
+ } else if (raw_transitions()->GetHeapObject(&heap_object)) {
os << Brief(heap_object);
}
transitions.PrintTransitions(os);
@@ -1073,29 +1095,6 @@ void FeedbackVector::FeedbackSlotPrint(std::ostream& os,
nexus.Print(os);
}
-namespace {
-
-const char* ICState2String(InlineCacheState state) {
- switch (state) {
- case UNINITIALIZED:
- return "UNINITIALIZED";
- case PREMONOMORPHIC:
- return "PREMONOMORPHIC";
- case MONOMORPHIC:
- return "MONOMORPHIC";
- case RECOMPUTE_HANDLER:
- return "RECOMPUTE_HANDLER";
- case POLYMORPHIC:
- return "POLYMORPHIC";
- case MEGAMORPHIC:
- return "MEGAMORPHIC";
- case GENERIC:
- return "GENERIC";
- }
- UNREACHABLE();
-}
-} // anonymous namespace
-
void FeedbackNexus::Print(std::ostream& os) { // NOLINT
switch (kind()) {
case FeedbackSlotKind::kCall:
@@ -1114,7 +1113,7 @@ void FeedbackNexus::Print(std::ostream& os) { // NOLINT
case FeedbackSlotKind::kStoreKeyedStrict:
case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kCloneObject: {
- os << ICState2String(StateFromFeedback());
+ os << InlineCacheState2String(StateFromFeedback());
break;
}
case FeedbackSlotKind::kBinaryOp: {
@@ -1278,7 +1277,7 @@ void JSWeakSet::JSWeakSetPrint(std::ostream& os) { // NOLINT
void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSArrayBuffer");
os << "\n - backing_store: " << backing_store();
- os << "\n - byte_length: " << Brief(byte_length());
+ os << "\n - byte_length: " << byte_length();
if (is_external()) os << "\n - external";
if (is_neuterable()) os << "\n - neuterable";
if (was_neutered()) os << "\n - neutered";
@@ -1291,8 +1290,8 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSTypedArray");
os << "\n - buffer: " << Brief(buffer());
- os << "\n - byte_offset: " << Brief(byte_offset());
- os << "\n - byte_length: " << Brief(byte_length());
+ os << "\n - byte_offset: " << byte_offset();
+ os << "\n - byte_length: " << byte_length();
os << "\n - length: " << Brief(length());
if (WasNeutered()) os << "\n - neutered";
JSObjectPrintBody(os, this, !WasNeutered());
@@ -1309,8 +1308,8 @@ void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) {  // NOLINT
void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSDataView");
os << "\n - buffer =" << Brief(buffer());
- os << "\n - byte_offset: " << Brief(byte_offset());
- os << "\n - byte_length: " << Brief(byte_length());
+ os << "\n - byte_offset: " << byte_offset();
+ os << "\n - byte_length: " << byte_length();
if (WasNeutered()) os << "\n - neutered";
JSObjectPrintBody(os, this, !WasNeutered());
}
@@ -1953,20 +1952,41 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
}
#ifdef V8_INTL_SUPPORT
+void JSV8BreakIterator::JSV8BreakIteratorPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSV8BreakIterator");
+ os << "\n - locale: " << Brief(locale());
+ os << "\n - type: " << TypeAsString();
+ os << "\n - break iterator: " << Brief(break_iterator());
+ os << "\n - unicode string: " << Brief(unicode_string());
+ os << "\n - bound adopt text: " << Brief(bound_adopt_text());
+ os << "\n - bound first: " << Brief(bound_first());
+ os << "\n - bound next: " << Brief(bound_next());
+ os << "\n - bound current: " << Brief(bound_current());
+ os << "\n - bound break type: " << Brief(bound_break_type());
+ os << "\n";
+}
+
void JSCollator::JSCollatorPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSCollator");
- os << "\n - usage: " << JSCollator::UsageToString(usage());
os << "\n - icu collator: " << Brief(icu_collator());
os << "\n - bound compare: " << Brief(bound_compare());
os << "\n";
}
+void JSDateTimeFormat::JSDateTimeFormatPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSDateTimeFormat");
+ os << "\n - icu locale: " << Brief(icu_locale());
+ os << "\n - icu simple date format: " << Brief(icu_simple_date_format());
+ os << "\n - bound format: " << Brief(bound_format());
+ os << "\n";
+}
+
void JSListFormat::JSListFormatPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSListFormat");
os << "\n - locale: " << Brief(locale());
os << "\n - style: " << StyleAsString();
os << "\n - type: " << TypeAsString();
- os << "\n - formatter: " << Brief(formatter());
+ os << "\n - icu formatter: " << Brief(icu_formatter());
os << "\n";
}
@@ -1978,14 +1998,24 @@ void JSLocale::JSLocalePrint(std::ostream& os) { // NOLINT
os << "\n - baseName: " << Brief(base_name());
os << "\n - locale: " << Brief(locale());
os << "\n - calendar: " << Brief(calendar());
- os << "\n - caseFirst: " << Brief(case_first());
+ os << "\n - caseFirst: " << CaseFirstAsString();
os << "\n - collation: " << Brief(collation());
- os << "\n - hourCycle: " << Brief(hour_cycle());
- os << "\n - numeric: " << Brief(numeric());
+ os << "\n - hourCycle: " << HourCycleAsString();
+ os << "\n - numeric: " << NumericAsString();
os << "\n - numberingSystem: " << Brief(numbering_system());
os << "\n";
}
+void JSNumberFormat::JSNumberFormatPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSNumberFormat");
+ os << "\n - locale: " << Brief(locale());
+ os << "\n - icu_number_format: " << Brief(icu_number_format());
+ os << "\n - bound_format: " << Brief(bound_format());
+ os << "\n - style: " << StyleAsString();
+ os << "\n - currency_display: " << CurrencyDisplayAsString();
+ os << "\n";
+}
+
void JSPluralRules::JSPluralRulesPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "JSPluralRules");
JSObjectPrint(os);
@@ -2002,7 +2032,16 @@ void JSRelativeTimeFormat::JSRelativeTimeFormatPrint(
os << "\n - locale: " << Brief(locale());
os << "\n - style: " << StyleAsString();
os << "\n - numeric: " << NumericAsString();
- os << "\n - formatter: " << Brief(formatter());
+ os << "\n - icu formatter: " << Brief(icu_formatter());
+ os << "\n";
+}
+
+void JSSegmenter::JSSegmenterPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSSegmenter");
+ os << "\n - locale: " << Brief(locale());
+ os << "\n - granularity: " << GranularityAsString();
+ os << "\n - lineBreakStyle: " << LineBreakStyleAsString();
+  os << "\n - icu break iterator: " << Brief(icu_break_iterator());
os << "\n";
}
#endif // V8_INTL_SUPPORT
@@ -2173,6 +2212,13 @@ void UncompiledDataWithPreParsedScope::UncompiledDataWithPreParsedScopePrint(
os << "\n";
}
+void MicrotaskQueue::MicrotaskQueuePrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "MicrotaskQueue");
+ os << "\n - pending_microtask_count: " << pending_microtask_count();
+ os << "\n - queue: " << Brief(queue());
+ os << "\n";
+}
+
void InterpreterData::InterpreterDataPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "InterpreterData");
os << "\n - bytecode_array: " << Brief(bytecode_array());
@@ -2191,12 +2237,12 @@ void MaybeObject::Print(std::ostream& os) {
HeapObject* heap_object;
if (ToSmi(&smi)) {
smi->SmiPrint(os);
- } else if (IsClearedWeakHeapObject()) {
+ } else if (IsCleared()) {
os << "[cleared]";
- } else if (ToWeakHeapObject(&heap_object)) {
+ } else if (GetHeapObjectIfWeak(&heap_object)) {
os << "[weak] ";
heap_object->HeapObjectPrint(os);
- } else if (ToStrongHeapObject(&heap_object)) {
+ } else if (GetHeapObjectIfStrong(&heap_object)) {
heap_object->HeapObjectPrint(os);
} else {
UNREACHABLE();
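The MaybeObject accessor renames in this hunk recur throughout the rest of the
patch. As a minimal side-by-side sketch of the new names (DescribeSlot is a
hypothetical helper, not part of the diff):

    void DescribeSlot(MaybeObject* slot, std::ostream& os) {
      Smi* smi;
      HeapObject* heap_object;
      if (slot->ToSmi(&smi)) {
        os << "smi";                   // unchanged
      } else if (slot->IsCleared()) {  // was IsClearedWeakHeapObject()
        os << "cleared weak reference";
      } else if (slot->GetHeapObjectIfWeak(&heap_object)) {
        os << "weak reference";        // was ToWeakHeapObject(&heap_object)
      } else if (slot->GetHeapObjectIfStrong(&heap_object)) {
        os << "strong reference";      // was ToStrongHeapObject(&heap_object)
      }
    }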
@@ -2356,7 +2402,7 @@ void TransitionsAccessor::PrintTransitions(std::ostream& os) { // NOLINT
case kUninitialized:
return;
case kWeakRef: {
- Map* target = Map::cast(raw_transitions_->ToWeakHeapObject());
+ Map* target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
Name* key = GetSimpleTransitionKey(target);
PrintOneTransition(os, key, target);
break;
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 811656ad9a..6ccdbf4e34 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -61,25 +61,33 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-break-iterator.h"
#include "src/objects/js-collator.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/js-collection-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-date-time-format.h"
+#endif // V8_INTL_SUPPORT
#include "src/objects/js-generator-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-list-format.h"
#include "src/objects/js-locale.h"
+#include "src/objects/js-number-format.h"
+#include "src/objects/js-plural-rules.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator.h"
#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-plural-rules.h"
#include "src/objects/js-relative-time-format.h"
+#include "src/objects/js-segmenter.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/literal-objects-inl.h"
#include "src/objects/map.h"
#include "src/objects/microtask-inl.h"
+#include "src/objects/microtask-queue-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/parsing/preparsed-scope-data.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
@@ -364,8 +372,17 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
Handle<Object> input) {
DisallowJavascriptExecution no_js(isolate);
- if (input->IsString() || input->IsNumeric() || input->IsOddball()) {
+ if (input->IsString() || input->IsNumber() || input->IsOddball()) {
return Object::ToString(isolate, input).ToHandleChecked();
+ } else if (input->IsBigInt()) {
+ MaybeHandle<String> maybe_string =
+ BigInt::ToString(isolate, Handle<BigInt>::cast(input), 10, kDontThrow);
+ Handle<String> result;
+ if (maybe_string.ToHandle(&result)) return result;
+ // BigInt-to-String conversion can fail on 32-bit platforms where
+ // String::kMaxLength is too small to fit this BigInt.
+ return isolate->factory()->NewStringFromStaticChars(
+ "<a very large BigInt>");
} else if (input->IsFunction()) {
// -- F u n c t i o n
Handle<String> fun_str;
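The new BigInt branch above relies on the non-throwing BigInt::ToString
overload. The same fallback pattern in isolation, as a hedged sketch
(StringifyBigIntNoThrow is a hypothetical helper, not part of the patch):

    // kDontThrow yields a MaybeHandle; an empty result is replaced with a
    // static placeholder rather than raising a RangeError.
    Handle<String> StringifyBigIntNoThrow(Isolate* isolate, Handle<BigInt> big) {
      Handle<String> result;
      if (BigInt::ToString(isolate, big, 10, kDontThrow).ToHandle(&result)) {
        return result;  // Decimal digits fit within String::kMaxLength.
      }
      return isolate->factory()->NewStringFromStaticChars("<a very large BigInt>");
    }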
@@ -1444,16 +1461,24 @@ int JSObject::GetHeaderSize(InstanceType type,
case JS_MODULE_NAMESPACE_TYPE:
return JSModuleNamespace::kHeaderSize;
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+ return JSV8BreakIterator::kSize;
case JS_INTL_COLLATOR_TYPE:
return JSCollator::kSize;
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
+ return JSDateTimeFormat::kSize;
case JS_INTL_LIST_FORMAT_TYPE:
return JSListFormat::kSize;
case JS_INTL_LOCALE_TYPE:
return JSLocale::kSize;
+ case JS_INTL_NUMBER_FORMAT_TYPE:
+ return JSNumberFormat::kSize;
case JS_INTL_PLURAL_RULES_TYPE:
return JSPluralRules::kSize;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
return JSRelativeTimeFormat::kSize;
+ case JS_INTL_SEGMENTER_TYPE:
+ return JSSegmenter::kSize;
#endif // V8_INTL_SUPPORT
case WASM_GLOBAL_TYPE:
return WasmGlobalObject::kSize;
@@ -2202,9 +2227,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
if (use_set) {
LookupIterator it(target, next_key, target);
- Maybe<bool> result =
- Object::SetProperty(&it, prop_value, LanguageMode::kStrict,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ Maybe<bool> result = Object::SetProperty(
+ &it, prop_value, LanguageMode::kStrict, StoreOrigin::kNamed);
if (result.IsNothing()) return result;
if (stable) stable = from->map() == *map;
} else {
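The StoreFromKeyed-to-StoreOrigin rename seen here repeats across the rest of
objects.cc. Judging only from the substitutions in this patch, the mapping is
two-valued; the enum definition below is an assumed sketch, as the actual
declaration is not shown in the diff.

    enum class StoreOrigin { kMaybeKeyed, kNamed };  // assumed member order
    // Substitutions evidenced by this patch:
    //   Object::CERTAINLY_NOT_STORE_FROM_KEYED -> StoreOrigin::kNamed
    //   Object::MAY_BE_STORE_FROM_KEYED        -> StoreOrigin::kMaybeKeyed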
@@ -2267,7 +2291,8 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, status,
Runtime::SetObjectProperty(isolate, target, next_key, prop_value,
- LanguageMode::kStrict),
+ LanguageMode::kStrict,
+ StoreOrigin::kMaybeKeyed),
Nothing<bool>());
} else {
if (excluded_properties != nullptr &&
@@ -2536,12 +2561,12 @@ std::ostream& operator<<(std::ostream& os, const Brief& v) {
HeapObject* heap_object;
if (maybe_object->ToSmi(&smi)) {
smi->SmiPrint(os);
- } else if (maybe_object->IsClearedWeakHeapObject()) {
+ } else if (maybe_object->IsCleared()) {
os << "[cleared]";
- } else if (maybe_object->ToWeakHeapObject(&heap_object)) {
+ } else if (maybe_object->GetHeapObjectIfWeak(&heap_object)) {
os << "[weak] ";
heap_object->HeapObjectShortPrint(os);
- } else if (maybe_object->ToStrongHeapObject(&heap_object)) {
+ } else if (maybe_object->GetHeapObjectIfStrong(&heap_object)) {
heap_object->HeapObjectShortPrint(os);
} else {
UNREACHABLE();
@@ -2599,7 +2624,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
DCHECK(this->SupportsExternalization());
- DCHECK(!resource->IsCompressible());
+ DCHECK(resource->IsCacheable());
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
@@ -2612,7 +2637,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
#endif // DEBUG
int size = this->Size(); // Byte size of the original string.
// Abort if size does not allow in-place conversion.
- if (size < ExternalString::kShortSize) return false;
+ if (size < ExternalString::kUncachedSize) return false;
Isolate* isolate;
// Read-only strings cannot be made external, since that would mutate the
// string.
@@ -2626,23 +2651,25 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
- // string occupies is too small for a regular external string.
- // Instead, we resort to a short external string instead, omitting
- // the field caching the address of the backing store. When we encounter
- // short external strings in generated code, we need to bailout to runtime.
+  // string occupies is too small for a regular external string. Instead, we
+  // resort to an uncached external string, omitting the field that caches
+  // the address of the backing store. When we encounter uncached external
+  // strings in generated code, we need to bail out to the runtime.
Map* new_map;
ReadOnlyRoots roots(heap);
if (size < ExternalString::kSize) {
if (is_internalized) {
- new_map =
- is_one_byte
- ? roots
- .short_external_internalized_string_with_one_byte_data_map()
- : roots.short_external_internalized_string_map();
+ if (is_one_byte) {
+ new_map =
+ roots
+ .uncached_external_internalized_string_with_one_byte_data_map();
+ } else {
+ new_map = roots.uncached_external_internalized_string_map();
+ }
} else {
new_map = is_one_byte
- ? roots.short_external_string_with_one_byte_data_map()
- : roots.short_external_string_map();
+ ? roots.uncached_external_string_with_one_byte_data_map()
+ : roots.uncached_external_string_map();
}
} else {
new_map =
@@ -2679,7 +2706,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
DCHECK(this->SupportsExternalization());
- DCHECK(!resource->IsCompressible());
+ DCHECK(resource->IsCacheable());
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
@@ -2697,7 +2724,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
#endif // DEBUG
int size = this->Size(); // Byte size of the original string.
// Abort if size does not allow in-place conversion.
- if (size < ExternalString::kShortSize) return false;
+ if (size < ExternalString::kUncachedSize) return false;
Isolate* isolate;
// Read-only strings cannot be made external, since that would mutate the
// string.
@@ -2712,16 +2739,16 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
- // string occupies is too small for a regular external string.
- // Instead, we resort to a short external string instead, omitting
- // the field caching the address of the backing store. When we encounter
- // short external strings in generated code, we need to bailout to runtime.
+  // string occupies is too small for a regular external string. Instead, we
+  // resort to an uncached external string, omitting the field that caches
+  // the address of the backing store. When we encounter uncached external
+  // strings in generated code, we need to bail out to the runtime.
Map* new_map;
ReadOnlyRoots roots(heap);
if (size < ExternalString::kSize) {
new_map = is_internalized
- ? roots.short_external_one_byte_internalized_string_map()
- : roots.short_external_one_byte_string_map();
+ ? roots.uncached_external_one_byte_internalized_string_map()
+ : roots.uncached_external_one_byte_string_map();
} else {
new_map = is_internalized
? roots.external_one_byte_internalized_string_map()
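The short-to-uncached renaming above reduces to a size test when picking the
replacement map. A minimal sketch of the one-byte case, assuming a plain
external_one_byte_string_map root exists alongside the maps named in this hunk
(ChooseExternalOneByteMap is hypothetical):

    Map* ChooseExternalOneByteMap(ReadOnlyRoots roots, int size,
                                  bool is_internalized) {
      if (size < ExternalString::kSize) {
        // Too small to hold the cached data pointer; generated code must
        // bail out to the runtime for these.
        return is_internalized
                   ? roots.uncached_external_one_byte_internalized_string_map()
                   : roots.uncached_external_one_byte_string_map();
      }
      return is_internalized ? roots.external_one_byte_internalized_string_map()
                             : roots.external_one_byte_string_map();
    }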
@@ -3069,6 +3096,7 @@ VisitorId Map::GetVisitorId(Map* map) {
case STRING_TABLE_TYPE:
case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
+ case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
case DEBUG_EVALUATE_CONTEXT_TYPE:
@@ -3137,6 +3165,12 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_ARRAY_BUFFER_TYPE:
return kVisitJSArrayBuffer;
+ case JS_DATA_VIEW_TYPE:
+ return kVisitJSDataView;
+
+ case JS_TYPED_ARRAY_TYPE:
+ return kVisitJSTypedArray;
+
case SMALL_ORDERED_HASH_MAP_TYPE:
return kVisitSmallOrderedHashMap;
@@ -3170,11 +3204,10 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_DATE_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_TYPE:
+ case JS_FUNCTION_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
@@ -3187,12 +3220,17 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_REGEXP_TYPE:
case JS_REGEXP_STRING_ITERATOR_TYPE:
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
case JS_INTL_LIST_FORMAT_TYPE:
case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_NUMBER_FORMAT_TYPE:
case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENTER_TYPE:
#endif // V8_INTL_SUPPORT
+ case WASM_EXCEPTION_TYPE:
case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
@@ -3203,9 +3241,6 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_SPECIAL_API_OBJECT_TYPE:
return kVisitJSApiObject;
- case JS_FUNCTION_TYPE:
- return kVisitJSFunction;
-
case FILLER_TYPE:
case FOREIGN_TYPE:
case HEAP_NUMBER_TYPE:
@@ -3234,7 +3269,7 @@ VisitorId Map::GetVisitorId(Map* map) {
case ALLOCATION_SITE_TYPE:
return kVisitAllocationSite;
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+#define MAKE_STRUCT_CASE(TYPE, Name, name) case TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
if (instance_type == PROTOTYPE_INFO_TYPE) {
@@ -3335,7 +3370,7 @@ bool JSObject::IsUnmodifiedApiObject(Object** o) {
HeapObject* heap_object = HeapObject::cast(object);
if (!object->IsJSObject()) return false;
JSObject* js_object = JSObject::cast(object);
- if (!js_object->IsApiWrapper()) return false;
+ if (!js_object->IsDroppableApiWrapper()) return false;
Object* maybe_constructor = js_object->map()->GetConstructor();
if (!maybe_constructor->IsJSFunction()) return false;
JSFunction* constructor = JSFunction::cast(maybe_constructor);
@@ -3372,6 +3407,15 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
}
os << ">";
} break;
+ case AWAIT_CONTEXT_TYPE: {
+ os << "<AwaitContext generator= ";
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ Context::cast(this)->extension()->ShortPrint(&accumulator);
+ os << accumulator.ToCString().get();
+ os << '>';
+ break;
+ }
case BLOCK_CONTEXT_TYPE:
os << "<BlockContext[" << FixedArray::cast(this)->length() << "]>";
break;
@@ -3524,8 +3568,8 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case JS_MESSAGE_OBJECT_TYPE:
os << "<JSMessageObject>";
break;
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
os << "<" #Name; \
Name::cast(this)->BriefPrintDetails(os); \
os << ">"; \
@@ -3923,14 +3967,14 @@ MaybeObjectHandle Map::WrapFieldType(Isolate* isolate, Handle<FieldType> type) {
// static
FieldType* Map::UnwrapFieldType(MaybeObject* wrapped_type) {
- if (wrapped_type->IsClearedWeakHeapObject()) {
+ if (wrapped_type->IsCleared()) {
return FieldType::None();
}
HeapObject* heap_object;
- if (wrapped_type->ToWeakHeapObject(&heap_object)) {
+ if (wrapped_type->GetHeapObjectIfWeak(&heap_object)) {
return FieldType::cast(heap_object);
}
- return FieldType::cast(wrapped_type->ToObject());
+ return wrapped_type->cast<FieldType>();
}
MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
@@ -4325,8 +4369,10 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
heap->ClearRecordedSlot(*object,
HeapObject::RawField(*object, index.offset()));
} else {
- DCHECK(!heap->HasRecordedSlot(
- *object, HeapObject::RawField(*object, index.offset())));
+#ifdef DEBUG
+ heap->VerifyClearedSlot(*object,
+ HeapObject::RawField(*object, index.offset()));
+#endif
}
} else {
object->RawFastPropertyAtPut(index, value);
@@ -4691,8 +4737,8 @@ Map* Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
PropertyConstness new_constness,
Representation new_representation,
- MaybeObjectHandle new_wrapped_type) {
- DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeakHeapObject());
+ const MaybeObjectHandle& new_wrapped_type) {
+ DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
// We store raw pointers in the queue, so no allocations are allowed.
DisallowHeapAllocation no_allocation;
PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
@@ -4841,25 +4887,6 @@ Handle<Map> Map::ReconfigureElementsKind(Isolate* isolate, Handle<Map> map,
return mu.ReconfigureElementsKind(new_elements_kind);
}
-// Generalize all fields and update the transition tree.
-Handle<Map> Map::GeneralizeAllFields(Isolate* isolate, Handle<Map> map) {
- Handle<FieldType> any_type = FieldType::Any(isolate);
-
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- for (int i = 0; i < map->NumberOfOwnDescriptors(); ++i) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() == kField) {
- DCHECK_EQ(kData, details.kind());
- MapUpdater mu(isolate, map);
- map = mu.ReconfigureToDataField(i, details.attributes(),
- PropertyConstness::kMutable,
- Representation::Tagged(), any_type);
- }
- }
- return map;
-}
-
-
// static
MaybeHandle<Map> Map::TryUpdate(Isolate* isolate, Handle<Map> old_map) {
DisallowHeapAllocation no_allocation;
@@ -4988,18 +5015,16 @@ Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
MaybeHandle<Object> Object::SetProperty(Isolate* isolate, Handle<Object> object,
Handle<Name> name, Handle<Object> value,
LanguageMode language_mode,
- StoreFromKeyed store_mode) {
+ StoreOrigin store_origin) {
LookupIterator it(isolate, object, name);
- MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_mode));
+ MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_origin));
return value;
}
-
Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
Handle<Object> value,
LanguageMode language_mode,
- StoreFromKeyed store_mode,
- bool* found) {
+ StoreOrigin store_origin, bool* found) {
it->UpdateProtector();
DCHECK(it->IsFound());
ShouldThrow should_throw =
@@ -5104,14 +5129,13 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
return Nothing<bool>();
}
-
Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
LanguageMode language_mode,
- StoreFromKeyed store_mode) {
+ StoreOrigin store_origin) {
if (it->IsFound()) {
bool found = true;
Maybe<bool> result =
- SetPropertyInternal(it, value, language_mode, store_mode, &found);
+ SetPropertyInternal(it, value, language_mode, store_origin, &found);
if (found) return result;
}
@@ -5126,19 +5150,18 @@ Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
ShouldThrow should_throw =
is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
- return AddDataProperty(it, value, NONE, should_throw, store_mode);
+ return AddDataProperty(it, value, NONE, should_throw, store_origin);
}
-
Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
LanguageMode language_mode,
- StoreFromKeyed store_mode) {
+ StoreOrigin store_origin) {
Isolate* isolate = it->isolate();
if (it->IsFound()) {
bool found = true;
Maybe<bool> result =
- SetPropertyInternal(it, value, language_mode, store_mode, &found);
+ SetPropertyInternal(it, value, language_mode, store_origin, &found);
if (found) return result;
}
@@ -5217,7 +5240,7 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
}
}
- return AddDataProperty(&own_lookup, value, NONE, should_throw, store_mode);
+ return AddDataProperty(&own_lookup, value, NONE, should_throw, store_origin);
}
Maybe<bool> Object::CannotCreateProperty(Isolate* isolate,
@@ -5314,11 +5337,10 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
return Just(true);
}
-
Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
PropertyAttributes attributes,
ShouldThrow should_throw,
- StoreFromKeyed store_mode) {
+ StoreOrigin store_origin) {
if (!it->GetReceiver()->IsJSReceiver()) {
return CannotCreateProperty(it->isolate(), it->GetReceiver(), it->GetName(),
value, should_throw);
@@ -5380,7 +5402,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
// Migrate to the most up-to-date map that will be able to store |value|
// under it->name() with |attributes|.
it->PrepareTransitionToDataProperty(receiver, value, attributes,
- store_mode);
+ store_origin);
DCHECK_EQ(LookupIterator::TRANSITION, it->state());
it->ApplyTransitionToDataProperty(receiver);
@@ -5859,7 +5881,7 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
LookupIterator it =
LookupIterator::PropertyOrElement(isolate, receiver, name, target);
return Object::SetSuperProperty(&it, value, language_mode,
- Object::MAY_BE_STORE_FROM_KEYED);
+ StoreOrigin::kMaybeKeyed);
}
Handle<Object> trap_result;
@@ -6225,7 +6247,7 @@ void JSObject::AddProperty(Isolate* isolate, Handle<JSObject> object,
DCHECK(object->map()->is_extensible() || name->IsPrivate());
#endif
CHECK(AddDataProperty(&it, value, attributes, kThrowOnError,
- CERTAINLY_NOT_STORE_FROM_KEYED)
+ StoreOrigin::kNamed)
.IsJust());
}
@@ -6328,7 +6350,7 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
}
return AddDataProperty(it, value, attributes, should_throw,
- CERTAINLY_NOT_STORE_FROM_KEYED);
+ StoreOrigin::kNamed);
}
MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
@@ -6408,7 +6430,7 @@ MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
DisallowHeapAllocation no_gc;
MaybeObject* value = WeakFixedArray::Get(GetIndex(fast_map));
HeapObject* heap_object;
- if (!value->ToWeakHeapObject(&heap_object)) {
+ if (!value->GetHeapObjectIfWeak(&heap_object)) {
return MaybeHandle<Map>();
}
@@ -6698,6 +6720,11 @@ Object* SetHashAndUpdateProperties(Isolate* isolate, HeapObject* properties,
return properties;
}
+ if (properties->IsGlobalDictionary()) {
+ GlobalDictionary::cast(properties)->SetHash(hash);
+ return properties;
+ }
+
DCHECK(properties->IsNameDictionary());
NameDictionary::cast(properties)->SetHash(hash);
return properties;
@@ -8096,144 +8123,6 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
}
-bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
- ElementsKind kind,
- Object* object) {
- Isolate* isolate = GetIsolate();
- if (IsObjectElementsKind(kind) || kind == FAST_STRING_WRAPPER_ELEMENTS) {
- int length = IsJSArray() ? Smi::ToInt(JSArray::cast(this)->length())
- : elements->length();
- for (int i = 0; i < length; ++i) {
- Object* element = elements->get(i);
- if (!element->IsTheHole(isolate) && element == object) return true;
- }
- } else {
- DCHECK(kind == DICTIONARY_ELEMENTS || kind == SLOW_STRING_WRAPPER_ELEMENTS);
- Object* key = NumberDictionary::cast(elements)->SlowReverseLookup(object);
- if (!key->IsUndefined(isolate)) return true;
- }
- return false;
-}
-
-
-// Check whether this object references another object.
-bool JSObject::ReferencesObject(Object* obj) {
- Map* map_of_this = map();
- Heap* heap = GetHeap();
- DisallowHeapAllocation no_allocation;
-
- // Is the object the constructor for this object?
- if (map_of_this->GetConstructor() == obj) {
- return true;
- }
-
- // Is the object the prototype for this object?
- if (map_of_this->prototype() == obj) {
- return true;
- }
-
- // Check if the object is among the named properties.
- Object* key = SlowReverseLookup(obj);
- if (!key->IsUndefined(heap->isolate())) {
- return true;
- }
-
- // Check if the object is among the indexed properties.
- ElementsKind kind = GetElementsKind();
- switch (kind) {
- // Raw pixels and external arrays do not reference other
- // objects.
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- break;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- case PACKED_DOUBLE_ELEMENTS:
- case HOLEY_DOUBLE_ELEMENTS:
- break;
- case PACKED_SMI_ELEMENTS:
- case HOLEY_SMI_ELEMENTS:
- break;
- case PACKED_ELEMENTS:
- case HOLEY_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS: {
- FixedArray* elements = FixedArray::cast(this->elements());
- if (ReferencesObjectFromElements(elements, kind, obj)) return true;
- break;
- }
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
- SloppyArgumentsElements* elements =
- SloppyArgumentsElements::cast(this->elements());
- // Check the mapped parameters.
- for (uint32_t i = 0; i < elements->parameter_map_length(); ++i) {
- Object* value = elements->get_mapped_entry(i);
- if (!value->IsTheHole(heap->isolate()) && value == obj) return true;
- }
- // Check the arguments.
- FixedArray* arguments = elements->arguments();
- kind = arguments->IsNumberDictionary() ? DICTIONARY_ELEMENTS
- : HOLEY_ELEMENTS;
- if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
- break;
- }
- case NO_ELEMENTS:
- break;
- }
-
- // For functions check the context.
- if (IsJSFunction()) {
- // Get the constructor function for arguments array.
- Map* arguments_map =
- heap->isolate()->context()->native_context()->sloppy_arguments_map();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_map->GetConstructor());
-
- // Get the context and don't check if it is the native context.
- JSFunction* f = JSFunction::cast(this);
- Context* context = f->context();
- if (context->IsNativeContext()) {
- return false;
- }
-
- // Check the non-special context slots.
- for (int i = Context::MIN_CONTEXT_SLOTS; i < context->length(); i++) {
- // Only check JS objects.
- if (context->get(i)->IsJSObject()) {
- JSObject* ctxobj = JSObject::cast(context->get(i));
- // If it is an arguments array check the content.
- if (ctxobj->map()->GetConstructor() == arguments_function) {
- if (ctxobj->ReferencesObject(obj)) {
- return true;
- }
- } else if (ctxobj == obj) {
- return true;
- }
- }
- }
-
- // Check the context extension (if any) if it can have references.
- if (context->has_extension() && !context->IsCatchContext() &&
- !context->IsModuleContext()) {
- // With harmony scoping, a JSFunction may have a script context.
- // TODO(mvstanton): walk into the ScopeInfo.
- if (context->IsScriptContext()) {
- return false;
- }
-
- return context->extension_object()->ReferencesObject(obj);
- }
- }
-
- // No references to object.
- return false;
-}
-
-
Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
IntegrityLevel level,
ShouldThrow should_throw) {
@@ -8758,7 +8647,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
// typed array elements. Freeze works only if there are no actual elements.
if (object->HasFixedTypedArrayElements()) {
if (attrs == FROZEN &&
- JSArrayBufferView::cast(*object)->byte_length()->Number() > 0) {
+ JSArrayBufferView::cast(*object)->byte_length() > 0) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kCannotFreezeArrayBufferView));
return Nothing<bool>();
@@ -9937,7 +9826,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
Handle<Object> value,
PropertyAttributes attributes,
PropertyConstness constness,
- StoreFromKeyed store_mode) {
+ StoreOrigin store_origin) {
RuntimeCallTimerScope stats_scope(
isolate, *map,
map->is_prototype_map()
@@ -9966,7 +9855,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
TransitionFlag flag = INSERT_TRANSITION;
MaybeHandle<Map> maybe_map;
- if (!map->TooManyFastProperties(store_mode)) {
+ if (!map->TooManyFastProperties(store_origin)) {
if (!FLAG_track_constant_fields && value->IsJSFunction()) {
maybe_map =
Map::CopyWithConstant(isolate, map, name, value, attributes, flag);
@@ -10142,7 +10031,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
pair = AccessorPair::Copy(isolate, Handle<AccessorPair>::cast(maybe_pair));
} else if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors ||
- map->TooManyFastProperties(CERTAINLY_NOT_STORE_FROM_KEYED)) {
+ map->TooManyFastProperties(StoreOrigin::kNamed)) {
return Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES,
"TooManyAccessors");
} else {
@@ -10229,7 +10118,7 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
// READ_ONLY is an invalid attribute for JS setters/getters.
HeapObject* heap_object;
if (details.kind() != kAccessor ||
- !(value_or_field_type->ToStrongHeapObject(&heap_object) &&
+ !(value_or_field_type->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsAccessorPair())) {
mask |= READ_ONLY;
}
@@ -10463,11 +10352,6 @@ Handle<FixedArray> ArrayList::Elements(Isolate* isolate,
return result;
}
-bool ArrayList::IsFull() {
- int capacity = length();
- return kFirstIndex + Length() == capacity;
-}
-
namespace {
Handle<FixedArray> EnsureSpaceInFixedArray(Isolate* isolate,
@@ -10501,7 +10385,7 @@ Handle<ArrayList> ArrayList::EnsureSpace(Isolate* isolate,
// static
Handle<WeakArrayList> WeakArrayList::AddToEnd(Isolate* isolate,
Handle<WeakArrayList> array,
- MaybeObjectHandle value) {
+ const MaybeObjectHandle& value) {
int length = array->length();
array = EnsureSpace(isolate, array, length + 1);
// Reload length; GC might have removed elements from the array.
@@ -10532,14 +10416,14 @@ Handle<WeakArrayList> WeakArrayList::EnsureSpace(Isolate* isolate,
int WeakArrayList::CountLiveWeakReferences() const {
int live_weak_references = 0;
for (int i = 0; i < length(); i++) {
- if (Get(i)->IsWeakHeapObject()) {
+ if (Get(i)->IsWeak()) {
++live_weak_references;
}
}
return live_weak_references;
}
-bool WeakArrayList::RemoveOne(MaybeObjectHandle value) {
+bool WeakArrayList::RemoveOne(const MaybeObjectHandle& value) {
if (length() == 0) return false;
// Optimize for the most recently added element to be removed again.
int last_index = length() - 1;
@@ -10585,7 +10469,7 @@ Handle<WeakArrayList> PrototypeUsers::Add(Isolate* isolate,
if (empty_slot != kNoEmptySlotsMarker) {
DCHECK_GE(empty_slot, kFirstIndex);
CHECK_LT(empty_slot, array->length());
- int next_empty_slot = Smi::ToInt(array->Get(empty_slot)->ToSmi());
+ int next_empty_slot = Smi::ToInt(array->Get(empty_slot)->cast<Smi>());
array->Set(empty_slot, HeapObjectReference::Weak(*value));
if (assigned_index != nullptr) *assigned_index = empty_slot;
@@ -10624,11 +10508,13 @@ WeakArrayList* PrototypeUsers::Compact(Handle<WeakArrayList> array, Heap* heap,
int copy_to = kFirstIndex;
for (int i = kFirstIndex; i < array->length(); i++) {
MaybeObject* element = array->Get(i);
- if (element->IsSmi()) continue;
- if (element->IsClearedWeakHeapObject()) continue;
- HeapObject* value = element->ToWeakHeapObject();
- callback(value, i, copy_to);
- new_array->Set(copy_to++, element);
+ HeapObject* value;
+ if (element->GetHeapObjectIfWeak(&value)) {
+ callback(value, i, copy_to);
+ new_array->Set(copy_to++, element);
+ } else {
+ DCHECK(element->IsCleared() || element->IsSmi());
+ }
}
new_array->set_length(copy_to);
set_empty_slot_index(*new_array, kNoEmptySlotsMarker);
@@ -10707,7 +10593,7 @@ Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
// Allocate the array of keys.
Handle<WeakFixedArray> result =
factory->NewWeakFixedArrayWithMap<DescriptorArray>(
- Heap::kDescriptorArrayMapRootIndex, LengthFor(size), pretenure);
+ RootIndex::kDescriptorArrayMap, LengthFor(size), pretenure);
result->Set(kDescriptorLengthIndex,
MaybeObject::FromObject(Smi::FromInt(number_of_descriptors)));
result->Set(kEnumCacheIndex, MaybeObject::FromObject(
@@ -11104,32 +10990,6 @@ std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
}
-const uc16* String::GetTwoByteData(unsigned start) {
- DCHECK(!IsOneByteRepresentationUnderneath());
- switch (StringShape(this).representation_tag()) {
- case kSeqStringTag:
- return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
- case kExternalStringTag:
- return ExternalTwoByteString::cast(this)->
- ExternalTwoByteStringGetData(start);
- case kSlicedStringTag: {
- SlicedString* slice = SlicedString::cast(this);
- return slice->parent()->GetTwoByteData(start + slice->offset());
- }
- case kConsStringTag:
- case kThinStringTag:
- UNREACHABLE();
- }
- UNREACHABLE();
-}
-
-
-const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
- return reinterpret_cast<uc16*>(
- reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize) + start;
-}
-
-
void Relocatable::PostGarbageCollectionProcessing(Isolate* isolate) {
Relocatable* current = isolate->relocatable_top();
while (current != nullptr) {
@@ -11185,15 +11045,13 @@ FlatStringReader::FlatStringReader(Isolate* isolate, Handle<String> str)
PostGarbageCollection();
}
-
FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
: Relocatable(isolate),
- str_(0),
+ str_(nullptr),
is_one_byte_(true),
length_(input.length()),
start_(input.start()) {}
-
void FlatStringReader::PostGarbageCollection() {
if (str_ == nullptr) return;
Handle<String> str(str_);
@@ -11638,7 +11496,7 @@ class StringComparator {
};
public:
- inline StringComparator() {}
+ inline StringComparator() = default;
template<typename Chars1, typename Chars2>
static inline bool Equals(State* state_1, State* state_2, int to_check) {
@@ -12838,7 +12696,7 @@ void InvalidatePrototypeChainsInternal(Map* map) {
for (int i = PrototypeUsers::kFirstIndex; i < prototype_users->length();
++i) {
HeapObject* heap_object;
- if (prototype_users->Get(i)->ToWeakHeapObject(&heap_object) &&
+ if (prototype_users->Get(i)->GetHeapObjectIfWeak(&heap_object) &&
heap_object->IsMap()) {
// Walk the prototype chain (backwards, towards leaf objects) if
// necessary.
@@ -13135,9 +12993,14 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_GENERATOR_OBJECT_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
case JS_INTL_LIST_FORMAT_TYPE:
+ case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_NUMBER_FORMAT_TYPE:
case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENTER_TYPE:
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
#endif
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MAP_TYPE:
@@ -13198,7 +13061,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case FIXED_##TYPE##_ARRAY_TYPE:
#undef TYPED_ARRAY_CASE
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+#define MAKE_STRUCT_CASE(TYPE, Name, name) case TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
// We must not end up here for these instance types at all.
@@ -13268,6 +13131,8 @@ namespace {
bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
Handle<JSFunction> constructor,
Handle<Map> constructor_initial_map) {
+ // Use the default intrinsic prototype instead.
+ if (!new_target->has_prototype_slot()) return false;
// Check that |function|'s initial map still in sync with the |constructor|,
// otherwise we must create a new initial map for |function|.
if (new_target->has_initial_map() &&
@@ -13342,9 +13207,14 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
Handle<Object> prototype;
if (new_target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
- // Make sure the new.target.prototype is cached.
- EnsureHasInitialMap(function);
- prototype = handle(function->prototype(), isolate);
+ if (function->has_prototype_slot()) {
+ // Make sure the new.target.prototype is cached.
+ EnsureHasInitialMap(function);
+ prototype = handle(function->prototype(), isolate);
+ } else {
+      // No prototype property; use the intrinsic default prototype further down.
+ prototype = isolate->factory()->undefined_value();
+ }
} else {
Handle<String> prototype_string = isolate->factory()->prototype_string();
ASSIGN_RETURN_ON_EXCEPTION(
@@ -13384,8 +13254,8 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
}
int JSFunction::ComputeInstanceSizeWithMinSlack(Isolate* isolate) {
- if (has_prototype_slot() && has_initial_map() &&
- initial_map()->IsInobjectSlackTrackingInProgress()) {
+ CHECK(has_initial_map());
+ if (initial_map()->IsInobjectSlackTrackingInProgress()) {
int slack = initial_map()->ComputeMinObjectSlack(isolate);
return initial_map()->InstanceSizeFromSlack(slack);
}
@@ -13488,64 +13358,16 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
return NativeCodeFunctionSourceString(shared_info);
}
- if (FLAG_harmony_function_tostring) {
- if (shared_info->function_token_position() == kNoSourcePosition) {
- // If the function token position isn't valid, return [native code] to
- // ensure calling eval on the returned source code throws rather than
- // giving inconsistent call behaviour.
- isolate->CountUsage(v8::Isolate::UseCounterFeature::
- kFunctionTokenOffsetTooLongForToString);
- return NativeCodeFunctionSourceString(shared_info);
- }
- return Handle<String>::cast(
- SharedFunctionInfo::GetSourceCodeHarmony(shared_info));
- }
-
- IncrementalStringBuilder builder(isolate);
- FunctionKind kind = shared_info->kind();
- if (!IsArrowFunction(kind)) {
- if (IsConciseMethod(kind)) {
- if (IsAsyncGeneratorFunction(kind)) {
- builder.AppendCString("async *");
- } else if (IsGeneratorFunction(kind)) {
- builder.AppendCharacter('*');
- } else if (IsAsyncFunction(kind)) {
- builder.AppendCString("async ");
- }
- } else {
- if (IsAsyncGeneratorFunction(kind)) {
- builder.AppendCString("async function* ");
- } else if (IsGeneratorFunction(kind)) {
- builder.AppendCString("function* ");
- } else if (IsAsyncFunction(kind)) {
- builder.AppendCString("async function ");
- } else {
- builder.AppendCString("function ");
- }
- }
- if (shared_info->name_should_print_as_anonymous()) {
- builder.AppendCString("anonymous");
- } else if (!shared_info->is_anonymous_expression()) {
- builder.AppendString(handle(shared_info->Name(), isolate));
- }
- }
- if (shared_info->is_wrapped()) {
- builder.AppendCharacter('(');
- Handle<FixedArray> args(
- Script::cast(shared_info->script())->wrapped_arguments(), isolate);
- int argc = args->length();
- for (int i = 0; i < argc; i++) {
- if (i > 0) builder.AppendCString(", ");
- builder.AppendString(Handle<String>(String::cast(args->get(i)), isolate));
- }
- builder.AppendCString(") {\n");
- }
- builder.AppendString(
- Handle<String>::cast(SharedFunctionInfo::GetSourceCode(shared_info)));
- if (shared_info->is_wrapped()) {
- builder.AppendCString("\n}");
+ if (shared_info->function_token_position() == kNoSourcePosition) {
+ // If the function token position isn't valid, return [native code] to
+ // ensure calling eval on the returned source code throws rather than
+ // giving inconsistent call behaviour.
+ isolate->CountUsage(
+ v8::Isolate::UseCounterFeature::kFunctionTokenOffsetTooLongForToString);
+ return NativeCodeFunctionSourceString(shared_info);
}
- return builder.Finish().ToHandleChecked();
+ return Handle<String>::cast(
+ SharedFunctionInfo::GetSourceCodeHarmony(shared_info));
}
void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
@@ -13590,7 +13412,8 @@ int Script::GetEvalPosition() {
void Script::InitLineEnds(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
if (!script->line_ends()->IsUndefined(isolate)) return;
- DCHECK_NE(Script::TYPE_WASM, script->type());
+ DCHECK(script->type() != Script::TYPE_WASM ||
+ script->source_mapping_url()->IsString());
Object* src_obj = script->source();
if (!src_obj->IsString()) {
@@ -13775,7 +13598,7 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
MaybeObject* shared =
shared_function_infos()->Get(fun->function_literal_id());
HeapObject* heap_object;
- if (!shared->ToStrongOrWeakHeapObject(&heap_object) ||
+ if (!shared->GetHeapObject(&heap_object) ||
heap_object->IsUndefined(isolate)) {
return MaybeHandle<SharedFunctionInfo>();
}
@@ -13855,7 +13678,7 @@ SharedFunctionInfo* SharedFunctionInfo::ScriptIterator::Next() {
while (index_ < shared_function_infos_->length()) {
MaybeObject* raw = shared_function_infos_->Get(index_++);
HeapObject* heap_object;
- if (!raw->ToStrongOrWeakHeapObject(&heap_object) ||
+ if (!raw->GetHeapObject(&heap_object) ||
heap_object->IsUndefined(isolate_)) {
continue;
}
@@ -13911,7 +13734,7 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
DCHECK_LT(function_literal_id, list->length());
MaybeObject* maybe_object = list->Get(function_literal_id);
HeapObject* heap_object;
- if (maybe_object->ToWeakHeapObject(&heap_object)) {
+ if (maybe_object->GetHeapObjectIfWeak(&heap_object)) {
DCHECK_EQ(heap_object, *shared);
}
#endif
@@ -13951,7 +13774,7 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
MaybeObject* raw =
old_script->shared_function_infos()->Get(function_literal_id);
HeapObject* heap_object;
- if (raw->ToWeakHeapObject(&heap_object) && heap_object == *shared) {
+ if (raw->GetHeapObjectIfWeak(&heap_object) && heap_object == *shared) {
old_script->shared_function_infos()->Set(
function_literal_id, HeapObjectReference::Strong(
ReadOnlyRoots(isolate).undefined_value()));
@@ -14323,6 +14146,75 @@ void SharedFunctionInfo::SetFunctionTokenPosition(int function_token_position,
set_raw_function_token_offset(offset);
}
+int SharedFunctionInfo::StartPosition() const {
+ Object* maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info->IsScopeInfo()) {
+ ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
+ if (info->HasPositionInfo()) {
+ return info->StartPosition();
+ }
+ } else if (HasUncompiledData()) {
+ // Works with or without scope.
+ return uncompiled_data()->start_position();
+ } else if (IsApiFunction() || HasBuiltinId()) {
+ DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
+ return 0;
+ }
+ return kNoSourcePosition;
+}
+
+int SharedFunctionInfo::EndPosition() const {
+ Object* maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info->IsScopeInfo()) {
+ ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
+ if (info->HasPositionInfo()) {
+ return info->EndPosition();
+ }
+ } else if (HasUncompiledData()) {
+ // Works with or without scope.
+ return uncompiled_data()->end_position();
+ } else if (IsApiFunction() || HasBuiltinId()) {
+ DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
+ return 0;
+ }
+ return kNoSourcePosition;
+}
+
+int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
+ // Fast path for the common case when the SFI is uncompiled and so the
+ // function literal id is already in the uncompiled data.
+ if (HasUncompiledData()) {
+ int id = uncompiled_data()->function_literal_id();
+ // Make sure the id is what we should have found with the slow path.
+ DCHECK_EQ(id, FindIndexInScript(isolate));
+ return id;
+ }
+
+ // Otherwise, search for the function in the SFI's script's function list,
+  // and return its index in that list.
+ return FindIndexInScript(isolate);
+}
+
+void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
+ Object* maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info->IsScopeInfo()) {
+ ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
+ if (info->HasPositionInfo()) {
+ info->SetPositionInfo(start_position, end_position);
+ }
+ } else if (HasUncompiledData()) {
+ if (HasUncompiledDataWithPreParsedScope()) {
+ // Clear out preparsed scope data, since the position setter invalidates
+ // any scope data.
+ ClearPreParsedScopeData();
+ }
+ uncompiled_data()->set_start_position(start_position);
+ uncompiled_data()->set_end_position(end_position);
+ } else {
+ UNREACHABLE();
+ }
+}
+
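A hedged usage sketch of the position accessors added above (PrintSourceSpan is
a hypothetical caller, not part of the patch): positions come from the
ScopeInfo when one is attached, from UncompiledData otherwise, are 0 for API
and builtin functions, and kNoSourcePosition when nothing is known.

    void PrintSourceSpan(SharedFunctionInfo* sfi, std::ostream& os) {
      int start = sfi->StartPosition();
      int end = sfi->EndPosition();
      if (start == kNoSourcePosition) {
        os << "<no position info>";
      } else {
        os << "[" << start << ", " << end << ")";  // half-open source range
      }
    }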
void Map::StartInobjectSlackTracking() {
DCHECK(!IsInobjectSlackTrackingInProgress());
if (UnusedPropertyFields() == 0) return;
@@ -14374,11 +14266,6 @@ void Code::FlushICache() const {
Assembler::FlushICache(raw_instruction_start(), raw_instruction_size());
}
-void Code::CopyFrom(Heap* heap, const CodeDesc& desc) {
- CopyFromNoFlush(heap, desc);
- FlushICache();
-}
-
void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
// Copy code.
CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
@@ -14589,7 +14476,7 @@ const char* Code::Kind2String(Kind kind) {
// Identify kind of code.
const char* AbstractCode::Kind2String(Kind kind) {
if (kind < AbstractCode::INTERPRETED_FUNCTION)
- return Code::Kind2String((Code::Kind)kind);
+ return Code::Kind2String(static_cast<Code::Kind>(kind));
if (kind == AbstractCode::INTERPRETED_FUNCTION) return "INTERPRETED_FUNCTION";
UNREACHABLE();
}
@@ -14809,14 +14696,22 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::INT32_REGISTER: {
int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code) << "}";
+ os << "{input=" << converter.NameOfCPURegister(reg_code)
+ << " (int32)}";
+ break;
+ }
+
+ case Translation::INT64_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code)
+ << " (int64)}";
break;
}
case Translation::UINT32_REGISTER: {
int reg_code = iterator.Next();
os << "{input=" << converter.NameOfCPURegister(reg_code)
- << " (unsigned)}";
+ << " (uint32)}";
break;
}
@@ -14852,13 +14747,19 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::INT32_STACK_SLOT: {
int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << "}";
+ os << "{input=" << input_slot_index << " (int32)}";
+ break;
+ }
+
+ case Translation::INT64_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << " (int64)}";
break;
}
case Translation::UINT32_STACK_SLOT: {
int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << " (unsigned)}";
+ os << "{input=" << input_slot_index << " (uint32)}";
break;
}
@@ -15223,7 +15124,8 @@ void DependentCode::SetDependentCode(Handle<HeapObject> object,
}
}
-void DependentCode::InstallDependency(Isolate* isolate, MaybeObjectHandle code,
+void DependentCode::InstallDependency(Isolate* isolate,
+ const MaybeObjectHandle& code,
Handle<HeapObject> object,
DependencyGroup group) {
Handle<DependentCode> old_deps(DependentCode::GetDependentCode(object),
@@ -15237,7 +15139,7 @@ void DependentCode::InstallDependency(Isolate* isolate, MaybeObjectHandle code,
Handle<DependentCode> DependentCode::InsertWeakCode(
Isolate* isolate, Handle<DependentCode> entries, DependencyGroup group,
- MaybeObjectHandle code) {
+ const MaybeObjectHandle& code) {
if (entries->length() == 0 || entries->group() > group) {
// There is no such group.
return DependentCode::New(isolate, group, code, entries);
@@ -15270,7 +15172,7 @@ Handle<DependentCode> DependentCode::InsertWeakCode(
Handle<DependentCode> DependentCode::New(Isolate* isolate,
DependencyGroup group,
- MaybeObjectHandle object,
+ const MaybeObjectHandle& object,
Handle<DependentCode> next) {
Handle<DependentCode> result = Handle<DependentCode>::cast(
isolate->factory()->NewWeakFixedArray(kCodesStartIndex + 1, TENURED));
@@ -15295,7 +15197,7 @@ bool DependentCode::Compact() {
int new_count = 0;
for (int i = 0; i < old_count; i++) {
MaybeObject* obj = object_at(i);
- if (!obj->IsClearedWeakHeapObject()) {
+ if (!obj->IsCleared()) {
if (i != new_count) {
copy(i, new_count);
}
@@ -15309,38 +15211,6 @@ bool DependentCode::Compact() {
return new_count < old_count;
}
-bool DependentCode::Contains(DependencyGroup group, MaybeObject* code) {
- if (this->length() == 0 || this->group() > group) {
- // There is no such group.
- return false;
- }
- if (this->group() < group) {
- // The group comes later in the list.
- return next_link()->Contains(group, code);
- }
- DCHECK_EQ(group, this->group());
- int count = this->count();
- for (int i = 0; i < count; i++) {
- if (object_at(i) == code) return true;
- }
- return false;
-}
-
-
-bool DependentCode::IsEmpty(DependencyGroup group) {
- if (this->length() == 0 || this->group() > group) {
- // There is no such group.
- return true;
- }
- if (this->group() < group) {
- // The group comes later in the list.
- return next_link()->IsEmpty(group);
- }
- DCHECK_EQ(group, this->group());
- return count() == 0;
-}
-
-
bool DependentCode::MarkCodeForDeoptimization(
Isolate* isolate,
DependentCode::DependencyGroup group) {
@@ -15359,8 +15229,8 @@ bool DependentCode::MarkCodeForDeoptimization(
int count = this->count();
for (int i = 0; i < count; i++) {
MaybeObject* obj = object_at(i);
- if (obj->IsClearedWeakHeapObject()) continue;
- Code* code = Code::cast(obj->ToWeakHeapObject());
+ if (obj->IsCleared()) continue;
+ Code* code = Code::cast(obj->GetHeapObjectAssumeWeak());
if (!code->marked_for_deoptimization()) {
code->SetMarkedForDeoptimization(DependencyGroupName(group));
marked = true;
@@ -15974,19 +15844,6 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
}
-// static
-bool Map::IsValidElementsTransition(ElementsKind from_kind,
- ElementsKind to_kind) {
- // Transitions can't go backwards.
- if (!IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
- return false;
- }
-
- // Transitions from HOLEY -> PACKED are not allowed.
- return !IsHoleyElementsKind(from_kind) || IsHoleyElementsKind(to_kind);
-}
-
-
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
Map* map = array->map();
// Fast path: "length" is the first fast property of arrays. Since it's not
@@ -16134,6 +15991,18 @@ bool FixedArrayBase::IsCowArray() const {
}
bool JSObject::IsApiWrapper() {
+ // These object types can carry information relevant for embedders. The
+ // *_API_* types are generated through templates which can have embedder
+ // fields. The other types have their embedder fields added at compile time.
+ auto instance_type = map()->instance_type();
+ return instance_type == JS_API_OBJECT_TYPE ||
+ instance_type == JS_ARRAY_BUFFER_TYPE ||
+ instance_type == JS_DATA_VIEW_TYPE ||
+ instance_type == JS_SPECIAL_API_OBJECT_TYPE ||
+ instance_type == JS_TYPED_ARRAY_TYPE;
+}
+
+bool JSObject::IsDroppableApiWrapper() {
auto instance_type = map()->instance_type();
return instance_type == JS_API_OBJECT_TYPE ||
instance_type == JS_SPECIAL_API_OBJECT_TYPE;
@@ -16141,9 +16010,9 @@ bool JSObject::IsApiWrapper() {
const char* Symbol::PrivateSymbolToName() const {
ReadOnlyRoots roots = GetReadOnlyRoots();
-#define SYMBOL_CHECK_AND_PRINT(name) \
+#define SYMBOL_CHECK_AND_PRINT(_, name) \
if (this == roots.name()) return #name;
- PRIVATE_SYMBOL_LIST(SYMBOL_CHECK_AND_PRINT)
+ PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_CHECK_AND_PRINT, /* not used */)
#undef SYMBOL_CHECK_AND_PRINT
return "UNKNOWN";
}
@@ -16754,8 +16623,7 @@ Handle<Derived> HashTable<Derived, Shape>::NewInternal(
Isolate* isolate, int capacity, PretenureFlag pretenure) {
Factory* factory = isolate->factory();
int length = EntryToIndex(capacity);
- Heap::RootListIndex map_root_index =
- static_cast<Heap::RootListIndex>(Shape::GetMapRootIndex());
+ RootIndex map_root_index = Shape::GetMapRootIndex();
Handle<FixedArray> array =
factory->NewFixedArrayWithMap(map_root_index, length, pretenure);
Handle<Derived> table = Handle<Derived>::cast(array);
@@ -17264,7 +17132,7 @@ class StringTableNoAllocateKey : public StringTableKey {
set_hash_field(string->hash_field());
}
- ~StringTableNoAllocateKey() {
+ ~StringTableNoAllocateKey() override {
if (one_byte_) {
if (one_byte_content_ != one_byte_buffer_) delete[] one_byte_content_;
} else {
@@ -17436,18 +17304,6 @@ Handle<ObjectHashSet> ObjectHashSet::Add(Isolate* isolate,
return set;
}
-Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
- Handle<SharedFunctionInfo> shared,
- LanguageMode language_mode) {
- Isolate* isolate = GetIsolate();
- StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
- int entry = FindEntry(isolate, &key);
- if (entry == kNotFound) return isolate->factory()->undefined_value();
- int index = EntryToIndex(entry);
- if (!get(index)->IsFixedArray()) return isolate->factory()->undefined_value();
- return Handle<Object>(get(index + 1), isolate);
-}
-
namespace {
const int kLiteralEntryLength = 2;
@@ -17468,8 +17324,7 @@ int SearchLiteralsMapEntry(CompilationCacheTable* cache, int cache_entry,
WeakFixedArray* literals_map = WeakFixedArray::cast(obj);
int length = literals_map->length();
for (int i = 0; i < length; i += kLiteralEntryLength) {
- DCHECK(literals_map->Get(i + kLiteralContextOffset)
- ->IsWeakOrClearedHeapObject());
+ DCHECK(literals_map->Get(i + kLiteralContextOffset)->IsWeakOrCleared());
if (literals_map->Get(i + kLiteralContextOffset) ==
HeapObjectReference::Weak(native_context)) {
return i;
@@ -17511,8 +17366,7 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
DCHECK_LT(entry, 0);
int length = old_literals_map->length();
for (int i = 0; i < length; i += kLiteralEntryLength) {
- if (old_literals_map->Get(i + kLiteralContextOffset)
- ->IsClearedWeakHeapObject()) {
+ if (old_literals_map->Get(i + kLiteralContextOffset)->IsCleared()) {
new_literals_map = old_literals_map;
entry = i;
break;
@@ -17535,11 +17389,11 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
#ifdef DEBUG
for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
MaybeObject* object = new_literals_map->Get(i + kLiteralContextOffset);
- DCHECK(object->IsClearedWeakHeapObject() ||
- object->ToWeakHeapObject()->IsNativeContext());
+ DCHECK(object->IsCleared() ||
+ object->GetHeapObjectAssumeWeak()->IsNativeContext());
object = new_literals_map->Get(i + kLiteralLiteralsOffset);
- DCHECK(object->IsClearedWeakHeapObject() ||
- object->ToWeakHeapObject()->IsFeedbackCell());
+ DCHECK(object->IsCleared() ||
+ object->GetHeapObjectAssumeWeak()->IsFeedbackCell());
}
#endif
@@ -17559,9 +17413,9 @@ FeedbackCell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
DCHECK_LE(entry + kLiteralEntryLength, literals_map->length());
MaybeObject* object = literals_map->Get(entry + kLiteralLiteralsOffset);
- result = object->IsClearedWeakHeapObject()
+ result = object->IsCleared()
? nullptr
- : FeedbackCell::cast(object->ToWeakHeapObject());
+ : FeedbackCell::cast(object->GetHeapObjectAssumeWeak());
}
DCHECK(result == nullptr || result->IsFeedbackCell());
return result;
@@ -17618,21 +17472,6 @@ Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
}
-Handle<CompilationCacheTable> CompilationCacheTable::Put(
- Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<SharedFunctionInfo> shared, LanguageMode language_mode,
- Handle<Object> value) {
- Isolate* isolate = cache->GetIsolate();
- StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
- Handle<Object> k = key.AsHandle(isolate);
- cache = EnsureCapacity(isolate, cache, 1);
- int entry = cache->FindInsertionEntry(key.Hash());
- cache->set(EntryToIndex(entry), *k);
- cache->set(EntryToIndex(entry) + 1, *value);
- cache->ElementAdded();
- return cache;
-}
-
Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> native_context, LanguageMode language_mode,
@@ -18220,8 +18059,7 @@ Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Isolate* isolate,
if (capacity > ObjectHashTable::kMaxCapacity) {
for (size_t i = 0; i < 2; ++i) {
isolate->heap()->CollectAllGarbage(
- Heap::kFinalizeIncrementalMarkingMask,
- GarbageCollectionReason::kFullHashtable);
+ Heap::kNoGCFlags, GarbageCollectionReason::kFullHashtable);
}
table->Rehash(isolate);
}
@@ -18390,8 +18228,10 @@ MaybeHandle<JSDate> JSDate::New(Handle<JSFunction> constructor,
Handle<JSReceiver> new_target, double tv) {
Isolate* const isolate = constructor->GetIsolate();
Handle<JSObject> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- JSObject::New(constructor, new_target), JSDate);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(constructor, new_target, Handle<AllocationSite>::null()),
+ JSDate);
if (-DateCache::kMaxTimeInMs <= tv && tv <= DateCache::kMaxTimeInMs) {
tv = DoubleToInteger(tv) + 0.0;
} else {
@@ -18818,6 +18658,82 @@ MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
return MaybeHandle<Name>();
}
+Smi* Smi::LexicographicCompare(Isolate* isolate, Smi* x, Smi* y) {
+ DisallowHeapAllocation no_allocation;
+ DisallowJavascriptExecution no_js(isolate);
+
+ int x_value = Smi::ToInt(x);
+ int y_value = Smi::ToInt(y);
+
+  // If the integers are equal, so are the string representations.
+ if (x_value == y_value) return Smi::FromInt(0);
+
+ // If one of the integers is zero the normal integer order is the
+ // same as the lexicographic order of the string representations.
+ if (x_value == 0 || y_value == 0)
+ return Smi::FromInt(x_value < y_value ? -1 : 1);
+
+  // If only one of the integers is negative, the negative number is
+  // smaller because the char code of '-' is less than the char code
+  // of any digit. Otherwise, we make both values positive.
+
+  // Use unsigned values; otherwise the logic is incorrect for -MIN_INT on
+  // architectures using 32-bit Smis.
+ uint32_t x_scaled = x_value;
+ uint32_t y_scaled = y_value;
+ if (x_value < 0 || y_value < 0) {
+ if (y_value >= 0) return Smi::FromInt(-1);
+ if (x_value >= 0) return Smi::FromInt(1);
+ x_scaled = -x_value;
+ y_scaled = -y_value;
+ }
+
+ // clang-format off
+ static const uint32_t kPowersOf10[] = {
+ 1, 10, 100, 1000,
+ 10 * 1000, 100 * 1000, 1000 * 1000, 10 * 1000 * 1000,
+ 100 * 1000 * 1000, 1000 * 1000 * 1000};
+ // clang-format on
+
+ // If the integers have the same number of decimal digits they can be
+ // compared directly as the numeric order is the same as the
+ // lexicographic order. If one integer has fewer digits, it is scaled
+ // by some power of 10 to have the same number of digits as the longer
+ // integer. If the scaled integers are equal it means the shorter
+ // integer comes first in the lexicographic order.
+
+ // From http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
+ int x_log2 = 31 - base::bits::CountLeadingZeros(x_scaled);
+ int x_log10 = ((x_log2 + 1) * 1233) >> 12;
+ x_log10 -= x_scaled < kPowersOf10[x_log10];
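+  // e.g. x_scaled = 100: x_log2 = 6, so x_log10 = (7 * 1233) >> 12 = 2;
+  // since 100 >= kPowersOf10[2] nothing is subtracted (three digits).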
+
+ int y_log2 = 31 - base::bits::CountLeadingZeros(y_scaled);
+ int y_log10 = ((y_log2 + 1) * 1233) >> 12;
+ y_log10 -= y_scaled < kPowersOf10[y_log10];
+
+ int tie = 0;
+
+ if (x_log10 < y_log10) {
+ // X has fewer digits. We would like to simply scale up X but that
+    // might overflow, e.g. when comparing 9 with 1_000_000_000, 9 would
+ // be scaled up to 9_000_000_000. So we scale up by the next
+ // smallest power and scale down Y to drop one digit. It is OK to
+ // drop one digit from the longer integer since the final digit is
+ // past the length of the shorter integer.
+ x_scaled *= kPowersOf10[y_log10 - x_log10 - 1];
+ y_scaled /= 10;
+ tie = -1;
+ } else if (y_log10 < x_log10) {
+ y_scaled *= kPowersOf10[x_log10 - y_log10 - 1];
+ x_scaled /= 10;
+ tie = 1;
+ }
+
+ if (x_scaled < y_scaled) return Smi::FromInt(-1);
+ if (x_scaled > y_scaled) return Smi::FromInt(1);
+ return Smi::FromInt(tie);
+}
+
// Force instantiation of template instances class.
// Please note this list is compiler dependent.
// Keep this at the end of this file
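As a cross-check of the digit-scaling trick in Smi::LexicographicCompare
above, the following standalone sketch (hypothetical, not part of V8: plain
ints stand in for Smis, and a simple division loop replaces the
CountLeadingZeros-based log10) implements the same comparison and exercises a
few cases:

    #include <cassert>
    #include <cstdint>

    // Compare x and y as if converted to decimal strings; returns -1, 0 or 1.
    int LexicographicCompareInts(int x, int y) {
      if (x == y) return 0;
      if (x == 0 || y == 0) return x < y ? -1 : 1;
      uint32_t xs = static_cast<uint32_t>(x);
      uint32_t ys = static_cast<uint32_t>(y);
      if (x < 0 || y < 0) {
        if (y >= 0) return -1;  // '-' sorts before every digit.
        if (x >= 0) return 1;
        xs = 0u - xs;  // Two's-complement magnitude; well-defined for INT_MIN.
        ys = 0u - ys;
      }
      static const uint32_t kPow10[] = {1, 10, 100, 1000, 10000, 100000,
                                        1000000, 10000000, 100000000,
                                        1000000000};
      // Number of decimal digits minus one (a loop instead of the bit hack).
      auto log10u = [](uint32_t v) {
        int d = 0;
        while (v >= 10) { v /= 10; ++d; }
        return d;
      };
      int xd = log10u(xs), yd = log10u(ys);
      int tie = 0;
      if (xd < yd) {
        // Scale the shorter number up by one power less than the digit
        // difference and drop the longer number's last digit, as above.
        xs *= kPow10[yd - xd - 1];
        ys /= 10;
        tie = -1;  // On a scaled tie the shorter string sorts first.
      } else if (yd < xd) {
        ys *= kPow10[xd - yd - 1];
        xs /= 10;
        tie = 1;
      }
      if (xs < ys) return -1;
      if (xs > ys) return 1;
      return tie;
    }

    int main() {
      assert(LexicographicCompareInts(9, 100) == 1);   // "9" > "100"
      assert(LexicographicCompareInts(10, 9) == -1);   // "10" < "9"
      assert(LexicographicCompareInts(100, 10) == 1);  // "10" is a prefix
      assert(LexicographicCompareInts(-2, 1) == -1);   // "-2" < "1"
      return 0;
    }

For 9 vs 100, for example, 9 is scaled up to 90 while 100 is scaled down to
10, so the routine returns 1, matching the string order "9" > "100".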
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index c848e92af7..cd64199982 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -8,6 +8,7 @@
#include <iosfwd>
#include <memory>
+#include "include/v8-internal.h"
#include "include/v8.h"
#include "include/v8config.h"
#include "src/assert-scope.h"
@@ -75,11 +76,16 @@
// - JSDate
// - JSMessageObject
// - JSModuleNamespace
+// - JSV8BreakIterator // If V8_INTL_SUPPORT enabled.
// - JSCollator // If V8_INTL_SUPPORT enabled.
+// - JSDateTimeFormat // If V8_INTL_SUPPORT enabled.
// - JSListFormat // If V8_INTL_SUPPORT enabled.
// - JSLocale // If V8_INTL_SUPPORT enabled.
+// - JSNumberFormat // If V8_INTL_SUPPORT enabled.
// - JSPluralRules // If V8_INTL_SUPPORT enabled.
// - JSRelativeTimeFormat // If V8_INTL_SUPPORT enabled.
+// - JSSegmenter // If V8_INTL_SUPPORT enabled.
+// - WasmExceptionObject
// - WasmGlobalObject
// - WasmInstanceObject
// - WasmMemoryObject
@@ -170,6 +176,7 @@
// - PromiseFulfillReactionJobTask
// - PromiseRejectReactionJobTask
// - PromiseResolveThenableJobTask
+// - MicrotaskQueue
// - Module
// - ModuleInfoEntry
// - FeedbackCell
@@ -189,73 +196,6 @@ namespace internal {
struct InliningPosition;
class PropertyDescriptorObject;
-enum KeyedAccessLoadMode {
- STANDARD_LOAD,
- LOAD_IGNORE_OUT_OF_BOUNDS,
-};
-
-enum KeyedAccessStoreMode {
- STANDARD_STORE,
- STORE_TRANSITION_TO_OBJECT,
- STORE_TRANSITION_TO_DOUBLE,
- STORE_AND_GROW_NO_TRANSITION_HANDLE_COW,
- STORE_AND_GROW_TRANSITION_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_TO_DOUBLE,
- STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
- STORE_NO_TRANSITION_HANDLE_COW
-};
-
-enum MutableMode {
- MUTABLE,
- IMMUTABLE
-};
-
-
-static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) {
- return store_mode == STORE_TRANSITION_TO_OBJECT ||
- store_mode == STORE_TRANSITION_TO_DOUBLE ||
- store_mode == STORE_AND_GROW_TRANSITION_TO_OBJECT ||
- store_mode == STORE_AND_GROW_TRANSITION_TO_DOUBLE;
-}
-
-static inline bool IsCOWHandlingStoreMode(KeyedAccessStoreMode store_mode) {
- return store_mode == STORE_NO_TRANSITION_HANDLE_COW ||
- store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
-}
-
-static inline KeyedAccessStoreMode GetNonTransitioningStoreMode(
- KeyedAccessStoreMode store_mode, bool receiver_was_cow) {
- switch (store_mode) {
- case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
- case STORE_AND_GROW_TRANSITION_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_TO_DOUBLE:
- store_mode = STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
- break;
- case STANDARD_STORE:
- case STORE_TRANSITION_TO_OBJECT:
- case STORE_TRANSITION_TO_DOUBLE:
- store_mode =
- receiver_was_cow ? STORE_NO_TRANSITION_HANDLE_COW : STANDARD_STORE;
- break;
- case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
- case STORE_NO_TRANSITION_HANDLE_COW:
- break;
- }
- DCHECK(!IsTransitionStoreMode(store_mode));
- DCHECK_IMPLIES(receiver_was_cow, IsCOWHandlingStoreMode(store_mode));
- return store_mode;
-}
-
-
-static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
- return store_mode >= STORE_AND_GROW_NO_TRANSITION_HANDLE_COW &&
- store_mode <= STORE_AND_GROW_TRANSITION_TO_DOUBLE;
-}
-
-
-enum IcCheckType { ELEMENT, PROPERTY };
-
-
// SKIP_WRITE_BARRIER skips the write barrier.
// UPDATE_WEAK_WRITE_BARRIER skips the marking part of the write barrier and
// only performs the generational part.
@@ -351,8 +291,8 @@ const uint32_t kOneByteDataHintTag = 0x10;
// If bit 7 is clear and string representation indicates an external string,
// then bit 5 indicates whether the data pointer is cached.
-const uint32_t kShortExternalStringMask = 0x20;
-const uint32_t kShortExternalStringTag = 0x20;
+const uint32_t kUncachedExternalStringMask = 0x20;
+const uint32_t kUncachedExternalStringTag = 0x20;
// A ConsString with an empty string as the right side is a candidate
// for being shortcut by the garbage collector. We don't allocate any
@@ -383,15 +323,15 @@ enum InstanceType : uint16_t {
EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag |
kInternalizedTag,
- SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE = EXTERNAL_INTERNALIZED_STRING_TYPE |
- kShortExternalStringTag |
- kInternalizedTag,
- SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
- EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kShortExternalStringTag |
+ UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kUncachedExternalStringTag |
+ kInternalizedTag,
+ UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
+ EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kUncachedExternalStringTag |
kInternalizedTag,
- SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
- kShortExternalStringTag | kInternalizedTag,
+ kUncachedExternalStringTag | kInternalizedTag,
STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
ONE_BYTE_STRING_TYPE =
ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
@@ -409,12 +349,12 @@ enum InstanceType : uint16_t {
EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
kNotInternalizedTag,
- SHORT_EXTERNAL_STRING_TYPE =
- SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE =
- SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
- SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
+ UNCACHED_EXTERNAL_STRING_TYPE =
+ UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE =
+ UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
kNotInternalizedTag,
THIN_STRING_TYPE = kTwoByteStringTag | kThinStringTag | kNotInternalizedTag,
THIN_ONE_BYTE_STRING_TYPE =
@@ -422,7 +362,7 @@ enum InstanceType : uint16_t {
// Non-string names
SYMBOL_TYPE =
- 1 + (kIsNotInternalizedMask | kShortExternalStringMask |
+ 1 + (kIsNotInternalizedMask | kUncachedExternalStringMask |
kOneByteDataHintMask | kStringEncodingMask |
kStringRepresentationMask), // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
@@ -488,6 +428,8 @@ enum InstanceType : uint16_t {
PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
+ MICROTASK_QUEUE_TYPE,
+
ALLOCATION_SITE_TYPE,
// FixedArrays.
FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
@@ -503,7 +445,8 @@ enum InstanceType : uint16_t {
EPHEMERON_HASH_TABLE_TYPE,
SCOPE_INFO_TYPE,
SCRIPT_CONTEXT_TABLE_TYPE,
- BLOCK_CONTEXT_TYPE, // FIRST_CONTEXT_TYPE
+ AWAIT_CONTEXT_TYPE, // FIRST_CONTEXT_TYPE
+ BLOCK_CONTEXT_TYPE,
CATCH_CONTEXT_TYPE,
DEBUG_EVALUATE_CONTEXT_TYPE,
EVAL_CONTEXT_TYPE,
@@ -582,13 +525,18 @@ enum InstanceType : uint16_t {
JS_DATA_VIEW_TYPE,
#ifdef V8_INTL_SUPPORT
+ JS_INTL_V8_BREAK_ITERATOR_TYPE,
JS_INTL_COLLATOR_TYPE,
+ JS_INTL_DATE_TIME_FORMAT_TYPE,
JS_INTL_LIST_FORMAT_TYPE,
JS_INTL_LOCALE_TYPE,
+ JS_INTL_NUMBER_FORMAT_TYPE,
JS_INTL_PLURAL_RULES_TYPE,
JS_INTL_RELATIVE_TIME_FORMAT_TYPE,
+ JS_INTL_SEGMENTER_TYPE,
#endif // V8_INTL_SUPPORT
+ WASM_EXCEPTION_TYPE,
WASM_GLOBAL_TYPE,
WASM_INSTANCE_TYPE,
WASM_MEMORY_TYPE,
@@ -622,7 +570,7 @@ enum InstanceType : uint16_t {
FIRST_WEAK_FIXED_ARRAY_TYPE = WEAK_FIXED_ARRAY_TYPE,
LAST_WEAK_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
// Boundaries for testing if given HeapObject is a Context
- FIRST_CONTEXT_TYPE = BLOCK_CONTEXT_TYPE,
+ FIRST_CONTEXT_TYPE = AWAIT_CONTEXT_TYPE,
LAST_CONTEXT_TYPE = WITH_CONTEXT_TYPE,
// Boundaries for testing if given HeapObject is a subclass of Microtask.
FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
@@ -695,22 +643,11 @@ class FixedArrayBase;
class PropertyArray;
class FunctionLiteral;
class FunctionTemplateInfo;
-class JSGeneratorObject;
-class JSAsyncGeneratorObject;
-class JSGlobalObject;
-class JSGlobalProxy;
-#ifdef V8_INTL_SUPPORT
-class JSCollator;
-class JSListFormat;
-class JSLocale;
-class JSPluralRules;
-class JSRelativeTimeFormat;
-#endif // V8_INTL_SUPPORT
-class JSPromise;
class KeyAccumulator;
class LayoutDescriptor;
class LookupIterator;
class FieldType;
+class MicrotaskQueue;
class Module;
class ModuleInfoEntry;
class ObjectHashTable;
@@ -900,6 +837,7 @@ class ZoneForwardList;
V(UncompiledDataWithoutPreParsedScope) \
V(Undetectable) \
V(UniqueName) \
+ V(WasmExceptionObject) \
V(WasmGlobalObject) \
V(WasmInstanceObject) \
V(WasmMemoryObject) \
@@ -911,11 +849,15 @@ class ZoneForwardList;
#ifdef V8_INTL_SUPPORT
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
+ V(JSV8BreakIterator) \
V(JSCollator) \
+ V(JSDateTimeFormat) \
V(JSListFormat) \
V(JSLocale) \
+ V(JSNumberFormat) \
V(JSPluralRules) \
- V(JSRelativeTimeFormat)
+ V(JSRelativeTimeFormat) \
+ V(JSSegmenter)
#else
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V)
#endif // V8_INTL_SUPPORT
@@ -983,6 +925,7 @@ class ZoneForwardList;
V(JSMessageObject, JS_MESSAGE_OBJECT_TYPE) \
V(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE) \
V(JSPromise, JS_PROMISE_TYPE) \
+ V(JSProxy, JS_PROXY_TYPE) \
V(JSRegExp, JS_REGEXP_TYPE) \
V(JSRegExpResult, JS_ARRAY_TYPE) \
V(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE) \
@@ -1021,6 +964,7 @@ class ZoneForwardList;
UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE) \
V(UncompiledDataWithPreParsedScope, \
UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE) \
+ V(WasmExceptionObject, WASM_EXCEPTION_TYPE) \
V(WasmGlobalObject, WASM_GLOBAL_TYPE) \
V(WasmInstanceObject, WASM_INSTANCE_TYPE) \
V(WasmMemoryObject, WASM_MEMORY_TYPE) \
@@ -1029,13 +973,17 @@ class ZoneForwardList;
V(WeakArrayList, WEAK_ARRAY_LIST_TYPE)
#ifdef V8_INTL_SUPPORT
-#define INSTANCE_TYPE_CHECKERS_SINGLE(V) \
- INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \
- V(JSCollator, JS_INTL_COLLATOR_TYPE) \
- V(JSListFormat, JS_INTL_LIST_FORMAT_TYPE) \
- V(JSLocale, JS_INTL_LOCALE_TYPE) \
- V(JSPluralRules, JS_INTL_PLURAL_RULES_TYPE) \
- V(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE)
+#define INSTANCE_TYPE_CHECKERS_SINGLE(V) \
+ INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \
+ V(JSV8BreakIterator, JS_INTL_V8_BREAK_ITERATOR_TYPE) \
+ V(JSCollator, JS_INTL_COLLATOR_TYPE) \
+ V(JSDateTimeFormat, JS_INTL_DATE_TIME_FORMAT_TYPE) \
+ V(JSListFormat, JS_INTL_LIST_FORMAT_TYPE) \
+ V(JSLocale, JS_INTL_LOCALE_TYPE) \
+ V(JSNumberFormat, JS_INTL_NUMBER_FORMAT_TYPE) \
+ V(JSPluralRules, JS_INTL_PLURAL_RULES_TYPE) \
+ V(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
+ V(JSSegmenter, JS_INTL_SEGMENTER_TYPE)
#else
@@ -1060,7 +1008,8 @@ class ZoneForwardList;
#define INSTANCE_TYPE_CHECKERS_CUSTOM(V) \
V(FixedArrayBase) \
V(InternalizedString) \
- V(JSObject)
+ V(JSObject) \
+ V(JSReceiver)
#define INSTANCE_TYPE_CHECKERS(V) \
INSTANCE_TYPE_CHECKERS_SINGLE(V) \
@@ -1120,13 +1069,6 @@ class Object {
V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
V8_INLINE bool IsNullOrUndefined() const;
- // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
- // a keyed store is of the form a[expression] = foo.
- enum StoreFromKeyed {
- MAY_BE_STORE_FROM_KEYED,
- CERTAINLY_NOT_STORE_FROM_KEYED
- };
-
enum class Conversion { kToNumber, kToNumeric };
#define RETURN_FAILURE(isolate, should_throw, call) \
@@ -1333,19 +1275,19 @@ class Object {
// covered by it (eg., concerning API callbacks).
V8_WARN_UNUSED_RESULT static Maybe<bool> SetProperty(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
- StoreFromKeyed store_mode);
+ StoreOrigin store_origin);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SetProperty(
Isolate* isolate, Handle<Object> object, Handle<Name> name,
Handle<Object> value, LanguageMode language_mode,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
+ StoreOrigin store_origin = StoreOrigin::kMaybeKeyed);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> SetPropertyOrElement(
Isolate* isolate, Handle<Object> object, Handle<Name> name,
Handle<Object> value, LanguageMode language_mode,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
+ StoreOrigin store_origin = StoreOrigin::kMaybeKeyed);
V8_WARN_UNUSED_RESULT static Maybe<bool> SetSuperProperty(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
- StoreFromKeyed store_mode);
+ StoreOrigin store_origin);
V8_WARN_UNUSED_RESULT static Maybe<bool> CannotCreateProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
@@ -1362,7 +1304,7 @@ class Object {
LookupIterator* it, Handle<Object> value);
V8_WARN_UNUSED_RESULT static Maybe<bool> AddDataProperty(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- ShouldThrow should_throw, StoreFromKeyed store_mode);
+ ShouldThrow should_throw, StoreOrigin store_origin);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Isolate* isolate, Handle<Object> object, Handle<Name> name);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
@@ -1484,7 +1426,7 @@ class Object {
// Return value is only meaningful if [found] is set to true on return.
V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyInternal(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
- StoreFromKeyed store_mode, bool* found);
+ StoreOrigin store_origin, bool* found);
V8_WARN_UNUSED_RESULT static MaybeHandle<Name> ConvertToName(
Isolate* isolate, Handle<Object> input);
@@ -1569,6 +1511,13 @@ class Smi: public Object {
return result;
}
+ // Compare two Smis x, y as if they were converted to strings and then
+ // compared lexicographically. Returns:
+ // -1 if x < y.
+ // 0 if x == y.
+ // 1 if x > y.
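+  //  For example, x = 10, y = 9 yields -1, because "10" < "9" as strings.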
+ static Smi* LexicographicCompare(Isolate* isolate, Smi* x, Smi* y);
+
DECL_CAST(Smi)
// Dispatched behavior.
@@ -1588,7 +1537,7 @@ class Smi: public Object {
// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
// encoded in the first word. The class MapWord is an abstraction of the
// value in a heap object's first word.
-class MapWord BASE_EMBEDDED {
+class MapWord {
public:
// Normal state: the map word contains a map pointer.
@@ -1915,912 +1864,6 @@ enum class KeyCollectionMode {
static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
};
-enum class AllocationSiteUpdateMode { kUpdate, kCheckOnly };
-
-class PropertyArray : public HeapObject {
- public:
- // [length]: length of the array.
- inline int length() const;
-
- // Get the length using acquire loads.
- inline int synchronized_length() const;
-
- // This is only used on a newly allocated PropertyArray which
- // doesn't have an existing hash.
- inline void initialize_length(int length);
-
- inline void SetHash(int hash);
- inline int Hash() const;
-
- inline Object* get(int index) const;
-
- inline void set(int index, Object* value);
- // Setter with explicit barrier mode.
- inline void set(int index, Object* value, WriteBarrierMode mode);
-
- // Gives access to raw memory which stores the array's data.
- inline Object** data_start();
-
- // Garbage collection support.
- static constexpr int SizeFor(int length) {
- return kHeaderSize + length * kPointerSize;
- }
-
- DECL_CAST(PropertyArray)
- DECL_PRINTER(PropertyArray)
- DECL_VERIFIER(PropertyArray)
-
- // Layout description.
- static const int kLengthAndHashOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthAndHashOffset + kPointerSize;
-
- // Garbage collection support.
- typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- static const int kLengthFieldSize = 10;
- class LengthField : public BitField<int, 0, kLengthFieldSize> {};
- static const int kMaxLength = LengthField::kMax;
- class HashField : public BitField<int, kLengthFieldSize,
- kSmiValueSize - kLengthFieldSize - 1> {};
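-  // That is, the Smi payload packs the 10-bit length below the hash bits;
-  // kMaxLength is (1 << 10) - 1 = 1023.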
-
- static const int kNoHashSentinel = 0;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyArray);
-};
-
-// JSReceiver includes types on which properties can be defined, i.e.,
-// JSObject and JSProxy.
-class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
- public:
- // Use the mixin methods over the HeapObject methods.
- // TODO(v8:7786) Remove once the HeapObject methods are gone.
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
-  // Returns true if there is no slow (i.e., dictionary) backing store.
- inline bool HasFastProperties() const;
-
- // Returns the properties array backing store if it
- // exists. Otherwise, returns an empty_property_array when there's a
- // Smi (hash code) or an empty_fixed_array for a fast properties
- // map.
- inline PropertyArray* property_array() const;
-
- // Gets slow properties for non-global objects.
- inline NameDictionary* property_dictionary() const;
-
- // Sets the properties backing store and makes sure any existing hash is moved
- // to the new properties store. To clear out the properties store, pass in the
-  // empty_fixed_array(); the hash will be maintained in this case as well.
- void SetProperties(HeapObject* properties);
-
- // There are five possible values for the properties offset.
- // 1) EmptyFixedArray/EmptyPropertyDictionary - This is the standard
- // placeholder.
- //
- // 2) Smi - This is the hash code of the object.
- //
- // 3) PropertyArray - This is similar to a FixedArray but stores
- // the hash code of the object in its length field. This is a fast
- // backing store.
- //
- // 4) NameDictionary - This is the dictionary-mode backing store.
- //
-  //    5) GlobalDictionary - This is the backing store for the
- // GlobalObject.
- //
- // This is used only in the deoptimizer and heap. Please use the
- // above typed getters and setters to access the properties.
- DECL_ACCESSORS(raw_properties_or_hash, Object)
-
- inline void initialize_properties();
-
- // Deletes an existing named property in a normalized object.
- static void DeleteNormalizedProperty(Handle<JSReceiver> object, int entry);
-
- DECL_CAST(JSReceiver)
-
- // ES6 section 7.1.1 ToPrimitive
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ToPrimitive(
- Handle<JSReceiver> receiver,
- ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
-
- // ES6 section 7.1.1.1 OrdinaryToPrimitive
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> OrdinaryToPrimitive(
- Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint);
-
- static MaybeHandle<Context> GetFunctionRealm(Handle<JSReceiver> receiver);
-
- // Get the first non-hidden prototype.
- static inline MaybeHandle<Object> GetPrototype(Isolate* isolate,
- Handle<JSReceiver> receiver);
-
- V8_WARN_UNUSED_RESULT static Maybe<bool> HasInPrototypeChain(
- Isolate* isolate, Handle<JSReceiver> object, Handle<Object> proto);
-
- // Reads all enumerable own properties of source and adds them to
- // target, using either Set or CreateDataProperty depending on the
- // use_set argument. This only copies values not present in the
- // maybe_excluded_properties list.
- V8_WARN_UNUSED_RESULT static Maybe<bool> SetOrCopyDataProperties(
- Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
- const ScopedVector<Handle<Object>>* excluded_properties = nullptr,
- bool use_set = true);
-
- // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
- V8_WARN_UNUSED_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
- V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasProperty(
- Handle<JSReceiver> object, Handle<Name> name);
- V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasElement(
- Handle<JSReceiver> object, uint32_t index);
-
- V8_WARN_UNUSED_RESULT static Maybe<bool> HasOwnProperty(
- Handle<JSReceiver> object, Handle<Name> name);
- V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasOwnProperty(
- Handle<JSReceiver> object, uint32_t index);
-
- V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
- Isolate* isolate, Handle<JSReceiver> receiver, const char* key);
- V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
- Isolate* isolate, Handle<JSReceiver> receiver, Handle<Name> name);
- V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetElement(
- Isolate* isolate, Handle<JSReceiver> receiver, uint32_t index);
-
- // Implementation of ES6 [[Delete]]
- V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyOrElement(
- Handle<JSReceiver> object, Handle<Name> name,
- LanguageMode language_mode = LanguageMode::kSloppy);
- V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteProperty(
- Handle<JSReceiver> object, Handle<Name> name,
- LanguageMode language_mode = LanguageMode::kSloppy);
- V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteProperty(
- LookupIterator* it, LanguageMode language_mode);
- V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteElement(
- Handle<JSReceiver> object, uint32_t index,
- LanguageMode language_mode = LanguageMode::kSloppy);
-
- V8_WARN_UNUSED_RESULT static Object* DefineProperty(
- Isolate* isolate, Handle<Object> object, Handle<Object> name,
- Handle<Object> attributes);
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> DefineProperties(
- Isolate* isolate, Handle<Object> object, Handle<Object> properties);
-
- // "virtual" dispatcher to the correct [[DefineOwnProperty]] implementation.
- V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
- Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
- PropertyDescriptor* desc, ShouldThrow should_throw);
-
- // ES6 7.3.4 (when passed kDontThrow)
- V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
- Isolate* isolate, Handle<JSReceiver> object, Handle<Name> key,
- Handle<Object> value, ShouldThrow should_throw);
- V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
- LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
-
- // ES6 9.1.6.1
- V8_WARN_UNUSED_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
- Isolate* isolate, Handle<JSObject> object, Handle<Object> key,
- PropertyDescriptor* desc, ShouldThrow should_throw);
- V8_WARN_UNUSED_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
- LookupIterator* it, PropertyDescriptor* desc, ShouldThrow should_throw);
- // ES6 9.1.6.2
- V8_WARN_UNUSED_RESULT static Maybe<bool> IsCompatiblePropertyDescriptor(
- Isolate* isolate, bool extensible, PropertyDescriptor* desc,
- PropertyDescriptor* current, Handle<Name> property_name,
- ShouldThrow should_throw);
- // ES6 9.1.6.3
- // |it| can be NULL in cases where the ES spec passes |undefined| as the
- // receiver. Exactly one of |it| and |property_name| must be provided.
- V8_WARN_UNUSED_RESULT static Maybe<bool> ValidateAndApplyPropertyDescriptor(
- Isolate* isolate, LookupIterator* it, bool extensible,
- PropertyDescriptor* desc, PropertyDescriptor* current,
- ShouldThrow should_throw, Handle<Name> property_name);
-
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool>
- GetOwnPropertyDescriptor(Isolate* isolate, Handle<JSReceiver> object,
- Handle<Object> key, PropertyDescriptor* desc);
- V8_WARN_UNUSED_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
- LookupIterator* it, PropertyDescriptor* desc);
-
- typedef PropertyAttributes IntegrityLevel;
-
- // ES6 7.3.14 (when passed kDontThrow)
- // 'level' must be SEALED or FROZEN.
- V8_WARN_UNUSED_RESULT static Maybe<bool> SetIntegrityLevel(
- Handle<JSReceiver> object, IntegrityLevel lvl, ShouldThrow should_throw);
-
- // ES6 7.3.15
- // 'level' must be SEALED or FROZEN.
- V8_WARN_UNUSED_RESULT static Maybe<bool> TestIntegrityLevel(
- Handle<JSReceiver> object, IntegrityLevel lvl);
-
- // ES6 [[PreventExtensions]] (when passed kDontThrow)
- V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
- Handle<JSReceiver> object, ShouldThrow should_throw);
-
- V8_WARN_UNUSED_RESULT static Maybe<bool> IsExtensible(
- Handle<JSReceiver> object);
-
- // Returns the class name ([[Class]] property in the specification).
- V8_EXPORT_PRIVATE String* class_name();
-
- // Returns the constructor (the function that was used to instantiate the
- // object).
- static MaybeHandle<JSFunction> GetConstructor(Handle<JSReceiver> receiver);
-
- // Returns the constructor name (the name (possibly, inferred name) of the
- // function that was used to instantiate the object).
- static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
-
- Handle<Context> GetCreationContext();
-
- V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
- GetPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
- V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
- GetOwnPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
- V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
- GetOwnPropertyAttributes(Handle<JSReceiver> object, uint32_t index);
-
- V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
- GetElementAttributes(Handle<JSReceiver> object, uint32_t index);
- V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
- GetOwnElementAttributes(Handle<JSReceiver> object, uint32_t index);
-
- V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
- LookupIterator* it);
-
- // Set the object's prototype (only JSReceiver and null are allowed values).
- V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
- Handle<JSReceiver> object, Handle<Object> value, bool from_javascript,
- ShouldThrow should_throw);
-
- inline static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
- Handle<Name> name);
- static Handle<Object> GetDataProperty(LookupIterator* it);
-
-
- // Retrieves a permanent object identity hash code. The undefined value might
- // be returned in case no hash was created yet.
- Object* GetIdentityHash(Isolate* isolate);
-
- // Retrieves a permanent object identity hash code. May create and store a
- // hash code if needed and none exists.
- static Smi* CreateIdentityHash(Isolate* isolate, JSReceiver* key);
- Smi* GetOrCreateIdentityHash(Isolate* isolate);
-
- // Stores the hash code. The hash passed in must be masked with
- // JSReceiver::kHashMask.
- void SetIdentityHash(int masked_hash);
-
- // ES6 [[OwnPropertyKeys]] (modulo return type)
- V8_WARN_UNUSED_RESULT static inline MaybeHandle<FixedArray> OwnPropertyKeys(
- Handle<JSReceiver> object);
-
- V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetOwnValues(
- Handle<JSReceiver> object, PropertyFilter filter,
- bool try_fast_path = true);
-
- V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
- Handle<JSReceiver> object, PropertyFilter filter,
- bool try_fast_path = true);
-
- V8_WARN_UNUSED_RESULT static Handle<FixedArray> GetOwnElementIndices(
- Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSObject> object);
-
- static const int kHashMask = PropertyArray::HashField::kMask;
-
- // Layout description.
- static const int kPropertiesOrHashOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = HeapObject::kHeaderSize + kPointerSize;
-
- bool HasProxyInPrototype(Isolate* isolate);
-
- bool HasComplexElements();
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
-};
-
-
-// The JSObject describes real heap-allocated JavaScript objects with
-// properties.
-// Note that the map of JSObject changes during execution to enable inline
-// caching.
-class JSObject: public JSReceiver {
- public:
- static bool IsUnmodifiedApiObject(Object** o);
-
- static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> New(
- Handle<JSFunction> constructor, Handle<JSReceiver> new_target,
- Handle<AllocationSite> site = Handle<AllocationSite>::null());
-
- static MaybeHandle<Context> GetFunctionRealm(Handle<JSObject> object);
-
- // 9.1.12 ObjectCreate ( proto [ , internalSlotsList ] )
- // Notice: This is NOT 19.1.2.2 Object.create ( O, Properties )
- static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> ObjectCreate(
- Isolate* isolate, Handle<Object> prototype);
-
- // [elements]: The elements (properties with names that are integers).
- //
- // Elements can be in two general modes: fast and slow. Each mode
- // corresponds to a set of object representations of elements that
- // have something in common.
- //
- // In the fast mode elements is a FixedArray and so each element can
- // be quickly accessed. This fact is used in the generated code. The
- // elements array can have one of three maps in this mode:
- // fixed_array_map, sloppy_arguments_elements_map or
- // fixed_cow_array_map (for copy-on-write arrays). In the latter case
- // the elements array may be shared by a few objects and so before
- // writing to any element the array must be copied. Use
- // EnsureWritableFastElements in this case.
- //
-  //    In the slow mode the elements is either a NumberDictionary or a
-  //    FixedArray parameter map for a (sloppy) arguments object.
- DECL_ACCESSORS(elements, FixedArrayBase)
- inline void initialize_elements();
- static inline void SetMapAndElements(Handle<JSObject> object,
- Handle<Map> map,
- Handle<FixedArrayBase> elements);
- inline ElementsKind GetElementsKind() const;
- ElementsAccessor* GetElementsAccessor();
- // Returns true if an object has elements of PACKED_SMI_ELEMENTS or
- // HOLEY_SMI_ELEMENTS ElementsKind.
- inline bool HasSmiElements();
- // Returns true if an object has elements of PACKED_ELEMENTS or
- // HOLEY_ELEMENTS ElementsKind.
- inline bool HasObjectElements();
- // Returns true if an object has elements of PACKED_SMI_ELEMENTS,
- // HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, or HOLEY_ELEMENTS.
- inline bool HasSmiOrObjectElements();
- // Returns true if an object has any of the "fast" elements kinds.
- inline bool HasFastElements();
- // Returns true if an object has any of the PACKED elements kinds.
- inline bool HasFastPackedElements();
- // Returns true if an object has elements of PACKED_DOUBLE_ELEMENTS or
- // HOLEY_DOUBLE_ELEMENTS ElementsKind.
- inline bool HasDoubleElements();
- // Returns true if an object has elements of HOLEY_SMI_ELEMENTS,
- // HOLEY_DOUBLE_ELEMENTS, or HOLEY_ELEMENTS ElementsKind.
- inline bool HasHoleyElements();
- inline bool HasSloppyArgumentsElements();
- inline bool HasStringWrapperElements();
- inline bool HasDictionaryElements();
-
- inline bool HasFixedTypedArrayElements();
-
- inline bool HasFixedUint8ClampedElements();
- inline bool HasFixedArrayElements();
- inline bool HasFixedInt8Elements();
- inline bool HasFixedUint8Elements();
- inline bool HasFixedInt16Elements();
- inline bool HasFixedUint16Elements();
- inline bool HasFixedInt32Elements();
- inline bool HasFixedUint32Elements();
- inline bool HasFixedFloat32Elements();
- inline bool HasFixedFloat64Elements();
- inline bool HasFixedBigInt64Elements();
- inline bool HasFixedBigUint64Elements();
-
- inline bool HasFastArgumentsElements();
- inline bool HasSlowArgumentsElements();
- inline bool HasFastStringWrapperElements();
- inline bool HasSlowStringWrapperElements();
- bool HasEnumerableElements();
-
- inline NumberDictionary* element_dictionary(); // Gets slow elements.
-
- // Requires: HasFastElements().
- static void EnsureWritableFastElements(Handle<JSObject> object);
-
- V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithInterceptor(
- LookupIterator* it, ShouldThrow should_throw, Handle<Object> value);
-
- // The API currently still wants DefineOwnPropertyIgnoreAttributes to convert
- // AccessorInfo objects to data fields. We allow FORCE_FIELD as an exception
- // to the default behavior that calls the setter.
- enum AccessorInfoHandling { FORCE_FIELD, DONT_FORCE_FIELD };
-
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
- DefineOwnPropertyIgnoreAttributes(
- LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- AccessorInfoHandling handling = DONT_FORCE_FIELD);
-
- V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnPropertyIgnoreAttributes(
- LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- ShouldThrow should_throw,
- AccessorInfoHandling handling = DONT_FORCE_FIELD);
-
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
- SetOwnPropertyIgnoreAttributes(Handle<JSObject> object, Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes);
-
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
- SetOwnElementIgnoreAttributes(Handle<JSObject> object, uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes);
-
- // Equivalent to one of the above depending on whether |name| can be converted
- // to an array index.
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
- DefinePropertyOrElementIgnoreAttributes(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes = NONE);
-
-  // Adds or reconfigures a property to attributes NONE. It will fail when
-  // this is not possible.
- V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
- LookupIterator* it, Handle<Object> value,
- ShouldThrow should_throw = kDontThrow);
-
- static void AddProperty(Isolate* isolate, Handle<JSObject> object,
- Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes);
-
- static void AddDataElement(Handle<JSObject> receiver, uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes);
-
-  // Extend the receiver with a single fast property that appeared first in
-  // the passed map. This also extends the property backing store if necessary.
- static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map);
-
- // Migrates the given object to a map whose field representations are the
- // lowest upper bound of all known representations for that field.
- static void MigrateInstance(Handle<JSObject> instance);
-
- // Migrates the given object only if the target map is already available,
- // or returns false if such a map is not yet available.
- static bool TryMigrateInstance(Handle<JSObject> instance);
-
- // Sets the property value in a normalized object given (key, value, details).
- // Handles the special representation of JS global objects.
- static void SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
- Handle<Object> value,
- PropertyDetails details);
- static void SetDictionaryElement(Handle<JSObject> object, uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes);
- static void SetDictionaryArgumentsElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes);
-
- static void OptimizeAsPrototype(Handle<JSObject> object,
- bool enable_setup_mode = true);
- static void ReoptimizeIfPrototype(Handle<JSObject> object);
- static void MakePrototypesFast(Handle<Object> receiver,
- WhereToStart where_to_start, Isolate* isolate);
- static void LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate);
- static void UpdatePrototypeUserRegistration(Handle<Map> old_map,
- Handle<Map> new_map,
- Isolate* isolate);
- static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
- static Map* InvalidatePrototypeChains(Map* map);
- static void InvalidatePrototypeValidityCell(JSGlobalObject* global);
-
- // Updates prototype chain tracking information when an object changes its
- // map from |old_map| to |new_map|.
- static void NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
- Isolate* isolate);
-
- // Utility used by many Array builtins and runtime functions
- static inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object);
-
- // To be passed to PrototypeUsers::Compact.
- static void PrototypeRegistryCompactionCallback(HeapObject* value,
- int old_index, int new_index);
-
- // Retrieve interceptors.
- inline InterceptorInfo* GetNamedInterceptor();
- inline InterceptorInfo* GetIndexedInterceptor();
-
- // Used from JSReceiver.
- V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes>
- GetPropertyAttributesWithInterceptor(LookupIterator* it);
- V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes>
- GetPropertyAttributesWithFailedAccessCheck(LookupIterator* it);
-
- // Defines an AccessorPair property on the given object.
- // TODO(mstarzinger): Rename to SetAccessor().
- static MaybeHandle<Object> DefineAccessor(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes);
- static MaybeHandle<Object> DefineAccessor(LookupIterator* it,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes);
-
- // Defines an AccessorInfo property on the given object.
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SetAccessor(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> info,
- PropertyAttributes attributes);
-
- // The result must be checked first for exceptions. If there's no exception,
- // the output parameter |done| indicates whether the interceptor has a result
- // or not.
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor(
- LookupIterator* it, bool* done);
-
- static void ValidateElements(JSObject* object);
-
- // Makes sure that this object can contain HeapObject as elements.
- static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
-
- // Makes sure that this object can contain the specified elements.
- static inline void EnsureCanContainElements(
- Handle<JSObject> object,
- Object** elements,
- uint32_t count,
- EnsureElementsMode mode);
- static inline void EnsureCanContainElements(
- Handle<JSObject> object,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode);
- static void EnsureCanContainElements(
- Handle<JSObject> object,
- Arguments* arguments,
- uint32_t first_arg,
- uint32_t arg_count,
- EnsureElementsMode mode);
-
- // Would we convert a fast elements array to dictionary mode given
- // an access at key?
- bool WouldConvertToSlowElements(uint32_t index);
-
- static const uint32_t kMinAddedElementsCapacity = 16;
-
- // Computes the new capacity when expanding the elements of a JSObject.
- static uint32_t NewElementsCapacity(uint32_t old_capacity) {
- // (old_capacity + 50%) + kMinAddedElementsCapacity
- return old_capacity + (old_capacity >> 1) + kMinAddedElementsCapacity;
- }
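-  // For example, an old_capacity of 8 grows to 8 + 4 + 16 = 28, and 100
-  // grows to 100 + 50 + 16 = 166 (kMinAddedElementsCapacity being 16).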
-
- // These methods do not perform access checks!
- template <AllocationSiteUpdateMode update_or_check =
- AllocationSiteUpdateMode::kUpdate>
- static bool UpdateAllocationSite(Handle<JSObject> object,
- ElementsKind to_kind);
-
- // Lookup interceptors are used for handling properties controlled by host
- // objects.
- inline bool HasNamedInterceptor();
- inline bool HasIndexedInterceptor();
-
- // Support functions for v8 api (needed for correct interceptor behavior).
- V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealNamedProperty(
- Handle<JSObject> object, Handle<Name> name);
- V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealElementProperty(
- Handle<JSObject> object, uint32_t index);
- V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealNamedCallbackProperty(
- Handle<JSObject> object, Handle<Name> name);
-
- // Get the header size for a JSObject. Used to compute the index of
- // embedder fields as well as the number of embedder fields.
- // The |function_has_prototype_slot| parameter is needed only for
- // JSFunction objects.
- static int GetHeaderSize(InstanceType instance_type,
- bool function_has_prototype_slot = false);
- static inline int GetHeaderSize(const Map* map);
- inline int GetHeaderSize() const;
-
- static inline int GetEmbedderFieldCount(const Map* map);
- inline int GetEmbedderFieldCount() const;
- inline int GetEmbedderFieldOffset(int index);
- inline Object* GetEmbedderField(int index);
- inline void SetEmbedderField(int index, Object* value);
- inline void SetEmbedderField(int index, Smi* value);
-
- // Returns true when the object is potentially a wrapper that gets special
- // garbage collection treatment.
- // TODO(mlippautz): Make check exact and replace the pattern match in
- // Heap::TracePossibleWrapper.
- bool IsApiWrapper();
-
- // Returns a new map with all transitions dropped from the object's current
- // map and the ElementsKind set.
- static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
- ElementsKind to_kind);
- static void TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind);
-
- // Always use this to migrate an object to a new map.
- // |expected_additional_properties| is only used for fast-to-slow transitions
- // and ignored otherwise.
- static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
- int expected_additional_properties = 0);
-
- // Forces a prototype without any of the checks that the regular SetPrototype
- // would do.
- static void ForceSetPrototype(Handle<JSObject> object, Handle<Object> proto);
-
-  // Convert the object to use the canonical dictionary
-  // representation. If the object is expected to have additional properties
-  // added, their number can be indicated so that the backing store is
-  // allocated with an initial capacity large enough to hold them.
- static void NormalizeProperties(Handle<JSObject> object,
- PropertyNormalizationMode mode,
- int expected_additional_properties,
- const char* reason);
-
- // Convert and update the elements backing store to be a
- // NumberDictionary dictionary. Returns the backing after conversion.
- static Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object);
-
- void RequireSlowElements(NumberDictionary* dictionary);
-
- // Transform slow named properties to fast variants.
- static void MigrateSlowToFast(Handle<JSObject> object,
- int unused_property_fields, const char* reason);
-
- inline bool IsUnboxedDoubleField(FieldIndex index);
-
- // Access fast-case object properties at index.
- static Handle<Object> FastPropertyAt(Handle<JSObject> object,
- Representation representation,
- FieldIndex index);
- inline Object* RawFastPropertyAt(FieldIndex index);
- inline double RawFastDoublePropertyAt(FieldIndex index);
- inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index);
-
- inline void FastPropertyAtPut(FieldIndex index, Object* value);
- inline void RawFastPropertyAtPut(FieldIndex index, Object* value);
- inline void RawFastDoublePropertyAsBitsAtPut(FieldIndex index, uint64_t bits);
- inline void WriteToField(int descriptor, PropertyDetails details,
- Object* value);
-
- // Access to in object properties.
- inline int GetInObjectPropertyOffset(int index);
- inline Object* InObjectPropertyAt(int index);
- inline Object* InObjectPropertyAtPut(int index,
- Object* value,
- WriteBarrierMode mode
- = UPDATE_WRITE_BARRIER);
-
- // Set the object's prototype (only JSReceiver and null are allowed values).
- V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
- Handle<JSObject> object, Handle<Object> value, bool from_javascript,
- ShouldThrow should_throw);
-
- // Makes the object prototype immutable
- // Never called from JavaScript
- static void SetImmutableProto(Handle<JSObject> object);
-
-  // Initializes the body starting at |start_offset|. It is the responsibility
-  // of the caller to initialize the object header. Fills the pre-allocated
-  // fields with pre_allocated_value and the rest with filler_value.
-  // Note: this call does not update the write barrier; the caller is
-  // responsible for ensuring that |filler_value| can be collected without a
-  // write barrier here.
- inline void InitializeBody(Map* map, int start_offset,
- Object* pre_allocated_value, Object* filler_value);
-
- // Check whether this object references another object
- bool ReferencesObject(Object* obj);
-
- V8_WARN_UNUSED_RESULT static Maybe<bool> TestIntegrityLevel(
- Handle<JSObject> object, IntegrityLevel lvl);
-
- V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
- Handle<JSObject> object, ShouldThrow should_throw);
-
- static bool IsExtensible(Handle<JSObject> object);
-
- DECL_CAST(JSObject)
-
- // Dispatched behavior.
- void JSObjectShortPrint(StringStream* accumulator);
- DECL_PRINTER(JSObject)
- DECL_VERIFIER(JSObject)
-#ifdef OBJECT_PRINT
- bool PrintProperties(std::ostream& os); // NOLINT
- void PrintElements(std::ostream& os); // NOLINT
-#endif
-#if defined(DEBUG) || defined(OBJECT_PRINT)
- void PrintTransitions(std::ostream& os); // NOLINT
-#endif
-
- static void PrintElementsTransition(
- FILE* file, Handle<JSObject> object,
- ElementsKind from_kind, Handle<FixedArrayBase> from_elements,
- ElementsKind to_kind, Handle<FixedArrayBase> to_elements);
-
- void PrintInstanceMigration(FILE* file, Map* original_map, Map* new_map);
-
-#ifdef DEBUG
- // Structure for collecting spill information about JSObjects.
- class SpillInformation {
- public:
- void Clear();
- void Print();
- int number_of_objects_;
- int number_of_objects_with_fast_properties_;
- int number_of_objects_with_fast_elements_;
- int number_of_fast_used_fields_;
- int number_of_fast_unused_fields_;
- int number_of_slow_used_properties_;
- int number_of_slow_unused_properties_;
- int number_of_fast_used_elements_;
- int number_of_fast_unused_elements_;
- int number_of_slow_used_elements_;
- int number_of_slow_unused_elements_;
- };
-
- void IncrementSpillStatistics(Isolate* isolate, SpillInformation* info);
-#endif
-
-#ifdef VERIFY_HEAP
- // If a GC was caused while constructing this object, the elements pointer
- // may point to a one pointer filler map. The object won't be rooted, but
- // our heap verification code could stumble across it.
- bool ElementsAreSafeToExamine() const;
-#endif
-
- Object* SlowReverseLookup(Object* value);
-
- // Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
- // Also maximal value of JSArray's length property.
- static const uint32_t kMaxElementCount = 0xffffffffu;
-
- // Constants for heuristics controlling conversion of fast elements
- // to slow elements.
-
- // Maximal gap that can be introduced by adding an element beyond
- // the current elements length.
- static const uint32_t kMaxGap = 1024;
-
- // Maximal length of fast elements array that won't be checked for
- // being dense enough on expansion.
- static const int kMaxUncheckedFastElementsLength = 5000;
-
- // Same as above but for old arrays. This limit is more strict. We
- // don't want to be wasteful with long lived objects.
- static const int kMaxUncheckedOldFastElementsLength = 500;
-
- // This constant applies only to the initial map of "global.Object" and
- // not to arbitrary other JSObject maps.
- static const int kInitialGlobalObjectUnusedPropertiesCount = 4;
-
- static const int kMaxInstanceSize = 255 * kPointerSize;
-
- // When extending the backing storage for property values, we increase
- // its size by more than the 1 entry necessary, so sequentially adding fields
- // to the same object requires fewer allocations and copies.
- static const int kFieldsAdded = 3;
- STATIC_ASSERT(kMaxNumberOfDescriptors + kFieldsAdded <=
- PropertyArray::kMaxLength);
-
- // Layout description.
- static const int kElementsOffset = JSReceiver::kHeaderSize;
- static const int kHeaderSize = kElementsOffset + kPointerSize;
-
- STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
- static const int kMaxInObjectProperties =
- (kMaxInstanceSize - kHeaderSize) >> kPointerSizeLog2;
- STATIC_ASSERT(kMaxInObjectProperties <= kMaxNumberOfDescriptors);
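-  // For example, with 8-byte pointers kMaxInstanceSize is 255 * 8 = 2040
-  // bytes, so at most (2040 - kHeaderSize) / 8 in-object property slots fit.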
- // TODO(cbruni): Revisit calculation of the max supported embedder fields.
- static const int kMaxEmbedderFields =
- ((1 << kFirstInobjectPropertyOffsetBitCount) - 1 - kHeaderSize) >>
- kPointerSizeLog2;
- STATIC_ASSERT(kMaxEmbedderFields <= kMaxInObjectProperties);
-
- class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- class FastBodyDescriptor;
- // No weak fields.
- typedef FastBodyDescriptor FastBodyDescriptorWeak;
-
- // Gets the number of currently used elements.
- int GetFastElementsUsage();
-
- static bool AllCanRead(LookupIterator* it);
- static bool AllCanWrite(LookupIterator* it);
-
- private:
- friend class JSReceiver;
- friend class Object;
-
- // Used from Object::GetProperty().
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
- GetPropertyWithFailedAccessCheck(LookupIterator* it);
-
- V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
-
- V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
- LookupIterator* it, ShouldThrow should_throw);
-
- bool ReferencesObjectFromElements(FixedArray* elements,
- ElementsKind kind,
- Object* object);
-
- // Helper for fast versions of preventExtensions, seal, and freeze.
- // attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
- template <PropertyAttributes attrs>
- V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensionsWithTransition(
- Handle<JSObject> object, ShouldThrow should_throw);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
-};
-
-
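The kMaxGap and kMaxUncheckedFastElementsLength constants above drive the heuristic that demotes fast (array-backed) elements to dictionary mode. A minimal, self-contained sketch of the gap check, assuming `index` is the store target and `capacity` the current backing-store length (illustrative only, not V8's exact predicate):

    #include <cstdint>

    // Convert to dictionary elements when a store past the current capacity
    // would introduce more than kMaxGap holes.
    static bool WouldCreateExcessiveGap(uint32_t index, uint32_t capacity) {
      const uint32_t kMaxGap = 1024;  // mirrors JSObject::kMaxGap above
      return index > capacity && index - capacity > kMaxGap;
    }
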
-// JSAccessorPropertyDescriptor is just a JSObject with a specific initial
-// map. This initial map adds in-object properties for "get", "set",
-// "enumerable" and "configurable" properties, as assigned by the
-// FromPropertyDescriptor function for regular accessor properties.
-class JSAccessorPropertyDescriptor: public JSObject {
- public:
- // Offsets of object fields.
- static const int kGetOffset = JSObject::kHeaderSize;
- static const int kSetOffset = kGetOffset + kPointerSize;
- static const int kEnumerableOffset = kSetOffset + kPointerSize;
- static const int kConfigurableOffset = kEnumerableOffset + kPointerSize;
- static const int kSize = kConfigurableOffset + kPointerSize;
- // Indices of in-object properties.
- static const int kGetIndex = 0;
- static const int kSetIndex = 1;
- static const int kEnumerableIndex = 2;
- static const int kConfigurableIndex = 3;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSAccessorPropertyDescriptor);
-};
-
-
-// JSDataPropertyDescriptor is just a JSObject with a specific initial map.
-// This initial map adds in-object properties for "value", "writable",
-// "enumerable" and "configurable" properties, as assigned by the
-// FromPropertyDescriptor function for regular data properties.
-class JSDataPropertyDescriptor: public JSObject {
- public:
- // Offsets of object fields.
- static const int kValueOffset = JSObject::kHeaderSize;
- static const int kWritableOffset = kValueOffset + kPointerSize;
- static const int kEnumerableOffset = kWritableOffset + kPointerSize;
- static const int kConfigurableOffset = kEnumerableOffset + kPointerSize;
- static const int kSize = kConfigurableOffset + kPointerSize;
- // Indices of in-object properties.
- static const int kValueIndex = 0;
- static const int kWritableIndex = 1;
- static const int kEnumerableIndex = 2;
- static const int kConfigurableIndex = 3;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataPropertyDescriptor);
-};
-
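Both descriptor classes above exist so that FromPropertyDescriptor can build the result object with plain indexed stores instead of generic property lookups. A hedged usage sketch, where `desc`, `value` and `enumerable` are hypothetical handles and InObjectPropertyAtPut is JSObject's in-object store helper (declared outside this hunk):

    // Fill the fixed in-object slots of a JSDataPropertyDescriptor by index;
    // this only works for objects created from the matching initial map.
    desc->InObjectPropertyAtPut(JSDataPropertyDescriptor::kValueIndex, *value);
    desc->InObjectPropertyAtPut(JSDataPropertyDescriptor::kEnumerableIndex,
                                *enumerable);
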
-
-// JSIteratorResult is just a JSObject with a specific initial map.
-// This initial map adds in-object properties for "done" and "value",
-// as specified by ES6 section 25.1.1.3 The IteratorResult Interface
-class JSIteratorResult: public JSObject {
- public:
- DECL_ACCESSORS(value, Object)
-
- DECL_ACCESSORS(done, Object)
-
- // Offsets of object fields.
- static const int kValueOffset = JSObject::kHeaderSize;
- static const int kDoneOffset = kValueOffset + kPointerSize;
- static const int kSize = kDoneOffset + kPointerSize;
- // Indices of in-object properties.
- static const int kValueIndex = 0;
- static const int kDoneIndex = 1;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSIteratorResult);
-};
-
// FreeSpace objects are free memory blocks used by the heap and GC.
// They look like heap objects (are heap object tagged and have a map) so that
// the heap remains iterable. They have a size and a next pointer.
@@ -2929,899 +1972,10 @@ class AsyncGeneratorRequest : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(AsyncGeneratorRequest);
};
-// List of builtin functions we want to identify to improve code
-// generation.
-//
-// Each entry has a name of a global object property holding an object
-// optionally followed by ".prototype", a name of a builtin function
-// on the object (the one the id is set for), and a label.
-//
-// Installation of ids for the selected builtin functions is handled
-// by the bootstrapper.
-#define FUNCTIONS_WITH_ID_LIST(V) \
- V(Array, isArray, ArrayIsArray) \
- V(Array.prototype, concat, ArrayConcat) \
- V(Array.prototype, every, ArrayEvery) \
- V(Array.prototype, fill, ArrayFill) \
- V(Array.prototype, filter, ArrayFilter) \
- V(Array.prototype, findIndex, ArrayFindIndex) \
- V(Array.prototype, forEach, ArrayForEach) \
- V(Array.prototype, includes, ArrayIncludes) \
- V(Array.prototype, indexOf, ArrayIndexOf) \
- V(Array.prototype, join, ArrayJoin) \
- V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \
- V(Array.prototype, map, ArrayMap) \
- V(Array.prototype, pop, ArrayPop) \
- V(Array.prototype, push, ArrayPush) \
- V(Array.prototype, reverse, ArrayReverse) \
- V(Array.prototype, shift, ArrayShift) \
- V(Array.prototype, slice, ArraySlice) \
- V(Array.prototype, some, ArraySome) \
- V(Array.prototype, splice, ArraySplice) \
- V(Array.prototype, unshift, ArrayUnshift) \
- V(Date, now, DateNow) \
- V(Date.prototype, getDate, DateGetDate) \
- V(Date.prototype, getDay, DateGetDay) \
- V(Date.prototype, getFullYear, DateGetFullYear) \
- V(Date.prototype, getHours, DateGetHours) \
- V(Date.prototype, getMilliseconds, DateGetMilliseconds) \
- V(Date.prototype, getMinutes, DateGetMinutes) \
- V(Date.prototype, getMonth, DateGetMonth) \
- V(Date.prototype, getSeconds, DateGetSeconds) \
- V(Date.prototype, getTime, DateGetTime) \
- V(Function.prototype, apply, FunctionApply) \
- V(Function.prototype, bind, FunctionBind) \
- V(Function.prototype, call, FunctionCall) \
- V(Object, assign, ObjectAssign) \
- V(Object, create, ObjectCreate) \
- V(Object, is, ObjectIs) \
- V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
- V(Object.prototype, isPrototypeOf, ObjectIsPrototypeOf) \
- V(Object.prototype, toString, ObjectToString) \
- V(RegExp.prototype, compile, RegExpCompile) \
- V(RegExp.prototype, exec, RegExpExec) \
- V(RegExp.prototype, test, RegExpTest) \
- V(RegExp.prototype, toString, RegExpToString) \
- V(String.prototype, charCodeAt, StringCharCodeAt) \
- V(String.prototype, charAt, StringCharAt) \
- V(String.prototype, codePointAt, StringCodePointAt) \
- V(String.prototype, concat, StringConcat) \
- V(String.prototype, endsWith, StringEndsWith) \
- V(String.prototype, includes, StringIncludes) \
- V(String.prototype, indexOf, StringIndexOf) \
- V(String.prototype, lastIndexOf, StringLastIndexOf) \
- V(String.prototype, repeat, StringRepeat) \
- V(String.prototype, slice, StringSlice) \
- V(String.prototype, startsWith, StringStartsWith) \
- V(String.prototype, substr, StringSubstr) \
- V(String.prototype, substring, StringSubstring) \
- V(String.prototype, toLowerCase, StringToLowerCase) \
- V(String.prototype, toString, StringToString) \
- V(String.prototype, toUpperCase, StringToUpperCase) \
- V(String.prototype, trim, StringTrim) \
- V(String.prototype, trimLeft, StringTrimStart) \
- V(String.prototype, trimRight, StringTrimEnd) \
- V(String.prototype, valueOf, StringValueOf) \
- V(String, fromCharCode, StringFromCharCode) \
- V(String, fromCodePoint, StringFromCodePoint) \
- V(String, raw, StringRaw) \
- V(Math, random, MathRandom) \
- V(Math, floor, MathFloor) \
- V(Math, round, MathRound) \
- V(Math, ceil, MathCeil) \
- V(Math, abs, MathAbs) \
- V(Math, log, MathLog) \
- V(Math, log1p, MathLog1p) \
- V(Math, log2, MathLog2) \
- V(Math, log10, MathLog10) \
- V(Math, cbrt, MathCbrt) \
- V(Math, exp, MathExp) \
- V(Math, expm1, MathExpm1) \
- V(Math, sqrt, MathSqrt) \
- V(Math, pow, MathPow) \
- V(Math, max, MathMax) \
- V(Math, min, MathMin) \
- V(Math, cos, MathCos) \
- V(Math, cosh, MathCosh) \
- V(Math, sign, MathSign) \
- V(Math, sin, MathSin) \
- V(Math, sinh, MathSinh) \
- V(Math, tan, MathTan) \
- V(Math, tanh, MathTanh) \
- V(Math, acos, MathAcos) \
- V(Math, acosh, MathAcosh) \
- V(Math, asin, MathAsin) \
- V(Math, asinh, MathAsinh) \
- V(Math, atan, MathAtan) \
- V(Math, atan2, MathAtan2) \
- V(Math, atanh, MathAtanh) \
- V(Math, imul, MathImul) \
- V(Math, clz32, MathClz32) \
- V(Math, fround, MathFround) \
- V(Math, trunc, MathTrunc) \
- V(Number, isFinite, NumberIsFinite) \
- V(Number, isInteger, NumberIsInteger) \
- V(Number, isNaN, NumberIsNaN) \
- V(Number, isSafeInteger, NumberIsSafeInteger) \
- V(Number, parseFloat, NumberParseFloat) \
- V(Number, parseInt, NumberParseInt) \
- V(Number.prototype, toString, NumberToString) \
- V(Map.prototype, clear, MapClear) \
- V(Map.prototype, delete, MapDelete) \
- V(Map.prototype, entries, MapEntries) \
- V(Map.prototype, forEach, MapForEach) \
- V(Map.prototype, has, MapHas) \
- V(Map.prototype, keys, MapKeys) \
- V(Map.prototype, get, MapGet) \
- V(Map.prototype, set, MapSet) \
- V(Map.prototype, values, MapValues) \
- V(Set.prototype, add, SetAdd) \
- V(Set.prototype, clear, SetClear) \
- V(Set.prototype, delete, SetDelete) \
- V(Set.prototype, entries, SetEntries) \
- V(Set.prototype, forEach, SetForEach) \
- V(Set.prototype, has, SetHas) \
- V(Set.prototype, values, SetValues) \
- V(WeakMap.prototype, delete, WeakMapDelete) \
- V(WeakMap.prototype, has, WeakMapHas) \
- V(WeakMap.prototype, set, WeakMapSet) \
- V(WeakSet.prototype, add, WeakSetAdd) \
- V(WeakSet.prototype, delete, WeakSetDelete) \
- V(WeakSet.prototype, has, WeakSetHas)
-
-#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
- V(Atomics, load, AtomicsLoad) \
- V(Atomics, store, AtomicsStore) \
- V(Atomics, exchange, AtomicsExchange) \
- V(Atomics, compareExchange, AtomicsCompareExchange) \
- V(Atomics, add, AtomicsAdd) \
- V(Atomics, sub, AtomicsSub) \
- V(Atomics, and, AtomicsAnd) \
- V(Atomics, or, AtomicsOr) \
- V(Atomics, xor, AtomicsXor)
-
-enum class BuiltinFunctionId : uint8_t {
- kArrayConstructor,
-#define DECL_FUNCTION_ID(ignored1, ignore2, name) k##name,
- FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
- ATOMIC_FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
-#undef DECL_FUNCTION_ID
- // These are manually assigned to special getters during bootstrapping.
- kArrayBufferByteLength,
- kArrayBufferIsView,
- kArrayEntries,
- kArrayKeys,
- kArrayValues,
- kArrayIteratorNext,
- kBigIntConstructor,
- kMapSize,
- kSetSize,
- kMapIteratorNext,
- kSetIteratorNext,
- kDataViewBuffer,
- kDataViewByteLength,
- kDataViewByteOffset,
- kFunctionHasInstance,
- kGlobalDecodeURI,
- kGlobalDecodeURIComponent,
- kGlobalEncodeURI,
- kGlobalEncodeURIComponent,
- kGlobalEscape,
- kGlobalUnescape,
- kGlobalIsFinite,
- kGlobalIsNaN,
- kNumberConstructor,
- kSymbolConstructor,
- kTypedArrayByteLength,
- kTypedArrayByteOffset,
- kTypedArrayEntries,
- kTypedArrayKeys,
- kTypedArrayLength,
- kTypedArrayToStringTag,
- kTypedArrayValues,
- kSharedArrayBufferByteLength,
- kStringConstructor,
- kStringIterator,
- kStringIteratorNext,
- kStringToLowerCaseIntl,
- kStringToUpperCaseIntl,
- kInvalidBuiltinFunctionId = static_cast<uint8_t>(-1),
-};
-
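The enum above is generated with the X-macro pattern: each `V(holder, builtin, label)` entry contributes one `k<label>` enumerator. A self-contained re-creation of the mechanism (demo names, not V8's):

    #include <cstdint>

    #define DEMO_LIST(V)              \
      V(Array, isArray, ArrayIsArray) \
      V(Math, pow, MathPow)

    enum class DemoId : uint8_t {
    #define DEMO_ID(holder, fn, name) k##name,
      DEMO_LIST(DEMO_ID)
    #undef DEMO_ID
    };

    // Ids are dense and stable as long as the list order is stable.
    static_assert(static_cast<uint8_t>(DemoId::kMathPow) == 1, "dense ids");
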
-// JSBoundFunction describes a bound function exotic object.
-class JSBoundFunction : public JSObject {
- public:
- // [bound_target_function]: The wrapped function object.
- inline Object* raw_bound_target_function() const;
- DECL_ACCESSORS(bound_target_function, JSReceiver)
-
- // [bound_this]: The value that is always passed as the this value when
- // calling the wrapped function.
- DECL_ACCESSORS(bound_this, Object)
-
- // [bound_arguments]: A list of values whose elements are used as the first
- // arguments to any call to the wrapped function.
- DECL_ACCESSORS(bound_arguments, FixedArray)
-
- static MaybeHandle<String> GetName(Isolate* isolate,
- Handle<JSBoundFunction> function);
- static Maybe<int> GetLength(Isolate* isolate,
- Handle<JSBoundFunction> function);
- static MaybeHandle<Context> GetFunctionRealm(
- Handle<JSBoundFunction> function);
-
- DECL_CAST(JSBoundFunction)
-
- // Dispatched behavior.
- DECL_PRINTER(JSBoundFunction)
- DECL_VERIFIER(JSBoundFunction)
-
- // The bound function's string representation implemented according
- // to ES6 section 19.2.3.5 Function.prototype.toString ( ).
- static Handle<String> ToString(Handle<JSBoundFunction> function);
-
- // Layout description.
- static const int kBoundTargetFunctionOffset = JSObject::kHeaderSize;
- static const int kBoundThisOffset = kBoundTargetFunctionOffset + kPointerSize;
- static const int kBoundArgumentsOffset = kBoundThisOffset + kPointerSize;
- static const int kSize = kBoundArgumentsOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSBoundFunction);
-};
-
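The three fields above correspond to the spec's [[BoundTargetFunction]], [[BoundThis]] and [[BoundArguments]] internal slots; calling a bound function prepends the stored arguments to the call-site arguments. A self-contained analogy of that argument concatenation (V8 itself does this in builtins, not in this header):

    #include <string>
    #include <vector>

    // [[Call]] on a bound function sees bound_arguments ++ call_args.
    std::vector<std::string> BoundCallArgs(
        const std::vector<std::string>& bound_arguments,
        const std::vector<std::string>& call_args) {
      std::vector<std::string> final_args = bound_arguments;
      final_args.insert(final_args.end(), call_args.begin(), call_args.end());
      return final_args;
    }
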
-
-// JSFunction describes JavaScript functions.
-class JSFunction: public JSObject {
- public:
- // [prototype_or_initial_map]:
- DECL_ACCESSORS(prototype_or_initial_map, Object)
-
- // [shared]: The information about the function that
- // can be shared by instances.
- DECL_ACCESSORS(shared, SharedFunctionInfo)
-
- static const int kLengthDescriptorIndex = 0;
- static const int kNameDescriptorIndex = 1;
- // Home object descriptor index when function has a [[HomeObject]] slot.
- static const int kMaybeHomeObjectDescriptorIndex = 2;
-
- // [context]: The context for this function.
- inline Context* context();
- inline bool has_context() const;
- inline void set_context(Object* context);
- inline JSGlobalProxy* global_proxy();
- inline Context* native_context();
-
- static Handle<Object> GetName(Isolate* isolate, Handle<JSFunction> function);
- static Maybe<int> GetLength(Isolate* isolate, Handle<JSFunction> function);
- static Handle<Context> GetFunctionRealm(Handle<JSFunction> function);
-
- // [code]: The generated code object for this function. Executed
- // when the function is invoked, e.g. foo() or new foo(). See
- // [[Call]] and [[Construct]] description in ECMA-262, section
- // 8.6.2, page 27.
- inline Code* code();
- inline void set_code(Code* code);
- inline void set_code_no_write_barrier(Code* code);
-
- // Get the abstract code associated with the function, which will either be
- // a Code object or a BytecodeArray.
- inline AbstractCode* abstract_code();
-
- // Tells whether or not this function is interpreted.
- //
- // Note: function->IsInterpreted() does not necessarily return the same value
- // as function->shared()->IsInterpreted() because the closure might have been
- // optimized.
- inline bool IsInterpreted();
-
- // Tells whether or not this function checks its optimization marker in its
- // feedback vector.
- inline bool ChecksOptimizationMarker();
-
- // Tells whether or not this function holds optimized code.
- //
- // Note: Returning false does not necessarily mean that this function hasn't
- // been optimized, as it may have optimized code on its feedback vector.
- inline bool IsOptimized();
-
- // Tells whether or not this function has optimized code available to it,
- // either because it is optimized or because it has optimized code in its
- // feedback vector.
- inline bool HasOptimizedCode();
-
- // Tells whether or not this function has a (non-zero) optimization marker.
- inline bool HasOptimizationMarker();
-
- // Mark this function for lazy recompilation. The function will be recompiled
- // the next time it is executed.
- void MarkForOptimization(ConcurrencyMode mode);
-
- // Tells whether or not the function is already marked for lazy recompilation.
- inline bool IsMarkedForOptimization();
- inline bool IsMarkedForConcurrentOptimization();
-
- // Tells whether or not the function is on the concurrent recompilation queue.
- inline bool IsInOptimizationQueue();
-
- // Clears the optimized code slot in the function's feedback vector.
- inline void ClearOptimizedCodeSlot(const char* reason);
-
- // Sets the optimization marker in the function's feedback vector.
- inline void SetOptimizationMarker(OptimizationMarker marker);
-
- // Clears the optimization marker in the function's feedback vector.
- inline void ClearOptimizationMarker();
-
- // If slack tracking is active, it computes instance size of the initial map
- // with minimum permissible object slack. If it is not active, it simply
- // returns the initial map's instance size.
- int ComputeInstanceSizeWithMinSlack(Isolate* isolate);
-
- // Completes inobject slack tracking on initial map if it is active.
- inline void CompleteInobjectSlackTrackingIfActive();
-
- // [feedback_cell]: The FeedbackCell used to hold the FeedbackVector
- // eventually.
- DECL_ACCESSORS(feedback_cell, FeedbackCell)
-
- // feedback_vector() can be used once the function is compiled.
- inline FeedbackVector* feedback_vector() const;
- inline bool has_feedback_vector() const;
- static void EnsureFeedbackVector(Handle<JSFunction> function);
-
- // Unconditionally clear the type feedback vector.
- void ClearTypeFeedbackInfo();
-
- inline bool has_prototype_slot() const;
-
- // The initial map for an object created by this constructor.
- inline Map* initial_map();
- static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
- Handle<Object> prototype);
- inline bool has_initial_map();
- static void EnsureHasInitialMap(Handle<JSFunction> function);
-
- // Creates a map that matches the constructor's initial map, but with
- // [[prototype]] being new.target.prototype. Because new.target can be a
- // JSProxy, this can call back into JavaScript.
- static V8_WARN_UNUSED_RESULT MaybeHandle<Map> GetDerivedMap(
- Isolate* isolate, Handle<JSFunction> constructor,
- Handle<JSReceiver> new_target);
-
- // Get and set the prototype property on a JSFunction. If the
- // function has an initial map the prototype is set on the initial
- // map. Otherwise, the prototype is put in the initial map field
- // until an initial map is needed.
- inline bool has_prototype();
- inline bool has_instance_prototype();
- inline Object* prototype();
- inline Object* instance_prototype();
- inline bool has_prototype_property();
- inline bool PrototypeRequiresRuntimeLookup();
- static void SetPrototype(Handle<JSFunction> function,
- Handle<Object> value);
-
-  // Returns whether this function has been compiled to native code yet.
- inline bool is_compiled();
-
- static int GetHeaderSize(bool function_has_prototype_slot) {
- return function_has_prototype_slot ? JSFunction::kSizeWithPrototype
- : JSFunction::kSizeWithoutPrototype;
- }
-
- // Prints the name of the function using PrintF.
- void PrintName(FILE* out = stdout);
-
- DECL_CAST(JSFunction)
-
- // Calculate the instance size and in-object properties count.
- static bool CalculateInstanceSizeForDerivedClass(
- Handle<JSFunction> function, InstanceType instance_type,
- int requested_embedder_fields, int* instance_size,
- int* in_object_properties);
- static void CalculateInstanceSizeHelper(InstanceType instance_type,
- bool has_prototype_slot,
- int requested_embedder_fields,
- int requested_in_object_properties,
- int* instance_size,
- int* in_object_properties);
-
- class BodyDescriptor;
-
- // Dispatched behavior.
- DECL_PRINTER(JSFunction)
- DECL_VERIFIER(JSFunction)
-
-  // The function's name if it is configured, otherwise the shared function
-  // info's debug name.
- static Handle<String> GetName(Handle<JSFunction> function);
-
- // ES6 section 9.2.11 SetFunctionName
- // Because of the way this abstract operation is used in the spec,
- // it should never fail, but in practice it will fail if the generated
- // function name's length exceeds String::kMaxLength.
- static V8_WARN_UNUSED_RESULT bool SetName(Handle<JSFunction> function,
- Handle<Name> name,
- Handle<String> prefix);
-
-  // The function's displayName if it is set, otherwise its name if it is
-  // configured, otherwise the shared function info's debug name.
- static Handle<String> GetDebugName(Handle<JSFunction> function);
-
- // The function's string representation implemented according to
- // ES6 section 19.2.3.5 Function.prototype.toString ( ).
- static Handle<String> ToString(Handle<JSFunction> function);
-
-// Layout description.
-#define JS_FUNCTION_FIELDS(V) \
- /* Pointer fields. */ \
- V(kSharedFunctionInfoOffset, kPointerSize) \
- V(kContextOffset, kPointerSize) \
- V(kFeedbackCellOffset, kPointerSize) \
- V(kEndOfStrongFieldsOffset, 0) \
- V(kCodeOffset, kPointerSize) \
- /* Size of JSFunction object without prototype field. */ \
- V(kSizeWithoutPrototype, 0) \
- V(kPrototypeOrInitialMapOffset, kPointerSize) \
- /* Size of JSFunction object with prototype field. */ \
- V(kSizeWithPrototype, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_FUNCTION_FIELDS)
-#undef JS_FUNCTION_FIELDS
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
-};
-
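DEFINE_FIELD_OFFSET_CONSTANTS (defined elsewhere in the tree) folds the field list into accumulated offsets: each `V(name, size)` entry defines `name` at the running offset and then advances it by `size`, so zero-size entries act as pure markers or aliases. Conceptually, JS_FUNCTION_FIELDS expands to roughly:

    enum {
      kSharedFunctionInfoOffset = JSObject::kHeaderSize,
      kContextOffset = kSharedFunctionInfoOffset + kPointerSize,
      kFeedbackCellOffset = kContextOffset + kPointerSize,
      kEndOfStrongFieldsOffset = kFeedbackCellOffset + kPointerSize,
      kCodeOffset = kEndOfStrongFieldsOffset,  // size 0: marker only
      kSizeWithoutPrototype = kCodeOffset + kPointerSize,
      kPrototypeOrInitialMapOffset = kSizeWithoutPrototype,
      kSizeWithPrototype = kPrototypeOrInitialMapOffset + kPointerSize,
    };
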
-
-// JSGlobalProxy's prototype must be a JSGlobalObject or null,
-// and the prototype is hidden. JSGlobalProxy always delegates
-// property accesses to its prototype if the prototype is not null.
-//
-// A JSGlobalProxy can be reinitialized which will preserve its identity.
-//
-// Accessing a JSGlobalProxy requires a security check.
-
-class JSGlobalProxy : public JSObject {
- public:
- // [native_context]: the owner native context of this global proxy object.
-  // It is the null value if this object is not used by any context.
- DECL_ACCESSORS(native_context, Object)
-
- DECL_CAST(JSGlobalProxy)
-
- inline bool IsDetachedFrom(JSGlobalObject* global) const;
-
- static int SizeWithEmbedderFields(int embedder_field_count);
-
- // Dispatched behavior.
- DECL_PRINTER(JSGlobalProxy)
- DECL_VERIFIER(JSGlobalProxy)
-
- // Layout description.
- static const int kNativeContextOffset = JSObject::kHeaderSize;
- static const int kSize = kNativeContextOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
-};
-
-
-// JavaScript global object.
-class JSGlobalObject : public JSObject {
- public:
- // [native context]: the natives corresponding to this global object.
- DECL_ACCESSORS(native_context, Context)
-
- // [global proxy]: the global proxy object of the context
- DECL_ACCESSORS(global_proxy, JSObject)
-
- // Gets global object properties.
- inline GlobalDictionary* global_dictionary();
- inline void set_global_dictionary(GlobalDictionary* dictionary);
-
- static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
- Handle<Name> name);
- // Ensure that the global object has a cell for the given property name.
- static Handle<PropertyCell> EnsureEmptyPropertyCell(
- Handle<JSGlobalObject> global, Handle<Name> name,
- PropertyCellType cell_type, int* entry_out = nullptr);
-
- DECL_CAST(JSGlobalObject)
-
- inline bool IsDetached();
-
- // Dispatched behavior.
- DECL_PRINTER(JSGlobalObject)
- DECL_VERIFIER(JSGlobalObject)
-
- // Layout description.
- static const int kNativeContextOffset = JSObject::kHeaderSize;
- static const int kGlobalProxyOffset = kNativeContextOffset + kPointerSize;
- static const int kHeaderSize = kGlobalProxyOffset + kPointerSize;
- static const int kSize = kHeaderSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalObject);
-};
-
-
-// Representation for JS Wrapper objects, String, Number, Boolean, etc.
-class JSValue: public JSObject {
- public:
- // [value]: the object being wrapped.
- DECL_ACCESSORS(value, Object)
-
- DECL_CAST(JSValue)
-
- // Dispatched behavior.
- DECL_PRINTER(JSValue)
- DECL_VERIFIER(JSValue)
-
- // Layout description.
- static const int kValueOffset = JSObject::kHeaderSize;
- static const int kSize = kValueOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSValue);
-};
-
-
-class DateCache;
-
-// Representation for JS date objects.
-class JSDate: public JSObject {
- public:
- static V8_WARN_UNUSED_RESULT MaybeHandle<JSDate> New(
- Handle<JSFunction> constructor, Handle<JSReceiver> new_target, double tv);
-
- // If one component is NaN, all of them are, indicating a NaN time value.
- // [value]: the time value.
- DECL_ACCESSORS(value, Object)
- // [year]: caches year. Either undefined, smi, or NaN.
- DECL_ACCESSORS(year, Object)
- // [month]: caches month. Either undefined, smi, or NaN.
- DECL_ACCESSORS(month, Object)
- // [day]: caches day. Either undefined, smi, or NaN.
- DECL_ACCESSORS(day, Object)
- // [weekday]: caches day of week. Either undefined, smi, or NaN.
- DECL_ACCESSORS(weekday, Object)
- // [hour]: caches hours. Either undefined, smi, or NaN.
- DECL_ACCESSORS(hour, Object)
- // [min]: caches minutes. Either undefined, smi, or NaN.
- DECL_ACCESSORS(min, Object)
- // [sec]: caches seconds. Either undefined, smi, or NaN.
- DECL_ACCESSORS(sec, Object)
- // [cache stamp]: sample of the date cache stamp at the
-  // moment when the fields were cached.
- DECL_ACCESSORS(cache_stamp, Object)
-
- DECL_CAST(JSDate)
-
- // Returns the time value (UTC) identifying the current time.
- static double CurrentTimeValue(Isolate* isolate);
-
- // Returns the date field with the specified index.
- // See FieldIndex for the list of date fields.
- static Object* GetField(Object* date, Smi* index);
-
- static Handle<Object> SetValue(Handle<JSDate> date, double v);
-
- void SetValue(Object* value, bool is_value_nan);
-
- // Dispatched behavior.
- DECL_PRINTER(JSDate)
- DECL_VERIFIER(JSDate)
-
- // The order is important. It must be kept in sync with date macros
- // in macros.py.
- enum FieldIndex {
- kDateValue,
- kYear,
- kMonth,
- kDay,
- kWeekday,
- kHour,
- kMinute,
- kSecond,
- kFirstUncachedField,
- kMillisecond = kFirstUncachedField,
- kDays,
- kTimeInDay,
- kFirstUTCField,
- kYearUTC = kFirstUTCField,
- kMonthUTC,
- kDayUTC,
- kWeekdayUTC,
- kHourUTC,
- kMinuteUTC,
- kSecondUTC,
- kMillisecondUTC,
- kDaysUTC,
- kTimeInDayUTC,
- kTimezoneOffset
- };
-
- // Layout description.
- static const int kValueOffset = JSObject::kHeaderSize;
- static const int kYearOffset = kValueOffset + kPointerSize;
- static const int kMonthOffset = kYearOffset + kPointerSize;
- static const int kDayOffset = kMonthOffset + kPointerSize;
- static const int kWeekdayOffset = kDayOffset + kPointerSize;
- static const int kHourOffset = kWeekdayOffset + kPointerSize;
- static const int kMinOffset = kHourOffset + kPointerSize;
- static const int kSecOffset = kMinOffset + kPointerSize;
- static const int kCacheStampOffset = kSecOffset + kPointerSize;
- static const int kSize = kCacheStampOffset + kPointerSize;
-
- private:
- inline Object* DoGetField(FieldIndex index);
-
- Object* GetUTCField(FieldIndex index, double value, DateCache* date_cache);
-
- // Computes and caches the cacheable fields of the date.
- inline void SetCachedFields(int64_t local_time_ms, DateCache* date_cache);
-
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSDate);
-};
-
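The FieldIndex enum and cache slots above are read through JSDate::GetField: indices below kFirstUncachedField are served from the cached fields when the cache stamp is current, while UTC and uncached fields are recomputed through the DateCache. A hedged usage sketch, where `date` is a hypothetical Handle<JSDate>:

    // Reading a cached local-time component and a recomputed one.
    Object* year = JSDate::GetField(*date, Smi::FromInt(JSDate::kYear));
    Object* offset =
        JSDate::GetField(*date, Smi::FromInt(JSDate::kTimezoneOffset));
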
-
-// Representation of message objects used for error reporting through
-// the API. The messages are formatted in JavaScript so this object is
-// a real JavaScript object. The information used for formatting the
-  // error messages is not directly accessible from JavaScript to
-// prevent leaking information to user code called during error
-// formatting.
-class JSMessageObject: public JSObject {
- public:
- // [type]: the type of error message.
- inline int type() const;
- inline void set_type(int value);
-
- // [arguments]: the arguments for formatting the error message.
- DECL_ACCESSORS(argument, Object)
-
- // [script]: the script from which the error message originated.
- DECL_ACCESSORS(script, Script)
-
- // [stack_frames]: an array of stack frames for this error object.
- DECL_ACCESSORS(stack_frames, Object)
-
- // [start_position]: the start position in the script for the error message.
- inline int start_position() const;
- inline void set_start_position(int value);
-
- // [end_position]: the end position in the script for the error message.
- inline int end_position() const;
- inline void set_end_position(int value);
-
- // Returns the line number for the error message (1-based), or
- // Message::kNoLineNumberInfo if the line cannot be determined.
- int GetLineNumber() const;
-
- // Returns the offset of the given position within the containing line.
- int GetColumnNumber() const;
-
- // Returns the source code line containing the given source
- // position, or the empty string if the position is invalid.
- Handle<String> GetSourceLine() const;
-
- inline int error_level() const;
- inline void set_error_level(int level);
-
- DECL_CAST(JSMessageObject)
-
- // Dispatched behavior.
- DECL_PRINTER(JSMessageObject)
- DECL_VERIFIER(JSMessageObject)
-
- // Layout description.
- static const int kTypeOffset = JSObject::kHeaderSize;
- static const int kArgumentsOffset = kTypeOffset + kPointerSize;
- static const int kScriptOffset = kArgumentsOffset + kPointerSize;
- static const int kStackFramesOffset = kScriptOffset + kPointerSize;
- static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
- static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
- static const int kErrorLevelOffset = kEndPositionOffset + kPointerSize;
- static const int kSize = kErrorLevelOffset + kPointerSize;
-
- typedef FixedBodyDescriptor<HeapObject::kMapOffset,
- kStackFramesOffset + kPointerSize,
- kSize> BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-};
-
-class AllocationSite : public Struct, public NeverReadOnlySpaceObject {
- public:
- static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
- static const double kPretenureRatio;
- static const int kPretenureMinimumCreated = 100;
-
- // Values for pretenure decision field.
- enum PretenureDecision {
- kUndecided = 0,
- kDontTenure = 1,
- kMaybeTenure = 2,
- kTenure = 3,
- kZombie = 4,
- kLastPretenureDecisionValue = kZombie
- };
-
- // Use the mixin methods over the HeapObject methods.
- // TODO(v8:7786) Remove once the HeapObject methods are gone.
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
- const char* PretenureDecisionName(PretenureDecision decision);
-
- // Contains either a Smi-encoded bitfield or a boilerplate. If it's a Smi the
- // AllocationSite is for a constructed Array.
- DECL_ACCESSORS(transition_info_or_boilerplate, Object)
- DECL_ACCESSORS(boilerplate, JSObject)
- DECL_INT_ACCESSORS(transition_info)
-
- // nested_site threads a list of sites that represent nested literals
- // walked in a particular order. So [[1, 2], 1, 2] will have one
- // nested_site, but [[1, 2], 3, [4]] will have a list of two.
- DECL_ACCESSORS(nested_site, Object)
-
- // Bitfield containing pretenuring information.
- DECL_INT32_ACCESSORS(pretenure_data)
-
- DECL_INT32_ACCESSORS(pretenure_create_count)
- DECL_ACCESSORS(dependent_code, DependentCode)
-
-  // heap->allocation_site_list() points to the last AllocationSite; the sites
-  // form a linked list through the weak_next property. The GC might remove
-  // elements from the list by updating weak_next.
- DECL_ACCESSORS(weak_next, Object)
-
- inline void Initialize();
-
-  // Checks if the allocation site contains the weak_next field.
- inline bool HasWeakNext() const;
-
-  // This method is expensive; it should only be called for reporting.
- bool IsNested();
-
- // transition_info bitfields, for constructed array transition info.
- class ElementsKindBits: public BitField<ElementsKind, 0, 15> {};
- class UnusedBits: public BitField<int, 15, 14> {};
- class DoNotInlineBit: public BitField<bool, 29, 1> {};
-
- // Bitfields for pretenure_data
- class MementoFoundCountBits: public BitField<int, 0, 26> {};
- class PretenureDecisionBits: public BitField<PretenureDecision, 26, 3> {};
- class DeoptDependentCodeBit: public BitField<bool, 29, 1> {};
- STATIC_ASSERT(PretenureDecisionBits::kMax >= kLastPretenureDecisionValue);
-
-  // Increments the mementos-found counter and returns true once the count
-  // for this allocation site reaches kPretenureMinimumCreated.
- inline bool IncrementMementoFoundCount(int increment = 1);
-
- inline void IncrementMementoCreateCount();
-
- PretenureFlag GetPretenureMode() const;
-
- void ResetPretenureDecision();
-
- inline PretenureDecision pretenure_decision() const;
- inline void set_pretenure_decision(PretenureDecision decision);
-
- inline bool deopt_dependent_code() const;
- inline void set_deopt_dependent_code(bool deopt);
-
- inline int memento_found_count() const;
- inline void set_memento_found_count(int count);
-
- inline int memento_create_count() const;
- inline void set_memento_create_count(int count);
-
- // The pretenuring decision is made during gc, and the zombie state allows
- // us to recognize when an allocation site is just being kept alive because
- // a later traversal of new space may discover AllocationMementos that point
- // to this AllocationSite.
- inline bool IsZombie() const;
-
- inline bool IsMaybeTenure() const;
-
- inline void MarkZombie();
-
- inline bool MakePretenureDecision(PretenureDecision current_decision,
- double ratio,
- bool maximum_size_scavenge);
-
- inline bool DigestPretenuringFeedback(bool maximum_size_scavenge);
-
- inline ElementsKind GetElementsKind() const;
- inline void SetElementsKind(ElementsKind kind);
-
- inline bool CanInlineCall() const;
- inline void SetDoNotInlineCall();
-
- inline bool PointsToLiteral() const;
-
- template <AllocationSiteUpdateMode update_or_check =
- AllocationSiteUpdateMode::kUpdate>
- static bool DigestTransitionFeedback(Handle<AllocationSite> site,
- ElementsKind to_kind);
-
- DECL_PRINTER(AllocationSite)
- DECL_VERIFIER(AllocationSite)
-
- DECL_CAST(AllocationSite)
- static inline bool ShouldTrack(ElementsKind boilerplate_elements_kind);
- static bool ShouldTrack(ElementsKind from, ElementsKind to);
- static inline bool CanTrack(InstanceType type);
-
-// Layout description.
-// AllocationSite has to start with kTransitionInfoOrBoilerplateOffset
-// and end with the WeakNext field.
-#define ALLOCATION_SITE_FIELDS(V) \
- V(kTransitionInfoOrBoilerplateOffset, kPointerSize) \
- V(kNestedSiteOffset, kPointerSize) \
- V(kDependentCodeOffset, kPointerSize) \
- V(kCommonPointerFieldEndOffset, 0) \
- V(kPretenureDataOffset, kInt32Size) \
- V(kPretenureCreateCountOffset, kInt32Size) \
- /* Size of AllocationSite without WeakNext field */ \
- V(kSizeWithoutWeakNext, 0) \
- V(kWeakNextOffset, kPointerSize) \
- /* Size of AllocationSite with WeakNext field */ \
- V(kSizeWithWeakNext, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ALLOCATION_SITE_FIELDS)
-
- static const int kStartOffset = HeapObject::kHeaderSize;
-
- template <bool includeWeakNext>
- class BodyDescriptorImpl;
-
- // BodyDescriptor is used to traverse all the pointer fields including
- // weak_next
- typedef BodyDescriptorImpl<true> BodyDescriptor;
-
- // BodyDescriptorWeak is used to traverse all the pointer fields
- // except for weak_next
- typedef BodyDescriptorImpl<false> BodyDescriptorWeak;
-
- private:
- inline bool PretenuringDecisionMade() const;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
-};
-
-
-class AllocationMemento: public Struct {
- public:
- static const int kAllocationSiteOffset = HeapObject::kHeaderSize;
- static const int kSize = kAllocationSiteOffset + kPointerSize;
-
- DECL_ACCESSORS(allocation_site, Object)
-
- inline bool IsValid() const;
- inline AllocationSite* GetAllocationSite() const;
- inline Address GetAllocationSiteUnchecked() const;
-
- DECL_PRINTER(AllocationMemento)
- DECL_VERIFIER(AllocationMemento)
-
- DECL_CAST(AllocationMemento)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationMemento);
-};
-
-
// Utility superclass for stack-allocated objects that must be updated
// on GC. It provides two ways for the GC to update instances: either
// by iterating over them or by updating them after GC.
-class Relocatable BASE_EMBEDDED {
+class Relocatable {
public:
explicit inline Relocatable(Isolate* isolate);
inline virtual ~Relocatable();
@@ -3900,8 +2054,6 @@ class Oddball: public HeapObject {
typedef FixedBodyDescriptor<kToStringOffset, kTypeOfOffset + kPointerSize,
kSize> BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
STATIC_ASSERT(kToNumberRawOffset == HeapNumber::kValueOffset);
STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
@@ -3940,8 +2092,6 @@ class Cell: public HeapObject {
typedef FixedBodyDescriptor<kValueOffset,
kValueOffset + kPointerSize,
kSize> BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Cell);
@@ -3968,8 +2118,6 @@ class FeedbackCell : public Struct {
typedef FixedBodyDescriptor<kValueOffset, kValueOffset + kPointerSize, kSize>
BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackCell);
@@ -4026,67 +2174,11 @@ class PropertyCell : public HeapObject {
static const int kSize = kDependentCodeOffset + kPointerSize;
typedef FixedBodyDescriptor<kNameOffset, kSize, kSize> BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell);
};
-// The [Async-from-Sync Iterator] object
-// (proposal-async-iteration/#sec-async-from-sync-iterator-objects)
-// An object which wraps an ordinary Iterator and converts it to behave
-// according to the Async Iterator protocol.
-// (See https://tc39.github.io/proposal-async-iteration/#sec-iteration)
-class JSAsyncFromSyncIterator : public JSObject {
- public:
- DECL_CAST(JSAsyncFromSyncIterator)
- DECL_PRINTER(JSAsyncFromSyncIterator)
- DECL_VERIFIER(JSAsyncFromSyncIterator)
-
- // Async-from-Sync Iterator instances are ordinary objects that inherit
- // properties from the %AsyncFromSyncIteratorPrototype% intrinsic object.
- // Async-from-Sync Iterator instances are initially created with the internal
- // slots listed in Table 4.
- // (proposal-async-iteration/#table-async-from-sync-iterator-internal-slots)
- DECL_ACCESSORS(sync_iterator, JSReceiver)
-
- // The "next" method is loaded during GetIterator, and is not reloaded for
- // subsequent "next" invocations.
- DECL_ACCESSORS(next, Object)
-
- // Offsets of object fields.
- static const int kSyncIteratorOffset = JSObject::kHeaderSize;
- static const int kNextOffset = kSyncIteratorOffset + kPointerSize;
- static const int kSize = kNextOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncFromSyncIterator);
-};
-
-class JSStringIterator : public JSObject {
- public:
- // Dispatched behavior.
- DECL_PRINTER(JSStringIterator)
- DECL_VERIFIER(JSStringIterator)
-
- DECL_CAST(JSStringIterator)
-
- // [string]: the [[IteratedString]] inobject property.
- DECL_ACCESSORS(string, String)
-
- // [index]: The [[StringIteratorNextIndex]] inobject property.
- inline int index() const;
- inline void set_index(int value);
-
- static const int kStringOffset = JSObject::kHeaderSize;
- static const int kNextIndexOffset = kStringOffset + kPointerSize;
- static const int kSize = kNextIndexOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
-};
-
// Foreign describes objects pointing from JavaScript to C structures.
class Foreign: public HeapObject {
public:
@@ -4109,8 +2201,6 @@ class Foreign: public HeapObject {
STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset);
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
friend class Factory;
@@ -4172,66 +2262,6 @@ class AccessorPair: public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorPair);
};
-class StackFrameInfo : public Struct, public NeverReadOnlySpaceObject {
- public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
- DECL_INT_ACCESSORS(line_number)
- DECL_INT_ACCESSORS(column_number)
- DECL_INT_ACCESSORS(script_id)
- DECL_ACCESSORS(script_name, Object)
- DECL_ACCESSORS(script_name_or_source_url, Object)
- DECL_ACCESSORS(function_name, Object)
- DECL_BOOLEAN_ACCESSORS(is_eval)
- DECL_BOOLEAN_ACCESSORS(is_constructor)
- DECL_BOOLEAN_ACCESSORS(is_wasm)
- DECL_INT_ACCESSORS(flag)
- DECL_INT_ACCESSORS(id)
-
- DECL_CAST(StackFrameInfo)
-
- // Dispatched behavior.
- DECL_PRINTER(StackFrameInfo)
- DECL_VERIFIER(StackFrameInfo)
-
- static const int kLineNumberIndex = Struct::kHeaderSize;
- static const int kColumnNumberIndex = kLineNumberIndex + kPointerSize;
- static const int kScriptIdIndex = kColumnNumberIndex + kPointerSize;
- static const int kScriptNameIndex = kScriptIdIndex + kPointerSize;
- static const int kScriptNameOrSourceUrlIndex =
- kScriptNameIndex + kPointerSize;
- static const int kFunctionNameIndex =
- kScriptNameOrSourceUrlIndex + kPointerSize;
- static const int kFlagIndex = kFunctionNameIndex + kPointerSize;
- static const int kIdIndex = kFlagIndex + kPointerSize;
- static const int kSize = kIdIndex + kPointerSize;
-
- private:
- // Bit position in the flag, from least significant bit position.
- static const int kIsEvalBit = 0;
- static const int kIsConstructorBit = 1;
- static const int kIsWasmBit = 2;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StackFrameInfo);
-};
-
-class SourcePositionTableWithFrameCache : public Tuple2 {
- public:
- DECL_ACCESSORS(source_position_table, ByteArray)
- DECL_ACCESSORS(stack_frame_cache, SimpleNumberDictionary)
-
- DECL_CAST(SourcePositionTableWithFrameCache)
-
- static const int kSourcePositionTableIndex = Struct::kHeaderSize;
- static const int kStackFrameCacheIndex =
- kSourcePositionTableIndex + kPointerSize;
- static const int kSize = kStackFrameCacheIndex + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SourcePositionTableWithFrameCache);
-};
-
// BooleanBit is a helper class for setting and getting a bit in an integer.
class BooleanBit : public AllStatic {
public:
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
new file mode 100644
index 0000000000..2ed280c054
--- /dev/null
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -0,0 +1,197 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ALLOCATION_SITE_INL_H_
+#define V8_OBJECTS_ALLOCATION_SITE_INL_H_
+
+#include "src/objects/allocation-site.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/objects/js-objects-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(AllocationMemento)
+CAST_ACCESSOR(AllocationSite)
+
+ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
+ kTransitionInfoOrBoilerplateOffset)
+ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
+INT32_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
+INT32_ACCESSORS(AllocationSite, pretenure_create_count,
+ kPretenureCreateCountOffset)
+ACCESSORS(AllocationSite, dependent_code, DependentCode, kDependentCodeOffset)
+ACCESSORS_CHECKED(AllocationSite, weak_next, Object, kWeakNextOffset,
+ HasWeakNext())
+ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
+
+JSObject* AllocationSite::boilerplate() const {
+ DCHECK(PointsToLiteral());
+ return JSObject::cast(transition_info_or_boilerplate());
+}
+
+void AllocationSite::set_boilerplate(JSObject* object, WriteBarrierMode mode) {
+ set_transition_info_or_boilerplate(object, mode);
+}
+
+int AllocationSite::transition_info() const {
+ DCHECK(!PointsToLiteral());
+ return Smi::cast(transition_info_or_boilerplate())->value();
+}
+
+void AllocationSite::set_transition_info(int value) {
+ DCHECK(!PointsToLiteral());
+ set_transition_info_or_boilerplate(Smi::FromInt(value), SKIP_WRITE_BARRIER);
+}
+
+bool AllocationSite::HasWeakNext() const {
+ return map() == GetReadOnlyRoots().allocation_site_map();
+}
+
+void AllocationSite::Initialize() {
+ set_transition_info_or_boilerplate(Smi::kZero);
+ SetElementsKind(GetInitialFastElementsKind());
+ set_nested_site(Smi::kZero);
+ set_pretenure_data(0);
+ set_pretenure_create_count(0);
+ set_dependent_code(
+ DependentCode::cast(GetReadOnlyRoots().empty_weak_fixed_array()),
+ SKIP_WRITE_BARRIER);
+}
+
+bool AllocationSite::IsZombie() const {
+ return pretenure_decision() == kZombie;
+}
+
+bool AllocationSite::IsMaybeTenure() const {
+ return pretenure_decision() == kMaybeTenure;
+}
+
+bool AllocationSite::PretenuringDecisionMade() const {
+ return pretenure_decision() != kUndecided;
+}
+
+void AllocationSite::MarkZombie() {
+ DCHECK(!IsZombie());
+ Initialize();
+ set_pretenure_decision(kZombie);
+}
+
+ElementsKind AllocationSite::GetElementsKind() const {
+ return ElementsKindBits::decode(transition_info());
+}
+
+void AllocationSite::SetElementsKind(ElementsKind kind) {
+ set_transition_info(ElementsKindBits::update(transition_info(), kind));
+}
+
+bool AllocationSite::CanInlineCall() const {
+ return DoNotInlineBit::decode(transition_info()) == 0;
+}
+
+void AllocationSite::SetDoNotInlineCall() {
+ set_transition_info(DoNotInlineBit::update(transition_info(), true));
+}
+
+bool AllocationSite::PointsToLiteral() const {
+ Object* raw_value = transition_info_or_boilerplate();
+ DCHECK_EQ(!raw_value->IsSmi(),
+ raw_value->IsJSArray() || raw_value->IsJSObject());
+ return !raw_value->IsSmi();
+}
+
+// Heuristic: We only need to create allocation site info if the boilerplate
+// elements kind is the initial elements kind.
+bool AllocationSite::ShouldTrack(ElementsKind boilerplate_elements_kind) {
+ return IsSmiElementsKind(boilerplate_elements_kind);
+}
+
+inline bool AllocationSite::CanTrack(InstanceType type) {
+ if (FLAG_allocation_site_pretenuring) {
+ // TurboFan doesn't care at all about String pretenuring feedback,
+ // so don't bother even trying to track that.
+ return type == JS_ARRAY_TYPE || type == JS_OBJECT_TYPE;
+ }
+ return type == JS_ARRAY_TYPE;
+}
+
+AllocationSite::PretenureDecision AllocationSite::pretenure_decision() const {
+ return PretenureDecisionBits::decode(pretenure_data());
+}
+
+void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
+ int32_t value = pretenure_data();
+ set_pretenure_data(PretenureDecisionBits::update(value, decision));
+}
+
+bool AllocationSite::deopt_dependent_code() const {
+ return DeoptDependentCodeBit::decode(pretenure_data());
+}
+
+void AllocationSite::set_deopt_dependent_code(bool deopt) {
+ int32_t value = pretenure_data();
+ set_pretenure_data(DeoptDependentCodeBit::update(value, deopt));
+}
+
+int AllocationSite::memento_found_count() const {
+ return MementoFoundCountBits::decode(pretenure_data());
+}
+
+inline void AllocationSite::set_memento_found_count(int count) {
+ int32_t value = pretenure_data();
+ // Verify that we can count more mementos than we can possibly find in one
+ // new space collection.
+ DCHECK((GetHeap()->MaxSemiSpaceSize() /
+ (Heap::kMinObjectSizeInWords * kPointerSize +
+ AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
+ DCHECK_LT(count, MementoFoundCountBits::kMax);
+ set_pretenure_data(MementoFoundCountBits::update(value, count));
+}
+
+int AllocationSite::memento_create_count() const {
+ return pretenure_create_count();
+}
+
+void AllocationSite::set_memento_create_count(int count) {
+ set_pretenure_create_count(count);
+}
+
+bool AllocationSite::IncrementMementoFoundCount(int increment) {
+ if (IsZombie()) return false;
+
+ int value = memento_found_count();
+ set_memento_found_count(value + increment);
+ return memento_found_count() >= kPretenureMinimumCreated;
+}
+
+inline void AllocationSite::IncrementMementoCreateCount() {
+ DCHECK(FLAG_allocation_site_pretenuring);
+ int value = memento_create_count();
+ set_memento_create_count(value + 1);
+}
+
+bool AllocationMemento::IsValid() const {
+ return allocation_site()->IsAllocationSite() &&
+ !AllocationSite::cast(allocation_site())->IsZombie();
+}
+
+AllocationSite* AllocationMemento::GetAllocationSite() const {
+ DCHECK(IsValid());
+ return AllocationSite::cast(allocation_site());
+}
+
+Address AllocationMemento::GetAllocationSiteUnchecked() const {
+ return reinterpret_cast<Address>(allocation_site());
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_ALLOCATION_SITE_INL_H_
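DigestPretenuringFeedback itself is not part of this hunk, but the two counters maintained above feed a ratio test against kPretenureRatio (whose value is defined elsewhere). A self-contained sketch of the general shape of that decision, not V8's exact code:

    // Tenure future allocations from a site once enough of the mementos it
    // created were found again by the GC, relative to the threshold ratio.
    bool ShouldTenure(int found, int created, double ratio_threshold) {
      if (created == 0) return false;
      double ratio = static_cast<double>(found) / created;
      return ratio >= ratio_threshold;
    }
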
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
new file mode 100644
index 0000000000..d923fd8f23
--- /dev/null
+++ b/deps/v8/src/objects/allocation-site.h
@@ -0,0 +1,186 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ALLOCATION_SITE_H_
+#define V8_OBJECTS_ALLOCATION_SITE_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class AllocationSite : public Struct, public NeverReadOnlySpaceObject {
+ public:
+ static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
+ static const double kPretenureRatio;
+ static const int kPretenureMinimumCreated = 100;
+
+ // Values for pretenure decision field.
+ enum PretenureDecision {
+ kUndecided = 0,
+ kDontTenure = 1,
+ kMaybeTenure = 2,
+ kTenure = 3,
+ kZombie = 4,
+ kLastPretenureDecisionValue = kZombie
+ };
+
+ const char* PretenureDecisionName(PretenureDecision decision);
+
+ // Contains either a Smi-encoded bitfield or a boilerplate. If it's a Smi the
+ // AllocationSite is for a constructed Array.
+ DECL_ACCESSORS(transition_info_or_boilerplate, Object)
+ DECL_ACCESSORS(boilerplate, JSObject)
+ DECL_INT_ACCESSORS(transition_info)
+
+ // nested_site threads a list of sites that represent nested literals
+ // walked in a particular order. So [[1, 2], 1, 2] will have one
+ // nested_site, but [[1, 2], 3, [4]] will have a list of two.
+ DECL_ACCESSORS(nested_site, Object)
+
+ // Bitfield containing pretenuring information.
+ DECL_INT32_ACCESSORS(pretenure_data)
+
+ DECL_INT32_ACCESSORS(pretenure_create_count)
+ DECL_ACCESSORS(dependent_code, DependentCode)
+
+  // heap->allocation_site_list() points to the last AllocationSite; the sites
+  // form a linked list through the weak_next property. The GC might remove
+  // elements from the list by updating weak_next.
+ DECL_ACCESSORS(weak_next, Object)
+
+ inline void Initialize();
+
+  // Checks if the allocation site contains the weak_next field.
+ inline bool HasWeakNext() const;
+
+  // This method is expensive; it should only be called for reporting.
+ bool IsNested();
+
+ // transition_info bitfields, for constructed array transition info.
+ class ElementsKindBits : public BitField<ElementsKind, 0, 5> {};
+ class DoNotInlineBit : public BitField<bool, 5, 1> {};
+ // Unused bits 6-30.
+
+ // Bitfields for pretenure_data
+ class MementoFoundCountBits : public BitField<int, 0, 26> {};
+ class PretenureDecisionBits : public BitField<PretenureDecision, 26, 3> {};
+ class DeoptDependentCodeBit : public BitField<bool, 29, 1> {};
+ STATIC_ASSERT(PretenureDecisionBits::kMax >= kLastPretenureDecisionValue);
+
+  // Increments the mementos-found counter and returns true once the count
+  // for this allocation site reaches kPretenureMinimumCreated.
+ inline bool IncrementMementoFoundCount(int increment = 1);
+
+ inline void IncrementMementoCreateCount();
+
+ PretenureFlag GetPretenureMode() const;
+
+ void ResetPretenureDecision();
+
+ inline PretenureDecision pretenure_decision() const;
+ inline void set_pretenure_decision(PretenureDecision decision);
+
+ inline bool deopt_dependent_code() const;
+ inline void set_deopt_dependent_code(bool deopt);
+
+ inline int memento_found_count() const;
+ inline void set_memento_found_count(int count);
+
+ inline int memento_create_count() const;
+ inline void set_memento_create_count(int count);
+
+ // The pretenuring decision is made during gc, and the zombie state allows
+ // us to recognize when an allocation site is just being kept alive because
+ // a later traversal of new space may discover AllocationMementos that point
+ // to this AllocationSite.
+ inline bool IsZombie() const;
+
+ inline bool IsMaybeTenure() const;
+
+ inline void MarkZombie();
+
+ inline bool MakePretenureDecision(PretenureDecision current_decision,
+ double ratio, bool maximum_size_scavenge);
+
+ inline bool DigestPretenuringFeedback(bool maximum_size_scavenge);
+
+ inline ElementsKind GetElementsKind() const;
+ inline void SetElementsKind(ElementsKind kind);
+
+ inline bool CanInlineCall() const;
+ inline void SetDoNotInlineCall();
+
+ inline bool PointsToLiteral() const;
+
+ template <AllocationSiteUpdateMode update_or_check =
+ AllocationSiteUpdateMode::kUpdate>
+ static bool DigestTransitionFeedback(Handle<AllocationSite> site,
+ ElementsKind to_kind);
+
+ DECL_PRINTER(AllocationSite)
+ DECL_VERIFIER(AllocationSite)
+
+ DECL_CAST(AllocationSite)
+ static inline bool ShouldTrack(ElementsKind boilerplate_elements_kind);
+ static bool ShouldTrack(ElementsKind from, ElementsKind to);
+ static inline bool CanTrack(InstanceType type);
+
+// Layout description.
+// AllocationSite has to start with kTransitionInfoOrBoilerplateOffset
+// and end with the WeakNext field.
+#define ALLOCATION_SITE_FIELDS(V) \
+ V(kTransitionInfoOrBoilerplateOffset, kPointerSize) \
+ V(kNestedSiteOffset, kPointerSize) \
+ V(kDependentCodeOffset, kPointerSize) \
+ V(kCommonPointerFieldEndOffset, 0) \
+ V(kPretenureDataOffset, kInt32Size) \
+ V(kPretenureCreateCountOffset, kInt32Size) \
+ /* Size of AllocationSite without WeakNext field */ \
+ V(kSizeWithoutWeakNext, 0) \
+ V(kWeakNextOffset, kPointerSize) \
+ /* Size of AllocationSite with WeakNext field */ \
+ V(kSizeWithWeakNext, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ALLOCATION_SITE_FIELDS)
+
+ static const int kStartOffset = HeapObject::kHeaderSize;
+
+ class BodyDescriptor;
+
+ private:
+ inline bool PretenuringDecisionMade() const;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
+};
+
+class AllocationMemento : public Struct {
+ public:
+ static const int kAllocationSiteOffset = HeapObject::kHeaderSize;
+ static const int kSize = kAllocationSiteOffset + kPointerSize;
+
+ DECL_ACCESSORS(allocation_site, Object)
+
+ inline bool IsValid() const;
+ inline AllocationSite* GetAllocationSite() const;
+ inline Address GetAllocationSiteUnchecked() const;
+
+ DECL_PRINTER(AllocationMemento)
+ DECL_VERIFIER(AllocationMemento)
+
+ DECL_CAST(AllocationMemento)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationMemento);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_ALLOCATION_SITE_H_
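Note that the extracted header also narrows ElementsKindBits from 15 bits to 5 and moves DoNotInlineBit down to bit 5. A self-contained re-creation of the BitField encode/decode/update pattern used above (simplified; V8's real template has more machinery):

    #include <cstdint>

    // Minimal BitField in the style used above: Pos = start bit, Size = width.
    template <typename T, int Pos, int Size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << Size) - 1u) << Pos;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << Pos;
      }
      static constexpr T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> Pos);
      }
      static constexpr uint32_t update(uint32_t field, T value) {
        return (field & ~kMask) | encode(value);
      }
    };

    using DemoElementsKindBits = BitField<int, 0, 5>;
    using DemoDoNotInlineBit = BitField<bool, 5, 1>;

    static_assert(DemoElementsKindBits::decode(DemoDoNotInlineBit::update(
                      DemoElementsKindBits::encode(3), true)) == 3,
                  "updating one field leaves its neighbor intact");
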
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index 4f7680d8ed..b8ca8bef20 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -59,8 +59,22 @@ BIT_FIELD_ACCESSORS(AccessorInfo, flags, is_special_data_property,
BIT_FIELD_ACCESSORS(AccessorInfo, flags, replace_on_access,
AccessorInfo::ReplaceOnAccessBit)
BIT_FIELD_ACCESSORS(AccessorInfo, flags, is_sloppy, AccessorInfo::IsSloppyBit)
-BIT_FIELD_ACCESSORS(AccessorInfo, flags, has_no_side_effect,
- AccessorInfo::HasNoSideEffectBit)
+BIT_FIELD_ACCESSORS(AccessorInfo, flags, getter_side_effect_type,
+ AccessorInfo::GetterSideEffectTypeBits)
+
+SideEffectType AccessorInfo::setter_side_effect_type() const {
+ return SetterSideEffectTypeBits::decode(flags());
+}
+
+void AccessorInfo::set_setter_side_effect_type(SideEffectType value) {
+ // We do not support describing setters as having no side effect, since
+ // calling set accessors must go through a store bytecode. Store bytecodes
+ // support checking receivers for temporary objects, but still expect
+ // the receiver to be written to.
+ CHECK_NE(value, SideEffectType::kHasNoSideEffect);
+ set_flags(SetterSideEffectTypeBits::update(flags(), value));
+}
+
BIT_FIELD_ACCESSORS(AccessorInfo, flags, initial_property_attributes,
AccessorInfo::InitialAttributesBits)
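This replaces the single has_no_side_effect flag with per-accessor side-effect types, which is what lets a getter be whitelisted for side-effect-free debug evaluation while its setter stays checked. A hedged sketch, where `info` is a hypothetical Handle<AccessorInfo> and the SideEffectType enumerators are assumed to come from this version's public v8.h:

    // Getters may be declared side-effect free; setters may not (the CHECK_NE
    // above rejects SideEffectType::kHasNoSideEffect for setters).
    info->set_getter_side_effect_type(SideEffectType::kHasNoSideEffect);
    info->set_setter_side_effect_type(SideEffectType::kHasSideEffectToReceiver);
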
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index 6f3629a2c3..f7522da8a7 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -48,7 +48,12 @@ class AccessorInfo : public Struct {
DECL_BOOLEAN_ACCESSORS(is_special_data_property)
DECL_BOOLEAN_ACCESSORS(replace_on_access)
DECL_BOOLEAN_ACCESSORS(is_sloppy)
- DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
+
+ inline SideEffectType getter_side_effect_type() const;
+ inline void set_getter_side_effect_type(SideEffectType type);
+
+ inline SideEffectType setter_side_effect_type() const;
+ inline void set_setter_side_effect_type(SideEffectType type);
// The property attributes used when an API object template is instantiated
// for the first time. Changing of this value afterwards does not affect
@@ -89,13 +94,15 @@ class AccessorInfo : public Struct {
inline bool HasExpectedReceiverType();
// Bit positions in |flags|.
-#define ACCESSOR_INFO_FLAGS_BIT_FIELDS(V, _) \
- V(AllCanReadBit, bool, 1, _) \
- V(AllCanWriteBit, bool, 1, _) \
- V(IsSpecialDataPropertyBit, bool, 1, _) \
- V(IsSloppyBit, bool, 1, _) \
- V(ReplaceOnAccessBit, bool, 1, _) \
- V(HasNoSideEffectBit, bool, 1, _) \
+#define ACCESSOR_INFO_FLAGS_BIT_FIELDS(V, _) \
+ V(AllCanReadBit, bool, 1, _) \
+ V(AllCanWriteBit, bool, 1, _) \
+ V(IsSpecialDataPropertyBit, bool, 1, _) \
+ V(IsSloppyBit, bool, 1, _) \
+ V(ReplaceOnAccessBit, bool, 1, _) \
+ V(GetterSideEffectTypeBits, SideEffectType, 2, _) \
+ /* We could save a bit from setter side-effect type, if necessary */ \
+ V(SetterSideEffectTypeBits, SideEffectType, 2, _) \
V(InitialAttributesBits, PropertyAttributes, 3, _)
DEFINE_BIT_FIELDS(ACCESSOR_INFO_FLAGS_BIT_FIELDS)
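
A minimal standalone sketch of the BitField pattern that DEFINE_BIT_FIELDS expands to for the two-bit fields above; the shift positions and the SideEffectType enumerators here are illustrative assumptions, not V8's exact definitions.

#include <cassert>
#include <cstdint>

enum class SideEffectType : uint32_t {
  kHasSideEffect,
  kHasNoSideEffect,
  kHasSideEffectToReceiver,  // illustrative enumerator set
};

// BitField<T, kShift, kSize> stores T in bits [kShift, kShift + kSize).
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t flags, T value) {
    return (flags & ~kMask) | encode(value);
  }
};

// Assumed positions: directly after the five one-bit flags in the list above.
using GetterSideEffectTypeBits = BitField<SideEffectType, 5, 2>;
using SetterSideEffectTypeBits = BitField<SideEffectType, 7, 2>;

int main() {
  uint32_t flags = 0;
  flags = GetterSideEffectTypeBits::update(flags,
                                           SideEffectType::kHasNoSideEffect);
  assert(GetterSideEffectTypeBits::decode(flags) ==
         SideEffectType::kHasNoSideEffect);
  // Untouched setter bits still decode to the zero value.
  assert(SetterSideEffectTypeBits::decode(flags) ==
         SideEffectType::kHasSideEffect);
}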
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index 7d92ce0496..222ca7954e 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -63,7 +63,8 @@ bool JSSloppyArgumentsObject::GetSloppyArgumentsLength(Isolate* isolate,
return false;
}
DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
- Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
+ Object* len_obj =
+ object->InObjectPropertyAt(JSArgumentsObjectWithLength::kLengthIndex);
if (!len_obj->IsSmi()) return false;
*out = Max(0, Smi::ToInt(len_obj));
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 36c6204d1a..15f3d2a2f5 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_ARGUMENTS_H_
#define V8_OBJECTS_ARGUMENTS_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,11 +14,21 @@
namespace v8 {
namespace internal {
+// Superclass for all objects with instance type {JS_ARGUMENTS_TYPE}
+class JSArgumentsObject : public JSObject {
+ public:
+ DECL_VERIFIER(JSArgumentsObject)
+ DECL_CAST(JSArgumentsObject)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArgumentsObject);
+};
+
// Common superclass for JSSloppyArgumentsObject and JSStrictArgumentsObject.
// Note that the instance type {JS_ARGUMENTS_TYPE} does _not_ guarantee the
 // below layout; the in-object properties might have transitioned to dictionary
// mode already. Only use the below layout with the specific initial maps.
-class JSArgumentsObject : public JSObject {
+class JSArgumentsObjectWithLength : public JSArgumentsObject {
public:
// Offsets of object fields.
static const int kLengthOffset = JSObject::kHeaderSize;
@@ -26,19 +36,19 @@ class JSArgumentsObject : public JSObject {
// Indices of in-object properties.
static const int kLengthIndex = 0;
- DECL_VERIFIER(JSArgumentsObject)
- DECL_CAST(JSArgumentsObject)
+ DECL_VERIFIER(JSArgumentsObjectWithLength)
+ DECL_CAST(JSArgumentsObjectWithLength)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArgumentsObject);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArgumentsObjectWithLength);
};
// JSSloppyArgumentsObject is just a JSObject with specific initial map.
// This initial map adds in-object properties for "length" and "callee".
-class JSSloppyArgumentsObject : public JSArgumentsObject {
+class JSSloppyArgumentsObject : public JSArgumentsObjectWithLength {
public:
// Offsets of object fields.
- static const int kCalleeOffset = JSArgumentsObject::kSize;
+ static const int kCalleeOffset = JSArgumentsObjectWithLength::kSize;
static const int kSize = kCalleeOffset + kPointerSize;
// Indices of in-object properties.
static const int kCalleeIndex = kLengthIndex + 1;
@@ -53,10 +63,10 @@ class JSSloppyArgumentsObject : public JSArgumentsObject {
// JSStrictArgumentsObject is just a JSObject with specific initial map.
// This initial map adds an in-object property for "length".
-class JSStrictArgumentsObject : public JSArgumentsObject {
+class JSStrictArgumentsObject : public JSArgumentsObjectWithLength {
public:
// Offsets of object fields.
- static const int kSize = JSArgumentsObject::kSize;
+ static const int kSize = JSArgumentsObjectWithLength::kSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSStrictArgumentsObject);
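
The renamed classes chain their in-object slot offsets off the superclass size. A standalone sketch of that arithmetic, with illustrative stand-ins for kPointerSize and the JSObject header size:

constexpr int kPointerSize = 8;          // illustrative: 64-bit tagged slot
constexpr int kJSObjectHeaderSize = 24;  // illustrative header size

// JSArgumentsObjectWithLength: one in-object slot ("length").
constexpr int kLengthOffset = kJSObjectHeaderSize;
constexpr int kWithLengthSize = kLengthOffset + kPointerSize;

// JSSloppyArgumentsObject appends a second slot ("callee").
constexpr int kCalleeOffset = kWithLengthSize;
constexpr int kSloppySize = kCalleeOffset + kPointerSize;

// JSStrictArgumentsObject adds nothing beyond "length".
constexpr int kStrictSize = kWithLengthSize;

static_assert(kSloppySize == kStrictSize + kPointerSize,
              "sloppy arguments carry exactly one extra in-object slot");

int main() { return 0; }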
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 458aa7c1eb..dcf99e2f29 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -36,9 +36,6 @@ namespace internal {
class MutableBigInt : public FreshlyAllocatedBigInt,
public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
// Bottleneck for converting MutableBigInts to BigInts.
static MaybeHandle<BigInt> MakeImmutable(MaybeHandle<MutableBigInt> maybe);
static Handle<BigInt> MakeImmutable(Handle<MutableBigInt> result);
@@ -96,7 +93,8 @@ class MutableBigInt : public FreshlyAllocatedBigInt,
static inline Handle<MutableBigInt> AbsoluteBitwiseOp(
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
MutableBigInt* result_storage, ExtraDigitsHandling extra_digits,
- SymmetricOp symmetric, std::function<digit_t(digit_t, digit_t)> op);
+ SymmetricOp symmetric,
+ const std::function<digit_t(digit_t, digit_t)>& op);
static Handle<MutableBigInt> AbsoluteAnd(
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
MutableBigInt* result_storage = nullptr);
@@ -155,9 +153,11 @@ class MutableBigInt : public FreshlyAllocatedBigInt,
static MaybeHandle<String> ToStringBasePowerOfTwo(Isolate* isolate,
Handle<BigIntBase> x,
- int radix);
+ int radix,
+ ShouldThrow should_throw);
static MaybeHandle<String> ToStringGeneric(Isolate* isolate,
- Handle<BigIntBase> x, int radix);
+ Handle<BigIntBase> x, int radix,
+ ShouldThrow should_throw);
static double ToDouble(Handle<BigIntBase> x);
enum Rounding { kRoundDown, kTie, kRoundUp };
@@ -924,14 +924,15 @@ ComparisonResult BigInt::CompareToDouble(Handle<BigInt> x, double y) {
}
MaybeHandle<String> BigInt::ToString(Isolate* isolate, Handle<BigInt> bigint,
- int radix) {
+ int radix, ShouldThrow should_throw) {
if (bigint->is_zero()) {
return isolate->factory()->NewStringFromStaticChars("0");
}
if (base::bits::IsPowerOfTwo(radix)) {
- return MutableBigInt::ToStringBasePowerOfTwo(isolate, bigint, radix);
+ return MutableBigInt::ToStringBasePowerOfTwo(isolate, bigint, radix,
+ should_throw);
}
- return MutableBigInt::ToStringGeneric(isolate, bigint, radix);
+ return MutableBigInt::ToStringGeneric(isolate, bigint, radix, should_throw);
}
MaybeHandle<BigInt> BigInt::FromNumber(Isolate* isolate,
@@ -1255,7 +1256,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Isolate* isolate,
inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
MutableBigInt* result_storage, ExtraDigitsHandling extra_digits,
- SymmetricOp symmetric, std::function<digit_t(digit_t, digit_t)> op) {
+ SymmetricOp symmetric, const std::function<digit_t(digit_t, digit_t)>& op) {
int x_length = x->length();
int y_length = y->length();
int num_pairs = y_length;
@@ -1924,9 +1925,9 @@ MaybeHandle<BigInt> BigInt::FromSerializedDigits(
static const char kConversionChars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
-MaybeHandle<String> MutableBigInt::ToStringBasePowerOfTwo(Isolate* isolate,
- Handle<BigIntBase> x,
- int radix) {
+MaybeHandle<String> MutableBigInt::ToStringBasePowerOfTwo(
+ Isolate* isolate, Handle<BigIntBase> x, int radix,
+ ShouldThrow should_throw) {
STATIC_ASSERT(base::bits::IsPowerOfTwo(kDigitBits));
DCHECK(base::bits::IsPowerOfTwo(radix));
DCHECK(radix >= 2 && radix <= 32);
@@ -1945,7 +1946,11 @@ MaybeHandle<String> MutableBigInt::ToStringBasePowerOfTwo(Isolate* isolate,
(bit_length + bits_per_char - 1) / bits_per_char + sign;
if (chars_required > String::kMaxLength) {
- THROW_NEW_ERROR(isolate, NewInvalidStringLengthError(), String);
+ if (should_throw == kThrowOnError) {
+ THROW_NEW_ERROR(isolate, NewInvalidStringLengthError(), String);
+ } else {
+ return MaybeHandle<String>();
+ }
}
Handle<SeqOneByteString> result =
@@ -1988,7 +1993,8 @@ MaybeHandle<String> MutableBigInt::ToStringBasePowerOfTwo(Isolate* isolate,
MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
Handle<BigIntBase> x,
- int radix) {
+ int radix,
+ ShouldThrow should_throw) {
DCHECK(radix >= 2 && radix <= 36);
DCHECK(!x->is_zero());
Heap* heap = isolate->heap();
@@ -2014,7 +2020,11 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
chars_required += sign;
if (chars_required > String::kMaxLength) {
- THROW_NEW_ERROR(isolate, NewInvalidStringLengthError(), String);
+ if (should_throw == kThrowOnError) {
+ THROW_NEW_ERROR(isolate, NewInvalidStringLengthError(), String);
+ } else {
+ return MaybeHandle<String>();
+ }
}
Handle<SeqOneByteString> result =
isolate->factory()
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index a30a4779de..6081c5e3f8 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -173,7 +173,8 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
}
static MaybeHandle<String> ToString(Isolate* isolate, Handle<BigInt> bigint,
- int radix = 10);
+ int radix = 10,
+ ShouldThrow should_throw = kThrowOnError);
// "The Number value for x", see:
// https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type
// Returns a Smi or HeapNumber.
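
The new ShouldThrow parameter lets ToString callers treat an overlong result as a soft failure instead of a pending exception. A standalone sketch of the pattern, with std::optional standing in for MaybeHandle<String> and an illustrative length limit:

#include <cstddef>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>

enum ShouldThrow { kThrowOnError, kDontThrow };
constexpr size_t kMaxLength = size_t{1} << 28;  // stand-in for String::kMaxLength

// Mirrors the two exits above: throw (models THROW_NEW_ERROR) or return an
// empty result (models returning an empty MaybeHandle<String>).
std::optional<std::string> ToStringChecked(size_t chars_required,
                                           ShouldThrow should_throw) {
  if (chars_required > kMaxLength) {
    if (should_throw == kThrowOnError) {
      throw std::length_error("Invalid string length");
    }
    return std::nullopt;
  }
  return std::string(chars_required, '0');
}

int main() {
  if (!ToStringChecked(kMaxLength + 1, kDontThrow)) {
    std::cout << "result too long; no exception raised\n";
  }
}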
diff --git a/deps/v8/src/objects/builtin-function-id.h b/deps/v8/src/objects/builtin-function-id.h
new file mode 100644
index 0000000000..ed54811a2b
--- /dev/null
+++ b/deps/v8/src/objects/builtin-function-id.h
@@ -0,0 +1,217 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_BUILTIN_FUNCTION_ID_H_
+#define V8_OBJECTS_BUILTIN_FUNCTION_ID_H_
+
+#include <stdint.h>
+
+namespace v8 {
+namespace internal {
+
+// List of builtin functions we want to identify to improve code
+// generation.
+//
+// Each entry has a name of a global object property holding an object
+// optionally followed by ".prototype", a name of a builtin function
+// on the object (the one the id is set for), and a label.
+//
+// Installation of ids for the selected builtin functions is handled
+// by the bootstrapper.
+#define FUNCTIONS_WITH_ID_LIST(V) \
+ V(Array, isArray, ArrayIsArray) \
+ V(Array.prototype, concat, ArrayConcat) \
+ V(Array.prototype, every, ArrayEvery) \
+ V(Array.prototype, fill, ArrayFill) \
+ V(Array.prototype, filter, ArrayFilter) \
+ V(Array.prototype, findIndex, ArrayFindIndex) \
+ V(Array.prototype, forEach, ArrayForEach) \
+ V(Array.prototype, includes, ArrayIncludes) \
+ V(Array.prototype, indexOf, ArrayIndexOf) \
+ V(Array.prototype, join, ArrayJoin) \
+ V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \
+ V(Array.prototype, map, ArrayMap) \
+ V(Array.prototype, pop, ArrayPop) \
+ V(Array.prototype, push, ArrayPush) \
+ V(Array.prototype, reverse, ArrayReverse) \
+ V(Array.prototype, shift, ArrayShift) \
+ V(Array.prototype, slice, ArraySlice) \
+ V(Array.prototype, some, ArraySome) \
+ V(Array.prototype, splice, ArraySplice) \
+ V(Array.prototype, unshift, ArrayUnshift) \
+ V(Date, now, DateNow) \
+ V(Date.prototype, getDate, DateGetDate) \
+ V(Date.prototype, getDay, DateGetDay) \
+ V(Date.prototype, getFullYear, DateGetFullYear) \
+ V(Date.prototype, getHours, DateGetHours) \
+ V(Date.prototype, getMilliseconds, DateGetMilliseconds) \
+ V(Date.prototype, getMinutes, DateGetMinutes) \
+ V(Date.prototype, getMonth, DateGetMonth) \
+ V(Date.prototype, getSeconds, DateGetSeconds) \
+ V(Date.prototype, getTime, DateGetTime) \
+ V(Function.prototype, apply, FunctionApply) \
+ V(Function.prototype, bind, FunctionBind) \
+ V(Function.prototype, call, FunctionCall) \
+ V(Object, assign, ObjectAssign) \
+ V(Object, create, ObjectCreate) \
+ V(Object, is, ObjectIs) \
+ V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
+ V(Object.prototype, isPrototypeOf, ObjectIsPrototypeOf) \
+ V(Object.prototype, toString, ObjectToString) \
+ V(RegExp.prototype, compile, RegExpCompile) \
+ V(RegExp.prototype, exec, RegExpExec) \
+ V(RegExp.prototype, test, RegExpTest) \
+ V(RegExp.prototype, toString, RegExpToString) \
+ V(String.prototype, charCodeAt, StringCharCodeAt) \
+ V(String.prototype, charAt, StringCharAt) \
+ V(String.prototype, codePointAt, StringCodePointAt) \
+ V(String.prototype, concat, StringConcat) \
+ V(String.prototype, endsWith, StringEndsWith) \
+ V(String.prototype, includes, StringIncludes) \
+ V(String.prototype, indexOf, StringIndexOf) \
+ V(String.prototype, lastIndexOf, StringLastIndexOf) \
+ V(String.prototype, repeat, StringRepeat) \
+ V(String.prototype, slice, StringSlice) \
+ V(String.prototype, startsWith, StringStartsWith) \
+ V(String.prototype, substr, StringSubstr) \
+ V(String.prototype, substring, StringSubstring) \
+ V(String.prototype, toLowerCase, StringToLowerCase) \
+ V(String.prototype, toString, StringToString) \
+ V(String.prototype, toUpperCase, StringToUpperCase) \
+ V(String.prototype, trim, StringTrim) \
+ V(String.prototype, trimLeft, StringTrimStart) \
+ V(String.prototype, trimRight, StringTrimEnd) \
+ V(String.prototype, valueOf, StringValueOf) \
+ V(String, fromCharCode, StringFromCharCode) \
+ V(String, fromCodePoint, StringFromCodePoint) \
+ V(String, raw, StringRaw) \
+ V(Math, random, MathRandom) \
+ V(Math, floor, MathFloor) \
+ V(Math, round, MathRound) \
+ V(Math, ceil, MathCeil) \
+ V(Math, abs, MathAbs) \
+ V(Math, log, MathLog) \
+ V(Math, log1p, MathLog1p) \
+ V(Math, log2, MathLog2) \
+ V(Math, log10, MathLog10) \
+ V(Math, cbrt, MathCbrt) \
+ V(Math, exp, MathExp) \
+ V(Math, expm1, MathExpm1) \
+ V(Math, sqrt, MathSqrt) \
+ V(Math, pow, MathPow) \
+ V(Math, max, MathMax) \
+ V(Math, min, MathMin) \
+ V(Math, cos, MathCos) \
+ V(Math, cosh, MathCosh) \
+ V(Math, sign, MathSign) \
+ V(Math, sin, MathSin) \
+ V(Math, sinh, MathSinh) \
+ V(Math, tan, MathTan) \
+ V(Math, tanh, MathTanh) \
+ V(Math, acos, MathAcos) \
+ V(Math, acosh, MathAcosh) \
+ V(Math, asin, MathAsin) \
+ V(Math, asinh, MathAsinh) \
+ V(Math, atan, MathAtan) \
+ V(Math, atan2, MathAtan2) \
+ V(Math, atanh, MathAtanh) \
+ V(Math, imul, MathImul) \
+ V(Math, clz32, MathClz32) \
+ V(Math, fround, MathFround) \
+ V(Math, trunc, MathTrunc) \
+ V(Number, isFinite, NumberIsFinite) \
+ V(Number, isInteger, NumberIsInteger) \
+ V(Number, isNaN, NumberIsNaN) \
+ V(Number, isSafeInteger, NumberIsSafeInteger) \
+ V(Number, parseFloat, NumberParseFloat) \
+ V(Number, parseInt, NumberParseInt) \
+ V(Number.prototype, toString, NumberToString) \
+ V(Map.prototype, clear, MapClear) \
+ V(Map.prototype, delete, MapDelete) \
+ V(Map.prototype, entries, MapEntries) \
+ V(Map.prototype, forEach, MapForEach) \
+ V(Map.prototype, has, MapHas) \
+ V(Map.prototype, keys, MapKeys) \
+ V(Map.prototype, get, MapGet) \
+ V(Map.prototype, set, MapSet) \
+ V(Map.prototype, values, MapValues) \
+ V(Set.prototype, add, SetAdd) \
+ V(Set.prototype, clear, SetClear) \
+ V(Set.prototype, delete, SetDelete) \
+ V(Set.prototype, entries, SetEntries) \
+ V(Set.prototype, forEach, SetForEach) \
+ V(Set.prototype, has, SetHas) \
+ V(Set.prototype, values, SetValues) \
+ V(WeakMap.prototype, delete, WeakMapDelete) \
+ V(WeakMap.prototype, has, WeakMapHas) \
+ V(WeakMap.prototype, set, WeakMapSet) \
+ V(WeakSet.prototype, add, WeakSetAdd) \
+ V(WeakSet.prototype, delete, WeakSetDelete) \
+ V(WeakSet.prototype, has, WeakSetHas)
+
+#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
+ V(Atomics, load, AtomicsLoad) \
+ V(Atomics, store, AtomicsStore) \
+ V(Atomics, exchange, AtomicsExchange) \
+ V(Atomics, compareExchange, AtomicsCompareExchange) \
+ V(Atomics, add, AtomicsAdd) \
+ V(Atomics, sub, AtomicsSub) \
+ V(Atomics, and, AtomicsAnd) \
+ V(Atomics, or, AtomicsOr) \
+ V(Atomics, xor, AtomicsXor)
+
+enum class BuiltinFunctionId : uint8_t {
+ kArrayConstructor,
+#define DECL_FUNCTION_ID(ignored1, ignore2, name) k##name,
+ FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
+ ATOMIC_FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
+#undef DECL_FUNCTION_ID
+ // These are manually assigned to special getters during bootstrapping.
+ kArrayBufferByteLength,
+ kArrayBufferIsView,
+ kArrayEntries,
+ kArrayKeys,
+ kArrayValues,
+ kArrayIteratorNext,
+ kBigIntConstructor,
+ kMapSize,
+ kSetSize,
+ kMapIteratorNext,
+ kSetIteratorNext,
+ kDataViewBuffer,
+ kDataViewByteLength,
+ kDataViewByteOffset,
+ kFunctionHasInstance,
+ kGlobalDecodeURI,
+ kGlobalDecodeURIComponent,
+ kGlobalEncodeURI,
+ kGlobalEncodeURIComponent,
+ kGlobalEscape,
+ kGlobalUnescape,
+ kGlobalIsFinite,
+ kGlobalIsNaN,
+ kNumberConstructor,
+ kSymbolConstructor,
+ kSymbolPrototypeToString,
+ kSymbolPrototypeValueOf,
+ kTypedArrayByteLength,
+ kTypedArrayByteOffset,
+ kTypedArrayEntries,
+ kTypedArrayKeys,
+ kTypedArrayLength,
+ kTypedArrayToStringTag,
+ kTypedArrayValues,
+ kSharedArrayBufferByteLength,
+ kStringConstructor,
+ kStringIterator,
+ kStringIteratorNext,
+ kStringToLowerCaseIntl,
+ kStringToUpperCaseIntl,
+ kInvalidBuiltinFunctionId = static_cast<uint8_t>(-1),
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_BUILTIN_FUNCTION_ID_H_
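
The new header is built around the X-macro idiom: one list macro expanded with different per-entry macros to generate the enum (and, elsewhere, lookup tables). A self-contained sketch of the same technique with an abbreviated, illustrative list:

#include <cstdint>
#include <iostream>

#define DEMO_FUNCTIONS_WITH_ID_LIST(V) \
  V(Math, floor, MathFloor)            \
  V(Math, round, MathRound)            \
  V(Array, isArray, ArrayIsArray)

enum class DemoBuiltinFunctionId : uint8_t {
#define DECL_FUNCTION_ID(holder, name, id) k##id,
  DEMO_FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
#undef DECL_FUNCTION_ID
};

// The same list can also expand into a debug-name table.
const char* DemoName(DemoBuiltinFunctionId id) {
  switch (id) {
#define CASE(holder, name, id) \
  case DemoBuiltinFunctionId::k##id: return #holder "." #name;
    DEMO_FUNCTIONS_WITH_ID_LIST(CASE)
#undef CASE
  }
  return "unknown";
}

int main() {
  std::cout << DemoName(DemoBuiltinFunctionId::kMathFloor) << "\n";  // Math.floor
}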
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 308e645c84..34c9f2fc28 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -26,6 +26,12 @@ CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeDataContainer)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DeoptimizationData)
+CAST_ACCESSOR(SourcePositionTableWithFrameCache)
+
+ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
+ kSourcePositionTableIndex)
+ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
+ SimpleNumberDictionary, kStackFrameCacheIndex)
int AbstractCode::raw_instruction_size() {
if (IsCode()) {
@@ -133,14 +139,14 @@ BytecodeArray* AbstractCode::GetBytecodeArray() {
}
DependentCode* DependentCode::next_link() {
- return DependentCode::cast(Get(kNextLinkIndex)->ToStrongHeapObject());
+ return DependentCode::cast(Get(kNextLinkIndex)->GetHeapObjectAssumeStrong());
}
void DependentCode::set_next_link(DependentCode* next) {
Set(kNextLinkIndex, HeapObjectReference::Strong(next));
}
-int DependentCode::flags() { return Smi::ToInt(Get(kFlagsIndex)->ToSmi()); }
+int DependentCode::flags() { return Smi::ToInt(Get(kFlagsIndex)->cast<Smi>()); }
void DependentCode::set_flags(int flags) {
Set(kFlagsIndex, MaybeObject::FromObject(Smi::FromInt(flags)));
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index f3c3c0b5b3..1f1d4b71d6 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -27,8 +27,6 @@ class Register;
// Code describes objects with on-the-fly generated machine code.
class Code : public HeapObject, public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
// Opaque data type for encapsulating code flags like kind, inline
// cache state, and arguments count.
typedef uint32_t Flags;
@@ -307,9 +305,6 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
// object has been moved by delta bytes.
void Relocate(intptr_t delta);
- // Migrate code described by desc.
- void CopyFrom(Heap* heap, const CodeDesc& desc);
-
// Migrate code from desc without flushing the instruction cache.
void CopyFromNoFlush(Heap* heap, const CodeDesc& desc);
@@ -459,9 +454,6 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
// field {Code::code_data_container} itself is immutable.
class CodeDataContainer : public HeapObject, public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
DECL_ACCESSORS(next_code_link, Object)
DECL_INT_ACCESSORS(kind_specific_flags)
@@ -485,15 +477,7 @@ class CodeDataContainer : public HeapObject, public NeverReadOnlySpaceObject {
static const int kPointerFieldsStrongEndOffset = kNextCodeLinkOffset;
static const int kPointerFieldsWeakEndOffset = kKindSpecificFlagsOffset;
- // Ignores weakness.
- typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
- kPointerFieldsWeakEndOffset, kSize>
- BodyDescriptor;
-
- // Respects weakness.
- typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
- kPointerFieldsStrongEndOffset, kSize>
- BodyDescriptorWeak;
+ class BodyDescriptor;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CodeDataContainer);
@@ -501,9 +485,6 @@ class CodeDataContainer : public HeapObject, public NeverReadOnlySpaceObject {
class AbstractCode : public HeapObject, public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
// All code kinds and INTERPRETED_FUNCTION.
enum Kind {
#define DEFINE_CODE_KIND_ENUM(name) name,
@@ -624,13 +605,10 @@ class DependentCode : public WeakFixedArray {
};
// Register a code dependency of {cell} on {object}.
- static void InstallDependency(Isolate* isolate, MaybeObjectHandle code,
+ static void InstallDependency(Isolate* isolate, const MaybeObjectHandle& code,
Handle<HeapObject> object,
DependencyGroup group);
- bool Contains(DependencyGroup group, MaybeObject* code);
- bool IsEmpty(DependencyGroup group);
-
void DeoptimizeDependentCodeGroup(Isolate* isolate, DependencyGroup group);
bool MarkCodeForDeoptimization(Isolate* isolate, DependencyGroup group);
@@ -650,14 +628,14 @@ class DependentCode : public WeakFixedArray {
Handle<DependentCode> dep);
static Handle<DependentCode> New(Isolate* isolate, DependencyGroup group,
- MaybeObjectHandle object,
+ const MaybeObjectHandle& object,
Handle<DependentCode> next);
static Handle<DependentCode> EnsureSpace(Isolate* isolate,
Handle<DependentCode> entries);
static Handle<DependentCode> InsertWeakCode(Isolate* isolate,
Handle<DependentCode> entries,
DependencyGroup group,
- MaybeObjectHandle code);
+ const MaybeObjectHandle& code);
// Compact by removing cleared weak cells and return true if there was
// any cleared weak cell.
@@ -811,8 +789,6 @@ class BytecodeArray : public FixedArrayBase {
static const int kMaxLength = kMaxSize - kHeaderSize;
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray);
@@ -903,6 +879,22 @@ class DeoptimizationData : public FixedArray {
static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
};
+class SourcePositionTableWithFrameCache : public Tuple2 {
+ public:
+ DECL_ACCESSORS(source_position_table, ByteArray)
+ DECL_ACCESSORS(stack_frame_cache, SimpleNumberDictionary)
+
+ DECL_CAST(SourcePositionTableWithFrameCache)
+
+ static const int kSourcePositionTableIndex = Struct::kHeaderSize;
+ static const int kStackFrameCacheIndex =
+ kSourcePositionTableIndex + kPointerSize;
+ static const int kSize = kStackFrameCacheIndex + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SourcePositionTableWithFrameCache);
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index 76deeb9684..8e118a0e60 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -69,12 +69,6 @@ class CompilationCacheTable
: public HashTable<CompilationCacheTable, CompilationCacheShape>,
public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
- // Find cached value for a string key, otherwise return null.
- Handle<Object> Lookup(Handle<String> src, Handle<SharedFunctionInfo> shared,
- LanguageMode language_mode);
MaybeHandle<SharedFunctionInfo> LookupScript(Handle<String> src,
Handle<Context> native_context,
LanguageMode language_mode);
@@ -82,11 +76,6 @@ class CompilationCacheTable
Handle<Context> native_context,
LanguageMode language_mode, int position);
Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
- static Handle<CompilationCacheTable> Put(Handle<CompilationCacheTable> cache,
- Handle<String> src,
- Handle<SharedFunctionInfo> shared,
- LanguageMode language_mode,
- Handle<Object> value);
static Handle<CompilationCacheTable> PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> native_context, LanguageMode language_mode,
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index b77b6e136e..43fcdb5aee 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -375,7 +375,7 @@ void CoverageInfo::Print(std::unique_ptr<char[]> function_name) {
for (int i = 0; i < SlotCount(); i++) {
os << "{" << StartSourcePosition(i) << "," << EndSourcePosition(i) << "}"
- << std::endl;
+ << ": " << BlockCount(i) << std::endl;
}
}
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 3b94a4e46e..84f244c758 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -21,9 +21,6 @@ class BytecodeArray;
// debugged.
class DebugInfo : public Struct, public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
enum Flag {
kNone = 0,
kHasBreakInfo = 1 << 0,
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index eac358c1cd..6d7ee42eec 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_DICTIONARY_H_
#define V8_OBJECTS_DICTIONARY_H_
-#include "src/objects/hash-table.h"
-
#include "src/base/export-template.h"
#include "src/globals.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/property-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -113,7 +113,7 @@ class NameDictionaryShape : public BaseDictionaryShape<Handle<Name>> {
static inline uint32_t Hash(Isolate* isolate, Handle<Name> key);
static inline uint32_t HashForObject(Isolate* isolate, Object* object);
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key);
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
static const int kPrefixSize = 2;
static const int kEntrySize = 3;
static const int kEntryValueIndex = 1;
@@ -216,7 +216,7 @@ class GlobalDictionaryShape : public NameDictionaryShape {
static inline Object* Unwrap(Object* key);
static inline bool IsKey(ReadOnlyRoots roots, Object* k);
static inline bool IsLive(ReadOnlyRoots roots, Object* key);
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
};
class GlobalDictionary
@@ -246,7 +246,7 @@ class NumberDictionaryShape : public NumberDictionaryBaseShape {
static const int kPrefixSize = 1;
static const int kEntrySize = 3;
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
};
class SimpleNumberDictionaryShape : public NumberDictionaryBaseShape {
@@ -266,7 +266,7 @@ class SimpleNumberDictionaryShape : public NumberDictionaryBaseShape {
UNREACHABLE();
}
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
};
extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 29c4593cfd..8c24ee80be 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -288,8 +288,8 @@ HeapObject* WeakArrayList::Iterator::Next() {
if (array_ != nullptr) {
while (index_ < array_->length()) {
MaybeObject* item = array_->Get(index_++);
- DCHECK(item->IsWeakHeapObject() || item->IsClearedWeakHeapObject());
- if (!item->IsClearedWeakHeapObject()) return item->ToWeakHeapObject();
+ DCHECK(item->IsWeakOrCleared());
+ if (!item->IsCleared()) return item->GetHeapObjectAssumeWeak();
}
array_ = nullptr;
}
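
The renamed predicates (IsWeakOrCleared, IsCleared, GetHeapObjectAssumeWeak) make the weak-slot states explicit: skip cleared entries, hand out strong references for live ones. A rough analogue using std::weak_ptr (an analogy only; V8's MaybeObject is a tagged pointer, not a weak_ptr):

#include <iostream>
#include <memory>
#include <vector>

int main() {
  auto a = std::make_shared<int>(1);
  auto b = std::make_shared<int>(2);
  std::vector<std::weak_ptr<int>> weak_list{a, b};
  b.reset();  // the second slot is now "cleared"

  for (const auto& slot : weak_list) {
    if (auto strong = slot.lock()) {  // ~ !item->IsCleared()
      std::cout << *strong << "\n";   // ~ item->GetHeapObjectAssumeWeak()
    }
  }
}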
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 287015ef7c..867d04e638 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -188,8 +188,6 @@ class FixedArray : public FixedArrayBase {
#endif
typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
protected:
// Set operation on FixedArray without using write barriers. Can
@@ -256,8 +254,6 @@ class FixedDoubleArray : public FixedArrayBase {
DECL_VERIFIER(FixedDoubleArray)
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
@@ -298,7 +294,6 @@ class WeakFixedArray : public HeapObject {
DECL_VERIFIER(WeakFixedArray)
typedef WeakArrayBodyDescriptor BodyDescriptor;
- typedef BodyDescriptor BodyDescriptorWeak;
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = kLengthOffset + kPointerSize;
@@ -334,7 +329,7 @@ class WeakArrayList : public HeapObject {
static Handle<WeakArrayList> AddToEnd(Isolate* isolate,
Handle<WeakArrayList> array,
- MaybeObjectHandle value);
+ const MaybeObjectHandle& value);
inline MaybeObject* Get(int index) const;
@@ -361,7 +356,6 @@ class WeakArrayList : public HeapObject {
inline void synchronized_set_capacity(int value);
typedef WeakArrayBodyDescriptor BodyDescriptor;
- typedef BodyDescriptor BodyDescriptorWeak;
static const int kCapacityOffset = HeapObject::kHeaderSize;
static const int kLengthOffset = kCapacityOffset + kPointerSize;
@@ -381,7 +375,7 @@ class WeakArrayList : public HeapObject {
// around in the array - this method can only be used in cases where the user
// doesn't care about the indices! Users should make sure there are no
// duplicates.
- bool RemoveOne(MaybeObjectHandle value);
+ bool RemoveOne(const MaybeObjectHandle& value);
class Iterator {
public:
@@ -442,7 +436,6 @@ class ArrayList : public FixedArray {
// Return a copy of the list of size Length() without the first entry. The
// number returned by Length() is stored in the first entry.
static Handle<FixedArray> Elements(Isolate* isolate, Handle<ArrayList> array);
- bool IsFull();
DECL_CAST(ArrayList)
private:
@@ -521,8 +514,6 @@ class ByteArray : public FixedArrayBase {
"ByteArray maxLength not a Smi");
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
@@ -587,8 +578,6 @@ class FixedTypedArrayBase : public FixedArrayBase {
static const size_t kMaxLength = Smi::kMaxValue;
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
inline int size() const;
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index 0ae22f9526..5bccfb5807 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -50,7 +50,8 @@ class FrameArray : public FixedArray {
kIsAsmJsWasmFrame = 1 << 2,
kIsStrict = 1 << 3,
kIsConstructor = 1 << 4,
- kAsmJsAtNumberConversion = 1 << 5
+ kAsmJsAtNumberConversion = 1 << 5,
+ kIsAsync = 1 << 6
};
static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 1f2c09316d..bc391fede6 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -58,12 +58,12 @@ void HashTableBase::SetNumberOfDeletedElements(int nod) {
}
template <typename Key>
-int BaseShape<Key>::GetMapRootIndex() {
- return Heap::kHashTableMapRootIndex;
+RootIndex BaseShape<Key>::GetMapRootIndex() {
+ return RootIndex::kHashTableMap;
}
-int EphemeronHashTableShape::GetMapRootIndex() {
- return Heap::kEphemeronHashTableMapRootIndex;
+RootIndex EphemeronHashTableShape::GetMapRootIndex() {
+ return RootIndex::kEphemeronHashTableMap;
}
template <typename Derived, typename Shape>
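
Returning a RootIndex enum class instead of a bare int makes root references type-checked at the call sites. A standalone sketch of the benefit (the enumerator list is abridged and illustrative):

#include <cstdint>

enum class RootIndex : uint16_t {
  kHashTableMap,
  kEphemeronHashTableMap,
  // ... one enumerator per read-only root in the real list
};

// Accepts only RootIndex; a bare integer no longer compiles.
const char* RootName(RootIndex index) {
  switch (index) {
    case RootIndex::kHashTableMap: return "HashTableMap";
    case RootIndex::kEphemeronHashTableMap: return "EphemeronHashTableMap";
  }
  return "unknown";
}

int main() {
  // RootName(0);  // error: no implicit int -> RootIndex conversion
  (void)RootName(RootIndex::kHashTableMap);
}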
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index aa86865abf..66d3f6dfb2 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -55,7 +55,7 @@ template <typename KeyT>
class BaseShape {
public:
typedef KeyT Key;
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
static const bool kNeedsHoleCheck = true;
static Object* Unwrap(Object* key) { return key; }
static inline bool IsKey(ReadOnlyRoots roots, Object* key);
@@ -244,7 +244,7 @@ class HashTableKey {
virtual bool IsMatch(Object* other) = 0;
// Returns the hash value for this key.
// Required.
- virtual ~HashTableKey() {}
+ virtual ~HashTableKey() = default;
uint32_t Hash() const { return hash_; }
@@ -321,7 +321,7 @@ class ObjectHashTable
class EphemeronHashTableShape : public ObjectHashTableShape {
public:
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
};
// EphemeronHashTable is similar to ObjectHashTable but gets special treatment
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index b9ceecf9a1..dcacb4dd2f 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -21,543 +21,44 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/js-collator-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/js-date-time-format-inl.h"
+#include "src/objects/js-number-format-inl.h"
#include "src/objects/string.h"
#include "src/property-descriptor.h"
#include "unicode/brkiter.h"
-#include "unicode/bytestream.h"
-#include "unicode/calendar.h"
#include "unicode/coll.h"
-#include "unicode/curramt.h"
-#include "unicode/dcfmtsym.h"
#include "unicode/decimfmt.h"
-#include "unicode/dtfmtsym.h"
-#include "unicode/dtptngen.h"
-#include "unicode/gregocal.h"
#include "unicode/locid.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
-#include "unicode/plurrule.h"
-#include "unicode/rbbi.h"
#include "unicode/regex.h"
#include "unicode/smpdtfmt.h"
#include "unicode/timezone.h"
-#include "unicode/uchar.h"
#include "unicode/ucol.h"
-#include "unicode/ucurr.h"
-#include "unicode/unum.h"
-#include "unicode/upluralrules.h"
#include "unicode/ures.h"
#include "unicode/uvernum.h"
#include "unicode/uversion.h"
-#if U_ICU_VERSION_MAJOR_NUM >= 59
-#include "unicode/char16ptr.h"
-#endif
-
namespace v8 {
namespace internal {
-namespace {
-
-bool ExtractStringSetting(Isolate* isolate, Handle<JSObject> options,
- const char* key, icu::UnicodeString* setting) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
- Handle<Object> object =
- JSReceiver::GetProperty(isolate, options, str).ToHandleChecked();
- if (object->IsString()) {
- v8::String::Utf8Value utf8_string(
- v8_isolate, v8::Utils::ToLocal(Handle<String>::cast(object)));
- *setting = icu::UnicodeString::fromUTF8(*utf8_string);
- return true;
- }
- return false;
-}
-
-bool ExtractIntegerSetting(Isolate* isolate, Handle<JSObject> options,
- const char* key, int32_t* value) {
- Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
- Handle<Object> object =
- JSReceiver::GetProperty(isolate, options, str).ToHandleChecked();
- if (object->IsNumber()) {
- return object->ToInt32(value);
- }
- return false;
-}
-
-bool ExtractBooleanSetting(Isolate* isolate, Handle<JSObject> options,
- const char* key, bool* value) {
- Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
- Handle<Object> object =
- JSReceiver::GetProperty(isolate, options, str).ToHandleChecked();
- if (object->IsBoolean()) {
- *value = object->BooleanValue(isolate);
- return true;
- }
- return false;
-}
-
-icu::SimpleDateFormat* CreateICUDateFormat(Isolate* isolate,
- const icu::Locale& icu_locale,
- Handle<JSObject> options) {
- // Create time zone as specified by the user. We have to re-create time zone
- // since calendar takes ownership.
- icu::TimeZone* tz = nullptr;
- icu::UnicodeString timezone;
- if (ExtractStringSetting(isolate, options, "timeZone", &timezone)) {
- tz = icu::TimeZone::createTimeZone(timezone);
- } else {
- tz = icu::TimeZone::createDefault();
- }
-
- // Create a calendar using locale, and apply time zone to it.
- UErrorCode status = U_ZERO_ERROR;
- icu::Calendar* calendar =
- icu::Calendar::createInstance(tz, icu_locale, status);
-
- if (calendar->getDynamicClassID() ==
- icu::GregorianCalendar::getStaticClassID()) {
- icu::GregorianCalendar* gc = (icu::GregorianCalendar*)calendar;
- UErrorCode status = U_ZERO_ERROR;
- // The beginning of ECMAScript time, namely -(2**53)
- const double start_of_time = -9007199254740992;
- gc->setGregorianChange(start_of_time, status);
- DCHECK(U_SUCCESS(status));
- }
-
- // Make formatter from skeleton. Calendar and numbering system are added
- // to the locale as Unicode extension (if they were specified at all).
- icu::SimpleDateFormat* date_format = nullptr;
- icu::UnicodeString skeleton;
- if (ExtractStringSetting(isolate, options, "skeleton", &skeleton)) {
- // See https://github.com/tc39/ecma402/issues/225 . The best pattern
- // generation needs to be done in the base locale according to the
- // current spec however odd it may be. See also crbug.com/826549 .
- // This is a temporary work-around to get v8's external behavior to match
- // the current spec, but does not follow the spec provisions mentioned
- // in the above Ecma 402 issue.
- // TODO(jshin): The spec may need to be revised because using the base
- // locale for the pattern match is not quite right. Moreover, what to
- // do with 'related year' part when 'chinese/dangi' calendar is specified
- // has to be discussed. Revisit once the spec is clarified/revised.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- std::unique_ptr<icu::DateTimePatternGenerator> generator(
- icu::DateTimePatternGenerator::createInstance(no_extension_locale,
- status));
- icu::UnicodeString pattern;
- if (U_SUCCESS(status))
- pattern = generator->getBestPattern(skeleton, status);
-
- date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
- if (U_SUCCESS(status)) {
- date_format->adoptCalendar(calendar);
- }
- }
-
- if (U_FAILURE(status)) {
- delete calendar;
- delete date_format;
- date_format = nullptr;
- }
-
- return date_format;
-}
-
-void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
- icu::SimpleDateFormat* date_format,
- Handle<JSObject> resolved) {
- Factory* factory = isolate->factory();
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString pattern;
- date_format->toPattern(pattern);
- JSObject::SetProperty(
- isolate, resolved, factory->intl_pattern_symbol(),
- factory
- ->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
- pattern.length()))
- .ToHandleChecked(),
- LanguageMode::kSloppy)
- .Assert();
-
- // Set time zone and calendar.
- const icu::Calendar* calendar = date_format->getCalendar();
- // getType() returns legacy calendar type name instead of LDML/BCP47 calendar
- // key values. intl.js maps them to BCP47 values for key "ca".
- // TODO(jshin): Consider doing it here, instead.
- const char* calendar_name = calendar->getType();
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("calendar"),
- factory->NewStringFromAsciiChecked(calendar_name), LanguageMode::kSloppy)
- .Assert();
-
- const icu::TimeZone& tz = calendar->getTimeZone();
- icu::UnicodeString time_zone;
- tz.getID(time_zone);
-
- icu::UnicodeString canonical_time_zone;
- icu::TimeZone::getCanonicalID(time_zone, canonical_time_zone, status);
- if (U_SUCCESS(status)) {
- // In CLDR (http://unicode.org/cldr/trac/ticket/9943), Etc/UTC is made
- // a separate timezone ID from Etc/GMT even though they're still the same
- // timezone. We have Etc/UTC because 'UTC', 'Etc/Universal',
- // 'Etc/Zulu' and others are turned to 'Etc/UTC' by ICU. Etc/GMT comes
- // from Etc/GMT0, Etc/GMT+0, Etc/GMT-0, Etc/Greenwich.
- // ecma402##sec-canonicalizetimezonename step 3
- if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/UTC") ||
- canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("timeZone"),
- factory->NewStringFromStaticChars("UTC"), LanguageMode::kSloppy)
- .Assert();
- } else {
- JSObject::SetProperty(isolate, resolved,
- factory->NewStringFromStaticChars("timeZone"),
- factory
- ->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(
- canonical_time_zone.getBuffer()),
- canonical_time_zone.length()))
- .ToHandleChecked(),
- LanguageMode::kSloppy)
- .Assert();
- }
- }
-
- // Ugly hack. ICU doesn't expose numbering system in any way, so we have
- // to assume that for given locale NumberingSystem constructor produces the
- // same digits as NumberFormat/Calendar would.
- status = U_ZERO_ERROR;
- icu::NumberingSystem* numbering_system =
- icu::NumberingSystem::createInstance(icu_locale, status);
- if (U_SUCCESS(status)) {
- const char* ns = numbering_system->getName();
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("numberingSystem"),
- factory->NewStringFromAsciiChecked(ns), LanguageMode::kSloppy)
- .Assert();
- } else {
- JSObject::SetProperty(isolate, resolved,
- factory->NewStringFromStaticChars("numberingSystem"),
- factory->undefined_value(), LanguageMode::kSloppy)
- .Assert();
- }
- delete numbering_system;
-
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- status = U_ZERO_ERROR;
- uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
- FALSE, &status);
- if (U_SUCCESS(status)) {
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result), LanguageMode::kSloppy)
- .Assert();
- } else {
- // This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"), LanguageMode::kSloppy)
- .Assert();
- }
-}
-
-void SetNumericSettings(Isolate* isolate, icu::DecimalFormat* number_format,
- Handle<JSObject> options) {
- int32_t digits;
- if (ExtractIntegerSetting(isolate, options, "minimumIntegerDigits",
- &digits)) {
- number_format->setMinimumIntegerDigits(digits);
- }
-
- if (ExtractIntegerSetting(isolate, options, "minimumFractionDigits",
- &digits)) {
- number_format->setMinimumFractionDigits(digits);
- }
-
- if (ExtractIntegerSetting(isolate, options, "maximumFractionDigits",
- &digits)) {
- number_format->setMaximumFractionDigits(digits);
- }
-
- bool significant_digits_used = false;
- if (ExtractIntegerSetting(isolate, options, "minimumSignificantDigits",
- &digits)) {
- number_format->setMinimumSignificantDigits(digits);
- significant_digits_used = true;
- }
-
- if (ExtractIntegerSetting(isolate, options, "maximumSignificantDigits",
- &digits)) {
- number_format->setMaximumSignificantDigits(digits);
- significant_digits_used = true;
- }
-
- number_format->setSignificantDigitsUsed(significant_digits_used);
-
- number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
-}
-
-icu::DecimalFormat* CreateICUNumberFormat(Isolate* isolate,
- const icu::Locale& icu_locale,
- Handle<JSObject> options) {
- // Make formatter from options. Numbering system is added
- // to the locale as Unicode extension (if it was specified at all).
- UErrorCode status = U_ZERO_ERROR;
- icu::DecimalFormat* number_format = nullptr;
- icu::UnicodeString style;
- icu::UnicodeString currency;
- if (ExtractStringSetting(isolate, options, "style", &style)) {
- if (style == UNICODE_STRING_SIMPLE("currency")) {
- icu::UnicodeString display;
- ExtractStringSetting(isolate, options, "currency", &currency);
- ExtractStringSetting(isolate, options, "currencyDisplay", &display);
-
-#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
- icu::NumberFormat::EStyles format_style;
- if (display == UNICODE_STRING_SIMPLE("code")) {
- format_style = icu::NumberFormat::kIsoCurrencyStyle;
- } else if (display == UNICODE_STRING_SIMPLE("name")) {
- format_style = icu::NumberFormat::kPluralCurrencyStyle;
- } else {
- format_style = icu::NumberFormat::kCurrencyStyle;
- }
-#else // ICU version is 4.8 or above (we ignore versions below 4.0).
- UNumberFormatStyle format_style;
- if (display == UNICODE_STRING_SIMPLE("code")) {
- format_style = UNUM_CURRENCY_ISO;
- } else if (display == UNICODE_STRING_SIMPLE("name")) {
- format_style = UNUM_CURRENCY_PLURAL;
- } else {
- format_style = UNUM_CURRENCY;
- }
-#endif
-
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, format_style, status));
-
- if (U_FAILURE(status)) {
- delete number_format;
- return nullptr;
- }
- } else if (style == UNICODE_STRING_SIMPLE("percent")) {
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createPercentInstance(icu_locale, status));
- if (U_FAILURE(status)) {
- delete number_format;
- return nullptr;
- }
- // Make sure 1.1% doesn't go into 2%.
- number_format->setMinimumFractionDigits(1);
- } else {
- // Make a decimal instance by default.
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, status));
- }
- }
-
- if (U_FAILURE(status)) {
- delete number_format;
- return nullptr;
- }
-
- // Set all options.
- if (!currency.isEmpty()) {
- number_format->setCurrency(currency.getBuffer(), status);
- }
-
- SetNumericSettings(isolate, number_format, options);
-
- bool grouping;
- if (ExtractBooleanSetting(isolate, options, "useGrouping", &grouping)) {
- number_format->setGroupingUsed(grouping);
- }
-
- return number_format;
-}
-
-void SetResolvedNumericSettings(Isolate* isolate, const icu::Locale& icu_locale,
- icu::DecimalFormat* number_format,
- Handle<JSObject> resolved) {
- Factory* factory = isolate->factory();
-
- JSObject::SetProperty(
- isolate, resolved,
- factory->NewStringFromStaticChars("minimumIntegerDigits"),
- factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
- LanguageMode::kSloppy)
- .Assert();
-
- JSObject::SetProperty(
- isolate, resolved,
- factory->NewStringFromStaticChars("minimumFractionDigits"),
- factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
- LanguageMode::kSloppy)
- .Assert();
-
- JSObject::SetProperty(
- isolate, resolved,
- factory->NewStringFromStaticChars("maximumFractionDigits"),
- factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
- LanguageMode::kSloppy)
- .Assert();
-
- Handle<String> key =
- factory->NewStringFromStaticChars("minimumSignificantDigits");
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(resolved, key);
- CHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- JSObject::SetProperty(
- isolate, resolved,
- factory->NewStringFromStaticChars("minimumSignificantDigits"),
- factory->NewNumberFromInt(number_format->getMinimumSignificantDigits()),
- LanguageMode::kSloppy)
- .Assert();
- }
-
- key = factory->NewStringFromStaticChars("maximumSignificantDigits");
- maybe = JSReceiver::HasOwnProperty(resolved, key);
- CHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- JSObject::SetProperty(
- isolate, resolved,
- factory->NewStringFromStaticChars("maximumSignificantDigits"),
- factory->NewNumberFromInt(number_format->getMaximumSignificantDigits()),
- LanguageMode::kSloppy)
- .Assert();
- }
-
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- UErrorCode status = U_ZERO_ERROR;
- uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
- FALSE, &status);
- if (U_SUCCESS(status)) {
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result), LanguageMode::kSloppy)
- .Assert();
- } else {
- // This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"), LanguageMode::kSloppy)
- .Assert();
- }
-}
-
-void SetResolvedNumberSettings(Isolate* isolate, const icu::Locale& icu_locale,
- icu::DecimalFormat* number_format,
- Handle<JSObject> resolved) {
- Factory* factory = isolate->factory();
-
- // Set resolved currency code in options.currency if not empty.
- icu::UnicodeString currency(number_format->getCurrency());
- if (!currency.isEmpty()) {
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("currency"),
- factory
- ->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(currency.getBuffer()),
- currency.length()))
- .ToHandleChecked(),
- LanguageMode::kSloppy)
- .Assert();
- }
-
+std::string Intl::GetNumberingSystem(const icu::Locale& icu_locale) {
 // Ugly hack. ICU doesn't expose the numbering system in any way, so we
 // have to assume that for a given locale the NumberingSystem constructor
 // produces the same digits as NumberFormat/Calendar would.
UErrorCode status = U_ZERO_ERROR;
- icu::NumberingSystem* numbering_system =
- icu::NumberingSystem::createInstance(icu_locale, status);
- if (U_SUCCESS(status)) {
- const char* ns = numbering_system->getName();
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("numberingSystem"),
- factory->NewStringFromAsciiChecked(ns), LanguageMode::kSloppy)
- .Assert();
- } else {
- JSObject::SetProperty(isolate, resolved,
- factory->NewStringFromStaticChars("numberingSystem"),
- factory->undefined_value(), LanguageMode::kSloppy)
- .Assert();
- }
- delete numbering_system;
-
- JSObject::SetProperty(isolate, resolved,
- factory->NewStringFromStaticChars("useGrouping"),
- factory->ToBoolean(number_format->isGroupingUsed()),
- LanguageMode::kSloppy)
- .Assert();
-
- SetResolvedNumericSettings(isolate, icu_locale, number_format, resolved);
-}
-
-icu::BreakIterator* CreateICUBreakIterator(Isolate* isolate,
- const icu::Locale& icu_locale,
- Handle<JSObject> options) {
- UErrorCode status = U_ZERO_ERROR;
- icu::BreakIterator* break_iterator = nullptr;
- icu::UnicodeString type;
- if (!ExtractStringSetting(isolate, options, "type", &type)) return nullptr;
-
- if (type == UNICODE_STRING_SIMPLE("character")) {
- break_iterator =
- icu::BreakIterator::createCharacterInstance(icu_locale, status);
- } else if (type == UNICODE_STRING_SIMPLE("sentence")) {
- break_iterator =
- icu::BreakIterator::createSentenceInstance(icu_locale, status);
- } else if (type == UNICODE_STRING_SIMPLE("line")) {
- break_iterator = icu::BreakIterator::createLineInstance(icu_locale, status);
- } else {
- // Default is word iterator.
- break_iterator = icu::BreakIterator::createWordInstance(icu_locale, status);
- }
-
- if (U_FAILURE(status)) {
- delete break_iterator;
- return nullptr;
- }
-
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kBreakIterator);
-
- return break_iterator;
-}
-
-void SetResolvedBreakIteratorSettings(Isolate* isolate,
- const icu::Locale& icu_locale,
- icu::BreakIterator* break_iterator,
- Handle<JSObject> resolved) {
- Factory* factory = isolate->factory();
- UErrorCode status = U_ZERO_ERROR;
-
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- status = U_ZERO_ERROR;
- uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
- FALSE, &status);
+ std::unique_ptr<icu::NumberingSystem> numbering_system(
+ icu::NumberingSystem::createInstance(icu_locale, status));
+ std::string value;
if (U_SUCCESS(status)) {
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result), LanguageMode::kSloppy)
- .Assert();
- } else {
- // This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"), LanguageMode::kSloppy)
- .Assert();
+ value = numbering_system->getName();
}
+ return value;
}
-MaybeHandle<JSObject> CachedOrNewService(Isolate* isolate,
- Handle<String> service,
- Handle<Object> locales,
- Handle<Object> options,
- Handle<Object> internal_options) {
+MaybeHandle<JSObject> Intl::CachedOrNewService(
+ Isolate* isolate, Handle<String> service, Handle<Object> locales,
+ Handle<Object> options, Handle<Object> internal_options) {
Handle<Object> result;
Handle<Object> undefined_value(ReadOnlyRoots(isolate).undefined_value(),
isolate);
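
GetNumberingSystem above replaces the removed manual delete of numbering_system with std::unique_ptr ownership of the factory result, so the object is freed on every path. A standalone sketch of that change; the fake factory stands in for icu::NumberingSystem::createInstance:

#include <memory>
#include <string>

struct FakeNumberingSystem {  // stand-in for icu::NumberingSystem
  std::string getName() const { return "latn"; }
};

// Create-style factory: the caller owns the returned object.
FakeNumberingSystem* CreateInstance(bool& ok) {
  ok = true;
  return new FakeNumberingSystem();
}

std::string GetNumberingSystem() {
  bool ok = false;
  std::unique_ptr<FakeNumberingSystem> ns(CreateInstance(ok));
  std::string value;
  if (ok) value = ns->getName();
  return value;  // ns is deleted automatically, success or not
}

int main() { return GetNumberingSystem() == "latn" ? 0 : 1; }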
@@ -569,7 +70,6 @@ MaybeHandle<JSObject> CachedOrNewService(Isolate* isolate,
JSArray);
return Handle<JSObject>::cast(result);
}
-} // namespace
icu::Locale Intl::CreateICULocale(Isolate* isolate,
Handle<String> bcp47_locale_str) {
@@ -583,14 +83,18 @@ icu::Locale Intl::CreateICULocale(Isolate* isolate,
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
+ int parsed_length = 0;
// bcp47_locale_str should be a canonicalized language tag, which
// means this shouldn't fail.
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
+ &parsed_length, &status);
CHECK(U_SUCCESS(status));
- CHECK_LT(0, icu_length);
+
+ // bcp47_locale has already been checked for structural validity, so
+ // the whole tag should parse completely.
+ int bcp47length = bcp47_locale.length();
+ CHECK_EQ(bcp47length, parsed_length);
icu::Locale icu_locale(icu_result);
if (icu_locale.isBogus()) {
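
The reworked tail of CreateICULocale asserts that uloc_forLanguageTag consumed the entire (already canonicalized) tag rather than merely some positive prefix. A sketch of that check against ICU's C API; building it requires an ICU installation, and the sample tag is illustrative:

#include <cassert>
#include <cstring>
#include "unicode/uloc.h"

int main() {
  const char* bcp47 = "sr-Latn-RS";
  char icu_result[ULOC_FULLNAME_CAPACITY];
  int32_t parsed_length = 0;
  UErrorCode status = U_ZERO_ERROR;
  uloc_forLanguageTag(bcp47, icu_result, ULOC_FULLNAME_CAPACITY,
                      &parsed_length, &status);
  assert(U_SUCCESS(status));
  // A structurally valid tag must be consumed completely.
  assert(parsed_length == static_cast<int32_t>(std::strlen(bcp47)));
  return 0;
}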
@@ -601,253 +105,6 @@ icu::Locale Intl::CreateICULocale(Isolate* isolate,
}
// static
-icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
- Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
- Handle<JSObject> resolved) {
- icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
- DCHECK(!icu_locale.isBogus());
-
- icu::SimpleDateFormat* date_format =
- CreateICUDateFormat(isolate, icu_locale, options);
- if (!date_format) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- date_format = CreateICUDateFormat(isolate, no_extension_locale, options);
-
- if (!date_format) {
- FATAL("Failed to create ICU date format, are ICU data files missing?");
- }
-
- // Set resolved settings (pattern, numbering system, calendar).
- SetResolvedDateSettings(isolate, no_extension_locale, date_format,
- resolved);
- } else {
- SetResolvedDateSettings(isolate, icu_locale, date_format, resolved);
- }
-
- CHECK_NOT_NULL(date_format);
- return date_format;
-}
-
-icu::SimpleDateFormat* DateFormat::UnpackDateFormat(Handle<JSObject> obj) {
- return reinterpret_cast<icu::SimpleDateFormat*>(
- obj->GetEmbedderField(DateFormat::kSimpleDateFormatIndex));
-}
-
-void DateFormat::DeleteDateFormat(const v8::WeakCallbackInfo<void>& data) {
- delete reinterpret_cast<icu::SimpleDateFormat*>(data.GetInternalField(0));
- GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
-}
-
-MaybeHandle<JSObject> DateFormat::Unwrap(Isolate* isolate,
- Handle<JSReceiver> receiver,
- const char* method_name) {
- Handle<Context> native_context =
- Handle<Context>(isolate->context()->native_context(), isolate);
- Handle<JSFunction> constructor = Handle<JSFunction>(
- JSFunction::cast(native_context->intl_date_time_format_function()),
- isolate);
- Handle<String> method_name_str =
- isolate->factory()->NewStringFromAsciiChecked(method_name);
-
- return Intl::UnwrapReceiver(isolate, receiver, constructor,
- Intl::Type::kDateTimeFormat, method_name_str,
- true);
-}
-
-// ecma402/#sec-formatdatetime
-// FormatDateTime( dateTimeFormat, x )
-MaybeHandle<String> DateFormat::FormatDateTime(
- Isolate* isolate, Handle<JSObject> date_time_format_holder, double x) {
- double date_value = DateCache::TimeClip(x);
- if (std::isnan(date_value)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
- String);
- }
-
- CHECK(Intl::IsObjectOfType(isolate, date_time_format_holder,
- Intl::Type::kDateTimeFormat));
- icu::SimpleDateFormat* date_format =
- DateFormat::UnpackDateFormat(date_time_format_holder);
- CHECK_NOT_NULL(date_format);
-
- icu::UnicodeString result;
- date_format->format(date_value, result);
-
- return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
-}
-
-// ecma402/#sec-datetime-format-functions
-// DateTime Format Functions
-MaybeHandle<String> DateFormat::DateTimeFormat(
- Isolate* isolate, Handle<JSObject> date_time_format_holder,
- Handle<Object> date) {
- // 2. Assert: Type(dtf) is Object and dtf has an [[InitializedDateTimeFormat]]
- // internal slot.
- DCHECK(Intl::IsObjectOfType(isolate, date_time_format_holder,
- Intl::Type::kDateTimeFormat));
-
- // 3. If date is not provided or is undefined, then
- double x;
- if (date->IsUndefined()) {
- // 3.a Let x be Call(%Date_now%, undefined).
- x = JSDate::CurrentTimeValue(isolate);
- } else {
- // 4. Else,
- // a. Let x be ? ToNumber(date).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, date, Object::ToNumber(isolate, date),
- String);
- CHECK(date->IsNumber());
- x = date->Number();
- }
- // 5. Return FormatDateTime(dtf, x).
- return DateFormat::FormatDateTime(isolate, date_time_format_holder, x);
-}
-
-MaybeHandle<String> DateFormat::ToLocaleDateTime(
- Isolate* isolate, Handle<Object> date, Handle<Object> locales,
- Handle<Object> options, const char* required, const char* defaults,
- const char* service) {
- Factory* factory = isolate->factory();
- // 1. Let x be ? thisTimeValue(this value);
- if (!date->IsJSDate()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
- factory->NewStringFromStaticChars("Date")),
- String);
- }
-
- double const x = Handle<JSDate>::cast(date)->value()->Number();
- // 2. If x is NaN, return "Invalid Date"
- if (std::isnan(x)) {
- return factory->NewStringFromStaticChars("Invalid Date");
- }
-
- // 3. Let options be ? ToDateTimeOptions(options, required, defaults).
- Handle<JSObject> internal_options;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, internal_options,
- DateFormat::ToDateTimeOptions(isolate, options, required, defaults),
- String);
-
- // 4. Let dateFormat be ? Construct(%DateTimeFormat%, « locales, options »).
- Handle<JSObject> date_format;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, date_format,
- CachedOrNewService(isolate, factory->NewStringFromAsciiChecked(service),
- locales, options, internal_options),
- String);
-
- // 5. Return FormatDateTime(dateFormat, x).
- return DateFormat::FormatDateTime(isolate, date_format, x);
-}
-
-icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
- Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
- Handle<JSObject> resolved) {
- icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
- DCHECK(!icu_locale.isBogus());
-
- icu::DecimalFormat* number_format =
- CreateICUNumberFormat(isolate, icu_locale, options);
- if (!number_format) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- number_format =
- CreateICUNumberFormat(isolate, no_extension_locale, options);
-
- if (!number_format) {
- FATAL("Failed to create ICU number format, are ICU data files missing?");
- }
-
- // Set resolved settings (pattern, numbering system).
- SetResolvedNumberSettings(isolate, no_extension_locale, number_format,
- resolved);
- } else {
- SetResolvedNumberSettings(isolate, icu_locale, number_format, resolved);
- }
-
- CHECK_NOT_NULL(number_format);
- return number_format;
-}
-
-icu::DecimalFormat* NumberFormat::UnpackNumberFormat(Handle<JSObject> obj) {
- return reinterpret_cast<icu::DecimalFormat*>(
- obj->GetEmbedderField(NumberFormat::kDecimalFormatIndex));
-}
-
-void NumberFormat::DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data) {
- delete reinterpret_cast<icu::DecimalFormat*>(data.GetInternalField(0));
- GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
-}
-
-icu::BreakIterator* V8BreakIterator::InitializeBreakIterator(
- Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
- Handle<JSObject> resolved) {
- icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
- DCHECK(!icu_locale.isBogus());
-
- icu::BreakIterator* break_iterator =
- CreateICUBreakIterator(isolate, icu_locale, options);
- if (!break_iterator) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- break_iterator =
- CreateICUBreakIterator(isolate, no_extension_locale, options);
-
- if (!break_iterator) {
- FATAL("Failed to create ICU break iterator, are ICU data files missing?");
- }
-
- // Set resolved settings (locale).
- SetResolvedBreakIteratorSettings(isolate, no_extension_locale,
- break_iterator, resolved);
- } else {
- SetResolvedBreakIteratorSettings(isolate, icu_locale, break_iterator,
- resolved);
- }
-
- CHECK_NOT_NULL(break_iterator);
- return break_iterator;
-}
-
-icu::BreakIterator* V8BreakIterator::UnpackBreakIterator(Handle<JSObject> obj) {
- return reinterpret_cast<icu::BreakIterator*>(
- obj->GetEmbedderField(V8BreakIterator::kBreakIteratorIndex));
-}
-
-void V8BreakIterator::DeleteBreakIterator(
- const v8::WeakCallbackInfo<void>& data) {
- delete reinterpret_cast<icu::BreakIterator*>(data.GetInternalField(0));
- delete reinterpret_cast<icu::UnicodeString*>(data.GetInternalField(1));
- GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
-}
-
-void V8BreakIterator::AdoptText(Isolate* isolate,
- Handle<JSObject> break_iterator_holder,
- Handle<String> text) {
- icu::BreakIterator* break_iterator =
- V8BreakIterator::UnpackBreakIterator(break_iterator_holder);
- CHECK_NOT_NULL(break_iterator);
-
- icu::UnicodeString* u_text = reinterpret_cast<icu::UnicodeString*>(
- break_iterator_holder->GetEmbedderField(
- V8BreakIterator::kUnicodeStringIndex));
- delete u_text;
-
- int length = text->length();
- text = String::Flatten(isolate, text);
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = text->GetFlatContent();
- std::unique_ptr<uc16[]> sap;
- const UChar* text_value = GetUCharBufferFromFlat(flat, &sap, length);
- u_text = new icu::UnicodeString(text_value, length);
- break_iterator_holder->SetEmbedderField(V8BreakIterator::kUnicodeStringIndex,
- reinterpret_cast<Smi*>(u_text));
-
- break_iterator->setText(*u_text);
-}
MaybeHandle<String> Intl::ToString(Isolate* isolate,
const icu::UnicodeString& string) {
@@ -902,12 +159,18 @@ void Intl::AddElement(Isolate* isolate, Handle<JSArray> array, int index,
JSObject::AddProperty(isolate, element, additional_property_name,
additional_property_value, NONE);
}
+
+namespace {
+
// Build the shortened locale; e.g., convert xx_Yyyy_ZZ to xx_ZZ.
-bool Intl::RemoveLocaleScriptTag(const std::string& icu_locale,
- std::string* locale_less_script) {
+//
+// If the locale has a script tag, return true and the locale without the
+// script; otherwise return false and an empty string.
+bool RemoveLocaleScriptTag(const std::string& icu_locale,
+ std::string* locale_less_script) {
icu::Locale new_locale = icu::Locale::createCanonical(icu_locale.c_str());
const char* icu_script = new_locale.getScript();
- if (icu_script == NULL || strlen(icu_script) == 0) {
+ if (icu_script == nullptr || strlen(icu_script) == 0) {
*locale_less_script = std::string();
return false;
}
@@ -915,134 +178,33 @@ bool Intl::RemoveLocaleScriptTag(const std::string& icu_locale,
const char* icu_language = new_locale.getLanguage();
const char* icu_country = new_locale.getCountry();
icu::Locale short_locale = icu::Locale(icu_language, icu_country);
- const char* icu_name = short_locale.getName();
- *locale_less_script = std::string(icu_name);
+ *locale_less_script = short_locale.getName();
return true;
}
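// A minimal standalone sketch (not part of this patch) of the helper above,
// compilable against plain ICU; Shorten() and main() are illustrative names.
#include <cstring>
#include <iostream>
#include <string>
#include "unicode/locid.h"

static bool Shorten(const std::string& icu_locale, std::string* out) {
  icu::Locale canonical = icu::Locale::createCanonical(icu_locale.c_str());
  const char* script = canonical.getScript();
  if (script == nullptr || strlen(script) == 0) {
    out->clear();
    return false;
  }
  // Rebuild the locale from language and country only, dropping the script.
  *out = icu::Locale(canonical.getLanguage(), canonical.getCountry()).getName();
  return true;
}

int main() {
  std::string shortened;
  if (Shorten("pa_Guru_IN", &shortened)) {
    std::cout << shortened << "\n";  // Prints "pa_IN".
  }
  return 0;
}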
-namespace {
-
-Maybe<bool> IsPropertyUndefined(Isolate* isolate, Handle<JSObject> options,
- const char* property) {
- Factory* factory = isolate->factory();
- // i. Let prop be the property name.
- // ii. Let value be ? Get(options, prop).
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value,
- Object::GetPropertyOrElement(
- isolate, options, factory->NewStringFromAsciiChecked(property)),
- Nothing<bool>());
- return Just(value->IsUndefined(isolate));
-}
-
} // namespace
-// ecma-402/#sec-todatetimeoptions
-MaybeHandle<JSObject> DateFormat::ToDateTimeOptions(
- Isolate* isolate, Handle<Object> input_options, const char* required,
- const char* defaults) {
- Factory* factory = isolate->factory();
- // 1. If options is undefined, let options be null; otherwise let options be ?
- // ToObject(options).
- Handle<JSObject> options;
- if (input_options->IsUndefined(isolate)) {
- options = factory->NewJSObjectWithNullProto();
- } else {
- Handle<JSReceiver> options_obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
- Object::ToObject(isolate, input_options),
- JSObject);
- // 2. Let options be ObjectCreate(options).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
- JSObject::ObjectCreate(isolate, options_obj),
- JSObject);
- }
-
- // 3. Let needDefaults be true.
- bool needs_default = true;
-
- bool required_is_any = strcmp(required, "any") == 0;
- // 4. If required is "date" or "any", then
- if (required_is_any || (strcmp(required, "date") == 0)) {
- // a. For each of the property names "weekday", "year", "month", "day", do
- for (auto& prop : {"weekday", "year", "month", "day"}) {
- // i. Let prop be the property name.
- // ii. Let value be ? Get(options, prop)
- Maybe<bool> maybe_undefined = IsPropertyUndefined(isolate, options, prop);
- MAYBE_RETURN(maybe_undefined, Handle<JSObject>());
- // iii. If value is not undefined, let needDefaults be false.
- if (!maybe_undefined.FromJust()) {
- needs_default = false;
- }
- }
- }
-
- // 5. If required is "time" or "any", then
- if (required_is_any || (strcmp(required, "time") == 0)) {
- // a. For each of the property names "hour", "minute", "second", do
- for (auto& prop : {"hour", "minute", "second"}) {
- // i. Let prop be the property name.
- // ii. Let value be ? Get(options, prop)
- Maybe<bool> maybe_undefined = IsPropertyUndefined(isolate, options, prop);
- MAYBE_RETURN(maybe_undefined, Handle<JSObject>());
- // iii. If value is not undefined, let needDefaults be false.
- if (!maybe_undefined.FromJust()) {
- needs_default = false;
- }
- }
- }
-
- // 6. If needDefaults is true and defaults is either "date" or "all", then
- if (needs_default) {
- bool default_is_all = strcmp(defaults, "all") == 0;
- if (default_is_all || (strcmp(defaults, "date") == 0)) {
- // a. For each of the property names "year", "month", "day", do
- // i. Perform ? CreateDataPropertyOrThrow(options, prop, "numeric").
- for (auto& prop : {"year", "month", "day"}) {
- MAYBE_RETURN(
- JSReceiver::CreateDataProperty(
- isolate, options, factory->NewStringFromAsciiChecked(prop),
- factory->numeric_string(), kThrowOnError),
- Handle<JSObject>());
- }
- }
- // 7. If needDefaults is true and defaults is either "time" or "all", then
- if (default_is_all || (strcmp(defaults, "time") == 0)) {
- // a. For each of the property names "hour", "minute", "second", do
- // i. Perform ? CreateDataPropertyOrThrow(options, prop, "numeric").
- for (auto& prop : {"hour", "minute", "second"}) {
- MAYBE_RETURN(
- JSReceiver::CreateDataProperty(
- isolate, options, factory->NewStringFromAsciiChecked(prop),
- factory->numeric_string(), kThrowOnError),
- Handle<JSObject>());
- }
- }
- }
- // 8. Return options.
- return options;
-}
-
-std::set<std::string> Intl::GetAvailableLocales(const IcuService& service) {
+std::set<std::string> Intl::GetAvailableLocales(const ICUService service) {
const icu::Locale* icu_available_locales = nullptr;
int32_t count = 0;
std::set<std::string> locales;
switch (service) {
- case IcuService::kBreakIterator:
+ case ICUService::kBreakIterator:
+ case ICUService::kSegmenter:
icu_available_locales = icu::BreakIterator::getAvailableLocales(count);
break;
- case IcuService::kCollator:
+ case ICUService::kCollator:
icu_available_locales = icu::Collator::getAvailableLocales(count);
break;
- case IcuService::kDateFormat:
+ case ICUService::kRelativeDateTimeFormatter:
+ case ICUService::kDateFormat:
icu_available_locales = icu::DateFormat::getAvailableLocales(count);
break;
- case IcuService::kNumberFormat:
+ case ICUService::kNumberFormat:
icu_available_locales = icu::NumberFormat::getAvailableLocales(count);
break;
- case IcuService::kPluralRules:
+ case ICUService::kPluralRules:
// TODO(littledan): For PluralRules, filter out locales that
// don't support PluralRules.
// PluralRules is missing an appropriate getAvailableLocales method,
@@ -1050,44 +212,7 @@ std::set<std::string> Intl::GetAvailableLocales(const IcuService& service) {
// https://ssl.icu-project.org/trac/ticket/12756
icu_available_locales = icu::Locale::getAvailableLocales(count);
break;
- case IcuService::kResourceBundle: {
- UErrorCode status = U_ZERO_ERROR;
- UEnumeration* en = ures_openAvailableLocales(nullptr, &status);
- int32_t length = 0;
- const char* locale_str = uenum_next(en, &length, &status);
- while (U_SUCCESS(status) && (locale_str != nullptr)) {
- std::string locale(locale_str, length);
- std::replace(locale.begin(), locale.end(), '_', '-');
- locales.insert(locale);
- std::string shortened_locale;
- if (Intl::RemoveLocaleScriptTag(locale_str, &shortened_locale)) {
- std::replace(shortened_locale.begin(), shortened_locale.end(), '_',
- '-');
- locales.insert(shortened_locale);
- }
- locale_str = uenum_next(en, &length, &status);
- }
- uenum_close(en);
- return locales;
- }
- case IcuService::kRelativeDateTimeFormatter: {
- // ICU RelativeDateTimeFormatter does not provide a getAvailableLocales()
- // interface, because RelativeDateTimeFormatter depends on
- // 1. NumberFormat and 2. ResourceBundle, return the
- // intersection of these two set.
- // ICU FR at https://unicode-org.atlassian.net/browse/ICU-20009
- // TODO(ftang): change to call ICU's getAvailableLocales() after it is
- // added.
- std::set<std::string> number_format_set(
- Intl::GetAvailableLocales(IcuService::kNumberFormat));
- std::set<std::string> resource_bundle_set(
- Intl::GetAvailableLocales(IcuService::kResourceBundle));
- set_intersection(resource_bundle_set.begin(), resource_bundle_set.end(),
- number_format_set.begin(), number_format_set.end(),
- std::inserter(locales, locales.begin()));
- return locales;
- }
- case IcuService::kListFormatter: {
+ case ICUService::kListFormatter: {
// TODO(ftang): for now just use
// icu::Locale::getAvailableLocales(count) until we migrate to
// Intl::GetAvailableLocales().
@@ -1114,7 +239,7 @@ std::set<std::string> Intl::GetAvailableLocales(const IcuService& service) {
locales.insert(locale);
std::string shortened_locale;
- if (Intl::RemoveLocaleScriptTag(icu_name, &shortened_locale)) {
+ if (RemoveLocaleScriptTag(icu_name, &shortened_locale)) {
std::replace(shortened_locale.begin(), shortened_locale.end(), '_', '-');
locales.insert(shortened_locale);
}
@@ -1123,30 +248,60 @@ std::set<std::string> Intl::GetAvailableLocales(const IcuService& service) {
return locales;
}
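// Hedged sketch of what the switch above ultimately delegates to: asking ICU
// for a service's locales and normalizing "_" to "-" (standalone, plain ICU;
// the Collator service is just one example).
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include "unicode/coll.h"

int main() {
  int32_t count = 0;
  const icu::Locale* available = icu::Collator::getAvailableLocales(count);
  std::set<std::string> locales;
  for (int32_t i = 0; i < count; i++) {
    std::string name(available[i].getName());
    std::replace(name.begin(), name.end(), '_', '-');
    locales.insert(name);
  }
  std::cout << locales.size() << " collator locales\n";
  return 0;
}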
-IcuService Intl::StringToIcuService(Handle<String> service) {
- if (service->IsUtf8EqualTo(CStrVector("collator"))) {
- return IcuService::kCollator;
- } else if (service->IsUtf8EqualTo(CStrVector("numberformat"))) {
- return IcuService::kNumberFormat;
- } else if (service->IsUtf8EqualTo(CStrVector("dateformat"))) {
- return IcuService::kDateFormat;
- } else if (service->IsUtf8EqualTo(CStrVector("breakiterator"))) {
- return IcuService::kBreakIterator;
- } else if (service->IsUtf8EqualTo(CStrVector("pluralrules"))) {
- return IcuService::kPluralRules;
- } else if (service->IsUtf8EqualTo(CStrVector("relativetimeformat"))) {
- return IcuService::kRelativeDateTimeFormatter;
- } else if (service->IsUtf8EqualTo(CStrVector("listformat"))) {
- return IcuService::kListFormatter;
+namespace {
+
+// TODO(gsathya): Remove this once we port ResolveLocale to C++.
+ICUService StringToICUService(Handle<String> service) {
+ std::unique_ptr<char[]> service_cstr = service->ToCString();
+ if (strcmp(service_cstr.get(), "collator") == 0) {
+ return ICUService::kCollator;
+ } else if (strcmp(service_cstr.get(), "numberformat") == 0) {
+ return ICUService::kNumberFormat;
+ } else if (strcmp(service_cstr.get(), "dateformat") == 0) {
+ return ICUService::kDateFormat;
+ } else if (strcmp(service_cstr.get(), "breakiterator") == 0) {
+ return ICUService::kBreakIterator;
+ } else if (strcmp(service_cstr.get(), "pluralrules") == 0) {
+ return ICUService::kPluralRules;
+ } else if (strcmp(service_cstr.get(), "relativetimeformat") == 0) {
+ return ICUService::kRelativeDateTimeFormatter;
+ } else if (strcmp(service_cstr.get(), "listformat") == 0) {
+ return ICUService::kListFormatter;
+  } else if (strcmp(service_cstr.get(), "segmenter") == 0) {
+ return ICUService::kSegmenter;
+ }
+ UNREACHABLE();
+}
+
+const char* ICUServiceToString(ICUService service) {
+ switch (service) {
+ case ICUService::kCollator:
+ return "Intl.Collator";
+ case ICUService::kNumberFormat:
+ return "Intl.NumberFormat";
+ case ICUService::kDateFormat:
+ return "Intl.DateFormat";
+ case ICUService::kBreakIterator:
+ return "Intl.v8BreakIterator";
+ case ICUService::kPluralRules:
+ return "Intl.PluralRules";
+ case ICUService::kRelativeDateTimeFormatter:
+ return "Intl.RelativeTimeFormat";
+ case ICUService::kListFormatter:
+ return "Intl.kListFormat";
+ case ICUService::kSegmenter:
+ return "Intl.kSegmenter";
}
UNREACHABLE();
}
+} // namespace
+
V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> Intl::AvailableLocalesOf(
Isolate* isolate, Handle<String> service) {
Factory* factory = isolate->factory();
std::set<std::string> results =
- Intl::GetAvailableLocales(StringToIcuService(service));
+ Intl::GetAvailableLocales(StringToICUService(service));
Handle<JSObject> locales = factory->NewJSObjectWithNullProto();
int32_t i = 0;
@@ -1196,24 +351,11 @@ bool Intl::IsObjectOfType(Isolate* isolate, Handle<Object> input,
return type == expected_type;
}
-namespace {
-
-// In ECMA 402 v1, Intl constructors supported a mode of operation
-// where calling them with an existing object as a receiver would
-// transform the receiver into the relevant Intl instance with all
-// internal slots. In ECMA 402 v2, this capability was removed, to
-// avoid adding internal slots on existing objects. In ECMA 402 v3,
-// the capability was re-added as "normative optional" in a mode
-// which chains the underlying Intl instance on any object, when the
-// constructor is called
-//
// See ecma402/#legacy-constructor.
-MaybeHandle<Object> LegacyUnwrapReceiver(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<JSFunction> constructor,
- Intl::Type type) {
- bool has_initialized_slot = Intl::IsObjectOfType(isolate, receiver, type);
-
+MaybeHandle<Object> Intl::LegacyUnwrapReceiver(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSFunction> constructor,
+ bool has_initialized_slot) {
Handle<Object> obj_is_instance_of;
ASSIGN_RETURN_ON_EXCEPTION(isolate, obj_is_instance_of,
Object::InstanceOf(isolate, receiver, constructor),
@@ -1236,94 +378,9 @@ MaybeHandle<Object> LegacyUnwrapReceiver(Isolate* isolate,
return receiver;
}
-} // namespace
-
-MaybeHandle<JSObject> Intl::UnwrapReceiver(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<JSFunction> constructor,
- Intl::Type type,
- Handle<String> method_name,
- bool check_legacy_constructor) {
- DCHECK(type == Intl::Type::kCollator || type == Intl::Type::kNumberFormat ||
- type == Intl::Type::kDateTimeFormat ||
- type == Intl::Type::kBreakIterator);
- Handle<Object> new_receiver = receiver;
- if (check_legacy_constructor) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, new_receiver,
- LegacyUnwrapReceiver(isolate, receiver, constructor, type), JSObject);
- }
-
- // Collator has been ported to use regular instance types. We
- // shouldn't be using Intl::IsObjectOfType anymore.
- if (type == Intl::Type::kCollator) {
- if (!receiver->IsJSCollator()) {
- // 3. a. Throw a TypeError exception.
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- method_name, receiver),
- JSObject);
- }
- return Handle<JSCollator>::cast(receiver);
- }
-
- DCHECK_NE(type, Intl::Type::kCollator);
- // 3. If Type(new_receiver) is not Object or nf does not have an
- // [[Initialized...]] internal slot, then
- if (!Intl::IsObjectOfType(isolate, new_receiver, type)) {
- // 3. a. Throw a TypeError exception.
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- method_name, receiver),
- JSObject);
- }
-
- // The above IsObjectOfType returns true only for JSObjects, which
- // makes this cast safe.
- return Handle<JSObject>::cast(new_receiver);
-}
-
-MaybeHandle<JSObject> NumberFormat::Unwrap(Isolate* isolate,
- Handle<JSReceiver> receiver,
- const char* method_name) {
- Handle<Context> native_context =
- Handle<Context>(isolate->context()->native_context(), isolate);
- Handle<JSFunction> constructor = Handle<JSFunction>(
- JSFunction::cast(native_context->intl_number_format_function()), isolate);
- Handle<String> method_name_str =
- isolate->factory()->NewStringFromAsciiChecked(method_name);
-
- return Intl::UnwrapReceiver(isolate, receiver, constructor,
- Intl::Type::kNumberFormat, method_name_str, true);
-}
-
-MaybeHandle<String> NumberFormat::FormatNumber(
- Isolate* isolate, Handle<JSObject> number_format_holder, double value) {
- icu::DecimalFormat* number_format =
- NumberFormat::UnpackNumberFormat(number_format_holder);
- CHECK_NOT_NULL(number_format);
-
- icu::UnicodeString result;
- number_format->format(value, result);
-
- return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
-}
-
-void Intl::DefineWEProperty(Isolate* isolate, Handle<JSObject> target,
- Handle<Name> key, Handle<Object> value) {
- PropertyDescriptor desc;
- desc.set_writable(true);
- desc.set_enumerable(true);
- desc.set_value(value);
- Maybe<bool> success =
- JSReceiver::DefineOwnProperty(isolate, target, key, &desc, kDontThrow);
- DCHECK(success.IsJust() && success.FromJust());
- USE(success);
-}
-
namespace {
+#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
// Define general regexp macros.
// Note "(?:" means the regexp group a non-capture group.
#define REGEX_ALPHA "[a-z]"
@@ -1435,6 +492,7 @@ icu::RegexMatcher* GetLanguageVariantRegexMatcher(Isolate* isolate) {
}
return language_variant_regexp_matcher;
}
+#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
} // anonymous namespace
@@ -1560,6 +618,7 @@ char AsciiToLower(char c) {
return c | (1 << 5);
}
+#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
/**
* Check the structural Validity of the language tag per ECMA 402 6.2.2:
* - Well-formed per RFC 5646 2.1
@@ -1571,7 +630,7 @@ char AsciiToLower(char c) {
* primary/extended language, script, region, variant are not checked
* against the IANA language subtag registry.
*
- * ICU is too permissible and lets invalid tags, like
+ * ICU 62 or earlier is too permissible and lets invalid tags, like
* hant-cmn-cn, through.
*
* Returns false if the language tag is invalid.
@@ -1612,7 +671,7 @@ bool IsStructurallyValidLanguageTag(Isolate* isolate,
// is not valid and would fail LANGUAGE_TAG_RE test.
size_t pos = 0;
std::vector<std::string> parts;
- while ((pos = locale.find("-")) != std::string::npos) {
+ while ((pos = locale.find('-')) != std::string::npos) {
std::string token = locale.substr(0, pos);
parts.push_back(token);
locale = locale.substr(pos + 1);
@@ -1662,6 +721,7 @@ bool IsStructurallyValidLanguageTag(Isolate* isolate,
return true;
}
+#endif  // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
bool IsLowerAscii(char c) { return c >= 'a' && c <= 'z'; }
@@ -1713,6 +773,14 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
}
std::string locale(locale_str->ToCString().get());
+ if (locale.length() == 0 ||
+ !String::IsAscii(locale.data(), static_cast<int>(locale.length()))) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
+ Nothing<std::string>());
+ }
+
// Optimize for the most common case: a 2-letter language code in the
// canonical form/lowercase that is not one of the deprecated codes
// (in, iw, ji, jw). Don't check for ~70 of 3-letter deprecated language
@@ -1726,12 +794,15 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
// Because per BCP 47 2.1.1 language tags are case-insensitive, lowercase
// the input before any more check.
std::transform(locale.begin(), locale.end(), locale.begin(), AsciiToLower);
+
+#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
if (!IsStructurallyValidLanguageTag(isolate, locale)) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
Nothing<std::string>());
}
+#endif
// ICU maps a few grandfathered tags to what looks like a regular language
// tag even though IANA language tag registry does not have a preferred
@@ -1749,11 +820,18 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
// https://unicode-org.atlassian.net/browse/ICU-13417
UErrorCode error = U_ZERO_ERROR;
char icu_result[ULOC_FULLNAME_CAPACITY];
+  // uloc_forLanguageTag checks the structural validity. If the input BCP47
+  // language tag is parsed all the way to the end, it indicates that the
+  // input is structurally valid. Due to a couple of bugs, that signal is
+  // only reliable with Chromium's ICU patches or with ICU 63 and later.
+ int parsed_length;
uloc_forLanguageTag(locale.c_str(), icu_result, ULOC_FULLNAME_CAPACITY,
- nullptr, &error);
- if (U_FAILURE(error) || error == U_STRING_NOT_TERMINATED_WARNING) {
- // TODO(jshin): This should not happen because the structural validity
- // is already checked. If that's the case, remove this.
+ &parsed_length, &error);
+ if (U_FAILURE(error) ||
+#if USE_CHROMIUM_ICU == 1 || U_ICU_VERSION_MAJOR_NUM >= 63
+ static_cast<size_t>(parsed_length) < locale.length() ||
+#endif
+ error == U_STRING_NOT_TERMINATED_WARNING) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
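// Standalone sketch of the parsed_length idiom above: with ICU 63+ (or
// Chromium's patched ICU), uloc_forLanguageTag consuming the entire input
// implies the tag is structurally valid. The tag below is illustrative.
#include <cstring>
#include <iostream>
#include "unicode/uloc.h"

int main() {
  const char* tag = "en-US";
  char icu_result[ULOC_FULLNAME_CAPACITY];
  int32_t parsed_length = 0;
  UErrorCode error = U_ZERO_ERROR;
  uloc_forLanguageTag(tag, icu_result, ULOC_FULLNAME_CAPACITY, &parsed_length,
                      &error);
  bool valid = !U_FAILURE(error) && error != U_STRING_NOT_TERMINATED_WARNING &&
               static_cast<size_t>(parsed_length) == strlen(tag);
  std::cout << (valid ? icu_result : "invalid") << "\n";  // Prints "en_US".
  return 0;
}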
@@ -1849,120 +927,6 @@ Maybe<std::vector<std::string>> Intl::CanonicalizeLocaleList(
return Just(seen);
}
-// ecma-402/#sec-currencydigits
-Handle<Smi> Intl::CurrencyDigits(Isolate* isolate, Handle<String> currency) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- v8::String::Value currency_string(v8_isolate, v8::Utils::ToLocal(currency));
- CHECK_NOT_NULL(*currency_string);
-
- DisallowHeapAllocation no_gc;
- UErrorCode status = U_ZERO_ERROR;
- uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
- reinterpret_cast<const UChar*>(*currency_string), &status);
- // For missing currency codes, default to the most common, 2
- if (U_FAILURE(status)) fraction_digits = 2;
- return Handle<Smi>(Smi::FromInt(fraction_digits), isolate);
-}
-
-MaybeHandle<JSObject> Intl::CreateNumberFormat(Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved) {
- Handle<JSFunction> constructor(
- isolate->native_context()->intl_number_format_function(), isolate);
-
- Handle<JSObject> local_object;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, local_object,
- JSObject::New(constructor, constructor), JSObject);
-
- // Set number formatter as embedder field of the resulting JS object.
- icu::DecimalFormat* number_format =
- NumberFormat::InitializeNumberFormat(isolate, locale, options, resolved);
-
- CHECK_NOT_NULL(number_format);
-
- local_object->SetEmbedderField(NumberFormat::kDecimalFormatIndex,
- reinterpret_cast<Smi*>(number_format));
-
- Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
- NumberFormat::DeleteNumberFormat,
- WeakCallbackType::kInternalFields);
- return local_object;
-}
-
-/**
- * Parses Unicode extension into key - value map.
- * Returns empty object if the extension string is invalid.
- * We are not concerned with the validity of the values at this point.
- * 'attribute' in RFC 6047 is not supported. Keys without explicit
- * values are assigned UNDEFINED.
- * TODO(jshin): Fix the handling of 'attribute' (in RFC 6047, but none
- * has been defined so that it's not used) and boolean keys without
- * an explicit value.
- */
-void Intl::ParseExtension(Isolate* isolate, const std::string& extension,
- std::map<std::string, std::string>& out) {
- if (extension.compare(0, 3, "-u-") != 0) return;
-
- // Key is {2}alphanum, value is {3,8}alphanum.
- // Some keys may not have explicit values (booleans).
- std::string key;
- std::string value;
- // Skip the "-u-".
- size_t start = 3;
- size_t end;
- do {
- end = extension.find("-", start);
- size_t length =
- (end == std::string::npos) ? extension.length() - start : end - start;
- std::string element = extension.substr(start, length);
- // Key is {2}alphanum
- if (length == 2) {
- if (!key.empty()) {
- out.insert(std::pair<std::string, std::string>(key, value));
- value.clear();
- }
- key = element;
- // value is {3,8}alphanum.
- } else if (length >= 3 && length <= 8 && !key.empty()) {
- value = value.empty() ? element : (value + "-" + element);
- } else {
- return;
- }
- start = end + 1;
- } while (end != std::string::npos);
- if (!key.empty()) out.insert(std::pair<std::string, std::string>(key, value));
-}
-
-namespace {
-
-bool IsAToZ(char ch) {
- return IsInRange(AsciiAlphaToLower(ch), 'a', 'z');
-}
-
-} // namespace
-
-// Verifies that the input is a well-formed ISO 4217 currency code.
-// ecma402/#sec-currency-codes
-bool Intl::IsWellFormedCurrencyCode(Isolate* isolate, Handle<String> currency) {
- // 2. If the number of elements in normalized is not 3, return false.
- if (currency->length() != 3) return false;
-
- currency = String::Flatten(isolate, currency);
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = currency->GetFlatContent();
-
- // 1. Let normalized be the result of mapping currency to upper case as
- // described in 6.1. 3. If normalized contains any character that is not in
- // the range "A" to "Z" (U+0041 to U+005A), return false. 4. Return true.
- // Don't uppercase to test. It could convert invalid code into a valid one.
- // For example \u00DFP (Eszett+P) becomes SSP.
- return (IsAToZ(flat.Get(0)) && IsAToZ(flat.Get(1)) && IsAToZ(flat.Get(2)));
- }
-}
-
// ecma402 #sup-string.prototype.tolocalelowercase
// ecma402 #sup-string.prototype.tolocaleuppercase
MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
@@ -1976,7 +940,7 @@ MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
std::string requested_locale = requested_locales.size() == 0
? Intl::DefaultLocale(isolate)
: requested_locales[0];
- size_t dash = requested_locale.find("-");
+ size_t dash = requested_locale.find('-');
if (dash != std::string::npos) {
requested_locale = requested_locale.substr(0, dash);
}
@@ -2069,22 +1033,26 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
factory->NewStringFromStaticChars("numberformat"),
locales, options, factory->undefined_value()),
String);
- DCHECK(
- Intl::IsObjectOfType(isolate, number_format_holder, Intl::kNumberFormat));
+ DCHECK(number_format_holder->IsJSNumberFormat());
+ Handle<JSNumberFormat> number_format = Handle<JSNumberFormat>(
+ JSNumberFormat::cast(*number_format_holder), isolate);
+
Handle<Object> number_obj;
ASSIGN_RETURN_ON_EXCEPTION(isolate, number_obj,
Object::ToNumber(isolate, num), String);
// Spec treats -0 and +0 as 0.
double number = number_obj->Number() + 0;
+
// Return FormatNumber(numberFormat, x).
- return NumberFormat::FormatNumber(isolate, number_format_holder, number);
+ return JSNumberFormat::FormatNumber(isolate, number_format, number);
}
+namespace {
+
// ecma402/#sec-defaultnumberoption
-Maybe<int> Intl::DefaultNumberOption(Isolate* isolate, Handle<Object> value,
- int min, int max, int fallback,
- Handle<String> property) {
+Maybe<int> DefaultNumberOption(Isolate* isolate, Handle<Object> value, int min,
+ int max, int fallback, Handle<String> property) {
// 2. Else, return fallback.
if (value->IsUndefined()) return Just(fallback);
@@ -2114,9 +1082,9 @@ Maybe<int> Intl::DefaultNumberOption(Isolate* isolate, Handle<Object> value,
}
// ecma402/#sec-getnumberoption
-Maybe<int> Intl::GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
- Handle<String> property, int min, int max,
- int fallback) {
+Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
+ Handle<String> property, int min, int max,
+ int fallback) {
// 1. Let value be ? Get(options, property).
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -2127,14 +1095,16 @@ Maybe<int> Intl::GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
return DefaultNumberOption(isolate, value, min, max, fallback, property);
}
-Maybe<int> Intl::GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
- const char* property, int min, int max,
- int fallback) {
+Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
+ const char* property, int min, int max,
+ int fallback) {
Handle<String> property_str =
isolate->factory()->NewStringFromAsciiChecked(property);
return GetNumberOption(isolate, options, property_str, min, max, fallback);
}
+} // namespace
+
Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
icu::DecimalFormat* number_format,
Handle<JSReceiver> options,
@@ -2174,7 +1144,7 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
// 9. Let mnsd be ? Get(options, "minimumSignificantDigits").
Handle<Object> mnsd_obj;
Handle<String> mnsd_str =
- isolate->factory()->NewStringFromStaticChars("minimumSignificantDigits");
+ isolate->factory()->minimumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, mnsd_obj, JSReceiver::GetProperty(isolate, options, mnsd_str),
Nothing<bool>());
@@ -2182,7 +1152,7 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
// 10. Let mxsd be ? Get(options, "maximumSignificantDigits").
Handle<Object> mxsd_obj;
Handle<String> mxsd_str =
- isolate->factory()->NewStringFromStaticChars("maximumSignificantDigits");
+ isolate->factory()->maximumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, mxsd_obj, JSReceiver::GetProperty(isolate, options, mxsd_str),
Nothing<bool>());
@@ -2228,96 +1198,108 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
namespace {
-// ECMA 402 9.2.2 BestAvailableLocale(availableLocales, locale)
-// https://tc39.github.io/ecma402/#sec-bestavailablelocale
-std::string BestAvailableLocale(std::set<std::string> available_locales,
- std::string locale) {
- const char separator = '-';
-
+// ecma402/#sec-bestavailablelocale
+std::string BestAvailableLocale(const std::set<std::string>& available_locales,
+ const std::string& locale) {
// 1. Let candidate be locale.
+ std::string candidate = locale;
+
// 2. Repeat,
- do {
+ while (true) {
// 2.a. If availableLocales contains an element equal to candidate, return
// candidate.
- if (available_locales.find(locale) != available_locales.end()) {
- return locale;
+ if (available_locales.find(candidate) != available_locales.end()) {
+ return candidate;
}
+
// 2.b. Let pos be the character index of the last occurrence of "-"
// (U+002D) within candidate. If that character does not occur, return
// undefined.
- size_t pos = locale.rfind(separator);
+ size_t pos = candidate.rfind('-');
if (pos == std::string::npos) {
- return "";
+ return std::string();
}
+
// 2.c. If pos ≥ 2 and the character "-" occurs at index pos-2 of candidate,
// decrease pos by 2.
- if (pos >= 2 && locale[pos - 2] == separator) {
+ if (pos >= 2 && candidate[pos - 2] == '-') {
pos -= 2;
}
+
// 2.d. Let candidate be the substring of candidate from position 0,
// inclusive, to position pos, exclusive.
- locale = locale.substr(0, pos);
- } while (true);
+ candidate = candidate.substr(0, pos);
+ }
}
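// Standalone sketch (not part of this patch) tracing the fallback walk above;
// Walk() mirrors the loop, and the available set is illustrative.
#include <cassert>
#include <set>
#include <string>

static std::string Walk(const std::set<std::string>& available,
                        std::string candidate) {
  while (true) {
    if (available.count(candidate) != 0) return candidate;
    size_t pos = candidate.rfind('-');
    if (pos == std::string::npos) return std::string();
    // Step 2.c: skip back over a singleton such as "-u" so no candidate ever
    // ends in a dangling extension singleton.
    if (pos >= 2 && candidate[pos - 2] == '-') pos -= 2;
    candidate = candidate.substr(0, pos);
  }
}

int main() {
  std::set<std::string> available = {"zh", "zh-Hans"};
  assert(Walk(available, "zh-Hans-CN") == "zh-Hans");  // "-CN" dropped first.
  assert(Walk(available, "de-DE").empty());            // No match at any level.
  return 0;
}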
-#define ANY_EXTENSION_REGEXP "-[a-z0-9]{1}-.*"
+// Removes Unicode extensions from a given BCP 47 language tag.
+// For example, converts 'en-US-u-co-emoji' to 'en-US'.
+std::string RemoveUnicodeExtensions(const std::string& locale) {
+ size_t length = locale.length();
-std::unique_ptr<icu::RegexMatcher> GetAnyExtensionRegexpMatcher() {
- UErrorCode status = U_ZERO_ERROR;
- std::unique_ptr<icu::RegexMatcher> matcher(new icu::RegexMatcher(
- icu::UnicodeString(ANY_EXTENSION_REGEXP, -1, US_INV), 0, status));
- DCHECK(U_SUCCESS(status));
- return matcher;
-}
+ // Privateuse or grandfathered locales have no extension sequences.
+ if ((length > 1) && (locale[1] == '-')) {
+ // Check to make sure that this really is a grandfathered or
+ // privateuse extension. ICU can sometimes mess up the
+ // canonicalization.
+ CHECK(locale[0] == 'x' || locale[0] == 'i');
+ return locale;
+ }
-#undef ANY_EXTENSION_REGEXP
+ size_t unicode_extension_start = locale.find("-u-");
-// ECMA 402 9.2.7 LookupSupportedLocales(availableLocales, requestedLocales)
-// https://tc39.github.io/ecma402/#sec-lookupsupportedlocales
-std::vector<std::string> LookupSupportedLocales(
- std::set<std::string> available_locales,
- std::vector<std::string> requested_locales) {
- std::unique_ptr<icu::RegexMatcher> matcher = GetAnyExtensionRegexpMatcher();
+ // No unicode extensions found.
+ if (unicode_extension_start == std::string::npos) return locale;
+
+ size_t private_extension_start = locale.find("-x-");
+
+ // Unicode extensions found within privateuse subtags don't count.
+ if (private_extension_start != std::string::npos &&
+ private_extension_start < unicode_extension_start) {
+ return locale;
+ }
+
+ const std::string beginning = locale.substr(0, unicode_extension_start);
+ size_t unicode_extension_end = length;
+ DCHECK_GT(length, 2);
+
+  // Find the end of the extension production as per the bcp47 grammar by
+  // looking for '-' followed by a single character (a singleton) and then
+  // another '-'.
+ for (size_t i = unicode_extension_start + 1; i < length - 2; i++) {
+ if (locale[i] != '-') continue;
+
+ if (locale[i + 2] == '-') {
+ unicode_extension_end = i;
+ break;
+ }
+
+ i += 2;
+ }
+ const std::string end = locale.substr(unicode_extension_end);
+ return beginning + end;
+}
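// Worked examples (sketch, not from this patch) of the scan above:
//   "en-US-u-co-emoji"     -> "en-US"       (extension runs to end of string)
//   "de-u-nu-latn-x-u-foo" -> "de-x-u-foo"  (the "-x-" singleton ends the
//                                            "-u-" extension; privateuse kept)
//   "x-whatever"           -> "x-whatever"  (privateuse tag returned as-is)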
+
+// ecma402/#sec-lookupsupportedlocales
+std::vector<std::string> LookupSupportedLocales(
+ const std::set<std::string>& available_locales,
+ const std::vector<std::string>& requested_locales) {
// 1. Let subset be a new empty List.
std::vector<std::string> subset;
// 2. For each element locale of requestedLocales in List order, do
- for (auto locale : requested_locales) {
- // 2.a. Let noExtensionsLocale be the String value that is locale with all
- // Unicode locale extension sequences removed.
- icu::UnicodeString locale_uni(locale.c_str(), -1, US_INV);
- // TODO(bstell): look at using uloc_forLanguageTag to convert the language
- // tag to locale id
- // TODO(bstell): look at using uloc_getBaseName to just get the name without
- // all the keywords
- matcher->reset(locale_uni);
- UErrorCode status = U_ZERO_ERROR;
- // TODO(bstell): need to determine if this is the correct behavior.
- // This matches the JS implementation but might not match the spec.
- // According to
- // https://tc39.github.io/ecma402/#sec-unicode-locale-extension-sequences:
- //
- // This standard uses the term "Unicode locale extension sequence" for
- // any substring of a language tag that is not part of a private use
- // subtag sequence, starts with a separator "-" and the singleton "u",
- // and includes the maximum sequence of following non-singleton subtags
- // and their preceding "-" separators.
- //
- // According to the spec a locale "en-t-aaa-u-bbb-v-ccc-x-u-ddd", should
- // remove only the "-u-bbb" part, and keep everything else, whereas this
- // regexp matcher would leave only the "en".
- icu::UnicodeString no_extensions_locale_uni =
- matcher->replaceAll("", status);
- DCHECK(U_SUCCESS(status));
- std::string no_extensions_locale;
- no_extensions_locale_uni.toUTF8String(no_extensions_locale);
- // 2.b. Let availableLocale be BestAvailableLocale(availableLocales,
- // noExtensionsLocale).
+ for (const std::string& locale : requested_locales) {
+ // 2. a. Let noExtensionsLocale be the String value that is locale
+ // with all Unicode locale extension sequences removed.
+ std::string no_extension_locale = RemoveUnicodeExtensions(locale);
+
+ // 2. b. Let availableLocale be
+ // BestAvailableLocale(availableLocales, noExtensionsLocale).
std::string available_locale =
- BestAvailableLocale(available_locales, no_extensions_locale);
- // 2.c. If availableLocale is not undefined, append locale to the end of
- // subset.
+ BestAvailableLocale(available_locales, no_extension_locale);
+
+ // 2. c. If availableLocale is not undefined, append locale to the
+ // end of subset.
if (!available_locale.empty()) {
subset.push_back(locale);
}
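// Worked example (sketch) for steps 2.a-2.c above: with
// available = {"fr", "fr-CA"} and requested = ["fr-CA-u-hc-h12", "pt-BR"],
// the extension-free forms are "fr-CA" and "pt-BR"; BestAvailableLocale
// matches "fr-CA" and finds nothing for "pt-BR"/"pt", so subset ends up as
// ["fr-CA-u-hc-h12"]. Note that the original tag, extensions and all, is
// what gets appended.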
@@ -2330,8 +1312,8 @@ std::vector<std::string> LookupSupportedLocales(
// ECMA 402 9.2.8 BestFitSupportedLocales(availableLocales, requestedLocales)
// https://tc39.github.io/ecma402/#sec-bestfitsupportedlocales
std::vector<std::string> BestFitSupportedLocales(
- std::set<std::string> available_locales,
- std::vector<std::string> requested_locales) {
+ const std::set<std::string>& available_locales,
+ const std::vector<std::string>& requested_locales) {
return LookupSupportedLocales(available_locales, requested_locales);
}
@@ -2378,26 +1360,28 @@ MaybeHandle<JSObject> CreateReadOnlyArray(Isolate* isolate,
// ECMA 402 9.2.9 SupportedLocales(availableLocales, requestedLocales, options)
// https://tc39.github.io/ecma402/#sec-supportedlocales
MaybeHandle<JSObject> SupportedLocales(
- Isolate* isolate, std::string service,
- std::set<std::string> available_locales,
- std::vector<std::string> requested_locales, Handle<Object> options) {
+ Isolate* isolate, ICUService service,
+ const std::set<std::string>& available_locales,
+ const std::vector<std::string>& requested_locales, Handle<Object> options) {
std::vector<std::string> supported_locales;
- // 1. If options is not undefined, then
- // a. Let options be ? ToObject(options).
- // b. Let matcher be ? GetOption(options, "localeMatcher", "string",
- // « "lookup", "best fit" », "best fit").
// 2. Else, let matcher be "best fit".
MatcherOption matcher = kBestFit;
+
+ // 1. If options is not undefined, then
if (!options->IsUndefined(isolate)) {
+ // 1. a. Let options be ? ToObject(options).
Handle<JSReceiver> options_obj;
ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
Object::ToObject(isolate, options), JSObject);
+
+ // 1. b. Let matcher be ? GetOption(options, "localeMatcher", "string",
+ // « "lookup", "best fit" », "best fit").
std::unique_ptr<char[]> matcher_str = nullptr;
std::vector<const char*> matcher_values = {"lookup", "best fit"};
- Maybe<bool> maybe_found_matcher =
- Intl::GetStringOption(isolate, options_obj, "localeMatcher",
- matcher_values, service.c_str(), &matcher_str);
+ Maybe<bool> maybe_found_matcher = Intl::GetStringOption(
+ isolate, options_obj, "localeMatcher", matcher_values,
+ ICUServiceToString(service), &matcher_str);
MAYBE_RETURN(maybe_found_matcher, MaybeHandle<JSObject>());
if (maybe_found_matcher.FromJust()) {
DCHECK_NOT_NULL(matcher_str.get());
@@ -2440,28 +1424,69 @@ MaybeHandle<JSObject> SupportedLocales(
}
} // namespace
-// ECMA 402 10.2.2 Intl.Collator.supportedLocalesOf
-// https://tc39.github.io/ecma402/#sec-intl.collator.supportedlocalesof
-// of Intl::SupportedLocalesOf thru JS
+// ECMA 402 Intl.*.supportedLocalesOf
MaybeHandle<JSObject> Intl::SupportedLocalesOf(Isolate* isolate,
- Handle<String> service,
- Handle<Object> locales_in,
- Handle<Object> options_in) {
+ ICUService service,
+ Handle<Object> locales,
+ Handle<Object> options) {
// Let availableLocales be %Collator%.[[AvailableLocales]].
- IcuService icu_service = Intl::StringToIcuService(service);
- std::set<std::string> available_locales = GetAvailableLocales(icu_service);
- std::vector<std::string> requested_locales;
+ std::set<std::string> available_locales = GetAvailableLocales(service);
+
// Let requestedLocales be ? CanonicalizeLocaleList(locales).
- bool got_requested_locales =
- CanonicalizeLocaleList(isolate, locales_in, false).To(&requested_locales);
- if (!got_requested_locales) {
- return MaybeHandle<JSObject>();
- }
+ Maybe<std::vector<std::string>> requested_locales =
+ CanonicalizeLocaleList(isolate, locales, false);
+ MAYBE_RETURN(requested_locales, MaybeHandle<JSObject>());
// Return ? SupportedLocales(availableLocales, requestedLocales, options).
- std::string service_str(service->ToCString().get());
- return SupportedLocales(isolate, service_str, available_locales,
- requested_locales, options_in);
+ return SupportedLocales(isolate, service, available_locales,
+ requested_locales.FromJust(), options);
+}
+
+std::map<std::string, std::string> Intl::LookupUnicodeExtensions(
+ const icu::Locale& icu_locale, const std::set<std::string>& relevant_keys) {
+ std::map<std::string, std::string> extensions;
+
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> keywords(
+ icu_locale.createKeywords(status));
+ if (U_FAILURE(status)) return extensions;
+
+ if (!keywords) return extensions;
+ char value[ULOC_FULLNAME_CAPACITY];
+
+ int32_t length;
+ status = U_ZERO_ERROR;
+ for (const char* keyword = keywords->next(&length, status);
+ keyword != nullptr; keyword = keywords->next(&length, status)) {
+ // Ignore failures in ICU and skip to the next keyword.
+ //
+ // This is fine.™
+ if (U_FAILURE(status)) {
+ status = U_ZERO_ERROR;
+ continue;
+ }
+
+ icu_locale.getKeywordValue(keyword, value, ULOC_FULLNAME_CAPACITY, status);
+
+ // Ignore failures in ICU and skip to the next keyword.
+ //
+ // This is fine.™
+ if (U_FAILURE(status)) {
+ status = U_ZERO_ERROR;
+ continue;
+ }
+
+ const char* bcp47_key = uloc_toUnicodeLocaleKey(keyword);
+
+ // Ignore keywords that we don't recognize - spec allows that.
+ if (bcp47_key && (relevant_keys.find(bcp47_key) != relevant_keys.end())) {
+ const char* bcp47_value = uloc_toUnicodeLocaleType(bcp47_key, value);
+ extensions.insert(
+ std::pair<std::string, std::string>(bcp47_key, bcp47_value));
+ }
+ }
+
+ return extensions;
}
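// Standalone sketch of the ICU keyword -> BCP 47 mapping relied on above;
// "collation"/"phonebook" is a well-known example pair, not from this patch.
#include <iostream>
#include "unicode/uloc.h"

int main() {
  const char* key = uloc_toUnicodeLocaleKey("collation");           // "co"
  const char* value = uloc_toUnicodeLocaleType("co", "phonebook");  // "phonebk"
  std::cout << key << "-" << value << "\n";  // Prints "co-phonebk".
  return 0;
}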
} // namespace internal
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 38d11772a4..fd2842ebbb 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -20,10 +20,7 @@
#include "unicode/uversion.h"
namespace U_ICU_NAMESPACE {
-class BreakIterator;
-class Collator;
class DecimalFormat;
-class PluralRules;
class SimpleDateFormat;
class UnicodeString;
}
@@ -33,191 +30,7 @@ namespace internal {
template <typename T>
class Handle;
-
-class DateFormat {
- public:
- // Create a formatter for the specificied locale and options. Returns the
- // resolved settings for the locale / options.
- static icu::SimpleDateFormat* InitializeDateTimeFormat(
- Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
- Handle<JSObject> resolved);
-
- // Unpacks date format object from corresponding JavaScript object.
- static icu::SimpleDateFormat* UnpackDateFormat(Handle<JSObject> obj);
-
- // Release memory we allocated for the DateFormat once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteDateFormat(const v8::WeakCallbackInfo<void>& data);
-
- // ecma402/#sec-formatdatetime
- // FormatDateTime( dateTimeFormat, x )
- V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatDateTime(
- Isolate* isolate, Handle<JSObject> date_time_format_holder, double x);
-
- // ecma402/#sec-datetime-format-functions
- // DateTime Format Functions
- V8_WARN_UNUSED_RESULT static MaybeHandle<String> DateTimeFormat(
- Isolate* isolate, Handle<JSObject> date_time_format_holder,
- Handle<Object> date);
-
- // The UnwrapDateTimeFormat abstract operation gets the underlying
- // DateTimeFormat operation for various methods which implement ECMA-402 v1
- // semantics for supporting initializing existing Intl objects.
- //
- // ecma402/#sec-unwrapdatetimeformat
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> Unwrap(
- Isolate* isolate, Handle<JSReceiver> receiver, const char* method_name);
-
- // ecma-402/#sec-todatetimeoptions
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ToDateTimeOptions(
- Isolate* isolate, Handle<Object> input_options, const char* required,
- const char* defaults);
-
- V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToLocaleDateTime(
- Isolate* isolate, Handle<Object> date, Handle<Object> locales,
- Handle<Object> options, const char* required, const char* defaults,
- const char* service);
-
- // Layout description.
-#define DATE_FORMAT_FIELDS(V) \
- V(kSimpleDateFormat, kPointerSize) \
- V(kBoundFormat, kPointerSize) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, DATE_FORMAT_FIELDS)
-#undef DATE_FORMAT_FIELDS
-
- // ContextSlot defines the context structure for the bound
- // DateTimeFormat.prototype.format function
- enum ContextSlot {
- kDateFormat = Context::MIN_CONTEXT_SLOTS,
-
- kLength
- };
-
- // TODO(ryzokuken): Remove this and use regular accessors once DateFormat is a
- // subclass of JSObject
- //
- // This needs to be consistent with the above Layout Description
- static const int kSimpleDateFormatIndex = 0;
- static const int kBoundFormatIndex = 1;
-
- private:
- DateFormat();
-};
-
-class NumberFormat {
- public:
- // Create a formatter for the specificied locale and options. Returns the
- // resolved settings for the locale / options.
- static icu::DecimalFormat* InitializeNumberFormat(Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved);
-
- // Unpacks number format object from corresponding JavaScript object.
- static icu::DecimalFormat* UnpackNumberFormat(Handle<JSObject> obj);
-
- // Release memory we allocated for the NumberFormat once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data);
-
- // The UnwrapNumberFormat abstract operation gets the underlying
- // NumberFormat operation for various methods which implement
- // ECMA-402 v1 semantics for supporting initializing existing Intl
- // objects.
- //
- // ecma402/#sec-unwrapnumberformat
- static MaybeHandle<JSObject> Unwrap(Isolate* isolate,
- Handle<JSReceiver> receiver,
- const char* method_name);
-
- // ecm402/#sec-formatnumber
- static MaybeHandle<String> FormatNumber(Isolate* isolate,
- Handle<JSObject> number_format_holder,
- double value);
-
- // Layout description.
-#define NUMBER_FORMAT_FIELDS(V) \
- /* Pointer fields. */ \
- V(kDecimalFormat, kPointerSize) \
- V(kBoundFormat, kPointerSize) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, NUMBER_FORMAT_FIELDS)
-#undef NUMBER_FORMAT_FIELDS
-
- // ContextSlot defines the context structure for the bound
- // NumberFormat.prototype.format function.
- enum ContextSlot {
- // The number format instance that the function holding this
- // context is bound to.
- kNumberFormat = Context::MIN_CONTEXT_SLOTS,
-
- kLength
- };
-
- // TODO(gsathya): Remove this and use regular accessors once
- // NumberFormat is a sub class of JSObject.
- //
- // This needs to be consistent with the above LayoutDescription.
- static const int kDecimalFormatIndex = 0;
- static const int kBoundFormatIndex = 1;
-
- private:
- NumberFormat();
-};
-
-class V8BreakIterator {
- public:
- // Create a BreakIterator for the specificied locale and options. Returns the
- // resolved settings for the locale / options.
- static icu::BreakIterator* InitializeBreakIterator(Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved);
-
- // Unpacks break iterator object from corresponding JavaScript object.
- static icu::BreakIterator* UnpackBreakIterator(Handle<JSObject> obj);
-
- // Release memory we allocated for the BreakIterator once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteBreakIterator(const v8::WeakCallbackInfo<void>& data);
-
- static void AdoptText(Isolate* isolate,
- Handle<JSObject> break_iterator_holder,
- Handle<String> text);
-
- // Layout description.
-#define BREAK_ITERATOR_FIELDS(V) \
- /* Pointer fields. */ \
- V(kBreakIterator, kPointerSize) \
- V(kUnicodeString, kPointerSize) \
- V(kBoundAdoptText, kPointerSize) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, BREAK_ITERATOR_FIELDS)
-#undef BREAK_ITERATOR_FIELDS
-
- // ContextSlot defines the context structure for the bound
- // v8BreakIterator.prototype.adoptText function
- enum class ContextSlot {
- kV8BreakIterator = Context::MIN_CONTEXT_SLOTS,
-
- kLength
- };
-
- // TODO(ryzokuken): Remove this and use regular accessors once v8BreakIterator
- // is a subclass of JSObject
- //
- // This needs to be consistent with the above Layour Description
- static const int kBreakIteratorIndex = 0;
- static const int kUnicodeStringIndex = 1;
- static const int kBoundAdoptTextIndex = 2;
-
- private:
- V8BreakIterator();
-};
+class JSCollator;
class Intl {
public:
@@ -232,6 +45,11 @@ class Intl {
kTypeCount
};
+ enum class BoundFunctionContextSlot {
+ kBoundFunction = Context::MIN_CONTEXT_SLOTS,
+ kLength
+ };
+
inline static Intl::Type TypeFromInt(int type);
inline static Intl::Type TypeFromSmi(Smi* type);
@@ -243,41 +61,27 @@ class Intl {
static bool IsObjectOfType(Isolate* isolate, Handle<Object> object,
Intl::Type expected_type);
- static IcuService StringToIcuService(Handle<String> service);
-
// Gets the ICU locales for a given service. If there is a locale with a
// script tag then the locales also include a locale without the script; eg,
// pa_Guru_IN (language=Panjabi, script=Gurmukhi, country-India) would include
// pa_IN.
- static std::set<std::string> GetAvailableLocales(const IcuService& service);
+ static std::set<std::string> GetAvailableLocales(ICUService service);
+
+ // Get the name of the numbering system from locale.
+  // ICU doesn't expose the numbering system in any way, so we have to assume
+  // that for a given locale the NumberingSystem constructor produces the same
+  // digits as NumberFormat/Calendar would.
+ static std::string GetNumberingSystem(const icu::Locale& icu_locale);
static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> AvailableLocalesOf(
Isolate* isolate, Handle<String> service);
- static MaybeHandle<JSObject> SupportedLocalesOf(Isolate* isolate,
- Handle<String> service,
- Handle<Object> locales_in,
- Handle<Object> options_in);
+ static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> SupportedLocalesOf(
+ Isolate* isolate, ICUService service, Handle<Object> locales_in,
+ Handle<Object> options_in);
static std::string DefaultLocale(Isolate* isolate);
- static void DefineWEProperty(Isolate* isolate, Handle<JSObject> target,
- Handle<Name> key, Handle<Object> value);
-
- // If locale has a script tag then return true and the locale without the
- // script else return false and an empty string
- static bool RemoveLocaleScriptTag(const std::string& icu_locale,
- std::string* locale_less_script);
-
- // Returns the underlying Intl receiver for various methods which
- // implement ECMA-402 v1 semantics for supporting initializing
- // existing Intl objects.
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> UnwrapReceiver(
- Isolate* isolate, Handle<JSReceiver> receiver,
- Handle<JSFunction> constructor, Intl::Type type,
- Handle<String> method_name /* TODO(gsathya): Make this char const* */,
- bool check_legacy_constructor = false);
-
// The ResolveLocale abstract operation compares a BCP 47 language
// priority list requestedLocales against the locales in
// availableLocales and determines the best available language to
@@ -355,22 +159,10 @@ class Intl {
Isolate* isolate, Handle<Object> locales,
bool only_return_one_result = false);
- // ecma-402/#sec-currencydigits
- // The currency is expected to an all upper case string value.
- static Handle<Smi> CurrencyDigits(Isolate* isolate, Handle<String> currency);
-
- // TODO(ftang): Remove this and use ICU to the conversion in the future
- static void ParseExtension(Isolate* isolate, const std::string& extension,
- std::map<std::string, std::string>& out);
-
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> CreateNumberFormat(
Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
Handle<JSObject> resolved);
- // ecma402/#sec-iswellformedcurrencycode
- static bool IsWellFormedCurrencyCode(Isolate* isolate,
- Handle<String> currency);
-
// For locale sensitive functions
V8_WARN_UNUSED_RESULT static MaybeHandle<String> StringLocaleConvertCase(
Isolate* isolate, Handle<String> s, bool is_upper,
@@ -389,19 +181,6 @@ class Intl {
Isolate* isolate, Handle<Object> num, Handle<Object> locales,
Handle<Object> options);
- // ecma402/#sec-defaultnumberoption
- V8_WARN_UNUSED_RESULT static Maybe<int> DefaultNumberOption(
- Isolate* isolate, Handle<Object> value, int min, int max, int fallback,
- Handle<String> property);
-
- // ecma402/#sec-getnumberoption
- V8_WARN_UNUSED_RESULT static Maybe<int> GetNumberOption(
- Isolate* isolate, Handle<JSReceiver> options, Handle<String> property,
- int min, int max, int fallback);
- V8_WARN_UNUSED_RESULT static Maybe<int> GetNumberOption(
- Isolate* isolate, Handle<JSReceiver> options, const char* property,
- int min, int max, int fallback);
-
// ecma402/#sec-setnfdigitoptions
V8_WARN_UNUSED_RESULT static Maybe<bool> SetNumberFormatDigitOptions(
Isolate* isolate, icu::DecimalFormat* number_format,
@@ -434,6 +213,29 @@ class Intl {
Handle<String> field_type_string, Handle<String> value,
Handle<String> additional_property_name,
Handle<String> additional_property_value);
+
+  // A helper function for handling Unicode extensions in a locale.
+ static std::map<std::string, std::string> LookupUnicodeExtensions(
+ const icu::Locale& icu_locale, const std::set<std::string>& relevant_keys);
+
+ // In ECMA 402 v1, Intl constructors supported a mode of operation
+ // where calling them with an existing object as a receiver would
+ // transform the receiver into the relevant Intl instance with all
+ // internal slots. In ECMA 402 v2, this capability was removed, to
+ // avoid adding internal slots on existing objects. In ECMA 402 v3,
+ // the capability was re-added as "normative optional", in a mode
+ // that chains the underlying Intl instance onto any object that the
+ // constructor is called on.
+ //
+ // See ecma402/#legacy-constructor.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> LegacyUnwrapReceiver(
+ Isolate* isolate, Handle<JSReceiver> receiver,
+ Handle<JSFunction> constructor, bool has_initialized_slot);
+
+ // A factory method to get cached objects.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> CachedOrNewService(
+ Isolate* isolate, Handle<String> service, Handle<Object> locales,
+ Handle<Object> options, Handle<Object> internal_options);
};
} // namespace internal
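
Note: the new Intl::LookupUnicodeExtensions helper returns a plain std::map keyed by BCP 47 extension key. A minimal consumption sketch, mirroring how the JSCollator::Initialize hunk further below uses it (icu_locale is assumed to be in scope; the keys and values here are illustrative only):

    std::set<std::string> relevant_keys = {"co", "kn", "kf"};
    std::map<std::string, std::string> extensions =
        Intl::LookupUnicodeExtensions(icu_locale, relevant_keys);
    auto co_it = extensions.find("co");
    if (co_it != extensions.end()) {
      const std::string& collation = co_it->second;  // e.g. "phonebk"
      // ... apply the requested collation ...
    }
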
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 43bc294d04..7bc01a8b8a 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -20,6 +20,14 @@ CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
CAST_ACCESSOR(JSTypedArray)
+size_t JSArrayBuffer::byte_length() const {
+ return READ_UINTPTR_FIELD(this, kByteLengthOffset);
+}
+
+void JSArrayBuffer::set_byte_length(size_t value) {
+ WRITE_UINTPTR_FIELD(this, kByteLengthOffset, value);
+}
+
void* JSArrayBuffer::backing_store() const {
intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset);
return reinterpret_cast<void*>(ptr);
@@ -30,8 +38,6 @@ void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
WRITE_INTPTR_FIELD(this, kBackingStoreOffset, ptr);
}
-ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)
-
size_t JSArrayBuffer::allocation_length() const {
if (backing_store() == nullptr) {
return 0;
@@ -44,7 +50,7 @@ size_t JSArrayBuffer::allocation_length() const {
DCHECK_NOT_NULL(data);
return data->allocation_length;
}
- return byte_length()->Number();
+ return byte_length();
}
void* JSArrayBuffer::allocation_base() const {
@@ -63,13 +69,17 @@ void* JSArrayBuffer::allocation_base() const {
}
bool JSArrayBuffer::is_wasm_memory() const {
- bool const is_wasm_memory = IsWasmMemory::decode(bit_field());
+ bool const is_wasm_memory = IsWasmMemoryBit::decode(bit_field());
DCHECK_EQ(is_wasm_memory,
GetIsolate()->wasm_engine()->memory_tracker()->IsWasmMemory(
backing_store()));
return is_wasm_memory;
}
+void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
+ set_bit_field(IsWasmMemoryBit::update(bit_field(), is_wasm_memory));
+}
+
void JSArrayBuffer::set_bit_field(uint32_t bits) {
if (kInt32Size != kPointerSize) {
#if V8_TARGET_LITTLE_ENDIAN
@@ -85,76 +95,46 @@ uint32_t JSArrayBuffer::bit_field() const {
return READ_UINT32_FIELD(this, kBitFieldOffset);
}
-bool JSArrayBuffer::is_external() { return IsExternal::decode(bit_field()); }
-
-void JSArrayBuffer::set_is_external(bool value) {
- set_bit_field(IsExternal::update(bit_field(), value));
-}
-
-bool JSArrayBuffer::is_neuterable() {
- return IsNeuterable::decode(bit_field());
-}
-
-void JSArrayBuffer::set_is_neuterable(bool value) {
- set_bit_field(IsNeuterable::update(bit_field(), value));
-}
-
-bool JSArrayBuffer::was_neutered() { return WasNeutered::decode(bit_field()); }
-
-void JSArrayBuffer::set_was_neutered(bool value) {
- set_bit_field(WasNeutered::update(bit_field(), value));
-}
-
-bool JSArrayBuffer::is_shared() { return IsShared::decode(bit_field()); }
-
-void JSArrayBuffer::set_is_shared(bool value) {
- set_bit_field(IsShared::update(bit_field(), value));
-}
-
-bool JSArrayBuffer::is_growable() { return IsGrowable::decode(bit_field()); }
-
-void JSArrayBuffer::set_is_growable(bool value) {
- set_bit_field(IsGrowable::update(bit_field(), value));
-}
+// |bit_field| fields.
+BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_external,
+ JSArrayBuffer::IsExternalBit)
+BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_neuterable,
+ JSArrayBuffer::IsNeuterableBit)
+BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, was_neutered,
+ JSArrayBuffer::WasNeuteredBit)
+BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
+ JSArrayBuffer::IsSharedBit)
+BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_growable,
+ JSArrayBuffer::IsGrowableBit)
-Object* JSArrayBufferView::byte_offset() const {
- if (WasNeutered()) return Smi::kZero;
- return Object::cast(READ_FIELD(this, kByteOffsetOffset));
+size_t JSArrayBufferView::byte_offset() const {
+ return READ_UINTPTR_FIELD(this, kByteOffsetOffset);
}
-void JSArrayBufferView::set_byte_offset(Object* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kByteOffsetOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kByteOffsetOffset, value, mode);
+void JSArrayBufferView::set_byte_offset(size_t value) {
+ WRITE_UINTPTR_FIELD(this, kByteOffsetOffset, value);
}
-Object* JSArrayBufferView::byte_length() const {
- if (WasNeutered()) return Smi::kZero;
- return Object::cast(READ_FIELD(this, kByteLengthOffset));
+size_t JSArrayBufferView::byte_length() const {
+ return READ_UINTPTR_FIELD(this, kByteLengthOffset);
}
-void JSArrayBufferView::set_byte_length(Object* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kByteLengthOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kByteLengthOffset, value, mode);
+void JSArrayBufferView::set_byte_length(size_t value) {
+ WRITE_UINTPTR_FIELD(this, kByteLengthOffset, value);
}
ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
-#ifdef VERIFY_HEAP
-ACCESSORS(JSArrayBufferView, raw_byte_offset, Object, kByteOffsetOffset)
-ACCESSORS(JSArrayBufferView, raw_byte_length, Object, kByteLengthOffset)
-#endif
bool JSArrayBufferView::WasNeutered() const {
return JSArrayBuffer::cast(buffer())->was_neutered();
}
Object* JSTypedArray::length() const {
- if (WasNeutered()) return Smi::kZero;
return Object::cast(READ_FIELD(this, kLengthOffset));
}
size_t JSTypedArray::length_value() const {
- if (WasNeutered()) return 0;
- double val = Object::cast(READ_FIELD(this, kLengthOffset))->Number();
+ double val = length()->Number();
DCHECK_LE(val, kMaxSafeInteger); // 2^53-1
DCHECK_GE(val, -kMaxSafeInteger); // -2^53+1
DCHECK_LE(val, std::numeric_limits<size_t>::max());
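
Note: the BIT_FIELD_ACCESSORS lines above expand into decode/update calls on a BitField template. A self-contained sketch of that mechanism for single-bit boolean fields (this simplified template is illustrative, not V8's actual src/utils.h definition):

    #include <cstdint>

    template <int kShift>
    struct BoolBit {
      static constexpr uint32_t kMask = uint32_t{1} << kShift;
      static bool decode(uint32_t bits) { return (bits & kMask) != 0; }
      static uint32_t update(uint32_t bits, bool value) {
        return value ? (bits | kMask) : (bits & ~kMask);
      }
    };

    using IsExternalBit = BoolBit<0>;    // first V(...) entry
    using IsNeuterableBit = BoolBit<1>;  // second V(...) entry

    int main() {
      uint32_t bit_field = 0;
      bit_field = IsExternalBit::update(bit_field, true);
      return IsExternalBit::decode(bit_field) ? 0 : 1;  // returns 0
    }
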
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index 5ff7828ead..36950f9de6 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -41,7 +41,7 @@ void JSArrayBuffer::Neuter() {
CHECK(!was_neutered());
CHECK(is_external());
set_backing_store(nullptr);
- set_byte_length(Smi::kZero);
+ set_byte_length(0);
set_was_neutered(true);
set_is_neuterable(false);
// Invalidate the neutering protector.
@@ -51,13 +51,6 @@ void JSArrayBuffer::Neuter() {
}
}
-void JSArrayBuffer::StopTrackingWasmMemory(Isolate* isolate) {
- DCHECK(is_wasm_memory());
- isolate->wasm_engine()->memory_tracker()->ReleaseAllocation(isolate,
- backing_store());
- set_is_wasm_memory(false);
-}
-
void JSArrayBuffer::FreeBackingStoreFromMainThread() {
if (allocation_base() == nullptr) {
return;
@@ -76,7 +69,8 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
isolate->wasm_engine()->memory_tracker();
if (!memory_tracker->FreeMemoryIfIsWasmMemory(isolate,
allocation.backing_store)) {
- CHECK(FreePages(allocation.allocation_base, allocation.length));
+ CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
+ allocation.length));
}
} else {
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
@@ -84,28 +78,21 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
}
}
-void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
- set_bit_field(IsWasmMemory::update(bit_field(), is_wasm_memory));
-}
-
void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
bool is_external, void* data, size_t byte_length,
- SharedFlag shared, bool is_wasm_memory) {
+ SharedFlag shared_flag, bool is_wasm_memory) {
DCHECK_EQ(array_buffer->GetEmbedderFieldCount(),
v8::ArrayBuffer::kEmbedderFieldCount);
+ DCHECK_LE(byte_length, JSArrayBuffer::kMaxByteLength);
for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
array_buffer->SetEmbedderField(i, Smi::kZero);
}
+ array_buffer->set_byte_length(byte_length);
array_buffer->set_bit_field(0);
array_buffer->set_is_external(is_external);
- array_buffer->set_is_neuterable(shared == SharedFlag::kNotShared);
- array_buffer->set_is_shared(shared == SharedFlag::kShared);
+ array_buffer->set_is_neuterable(shared_flag == SharedFlag::kNotShared);
+ array_buffer->set_is_shared(shared_flag == SharedFlag::kShared);
array_buffer->set_is_wasm_memory(is_wasm_memory);
-
- Handle<Object> heap_byte_length =
- isolate->factory()->NewNumberFromSize(byte_length);
- CHECK(heap_byte_length->IsSmi() || heap_byte_length->IsHeapNumber());
- array_buffer->set_byte_length(*heap_byte_length);
// Initialize backing store at last to avoid handling of |JSArrayBuffers| that
// are currently being constructed in the |ArrayBufferTracker|. The
// registration method below handles the case of registering a buffer that has
@@ -120,14 +107,15 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
Isolate* isolate,
size_t allocated_length,
- bool initialize, SharedFlag shared) {
+ bool initialize,
+ SharedFlag shared_flag) {
void* data;
CHECK_NOT_NULL(isolate->array_buffer_allocator());
if (allocated_length != 0) {
if (allocated_length >= MB)
isolate->counters()->array_buffer_big_allocations()->AddSample(
ConvertToMb(allocated_length));
- if (shared == SharedFlag::kShared)
+ if (shared_flag == SharedFlag::kShared)
isolate->counters()->shared_array_allocations()->AddSample(
ConvertToMb(allocated_length));
if (initialize) {
@@ -147,7 +135,7 @@ bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
const bool is_external = false;
JSArrayBuffer::Setup(array_buffer, isolate, is_external, data,
- allocated_length, shared);
+ allocated_length, shared_flag);
return true;
}
@@ -175,9 +163,8 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
"JSTypedArray::MaterializeArrayBuffer");
}
buffer->set_is_external(false);
- DCHECK(buffer->byte_length()->IsSmi() ||
- buffer->byte_length()->IsHeapNumber());
- DCHECK(NumberToInt32(buffer->byte_length()) == fixed_typed_array->DataSize());
+ DCHECK_EQ(buffer->byte_length(),
+ static_cast<uintptr_t>(fixed_typed_array->DataSize()));
// Initialize backing store at last to avoid handling of |JSArrayBuffers| that
// are currently being constructed in the |ArrayBufferTracker|. The
// registration method below handles the case of registering a buffer that has
@@ -234,9 +221,9 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
}
// 3b iv. Let length be O.[[ArrayLength]].
- uint32_t length = o->length()->Number();
+ size_t length = o->length_value();
// 3b v. If numericIndex ≥ length, return false.
- if (index >= length) {
+ if (o->WasNeutered() || index >= length) {
RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
}
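
Note: with byte_length now a raw size_t, the detach path above leaves a simple invariant. A hedged sketch of that invariant (the test harness function is hypothetical; the accessors are the ones from this diff):

    void DetachForTesting(Handle<JSArrayBuffer> buffer) {
      buffer->Neuter();
      DCHECK(buffer->was_neutered());
      DCHECK_EQ(buffer->byte_length(), 0u);  // plain size_t, no Smi/HeapNumber
      DCHECK_NULL(buffer->backing_store());
    }
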
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 109aacbc47..3f0dd064fa 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_
#define V8_OBJECTS_JS_ARRAY_BUFFER_H_
-#include "src/objects.h"
+#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,12 +14,22 @@ namespace v8 {
namespace internal {
// Whether a JSArrayBuffer is a SharedArrayBuffer or not.
-enum class SharedFlag { kNotShared, kShared };
+enum class SharedFlag : uint32_t { kNotShared, kShared };
class JSArrayBuffer : public JSObject {
public:
+// The maximum length for JSArrayBuffer's supported by V8.
+// On 32-bit architectures we limit this to 2GiB, so that
+// we can continue to use CheckBounds with the Unsigned31
+// restriction for the length.
+#if V8_HOST_ARCH_32_BIT
+ static constexpr size_t kMaxByteLength = kMaxInt;
+#else
+ static constexpr size_t kMaxByteLength = kMaxSafeInteger;
+#endif
+
// [byte_length]: length in bytes
- DECL_ACCESSORS(byte_length, Object)
+ DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
// [backing_store]: backing memory for this array
DECL_ACCESSORS(backing_store, void)
@@ -29,26 +39,39 @@ class JSArrayBuffer : public JSObject {
inline size_t allocation_length() const;
inline void* allocation_base() const;
- inline uint32_t bit_field() const;
- inline void set_bit_field(uint32_t bits);
+ // [bit_field]: boolean flags
+ DECL_PRIMITIVE_ACCESSORS(bit_field, uint32_t)
+
+// Bit positions for [bit_field].
+#define JS_ARRAY_BUFFER_BIT_FIELD_FIELDS(V, _) \
+ V(IsExternalBit, bool, 1, _) \
+ V(IsNeuterableBit, bool, 1, _) \
+ V(WasNeuteredBit, bool, 1, _) \
+ V(IsSharedBit, bool, 1, _) \
+ V(IsGrowableBit, bool, 1, _) \
+ V(IsWasmMemoryBit, bool, 1, _)
+ DEFINE_BIT_FIELDS(JS_ARRAY_BUFFER_BIT_FIELD_FIELDS)
+#undef JS_ARRAY_BUFFER_BIT_FIELD_FIELDS
// [is_external]: true indicates that the embedder is in charge of freeing the
// backing_store, while is_external == false means that v8 will free the
// memory block once all ArrayBuffers referencing it are collected by the GC.
- inline bool is_external();
- inline void set_is_external(bool value);
+ DECL_BOOLEAN_ACCESSORS(is_external)
+
+ // [is_neuterable]: false indicates that this buffer cannot be detached.
+ DECL_BOOLEAN_ACCESSORS(is_neuterable)
- inline bool is_neuterable();
- inline void set_is_neuterable(bool value);
+ // [was_neutered]: true if the buffer was previously detached.
+ DECL_BOOLEAN_ACCESSORS(was_neutered)
- inline bool was_neutered();
- inline void set_was_neutered(bool value);
+ // [is_shared]: tells whether this is an ArrayBuffer or a SharedArrayBuffer.
+ DECL_BOOLEAN_ACCESSORS(is_shared)
- inline bool is_shared();
- inline void set_is_shared(bool value);
+ // [is_growable]: indicates whether it's possible to grow this buffer.
+ DECL_BOOLEAN_ACCESSORS(is_growable)
- inline bool is_growable();
- inline void set_is_growable(bool value);
+ // [is_wasm_memory]: whether the buffer is tracked by the WasmMemoryTracker.
+ DECL_BOOLEAN_ACCESSORS(is_wasm_memory)
DECL_CAST(JSArrayBuffer)
@@ -68,39 +91,30 @@ class JSArrayBuffer : public JSObject {
bool is_wasm_memory;
};
- // Returns whether the buffer is tracked by the WasmMemoryTracker.
- inline bool is_wasm_memory() const;
-
- // Sets whether the buffer is tracked by the WasmMemoryTracker.
- void set_is_wasm_memory(bool is_wasm_memory);
-
- // Removes the backing store from the WasmMemoryTracker and sets
- // |is_wasm_memory| to false.
- void StopTrackingWasmMemory(Isolate* isolate);
-
void FreeBackingStoreFromMainThread();
static void FreeBackingStore(Isolate* isolate, Allocation allocation);
V8_EXPORT_PRIVATE static void Setup(
Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
void* data, size_t allocated_length,
- SharedFlag shared = SharedFlag::kNotShared, bool is_wasm_memory = false);
+ SharedFlag shared_flag = SharedFlag::kNotShared,
+ bool is_wasm_memory = false);
// Returns false if array buffer contents could not be allocated.
// In this case, |array_buffer| will not be set up.
static bool SetupAllocatingData(
Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
size_t allocated_length, bool initialize = true,
- SharedFlag shared = SharedFlag::kNotShared) V8_WARN_UNUSED_RESULT;
+ SharedFlag shared_flag = SharedFlag::kNotShared) V8_WARN_UNUSED_RESULT;
// Dispatched behavior.
DECL_PRINTER(JSArrayBuffer)
DECL_VERIFIER(JSArrayBuffer)
- static const int kByteLengthOffset = JSObject::kHeaderSize;
- // The rest of the fields are not JSObjects, so they are not iterated over in
+ // The fields are not pointers into our heap, so they are not iterated over in
// objects-body-descriptors-inl.h.
- static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
+ static const int kByteLengthOffset = JSObject::kHeaderSize;
+ static const int kBackingStoreOffset = kByteLengthOffset + kUIntptrSize;
static const int kBitFieldSlot = kBackingStoreOffset + kPointerSize;
#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
static const int kBitFieldOffset = kBitFieldSlot;
@@ -115,15 +129,6 @@ class JSArrayBuffer : public JSObject {
// Iterates all fields in the object including internal ones except
// kBackingStoreOffset and kBitFieldSlot.
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- class IsExternal : public BitField<bool, 1, 1> {};
- class IsNeuterable : public BitField<bool, 2, 1> {};
- class WasNeutered : public BitField<bool, 3, 1> {};
- class IsShared : public BitField<bool, 4, 1> {};
- class IsGrowable : public BitField<bool, 5, 1> {};
- class IsWasmMemory : public BitField<bool, 6, 1> {};
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
@@ -135,10 +140,10 @@ class JSArrayBufferView : public JSObject {
DECL_ACCESSORS(buffer, Object)
// [byte_offset]: offset of typed array in bytes.
- DECL_ACCESSORS(byte_offset, Object)
+ DECL_PRIMITIVE_ACCESSORS(byte_offset, size_t)
// [byte_length]: length of typed array in bytes.
- DECL_ACCESSORS(byte_length, Object)
+ DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
DECL_CAST(JSArrayBufferView)
@@ -148,15 +153,14 @@ class JSArrayBufferView : public JSObject {
static const int kBufferOffset = JSObject::kHeaderSize;
static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
- static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
- static const int kViewSize = kByteLengthOffset + kPointerSize;
+ static const int kByteLengthOffset = kByteOffsetOffset + kUIntptrSize;
+ static const int kHeaderSize = kByteLengthOffset + kUIntptrSize;
- private:
-#ifdef VERIFY_HEAP
- DECL_ACCESSORS(raw_byte_offset, Object)
- DECL_ACCESSORS(raw_byte_length, Object)
-#endif
+ // Iterates all fields in the object including internal ones except
+ // kByteOffset and kByteLengthOffset.
+ class BodyDescriptor;
+ private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBufferView);
};
@@ -189,9 +193,8 @@ class JSTypedArray : public JSArrayBufferView {
DECL_PRINTER(JSTypedArray)
DECL_VERIFIER(JSTypedArray)
- static const int kLengthOffset = kViewSize;
+ static const int kLengthOffset = JSArrayBufferView::kHeaderSize;
static const int kSize = kLengthOffset + kPointerSize;
-
static const int kSizeWithEmbedderFields =
kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
@@ -213,8 +216,7 @@ class JSDataView : public JSArrayBufferView {
DECL_PRINTER(JSDataView)
DECL_VERIFIER(JSDataView)
- static const int kSize = kViewSize;
-
+ static const int kSize = JSArrayBufferView::kHeaderSize;
static const int kSizeWithEmbedderFields =
kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
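
Note: the reshuffled offsets above are straight cumulative sums. A back-of-envelope check, assuming a 64-bit target where kUIntptrSize == kPointerSize == 8 (the header size constant below is a stand-in, not V8's real value):

    #include <cstdint>

    constexpr int kHeaderSize = 24;                  // stand-in for JSObject::kHeaderSize
    constexpr int kUIntptrSize = sizeof(uintptr_t);  // 8 on 64-bit
    constexpr int kPointerSize = sizeof(void*);      // 8 on 64-bit

    constexpr int kByteLengthOffset = kHeaderSize;
    constexpr int kBackingStoreOffset = kByteLengthOffset + kUIntptrSize;
    constexpr int kBitFieldSlot = kBackingStoreOffset + kPointerSize;

    static_assert(kBitFieldSlot == kHeaderSize + 16,
                  "byte_length and backing_store each occupy one word");
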
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index b212848ce7..3a9fe48d24 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -5,8 +5,9 @@
#ifndef V8_OBJECTS_JS_ARRAY_H_
#define V8_OBJECTS_JS_ARRAY_H_
-#include "src/objects.h"
+#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
new file mode 100644
index 0000000000..16f8111953
--- /dev/null
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -0,0 +1,49 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_BREAK_ITERATOR_INL_H_
+#define V8_OBJECTS_JS_BREAK_ITERATOR_INL_H_
+
+#include "src/objects-inl.h"
+#include "src/objects/js-break-iterator.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+inline void JSV8BreakIterator::set_type(Type type) {
+ DCHECK_GT(JSV8BreakIterator::Type::COUNT, type);
+ WRITE_FIELD(this, kTypeOffset, Smi::FromInt(static_cast<int>(type)));
+}
+
+inline JSV8BreakIterator::Type JSV8BreakIterator::type() const {
+ Object* value = READ_FIELD(this, kTypeOffset);
+ return static_cast<JSV8BreakIterator::Type>(Smi::ToInt(value));
+}
+
+ACCESSORS(JSV8BreakIterator, locale, String, kLocaleOffset)
+ACCESSORS(JSV8BreakIterator, break_iterator, Managed<icu::BreakIterator>,
+ kBreakIteratorOffset)
+ACCESSORS(JSV8BreakIterator, unicode_string, Managed<icu::UnicodeString>,
+ kUnicodeStringOffset)
+ACCESSORS(JSV8BreakIterator, bound_adopt_text, Object, kBoundAdoptTextOffset)
+ACCESSORS(JSV8BreakIterator, bound_first, Object, kBoundFirstOffset)
+ACCESSORS(JSV8BreakIterator, bound_next, Object, kBoundNextOffset)
+ACCESSORS(JSV8BreakIterator, bound_current, Object, kBoundCurrentOffset)
+ACCESSORS(JSV8BreakIterator, bound_break_type, Object, kBoundBreakTypeOffset)
+
+CAST_ACCESSOR(JSV8BreakIterator)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_BREAK_ITERATOR_INL_H_
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
new file mode 100644
index 0000000000..2031c2cc5b
--- /dev/null
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -0,0 +1,170 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/objects/js-break-iterator.h"
+
+#include "src/objects/intl-objects-inl.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/js-break-iterator-inl.h"
+#include "unicode/brkiter.h"
+
+namespace v8 {
+namespace internal {
+
+JSV8BreakIterator::Type JSV8BreakIterator::getType(const char* str) {
+ if (strcmp(str, "character") == 0) return Type::CHARACTER;
+ if (strcmp(str, "word") == 0) return Type::WORD;
+ if (strcmp(str, "sentence") == 0) return Type::SENTENCE;
+ if (strcmp(str, "line") == 0) return Type::LINE;
+ UNREACHABLE();
+}
+
+MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::Initialize(
+ Isolate* isolate, Handle<JSV8BreakIterator> break_iterator_holder,
+ Handle<Object> locales, Handle<Object> options_obj) {
+ Factory* factory = isolate->factory();
+
+ Handle<JSReceiver> options;
+ if (options_obj->IsUndefined(isolate)) {
+ options = factory->NewJSObjectWithNullProto();
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options,
+ Object::ToObject(isolate, options_obj, "Intl.JSV8BreakIterator"),
+ JSV8BreakIterator);
+ }
+
+ // Extract locale string
+ Handle<JSObject> r;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, r,
+ Intl::ResolveLocale(isolate, "breakiterator", locales, options),
+ JSV8BreakIterator);
+ Handle<Object> locale_obj =
+ JSObject::GetDataProperty(r, factory->locale_string());
+ CHECK(locale_obj->IsString());
+ Handle<String> locale = Handle<String>::cast(locale_obj);
+
+ // Extract type from options
+ std::unique_ptr<char[]> type_str = nullptr;
+ std::vector<const char*> type_values = {"character", "word", "sentence",
+ "line"};
+ Maybe<bool> maybe_found_type = Intl::GetStringOption(
+ isolate, options, "type", type_values, "Intl.v8BreakIterator", &type_str);
+ Type type_enum = Type::WORD;
+ MAYBE_RETURN(maybe_found_type, MaybeHandle<JSV8BreakIterator>());
+ if (maybe_found_type.FromJust()) {
+ DCHECK_NOT_NULL(type_str.get());
+ type_enum = getType(type_str.get());
+ }
+
+ // Construct icu_locale using the locale string
+ icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
+ DCHECK(!icu_locale.isBogus());
+
+ // Construct break_iterator using icu_locale and type
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::BreakIterator> break_iterator = nullptr;
+ switch (type_enum) {
+ case Type::CHARACTER:
+ break_iterator.reset(
+ icu::BreakIterator::createCharacterInstance(icu_locale, status));
+ break;
+ case Type::SENTENCE:
+ break_iterator.reset(
+ icu::BreakIterator::createSentenceInstance(icu_locale, status));
+ break;
+ case Type::LINE:
+ break_iterator.reset(
+ icu::BreakIterator::createLineInstance(icu_locale, status));
+ break;
+ default:
+ break_iterator.reset(
+ icu::BreakIterator::createWordInstance(icu_locale, status));
+ break;
+ }
+
+ // Error handling for break_iterator
+ if (U_FAILURE(status)) {
+ FATAL("Failed to create ICU break iterator, are ICU data files missing?");
+ }
+ CHECK_NOT_NULL(break_iterator.get());
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kBreakIterator);
+
+ // Construct managed objects from pointers
+ Handle<Managed<icu::BreakIterator>> managed_break_iterator =
+ Managed<icu::BreakIterator>::FromUniquePtr(isolate, 0,
+ std::move(break_iterator));
+ Handle<Managed<icu::UnicodeString>> managed_unicode_string =
+ Managed<icu::UnicodeString>::FromRawPtr(isolate, 0, nullptr);
+
+ // Setting fields
+ break_iterator_holder->set_locale(*locale);
+ break_iterator_holder->set_type(type_enum);
+ break_iterator_holder->set_break_iterator(*managed_break_iterator);
+ break_iterator_holder->set_unicode_string(*managed_unicode_string);
+
+ // Return break_iterator_holder
+ return break_iterator_holder;
+}
+
+Handle<JSObject> JSV8BreakIterator::ResolvedOptions(
+ Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
+ Factory* factory = isolate->factory();
+
+ Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+ Handle<String> locale(break_iterator->locale(), isolate);
+
+ JSObject::AddProperty(isolate, result, factory->locale_string(), locale,
+ NONE);
+ JSObject::AddProperty(isolate, result, factory->type_string(),
+ break_iterator->TypeAsString(), NONE);
+ return result;
+}
+
+void JSV8BreakIterator::AdoptText(
+ Isolate* isolate, Handle<JSV8BreakIterator> break_iterator_holder,
+ Handle<String> text) {
+ icu::UnicodeString* u_text;
+ int length = text->length();
+ text = String::Flatten(isolate, text);
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = text->GetFlatContent();
+ std::unique_ptr<uc16[]> sap;
+ const UChar* text_value = GetUCharBufferFromFlat(flat, &sap, length);
+ u_text = new icu::UnicodeString(text_value, length);
+ }
+
+ Handle<Managed<icu::UnicodeString>> new_u_text =
+ Managed<icu::UnicodeString>::FromRawPtr(isolate, 0, u_text);
+ break_iterator_holder->set_unicode_string(*new_u_text);
+
+ icu::BreakIterator* break_iterator =
+ break_iterator_holder->break_iterator()->raw();
+ CHECK_NOT_NULL(break_iterator);
+ break_iterator->setText(*u_text);
+}
+
+Handle<String> JSV8BreakIterator::TypeAsString() const {
+ switch (type()) {
+ case Type::CHARACTER:
+ return GetReadOnlyRoots().character_string_handle();
+ case Type::WORD:
+ return GetReadOnlyRoots().word_string_handle();
+ case Type::SENTENCE:
+ return GetReadOnlyRoots().sentence_string_handle();
+ case Type::LINE:
+ return GetReadOnlyRoots().line_string_handle();
+ case Type::COUNT:
+ UNREACHABLE();
+ }
+}
+
+} // namespace internal
+} // namespace v8
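
Note: for context, this is the raw ICU API that JSV8BreakIterator wraps, shown in isolation and independent of V8's Managed<> wrappers (the locale and text are arbitrary; error handling is minimal):

    #include <unicode/brkiter.h>
    #include <unicode/locid.h>
    #include <unicode/unistr.h>

    #include <memory>

    void WalkWordBoundaries() {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::BreakIterator> it(
          icu::BreakIterator::createWordInstance(icu::Locale::getUS(), status));
      if (U_FAILURE(status)) return;
      icu::UnicodeString text(u"Hello V8 world");
      it->setText(text);
      for (int32_t pos = it->first(); pos != icu::BreakIterator::DONE;
           pos = it->next()) {
        // |pos| is the next boundary offset, in UTF-16 code units.
      }
    }
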
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
new file mode 100644
index 0000000000..d5847bdaf6
--- /dev/null
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -0,0 +1,87 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_BREAK_ITERATOR_H_
+#define V8_OBJECTS_JS_BREAK_ITERATOR_H_
+
+#include "src/objects.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/managed.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace U_ICU_NAMESPACE {
+class BreakIterator;
+} // namespace U_ICU_NAMESPACE
+
+namespace v8 {
+namespace internal {
+
+class JSV8BreakIterator : public JSObject {
+ public:
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSV8BreakIterator> Initialize(
+ Isolate* isolate, Handle<JSV8BreakIterator> break_iterator_holder,
+ Handle<Object> input_locales, Handle<Object> input_options);
+
+ static Handle<JSObject> ResolvedOptions(
+ Isolate* isolate, Handle<JSV8BreakIterator> break_iterator);
+
+ static void AdoptText(Isolate* isolate,
+ Handle<JSV8BreakIterator> break_iterator_holder,
+ Handle<String> text);
+
+ enum class Type { CHARACTER, WORD, SENTENCE, LINE, COUNT };
+ inline void set_type(Type type);
+ inline Type type() const;
+
+ Handle<String> TypeAsString() const;
+
+ DECL_CAST(JSV8BreakIterator)
+ DECL_PRINTER(JSV8BreakIterator)
+ DECL_VERIFIER(JSV8BreakIterator)
+
+ DECL_ACCESSORS(locale, String)
+ DECL_ACCESSORS(break_iterator, Managed<icu::BreakIterator>)
+ DECL_ACCESSORS(unicode_string, Managed<icu::UnicodeString>)
+ DECL_ACCESSORS(bound_adopt_text, Object)
+ DECL_ACCESSORS(bound_first, Object)
+ DECL_ACCESSORS(bound_next, Object)
+ DECL_ACCESSORS(bound_current, Object)
+ DECL_ACCESSORS(bound_break_type, Object)
+
+// Layout description.
+#define BREAK_ITERATOR_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kLocaleOffset, kPointerSize) \
+ V(kTypeOffset, kPointerSize) \
+ V(kBreakIteratorOffset, kPointerSize) \
+ V(kUnicodeStringOffset, kPointerSize) \
+ V(kBoundAdoptTextOffset, kPointerSize) \
+ V(kBoundFirstOffset, kPointerSize) \
+ V(kBoundNextOffset, kPointerSize) \
+ V(kBoundCurrentOffset, kPointerSize) \
+ V(kBoundBreakTypeOffset, kPointerSize) \
+ /* Total Size */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, BREAK_ITERATOR_FIELDS)
+#undef BREAK_ITERATOR_FIELDS
+
+ private:
+ static Type getType(const char* str);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSV8BreakIterator)
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_BREAK_ITERATOR_H_
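
Note: DEFINE_FIELD_OFFSET_CONSTANTS turns the BREAK_ITERATOR_FIELDS list into an enum of cumulative offsets, roughly as sketched below (values assume kPointerSize == 8 and a hypothetical header size, for illustration only):

    constexpr int kHeaderSize = 24;  // stand-in for JSObject::kHeaderSize
    constexpr int kPointerSize = 8;

    enum {
      kLocaleOffset = kHeaderSize,
      kTypeOffset = kLocaleOffset + kPointerSize,
      kBreakIteratorOffset = kTypeOffset + kPointerSize,
      // ... one constant per V(...) entry ...
      kSize = kHeaderSize + 9 * kPointerSize,  // nine pointer-sized fields
    };
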
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
index 279a8bfd49..1a94ac805c 100644
--- a/deps/v8/src/objects/js-collator-inl.h
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -20,18 +20,6 @@ namespace internal {
ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kICUCollatorOffset)
ACCESSORS(JSCollator, bound_compare, Object, kBoundCompareOffset);
-SMI_ACCESSORS(JSCollator, flags, kFlagsOffset)
-
-inline void JSCollator::set_usage(Usage usage) {
- DCHECK_LT(usage, Usage::COUNT);
- int hints = flags();
- hints = UsageBits::update(hints, usage);
- set_flags(hints);
-}
-
-inline JSCollator::Usage JSCollator::usage() const {
- return UsageBits::decode(flags());
-}
CAST_ACCESSOR(JSCollator);
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index c6cbecfb01..f62177b875 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -22,6 +22,11 @@ namespace internal {
namespace {
+enum class Usage {
+ SORT,
+ SEARCH,
+};
+
// TODO(gsathya): Consider internalizing the value strings.
void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
Handle<String> key, const char* value) {
@@ -47,6 +52,13 @@ void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
.FromJust());
}
+void toLanguageTag(const icu::Locale& locale, char* tag) {
+ UErrorCode status = U_ZERO_ERROR;
+ uloc_toLanguageTag(locale.getName(), tag, ULOC_FULLNAME_CAPACITY, FALSE,
+ &status);
+ CHECK(U_SUCCESS(status));
+}
+
} // anonymous namespace
// static
@@ -55,11 +67,6 @@ Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
Handle<JSObject> options =
isolate->factory()->NewJSObject(isolate->object_function());
- JSCollator::Usage usage = collator->usage();
- CreateDataPropertyForOptions(isolate, options,
- isolate->factory()->usage_string(),
- JSCollator::UsageToString(usage));
-
icu::Collator* icu_collator = collator->icu_collator()->raw();
CHECK_NOT_NULL(icu_collator);
@@ -128,97 +135,71 @@ Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
ignore_punctuation);
status = U_ZERO_ERROR;
- const char* collation;
- std::unique_ptr<icu::StringEnumeration> collation_values(
- icu_collator->getKeywordValues("co", status));
- // Collation wasn't provided as a keyword to icu, use default.
- if (status == U_ILLEGAL_ARGUMENT_ERROR) {
- CreateDataPropertyForOptions(
- isolate, options, isolate->factory()->collation_string(), "default");
- } else {
- CHECK(U_SUCCESS(status));
- CHECK_NOT_NULL(collation_values.get());
-
- int32_t length;
- status = U_ZERO_ERROR;
- collation = collation_values->next(&length, status);
- CHECK(U_SUCCESS(status));
-
- // There has to be at least one value.
- CHECK_NOT_NULL(collation);
- CreateDataPropertyForOptions(
- isolate, options, isolate->factory()->collation_string(), collation);
-
- status = U_ZERO_ERROR;
- collation_values->reset(status);
- CHECK(U_SUCCESS(status));
- }
-
- status = U_ZERO_ERROR;
- icu::Locale icu_locale = icu_collator->getLocale(ULOC_VALID_LOCALE, status);
- CHECK(U_SUCCESS(status));
- char result[ULOC_FULLNAME_CAPACITY];
- status = U_ZERO_ERROR;
- uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
- FALSE, &status);
+ icu::Locale icu_locale(icu_collator->getLocale(ULOC_VALID_LOCALE, status));
CHECK(U_SUCCESS(status));
- CreateDataPropertyForOptions(isolate, options,
- isolate->factory()->locale_string(), result);
-
- return options;
-}
-
-namespace {
-
-std::map<std::string, std::string> LookupUnicodeExtensions(
- const icu::Locale& icu_locale, const std::set<std::string>& relevant_keys) {
- std::map<std::string, std::string> extensions;
-
- UErrorCode status = U_ZERO_ERROR;
- std::unique_ptr<icu::StringEnumeration> keywords(
- icu_locale.createKeywords(status));
- if (U_FAILURE(status)) return extensions;
+ const char* collation = "default";
+ const char* usage = "sort";
+ const char* collation_key = "co";
+ const char* legacy_collation_key = uloc_toLegacyKey(collation_key);
+ DCHECK_NOT_NULL(legacy_collation_key);
- if (!keywords) return extensions;
- char value[ULOC_FULLNAME_CAPACITY];
-
- int32_t length;
+ char bcp47_locale_tag[ULOC_FULLNAME_CAPACITY];
+ char legacy_collation_value[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
- for (const char* keyword = keywords->next(&length, status);
- keyword != nullptr; keyword = keywords->next(&length, status)) {
- // Ignore failures in ICU and skip to the next keyword.
- //
- // This is fine.™
- if (U_FAILURE(status)) {
+ int32_t length =
+ icu_locale.getKeywordValue(legacy_collation_key, legacy_collation_value,
+ ULOC_FULLNAME_CAPACITY, status);
+
+ if (length > 0 && U_SUCCESS(status)) {
+ const char* collation_value =
+ uloc_toUnicodeLocaleType(collation_key, legacy_collation_value);
+ CHECK_NOT_NULL(collation_value);
+
+ if (strcmp(collation_value, "search") == 0) {
+ usage = "search";
+
+ // Search is disallowed as a collation value per spec. Let's
+ // use `default` instead.
+ //
+ // https://tc39.github.io/ecma402/#sec-properties-of-intl-collator-instances
+ collation = "default";
+
+ // We clone the icu::Locale because we don't want the
+ // icu_collator to be affected when we remove the collation key
+ // below.
+ icu::Locale new_icu_locale = icu_locale;
+
+ // The spec forbids "search" as a collation value in the
+ // locale tag, so let's filter it out.
status = U_ZERO_ERROR;
- continue;
- }
-
- icu_locale.getKeywordValue(keyword, value, ULOC_FULLNAME_CAPACITY, status);
+ new_icu_locale.setKeywordValue(legacy_collation_key, nullptr, status);
+ CHECK(U_SUCCESS(status));
- // Ignore failures in ICU and skip to the next keyword.
- //
- // This is fine.™
- if (U_FAILURE(status)) {
- status = U_ZERO_ERROR;
- continue;
+ toLanguageTag(new_icu_locale, bcp47_locale_tag);
+ } else {
+ collation = collation_value;
+ toLanguageTag(icu_locale, bcp47_locale_tag);
}
+ } else {
+ toLanguageTag(icu_locale, bcp47_locale_tag);
+ }
- const char* bcp47_key = uloc_toUnicodeLocaleKey(keyword);
+ CreateDataPropertyForOptions(
+ isolate, options, isolate->factory()->collation_string(), collation);
- // Ignore keywords that we don't recognize - spec allows that.
- if (bcp47_key && (relevant_keys.find(bcp47_key) != relevant_keys.end())) {
- const char* bcp47_value = uloc_toUnicodeLocaleType(bcp47_key, value);
- extensions.insert(
- std::pair<std::string, std::string>(bcp47_key, bcp47_value));
- }
- }
+ CreateDataPropertyForOptions(isolate, options,
+ isolate->factory()->usage_string(), usage);
+
+ CreateDataPropertyForOptions(
+ isolate, options, isolate->factory()->locale_string(), bcp47_locale_tag);
- return extensions;
+ return options;
}
+namespace {
+
void SetCaseFirstOption(icu::Collator* icu_collator, const char* value) {
CHECK_NOT_NULL(icu_collator);
CHECK_NOT_NULL(value);
@@ -236,9 +217,10 @@ void SetCaseFirstOption(icu::Collator* icu_collator, const char* value) {
} // anonymous namespace
// static
-MaybeHandle<JSCollator> JSCollator::InitializeCollator(
- Isolate* isolate, Handle<JSCollator> collator, Handle<Object> locales,
- Handle<Object> options_obj) {
+MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
+ Handle<JSCollator> collator,
+ Handle<Object> locales,
+ Handle<Object> options_obj) {
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Handle<JSObject> requested_locales;
ASSIGN_RETURN_ON_EXCEPTION(isolate, requested_locales,
@@ -264,7 +246,7 @@ MaybeHandle<JSCollator> JSCollator::InitializeCollator(
// "search" », "sort").
std::vector<const char*> values = {"sort", "search"};
std::unique_ptr<char[]> usage_str = nullptr;
- JSCollator::Usage usage = JSCollator::Usage::SORT;
+ Usage usage = Usage::SORT;
Maybe<bool> found_usage = Intl::GetStringOption(
isolate, options, "usage", values, "Intl.Collator", &usage_str);
MAYBE_RETURN(found_usage, MaybeHandle<JSCollator>());
@@ -272,21 +254,10 @@ MaybeHandle<JSCollator> JSCollator::InitializeCollator(
if (found_usage.FromJust()) {
DCHECK_NOT_NULL(usage_str.get());
if (strcmp(usage_str.get(), "search") == 0) {
- usage = JSCollator::Usage::SEARCH;
+ usage = Usage::SEARCH;
}
}
- // 5. Set collator.[[Usage]] to usage.
- collator->set_usage(usage);
-
- // 6. If usage is "sort", then
- // a. Let localeData be %Collator%.[[SortLocaleData]].
- // 7. Else,
- // a. Let localeData be %Collator%.[[SearchLocaleData]].
- //
- // The above two spec operations aren't required, the Intl spec is
- // crazy. See https://github.com/tc39/ecma402/issues/256
-
// TODO(gsathya): This is currently done as part of the
// Intl::ResolveLocale call below. Fix this once resolveLocale is
// changed to not do the lookup.
@@ -368,7 +339,7 @@ MaybeHandle<JSCollator> JSCollator::InitializeCollator(
DCHECK(!icu_locale.isBogus());
std::map<std::string, std::string> extensions =
- LookupUnicodeExtensions(icu_locale, relevant_extension_keys);
+ Intl::LookupUnicodeExtensions(icu_locale, relevant_extension_keys);
// 19. Let collation be r.[[co]].
//
@@ -386,11 +357,38 @@ MaybeHandle<JSCollator> JSCollator::InitializeCollator(
const std::string& value = co_extension_it->second;
if ((value == "search") || (value == "standard")) {
UErrorCode status = U_ZERO_ERROR;
- icu_locale.setKeywordValue("co", NULL, status);
+ const char* key = uloc_toLegacyKey("co");
+ icu_locale.setKeywordValue(key, nullptr, status);
CHECK(U_SUCCESS(status));
}
}
+ // 5. Set collator.[[Usage]] to usage.
+ //
+ // 6. If usage is "sort", then
+ // a. Let localeData be %Collator%.[[SortLocaleData]].
+ // 7. Else,
+ // a. Let localeData be %Collator%.[[SearchLocaleData]].
+ //
+ // The Intl spec doesn't allow us to use "search" as an extension
+ // value for collation as per:
+ // https://tc39.github.io/ecma402/#sec-intl-collator-internal-slots
+ //
+ // But the only way to pass the value "search" for collation from
+ // the options object to ICU is to use the 'co' extension keyword.
+ //
+ // This will need to be filtered out when creating the
+ // resolvedOptions object.
+ if (usage == Usage::SEARCH) {
+ const char* key = uloc_toLegacyKey("co");
+ CHECK_NOT_NULL(key);
+ const char* value = uloc_toLegacyType(key, "search");
+ CHECK_NOT_NULL(value);
+ UErrorCode status = U_ZERO_ERROR;
+ icu_locale.setKeywordValue(key, value, status);
+ CHECK(U_SUCCESS(status));
+ }
+
// 20. If collation is null, let collation be "default".
// 21. Set collator.[[Collation]] to collation.
//
@@ -525,17 +523,5 @@ MaybeHandle<JSCollator> JSCollator::InitializeCollator(
return collator;
}
-// static
-const char* JSCollator::UsageToString(Usage usage) {
- switch (usage) {
- case Usage::SORT:
- return "sort";
- case Usage::SEARCH:
- return "search";
- case Usage::COUNT:
- UNREACHABLE();
- }
-}
-
} // namespace internal
} // namespace v8
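
Note: the key translation used in the "search" path above, shown in isolation: uloc_toLegacyKey/uloc_toLegacyType map the BCP 47 extension key "co" onto ICU's legacy "collation" keyword (a minimal sketch; error handling elided):

    #include <unicode/locid.h>
    #include <unicode/uloc.h>

    void SetSearchCollation(icu::Locale* locale) {
      const char* key = uloc_toLegacyKey("co");  // -> "collation"
      const char* value = uloc_toLegacyType(key, "search");
      UErrorCode status = U_ZERO_ERROR;
      locale->setKeywordValue(key, value, status);  // e.g. de@collation=search
      // Callers are expected to check U_SUCCESS(status).
    }
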
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index b2751a446e..f857df95b1 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -18,13 +18,17 @@
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
+namespace U_ICU_NAMESPACE {
+class Collator;
+} // namespace U_ICU_NAMESPACE
+
namespace v8 {
namespace internal {
class JSCollator : public JSObject {
public:
// ecma402/#sec-initializecollator
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSCollator> InitializeCollator(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSCollator> Initialize(
Isolate* isolate, Handle<JSCollator> collator, Handle<Object> locales,
Handle<Object> options);
@@ -36,22 +40,9 @@ class JSCollator : public JSObject {
DECL_PRINTER(JSCollator)
DECL_VERIFIER(JSCollator)
- // [[Usage]] is one of the values "sort" or "search", identifying
- // the collator usage.
- enum class Usage {
- SORT,
- SEARCH,
-
- COUNT
- };
- inline void set_usage(Usage usage);
- inline Usage usage() const;
- static const char* UsageToString(Usage usage);
-
// Layout description.
#define JS_COLLATOR_FIELDS(V) \
V(kICUCollatorOffset, kPointerSize) \
- V(kFlagsOffset, kPointerSize) \
V(kBoundCompareOffset, kPointerSize) \
/* Total size. */ \
V(kSize, 0)
@@ -59,26 +50,8 @@ class JSCollator : public JSObject {
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_COLLATOR_FIELDS)
#undef JS_COLLATOR_FIELDS
- // ContextSlot defines the context structure for the bound
- // Collator.prototype.compare function.
- enum ContextSlot {
- // The collator instance that the function holding this context is bound to.
- kCollator = Context::MIN_CONTEXT_SLOTS,
- kLength
- };
-
-// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) V(UsageBits, Usage, 1, _)
-
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
-
- STATIC_ASSERT(Usage::SORT <= UsageBits::kMax);
- STATIC_ASSERT(Usage::SEARCH <= UsageBits::kMax);
-
DECL_ACCESSORS(icu_collator, Managed<icu::Collator>)
DECL_ACCESSORS(bound_compare, Object);
- DECL_INT_ACCESSORS(flags)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollator);
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index 47bb7a9c2a..7b5e38e7d8 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -113,9 +113,6 @@ class JSWeakCollection : public JSObject {
// Visit the whole object.
typedef BodyDescriptorImpl BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
};
diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h
new file mode 100644
index 0000000000..0ad7f363c5
--- /dev/null
+++ b/deps/v8/src/objects/js-date-time-format-inl.h
@@ -0,0 +1,33 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_DATE_TIME_FORMAT_INL_H_
+#define V8_OBJECTS_JS_DATE_TIME_FORMAT_INL_H_
+
+#include "src/objects-inl.h"
+#include "src/objects/js-date-time-format.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kICULocaleOffset);
+ACCESSORS(JSDateTimeFormat, icu_simple_date_format,
+ Managed<icu::SimpleDateFormat>, kICUSimpleDateFormatOffset)
+ACCESSORS(JSDateTimeFormat, bound_format, Object, kBoundFormatOffset);
+
+CAST_ACCESSOR(JSDateTimeFormat);
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_DATE_TIME_FORMAT_INL_H_
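
Note: the js-date-time-format.cc hunk below canonicalizes timezone IDs via icu::TimeZone::getCanonicalID, which maps aliases such as "Etc/Zulu" onto "Etc/UTC". A hedged sketch of that call in isolation, using plain ICU:

    #include <unicode/timezone.h>
    #include <unicode/unistr.h>

    bool CanonicalizeTz(const icu::UnicodeString& id, icu::UnicodeString* out) {
      UErrorCode status = U_ZERO_ERROR;
      icu::TimeZone::getCanonicalID(id, *out, status);
      return U_SUCCESS(status);
    }
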
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
new file mode 100644
index 0000000000..6285b74b04
--- /dev/null
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -0,0 +1,980 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/objects/js-date-time-format.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/js-date-time-format-inl.h"
+
+#include "unicode/calendar.h"
+#include "unicode/dtptngen.h"
+#include "unicode/gregocal.h"
+#include "unicode/numsys.h"
+#include "unicode/smpdtfmt.h"
+#include "unicode/unistr.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class PatternMap {
+ public:
+ PatternMap(std::string pattern, std::string value)
+ : pattern(std::move(pattern)), value(std::move(value)) {}
+ virtual ~PatternMap() = default;
+ std::string pattern;
+ std::string value;
+};
+
+class PatternItem {
+ public:
+ PatternItem(const std::string property, std::vector<PatternMap> pairs,
+ std::vector<const char*> allowed_values)
+ : property(std::move(property)),
+ pairs(std::move(pairs)),
+ allowed_values(allowed_values) {}
+ virtual ~PatternItem() = default;
+
+ const std::string property;
+ // It is important to order the patterns in |pairs| from longest to
+ // shortest, because a longer pattern may contain a shorter one as a
+ // substring.
+ std::vector<PatternMap> pairs;
+ std::vector<const char*> allowed_values;
+};
+
+const std::vector<PatternItem> GetPatternItems() {
+ const std::vector<const char*> kLongShort = {"long", "short"};
+ const std::vector<const char*> kNarrowLongShort = {"narrow", "long", "short"};
+ const std::vector<const char*> k2DigitNumeric = {"2-digit", "numeric"};
+ const std::vector<const char*> kNarrowLongShort2DigitNumeric = {
+ "narrow", "long", "short", "2-digit", "numeric"};
+ const std::vector<PatternItem> kPatternItems = {
+ PatternItem("weekday",
+ {{"EEEEE", "narrow"}, {"EEEE", "long"}, {"EEE", "short"}},
+ kNarrowLongShort),
+ PatternItem("era",
+ {{"GGGGG", "narrow"}, {"GGGG", "long"}, {"GGG", "short"}},
+ kNarrowLongShort),
+ PatternItem("year", {{"yy", "2-digit"}, {"y", "numeric"}},
+ k2DigitNumeric),
+ // Sometimes we get L instead of M for month - standalone name.
+ PatternItem("month",
+ {{"MMMMM", "narrow"},
+ {"MMMM", "long"},
+ {"MMM", "short"},
+ {"MM", "2-digit"},
+ {"M", "numeric"},
+ {"LLLLL", "narrow"},
+ {"LLLL", "long"},
+ {"LLL", "short"},
+ {"LL", "2-digit"},
+ {"L", "numeric"}},
+ kNarrowLongShort2DigitNumeric),
+ PatternItem("day", {{"dd", "2-digit"}, {"d", "numeric"}}, k2DigitNumeric),
+ PatternItem("hour",
+ {{"HH", "2-digit"},
+ {"H", "numeric"},
+ {"hh", "2-digit"},
+ {"h", "numeric"}},
+ k2DigitNumeric),
+ PatternItem("minute", {{"mm", "2-digit"}, {"m", "numeric"}},
+ k2DigitNumeric),
+ PatternItem("second", {{"ss", "2-digit"}, {"s", "numeric"}},
+ k2DigitNumeric),
+ PatternItem("timeZoneName", {{"zzzz", "long"}, {"z", "short"}},
+ kLongShort)};
+ return kPatternItems;
+}
+
+class PatternData {
+ public:
+ PatternData(const std::string property, std::vector<PatternMap> pairs,
+ std::vector<const char*> allowed_values)
+ : property(std::move(property)), allowed_values(allowed_values) {
+ for (const auto& pair : pairs) {
+ map.insert(std::make_pair(pair.value, pair.pattern));
+ }
+ }
+ virtual ~PatternData() = default;
+
+ const std::string property;
+ std::map<const std::string, const std::string> map;
+ std::vector<const char*> allowed_values;
+};
+
+enum HourOption {
+ H_UNKNOWN,
+ H_12,
+ H_24,
+};
+
+const std::vector<PatternData> CreateCommonData(const PatternData& hour_data) {
+ std::vector<PatternData> build;
+ for (const PatternItem& item : GetPatternItems()) {
+ if (item.property == "hour") {
+ build.push_back(hour_data);
+ } else {
+ build.push_back(
+ PatternData(item.property, item.pairs, item.allowed_values));
+ }
+ }
+ return build;
+}
+
+const std::vector<PatternData> CreateData(const char* digit2,
+ const char* numeric) {
+ static std::vector<const char*> k2DigitNumeric = {"2-digit", "numeric"};
+ return CreateCommonData(PatternData(
+ "hour", {{digit2, "2-digit"}, {numeric, "numeric"}}, k2DigitNumeric));
+}
+
+const std::vector<PatternData> GetPatternData(HourOption option) {
+ const std::vector<PatternData> data = CreateData("jj", "j");
+ const std::vector<PatternData> data_h12 = CreateData("hh", "h");
+ const std::vector<PatternData> data_h24 = CreateData("HH", "H");
+ switch (option) {
+ case HourOption::H_12:
+ return data_h12;
+ case HourOption::H_24:
+ return data_h24;
+ case HourOption::H_UNKNOWN:
+ return data;
+ }
+}
+
+void SetPropertyFromPattern(Isolate* isolate, const std::string& pattern,
+ Handle<JSObject> options) {
+ Factory* factory = isolate->factory();
+ const std::vector<PatternItem> items = GetPatternItems();
+ for (const auto& item : items) {
+ for (const auto& pair : item.pairs) {
+ if (pattern.find(pair.pattern) != std::string::npos) {
+ // After we find the first pair in the item that matches the pattern,
+ // we set the property and look for the next item in kPatternItems.
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options,
+ factory->NewStringFromAsciiChecked(item.property.c_str()),
+ factory->NewStringFromAsciiChecked(pair.value.c_str()),
+ kDontThrow)
+ .FromJust());
+ break;
+ }
+ }
+ }
+ // hour12
+ // b. If p is "hour12", then
+ // i. Let hc be dtf.[[HourCycle]].
+ // ii. If hc is "h11" or "h12", let v be true.
+ // iii. Else if, hc is "h23" or "h24", let v be false.
+ // iv. Else, let v be undefined.
+ if (pattern.find('h') != std::string::npos) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->NewStringFromStaticChars("hour12"),
+ factory->true_value(), kDontThrow)
+ .FromJust());
+ } else if (pattern.find('H') != std::string::npos) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->NewStringFromStaticChars("hour12"),
+ factory->false_value(), kDontThrow)
+ .FromJust());
+ }
+}
+
+std::string GetGMTTzID(Isolate* isolate, const std::string& input) {
+ std::string ret = "Etc/GMT";
+ switch (input.length()) {
+ case 8:
+ if (input[7] == '0') return ret + '0';
+ break;
+ case 9:
+ if ((input[7] == '+' || input[7] == '-') &&
+ IsInRange(input[8], '0', '9')) {
+ return ret + input[7] + input[8];
+ }
+ break;
+ case 10:
+ if ((input[7] == '+' || input[7] == '-') && (input[8] == '1') &&
+ IsInRange(input[9], '0', '4')) {
+ return ret + input[7] + input[8] + input[9];
+ }
+ break;
+ }
+ return "";
+}
+
+// Locale-independent version of isalpha for the ASCII range. This returns
+// false if ch is alphabetic but outside the ASCII range.
+bool IsAsciiAlpha(char ch) {
+ return IsInRange(ch, 'A', 'Z') || IsInRange(ch, 'a', 'z');
+}
+
+// Locale-independent toupper for the ASCII range. This will not return İ
+// (dotted I) for i under a Turkish locale, while std::toupper may.
+char LocaleIndependentAsciiToUpper(char ch) {
+ return (IsInRange(ch, 'a', 'z')) ? (ch - 'a' + 'A') : ch;
+}
+
+// Locale-independent tolower for the ASCII range.
+char LocaleIndependentAsciiToLower(char ch) {
+ return (IsInRange(ch, 'A', 'Z')) ? (ch - 'A' + 'a') : ch;
+}
+
+// Returns the titlecased location, e.g. bueNos_airES -> Buenos_Aires
+// or ho_cHi_minH -> Ho_Chi_Minh. It is locale-agnostic and deals
+// only with ASCII characters.
+// 'of', 'au' and 'es' are special-cased and lowercased.
+// ICU's timezone parsing is case-sensitive, but ECMAScript's is
+// case-insensitive.
+std::string ToTitleCaseTimezoneLocation(Isolate* isolate,
+ const std::string& input) {
+ std::string title_cased;
+ int word_length = 0;
+ for (char ch : input) {
+ // Convert first char to upper case, the rest to lower case
+ if (IsAsciiAlpha(ch)) {
+ title_cased += word_length == 0 ? LocaleIndependentAsciiToUpper(ch)
+ : LocaleIndependentAsciiToLower(ch);
+ word_length++;
+ } else if (ch == '_' || ch == '-' || ch == '/') {
+ // Special case Au/Es/Of to be lower case.
+ if (word_length == 2) {
+ size_t pos = title_cased.length() - 2;
+ std::string substr = title_cased.substr(pos, 2);
+ if (substr == "Of" || substr == "Es" || substr == "Au") {
+ title_cased[pos] = LocaleIndependentAsciiToLower(title_cased[pos]);
+ }
+ }
+ title_cased += ch;
+ word_length = 0;
+ } else {
+ // Invalid input
+ return std::string();
+ }
+ }
+ return title_cased;
+}
+
+} // namespace
+
+std::string JSDateTimeFormat::CanonicalizeTimeZoneID(Isolate* isolate,
+ const std::string& input) {
+ std::string upper = input;
+ transform(upper.begin(), upper.end(), upper.begin(),
+ LocaleIndependentAsciiToUpper);
+ if (upper == "UTC" || upper == "GMT" || upper == "ETC/UTC" ||
+ upper == "ETC/GMT") {
+ return "UTC";
+ }
+ // We expect only _, '-' and / besides ASCII letters.
+ // All inputs should conform to Area/Location(/Location)*, or Etc/GMT* .
+ // TODO(jshin): 1. Support 'GB-Eire", 'EST5EDT", "ROK', 'US/*', 'NZ' and many
+ // other aliases/linked names when moving timezone validation code to C++.
+ // See crbug.com/364374 and crbug.com/v8/8007 .
+ // 2. Resolve the difference between the CLDR/ICU and IANA time zone dbs.
+ // See http://unicode.org/cldr/trac/ticket/9892 and crbug.com/645807 .
+ if (strncmp(upper.c_str(), "ETC/GMT", 7) == 0) {
+ return GetGMTTzID(isolate, input);
+ }
+ return ToTitleCaseTimezoneLocation(isolate, input);
+}
+
+MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format) {
+ Factory* factory = isolate->factory();
+ // 4. Let options be ! ObjectCreate(%ObjectPrototype%).
+ Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
+
+ // 5. For each row of Table 6, except the header row, in any order, do
+ // a. Let p be the Property value of the current row.
+ Handle<Object> resolved_obj;
+
+ // locale
+ UErrorCode status = U_ZERO_ERROR;
+ char language[ULOC_FULLNAME_CAPACITY];
+ uloc_toLanguageTag(date_time_format->icu_locale()->raw()->getName(), language,
+ ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ CHECK(U_SUCCESS(status));
+ Handle<String> locale = factory->NewStringFromAsciiChecked(language);
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->locale_string(), locale, kDontThrow)
+ .FromJust());
+
+ icu::SimpleDateFormat* icu_simple_date_format =
+ date_time_format->icu_simple_date_format()->raw();
+ // calendar
+ const icu::Calendar* calendar = icu_simple_date_format->getCalendar();
+ // getType() returns legacy calendar type name instead of LDML/BCP47 calendar
+ // key values. intl.js maps them to BCP47 values for key "ca".
+ // TODO(jshin): Consider doing it here, instead.
+ std::string calendar_str = calendar->getType();
+
+ // Maps ICU calendar names to LDML/BCP47 types for key 'ca'.
+ // See typeMap section in third_party/icu/source/data/misc/keyTypeData.txt
+ // and
+ // http://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
+ if (calendar_str == "gregorian") {
+ calendar_str = "gregory";
+ } else if (calendar_str == "ethiopic-amete-alem") {
+ calendar_str = "ethioaa";
+ }
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->NewStringFromStaticChars("calendar"),
+ factory->NewStringFromAsciiChecked(calendar_str.c_str()),
+ kDontThrow)
+ .FromJust());
+
+ // Ugly hack. ICU doesn't expose numbering system in any way, so we have
+ // to assume that for given locale NumberingSystem constructor produces the
+ // same digits as NumberFormat/Calendar would.
+ // Tracked by https://unicode-org.atlassian.net/browse/ICU-13431
+ std::unique_ptr<icu::NumberingSystem> numbering_system(
+ icu::NumberingSystem::createInstance(
+ *(date_time_format->icu_locale()->raw()), status));
+ if (U_SUCCESS(status)) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->numberingSystem_string(),
+ factory->NewStringFromAsciiChecked(numbering_system->getName()),
+ kDontThrow)
+ .FromJust());
+ }
+
+ // timezone
+ const icu::TimeZone& tz = calendar->getTimeZone();
+ icu::UnicodeString time_zone;
+ tz.getID(time_zone);
+ status = U_ZERO_ERROR;
+ icu::UnicodeString canonical_time_zone;
+ icu::TimeZone::getCanonicalID(time_zone, canonical_time_zone, status);
+ if (U_SUCCESS(status)) {
+ Handle<String> timezone_value;
+ // In CLDR (http://unicode.org/cldr/trac/ticket/9943), Etc/UTC is made
+ // a separate timezone ID from Etc/GMT even though they're still the same
+ // timezone. We have Etc/UTC because 'UTC', 'Etc/Universal',
+ // 'Etc/Zulu' and others are turned to 'Etc/UTC' by ICU. Etc/GMT comes
+ // from Etc/GMT0, Etc/GMT+0, Etc/GMT-0, Etc/Greenwich.
+ // ecma402#sec-canonicalizetimezonename step 3
+ if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/UTC") ||
+ canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
+ timezone_value = factory->NewStringFromStaticChars("UTC");
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, timezone_value,
+ Intl::ToString(isolate, canonical_time_zone),
+ JSObject);
+ }
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->NewStringFromStaticChars("timeZone"),
+ timezone_value, kDontThrow)
+ .FromJust());
+ } else {
+    // Somehow we can reach here on Windows; fall back to undefined.
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->NewStringFromStaticChars("timeZone"),
+ factory->undefined_value(), kDontThrow)
+ .FromJust());
+ }
+
+ icu::UnicodeString pattern_unicode;
+ icu_simple_date_format->toPattern(pattern_unicode);
+ std::string pattern;
+ pattern_unicode.toUTF8String(pattern);
+ SetPropertyFromPattern(isolate, pattern, options);
+ return options;
+}
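+// Illustrative example of the resulting object (exact values depend on ICU
+// data and the host time zone):
+//   new Intl.DateTimeFormat("en-US").resolvedOptions()
+//   // => { locale: "en-US", calendar: "gregory", numberingSystem: "latn",
+//   //      timeZone: "America/Los_Angeles", year: "numeric",
+//   //      month: "numeric", day: "numeric" }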
+
+namespace {
+
+// ecma402/#sec-formatdatetime
+// FormatDateTime( dateTimeFormat, x )
+MaybeHandle<String> FormatDateTime(Isolate* isolate,
+ Handle<JSDateTimeFormat> date_time_format,
+ double x) {
+ double date_value = DateCache::TimeClip(x);
+ if (std::isnan(date_value)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
+ String);
+ }
+
+ icu::SimpleDateFormat* date_format =
+ date_time_format->icu_simple_date_format()->raw();
+ CHECK_NOT_NULL(date_format);
+
+ icu::UnicodeString result;
+ date_format->format(date_value, result);
+
+ return Intl::ToString(isolate, result);
+}
+
+} // namespace
+
+// ecma402/#sec-datetime-format-functions
+// DateTime Format Functions
+MaybeHandle<String> JSDateTimeFormat::DateTimeFormat(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
+ Handle<Object> date) {
+ // 2. Assert: Type(dtf) is Object and dtf has an [[InitializedDateTimeFormat]]
+ // internal slot.
+
+ // 3. If date is not provided or is undefined, then
+ double x;
+ if (date->IsUndefined()) {
+ // 3.a Let x be Call(%Date_now%, undefined).
+ x = JSDate::CurrentTimeValue(isolate);
+ } else {
+ // 4. Else,
+ // a. Let x be ? ToNumber(date).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, date, Object::ToNumber(isolate, date),
+ String);
+ CHECK(date->IsNumber());
+ x = date->Number();
+ }
+ // 5. Return FormatDateTime(dtf, x).
+ return FormatDateTime(isolate, date_time_format, x);
+}
+
+MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
+ Isolate* isolate, Handle<Object> date, Handle<Object> locales,
+ Handle<Object> options, RequiredOption required, DefaultsOption defaults,
+ const char* service) {
+ Factory* factory = isolate->factory();
+ // 1. Let x be ? thisTimeValue(this value);
+ if (!date->IsJSDate()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
+ factory->NewStringFromStaticChars("Date")),
+ String);
+ }
+
+ double const x = Handle<JSDate>::cast(date)->value()->Number();
+ // 2. If x is NaN, return "Invalid Date"
+ if (std::isnan(x)) {
+ return factory->NewStringFromStaticChars("Invalid Date");
+ }
+
+ // 3. Let options be ? ToDateTimeOptions(options, required, defaults).
+ Handle<JSObject> internal_options;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, internal_options,
+ ToDateTimeOptions(isolate, options, required, defaults), String);
+
+ // 4. Let dateFormat be ? Construct(%DateTimeFormat%, « locales, options »).
+ Handle<JSObject> object;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, object,
+ Intl::CachedOrNewService(isolate,
+ factory->NewStringFromAsciiChecked(service),
+ locales, options, internal_options),
+ String);
+
+ CHECK(object->IsJSDateTimeFormat());
+ Handle<JSDateTimeFormat> date_time_format =
+ Handle<JSDateTimeFormat>::cast(object);
+ // 5. Return FormatDateTime(dateFormat, x).
+ return FormatDateTime(isolate, date_time_format, x);
+}
+
+namespace {
+
+Maybe<bool> IsPropertyUndefined(Isolate* isolate, Handle<JSObject> options,
+ const char* property) {
+ Factory* factory = isolate->factory();
+ // i. Let prop be the property name.
+ // ii. Let value be ? Get(options, prop).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetPropertyOrElement(
+ isolate, options, factory->NewStringFromAsciiChecked(property)),
+ Nothing<bool>());
+ return Just(value->IsUndefined(isolate));
+}
+
+Maybe<bool> NeedsDefault(Isolate* isolate, Handle<JSObject> options,
+ const std::vector<std::string>& props) {
+ bool needs_default = true;
+ for (const auto& prop : props) {
+ // i. Let prop be the property name.
+ // ii. Let value be ? Get(options, prop)
+ Maybe<bool> maybe_undefined =
+ IsPropertyUndefined(isolate, options, prop.c_str());
+ MAYBE_RETURN(maybe_undefined, Nothing<bool>());
+ // iii. If value is not undefined, let needDefaults be false.
+ if (!maybe_undefined.FromJust()) {
+ needs_default = false;
+ }
+ }
+ return Just(needs_default);
+}
+
+Maybe<bool> CreateDefault(Isolate* isolate, Handle<JSObject> options,
+ const std::vector<std::string>& props) {
+ Factory* factory = isolate->factory();
+ // i. Perform ? CreateDataPropertyOrThrow(options, prop, "numeric").
+ for (const auto& prop : props) {
+ MAYBE_RETURN(
+ JSReceiver::CreateDataProperty(
+ isolate, options, factory->NewStringFromAsciiChecked(prop.c_str()),
+ factory->numeric_string(), kThrowOnError),
+ Nothing<bool>());
+ }
+ return Just(true);
+}
+
+} // namespace
+
+// ecma-402/#sec-todatetimeoptions
+MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions(
+ Isolate* isolate, Handle<Object> input_options, RequiredOption required,
+ DefaultsOption defaults) {
+ Factory* factory = isolate->factory();
+ // 1. If options is undefined, let options be null; otherwise let options be ?
+ // ToObject(options).
+ Handle<JSObject> options;
+ if (input_options->IsUndefined(isolate)) {
+ options = factory->NewJSObjectWithNullProto();
+ } else {
+ Handle<JSReceiver> options_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
+ Object::ToObject(isolate, input_options),
+ JSObject);
+ // 2. Let options be ObjectCreate(options).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ JSObject::ObjectCreate(isolate, options_obj),
+ JSObject);
+ }
+
+ // 3. Let needDefaults be true.
+ bool needs_default = true;
+
+ // 4. If required is "date" or "any", then
+ if (required == RequiredOption::kAny || required == RequiredOption::kDate) {
+ // a. For each of the property names "weekday", "year", "month", "day", do
+ const std::vector<std::string> list({"weekday", "year", "month", "day"});
+ Maybe<bool> maybe_needs_default = NeedsDefault(isolate, options, list);
+ MAYBE_RETURN(maybe_needs_default, Handle<JSObject>());
+ needs_default = maybe_needs_default.FromJust();
+ }
+
+ // 5. If required is "time" or "any", then
+ if (required == RequiredOption::kAny || required == RequiredOption::kTime) {
+ // a. For each of the property names "hour", "minute", "second", do
+ const std::vector<std::string> list({"hour", "minute", "second"});
+ Maybe<bool> maybe_needs_default = NeedsDefault(isolate, options, list);
+ MAYBE_RETURN(maybe_needs_default, Handle<JSObject>());
+ needs_default &= maybe_needs_default.FromJust();
+ }
+
+ // 6. If needDefaults is true and defaults is either "date" or "all", then
+ if (needs_default) {
+ if (defaults == DefaultsOption::kAll || defaults == DefaultsOption::kDate) {
+      // a. For each of the property names "year", "month", "day", do
+ const std::vector<std::string> list({"year", "month", "day"});
+ MAYBE_RETURN(CreateDefault(isolate, options, list), Handle<JSObject>());
+ }
+ // 7. If needDefaults is true and defaults is either "time" or "all", then
+ if (defaults == DefaultsOption::kAll || defaults == DefaultsOption::kTime) {
+ // a. For each of the property names "hour", "minute", "second", do
+ const std::vector<std::string> list({"hour", "minute", "second"});
+ MAYBE_RETURN(CreateDefault(isolate, options, list), Handle<JSObject>());
+ }
+ }
+ // 8. Return options.
+ return options;
+}
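+// Illustrative example: with required == kAny and defaults == kDate, an
+// undefined options argument comes back as a null-prototype object with
+// year, month and day all set to "numeric", because no date or time
+// component was supplied by the caller.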
+
+MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::UnwrapDateTimeFormat(
+ Isolate* isolate, Handle<JSReceiver> format_holder) {
+ Handle<Context> native_context =
+ Handle<Context>(isolate->context()->native_context(), isolate);
+ Handle<JSFunction> constructor = Handle<JSFunction>(
+ JSFunction::cast(native_context->intl_date_time_format_function()),
+ isolate);
+ Handle<Object> dtf;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, dtf,
+ Intl::LegacyUnwrapReceiver(isolate, format_holder, constructor,
+ format_holder->IsJSDateTimeFormat()),
+ JSDateTimeFormat);
+ // 2. If Type(dtf) is not Object or dtf does not have an
+ // [[InitializedDateTimeFormat]] internal slot, then
+ if (!dtf->IsJSDateTimeFormat()) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "UnwrapDateTimeFormat"),
+ format_holder),
+ JSDateTimeFormat);
+ }
+ // 3. Return dtf.
+ return Handle<JSDateTimeFormat>::cast(dtf);
+}
+
+namespace {
+
+// ecma-402/#sec-isvalidtimezonename
+bool IsValidTimeZoneName(const icu::TimeZone& tz) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString id;
+ tz.getID(id);
+ icu::UnicodeString canonical;
+ icu::TimeZone::getCanonicalID(id, canonical, status);
+ return U_SUCCESS(status) &&
+ canonical != icu::UnicodeString("Etc/Unknown", -1, US_INV);
+}
+
+std::unique_ptr<icu::TimeZone> CreateTimeZone(Isolate* isolate,
+ const char* timezone) {
+  // Create the time zone as specified by the user. We have to re-create the
+  // time zone since the calendar takes ownership.
+ if (timezone == nullptr) {
+ // 19.a. Else / Let timeZone be DefaultTimeZone().
+ return std::unique_ptr<icu::TimeZone>(icu::TimeZone::createDefault());
+ }
+ std::string canonicalized =
+ JSDateTimeFormat::CanonicalizeTimeZoneID(isolate, timezone);
+ if (canonicalized.empty()) return std::unique_ptr<icu::TimeZone>();
+ std::unique_ptr<icu::TimeZone> tz(
+ icu::TimeZone::createTimeZone(canonicalized.c_str()));
+ // 18.b If the result of IsValidTimeZoneName(timeZone) is false, then
+ // i. Throw a RangeError exception.
+ if (!IsValidTimeZoneName(*tz)) return std::unique_ptr<icu::TimeZone>();
+ return tz;
+}
+
+std::unique_ptr<icu::Calendar> CreateCalendar(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ const char* timezone) {
+ std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(isolate, timezone);
+ if (tz.get() == nullptr) return std::unique_ptr<icu::Calendar>();
+
+ // Create a calendar using locale, and apply time zone to it.
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::Calendar> calendar(
+ icu::Calendar::createInstance(tz.release(), icu_locale, status));
+ CHECK(U_SUCCESS(status));
+ CHECK_NOT_NULL(calendar.get());
+
+ if (calendar->getDynamicClassID() ==
+ icu::GregorianCalendar::getStaticClassID()) {
+ icu::GregorianCalendar* gc =
+ static_cast<icu::GregorianCalendar*>(calendar.get());
+ UErrorCode status = U_ZERO_ERROR;
+ // The beginning of ECMAScript time, namely -(2**53)
+ const double start_of_time = -9007199254740992;
+ gc->setGregorianChange(start_of_time, status);
+ DCHECK(U_SUCCESS(status));
+ }
+ return calendar;
+}
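+// Note: pushing the Gregorian change date back to the beginning of
+// ECMAScript time makes the calendar proleptic Gregorian, i.e. dates before
+// the 1582 Julian-to-Gregorian switch-over are still computed on the
+// Gregorian calendar, matching the ECMAScript date model.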
+
+std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
+ Isolate* isolate, const icu::Locale& icu_locale,
+ const std::string& skeleton) {
+  // See https://github.com/tc39/ecma402/issues/225 . The best-pattern
+  // generation needs to be done in the base locale according to the
+  // current spec, however odd that may be. See also crbug.com/826549 .
+  // This is a temporary work-around to get v8's external behavior to match
+  // the current spec, but it does not follow the spec provisions mentioned
+  // in the above ECMA-402 issue.
+ // TODO(jshin): The spec may need to be revised because using the base
+ // locale for the pattern match is not quite right. Moreover, what to
+ // do with 'related year' part when 'chinese/dangi' calendar is specified
+ // has to be discussed. Revisit once the spec is clarified/revised.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::DateTimePatternGenerator> generator(
+ icu::DateTimePatternGenerator::createInstance(no_extension_locale,
+ status));
+ icu::UnicodeString pattern;
+ if (U_SUCCESS(status)) {
+ pattern =
+ generator->getBestPattern(icu::UnicodeString(skeleton.c_str()), status);
+ }
+
+ // Make formatter from skeleton. Calendar and numbering system are added
+ // to the locale as Unicode extension (if they were specified at all).
+ status = U_ZERO_ERROR;
+ std::unique_ptr<icu::SimpleDateFormat> date_format(
+ new icu::SimpleDateFormat(pattern, icu_locale, status));
+ if (U_FAILURE(status)) return std::unique_ptr<icu::SimpleDateFormat>();
+
+ CHECK_NOT_NULL(date_format.get());
+ return date_format;
+}
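+// Illustrative example (the exact pattern is ICU-data dependent): for the
+// locale "en" the skeleton "yMMMd" maps to a best pattern like "MMM d, y",
+// while for "de" it maps to "d. MMM y".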
+
+} // namespace
+
+enum FormatMatcherOption { kBestFit, kBasic };
+
+// ecma402/#sec-initializedatetimeformat
+MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
+ Handle<Object> requested_locales, Handle<Object> input_options) {
+ // 2. Let options be ? ToDateTimeOptions(options, "any", "date").
+ Handle<JSObject> options;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options,
+ JSDateTimeFormat::ToDateTimeOptions(
+ isolate, input_options, RequiredOption::kAny, DefaultsOption::kDate),
+ JSDateTimeFormat);
+
+  // ResolveLocale currently reads the "localeMatcher" option itself, so we
+  // have to call ResolveLocale before reading "hour12" and "hourCycle".
+  // TODO(ftang): fix this once ResolveLocale is ported to C++.
+ // 11. Let r be ResolveLocale( %DateTimeFormat%.[[AvailableLocales]],
+ // requestedLocales, opt, %DateTimeFormat%.[[RelevantExtensionKeys]],
+ // localeData).
+ Handle<JSObject> r;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, r,
+ Intl::ResolveLocale(isolate, "dateformat", requested_locales, options),
+ JSDateTimeFormat);
+
+ // 6. Let hour12 be ? GetOption(options, "hour12", "boolean", undefined,
+ // undefined).
+ bool hour12;
+ Maybe<bool> maybe_get_hour12 = Intl::GetBoolOption(
+ isolate, options, "hour12", "Intl.DateTimeFormat", &hour12);
+ MAYBE_RETURN(maybe_get_hour12, Handle<JSDateTimeFormat>());
+ HourOption hour_option = HourOption::H_UNKNOWN;
+ if (maybe_get_hour12.FromJust()) {
+ hour_option = hour12 ? HourOption::H_12 : HourOption::H_24;
+ }
+
+ // 7. Let hourCycle be ? GetOption(options, "hourCycle", "string", « "h11",
+ // "h12", "h23", "h24" », undefined).
+ static std::vector<const char*> hour_cycle_values = {"h11", "h12", "h23",
+ "h24"};
+ std::unique_ptr<char[]> hour_cycle = nullptr;
+ Maybe<bool> maybe_hour_cycle =
+ Intl::GetStringOption(isolate, options, "hourCycle", hour_cycle_values,
+ "Intl.DateTimeFormat", &hour_cycle);
+ MAYBE_RETURN(maybe_hour_cycle, Handle<JSDateTimeFormat>());
+ // 8. If hour12 is not undefined, then
+ if (maybe_get_hour12.FromJust()) {
+ // a. Let hourCycle be null.
+ hour_cycle = nullptr;
+ }
+ // 9. Set opt.[[hc]] to hourCycle.
+ // TODO(ftang): change behavior based on hour_cycle.
+
+ Handle<String> locale_with_extension_str =
+ isolate->factory()->NewStringFromStaticChars("localeWithExtension");
+ Handle<Object> locale_with_extension_obj =
+ JSObject::GetDataProperty(r, locale_with_extension_str);
+
+  // The locale_with_extension has to be a string: either a user-provided
+  // canonicalized string or the default locale.
+ CHECK(locale_with_extension_obj->IsString());
+ Handle<String> locale_with_extension =
+ Handle<String>::cast(locale_with_extension_obj);
+
+ icu::Locale icu_locale =
+ Intl::CreateICULocale(isolate, locale_with_extension);
+ DCHECK(!icu_locale.isBogus());
+
+ // 17. Let timeZone be ? Get(options, "timeZone").
+ static std::vector<const char*> empty_values = {};
+ std::unique_ptr<char[]> timezone = nullptr;
+ Maybe<bool> maybe_timezone =
+ Intl::GetStringOption(isolate, options, "timeZone", empty_values,
+ "Intl.DateTimeFormat", &timezone);
+ MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>());
+
+ // 22. For each row of Table 5, except the header row, do
+ std::string skeleton;
+ for (const auto& item : GetPatternData(hour_option)) {
+ std::unique_ptr<char[]> input;
+ // a. Let prop be the name given in the Property column of the row.
+ // b. Let value be ? GetOption(options, prop, "string", « the strings given
+ // in the Values column of the row », undefined).
+ Maybe<bool> maybe_get_option = Intl::GetStringOption(
+ isolate, options, item.property.c_str(), item.allowed_values,
+ "Intl.DateTimeFormat", &input);
+ MAYBE_RETURN(maybe_get_option, Handle<JSDateTimeFormat>());
+ if (maybe_get_option.FromJust()) {
+ DCHECK_NOT_NULL(input.get());
+ // c. Set opt.[[<prop>]] to value.
+ skeleton += item.map.find(input.get())->second;
+ }
+ }
+
+  // We implement only the "best fit" matcher, but we still need to check
+  // that the formatMatcher value is in range.
+ // 25. Let matcher be ? GetOption(options, "formatMatcher", "string",
+ // « "basic", "best fit" », "best fit").
+ Handle<JSReceiver> options_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
+ Object::ToObject(isolate, options),
+ JSDateTimeFormat);
+ std::unique_ptr<char[]> matcher_str = nullptr;
+ std::vector<const char*> matcher_values = {"basic", "best fit"};
+ Maybe<bool> maybe_found_matcher = Intl::GetStringOption(
+ isolate, options_obj, "formatMatcher", matcher_values,
+ "Intl.DateTimeFormat", &matcher_str);
+ MAYBE_RETURN(maybe_found_matcher, Handle<JSDateTimeFormat>());
+
+ std::unique_ptr<icu::SimpleDateFormat> date_format(
+ CreateICUDateFormat(isolate, icu_locale, skeleton));
+ if (date_format.get() == nullptr) {
+ // Remove extensions and try again.
+ icu_locale = icu::Locale(icu_locale.getBaseName());
+ date_format = CreateICUDateFormat(isolate, icu_locale, skeleton);
+ if (date_format.get() == nullptr) {
+ FATAL("Failed to create ICU date format, are ICU data files missing?");
+ }
+ }
+
+ // Set the locale
+ // 12. Set dateTimeFormat.[[Locale]] to r.[[locale]].
+ icu::Locale* cloned_locale = icu_locale.clone();
+ CHECK_NOT_NULL(cloned_locale);
+ Handle<Managed<icu::Locale>> managed_locale =
+ Managed<icu::Locale>::FromRawPtr(isolate, 0, cloned_locale);
+ date_time_format->set_icu_locale(*managed_locale);
+
+ // 13. Set dateTimeFormat.[[Calendar]] to r.[[ca]].
+ std::unique_ptr<icu::Calendar> calendar(
+ CreateCalendar(isolate, icu_locale, timezone.get()));
+
+ // 18.b If the result of IsValidTimeZoneName(timeZone) is false, then
+ // i. Throw a RangeError exception.
+ if (calendar.get() == nullptr) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kInvalidTimeZone,
+ isolate->factory()->NewStringFromAsciiChecked(
+ timezone.get())),
+ JSDateTimeFormat);
+ }
+ date_format->adoptCalendar(calendar.release());
+
+ Handle<Managed<icu::SimpleDateFormat>> managed_format =
+ Managed<icu::SimpleDateFormat>::FromUniquePtr(isolate, 0,
+ std::move(date_format));
+ date_time_format->set_icu_simple_date_format(*managed_format);
+ return date_time_format;
+}
+
+namespace {
+
+// The list comes from third_party/icu/source/i18n/unicode/udat.h.
+// They're mapped to DateTimeFormat components listed at
+// https://tc39.github.io/ecma402/#sec-datetimeformat-abstracts .
+Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
+ switch (field_id) {
+ case -1:
+ return isolate->factory()->literal_string();
+ case UDAT_YEAR_FIELD:
+ case UDAT_EXTENDED_YEAR_FIELD:
+ case UDAT_YEAR_NAME_FIELD:
+ return isolate->factory()->year_string();
+ case UDAT_MONTH_FIELD:
+ case UDAT_STANDALONE_MONTH_FIELD:
+ return isolate->factory()->month_string();
+ case UDAT_DATE_FIELD:
+ return isolate->factory()->day_string();
+ case UDAT_HOUR_OF_DAY1_FIELD:
+ case UDAT_HOUR_OF_DAY0_FIELD:
+ case UDAT_HOUR1_FIELD:
+ case UDAT_HOUR0_FIELD:
+ return isolate->factory()->hour_string();
+ case UDAT_MINUTE_FIELD:
+ return isolate->factory()->minute_string();
+ case UDAT_SECOND_FIELD:
+ return isolate->factory()->second_string();
+ case UDAT_DAY_OF_WEEK_FIELD:
+ case UDAT_DOW_LOCAL_FIELD:
+ case UDAT_STANDALONE_DAY_FIELD:
+ return isolate->factory()->weekday_string();
+ case UDAT_AM_PM_FIELD:
+ return isolate->factory()->dayPeriod_string();
+ case UDAT_TIMEZONE_FIELD:
+ case UDAT_TIMEZONE_RFC_FIELD:
+ case UDAT_TIMEZONE_GENERIC_FIELD:
+ case UDAT_TIMEZONE_SPECIAL_FIELD:
+ case UDAT_TIMEZONE_LOCALIZED_GMT_OFFSET_FIELD:
+ case UDAT_TIMEZONE_ISO_FIELD:
+ case UDAT_TIMEZONE_ISO_LOCAL_FIELD:
+ return isolate->factory()->timeZoneName_string();
+ case UDAT_ERA_FIELD:
+ return isolate->factory()->era_string();
+ default:
+      // Other UDAT_*_FIELD values cannot show up because there is no way to
+      // specify them via the options of Intl.DateTimeFormat.
+ UNREACHABLE();
+ // To prevent MSVC from issuing C4715 warning.
+ return Handle<String>();
+ }
+}
+
+} // namespace
+
+MaybeHandle<Object> JSDateTimeFormat::FormatToParts(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
+ double date_value) {
+ Factory* factory = isolate->factory();
+ icu::SimpleDateFormat* format =
+ date_time_format->icu_simple_date_format()->raw();
+ CHECK_NOT_NULL(format);
+
+ icu::UnicodeString formatted;
+ icu::FieldPositionIterator fp_iter;
+ icu::FieldPosition fp;
+ UErrorCode status = U_ZERO_ERROR;
+ format->format(date_value, formatted, &fp_iter, status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object);
+ }
+
+ Handle<JSArray> result = factory->NewJSArray(0);
+ int32_t length = formatted.length();
+ if (length == 0) return result;
+
+ int index = 0;
+ int32_t previous_end_pos = 0;
+ Handle<String> substring;
+ while (fp_iter.next(fp)) {
+ int32_t begin_pos = fp.getBeginIndex();
+ int32_t end_pos = fp.getEndIndex();
+
+ if (previous_end_pos < begin_pos) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, previous_end_pos, begin_pos),
+ Object);
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(-1, isolate), substring);
+ ++index;
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, begin_pos, end_pos), Object);
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(fp.getField(), isolate),
+ substring);
+ previous_end_pos = end_pos;
+ ++index;
+ }
+ if (previous_end_pos < length) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, previous_end_pos, length), Object);
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(-1, isolate), substring);
+ }
+ JSObject::ValidateElements(*result);
+ return result;
+}
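+// Illustrative example: formatting 2018-11-20 with a plain en-US formatter
+// yields parts along the lines of
+//   [{type: "month", value: "11"}, {type: "literal", value: "/"},
+//    {type: "day", value: "20"}, {type: "literal", value: "/"},
+//    {type: "year", value: "2018"}]
+// where the "literal" entries come from the gaps between ICU field
+// positions.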
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
new file mode 100644
index 0000000000..ae2aa36a97
--- /dev/null
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -0,0 +1,100 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_DATE_TIME_FORMAT_H_
+#define V8_OBJECTS_JS_DATE_TIME_FORMAT_H_
+
+#include "src/isolate.h"
+#include "src/objects/managed.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace U_ICU_NAMESPACE {
+class Locale;
+class SimpleDateFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class JSDateTimeFormat : public JSObject {
+ public:
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSDateTimeFormat> Initialize(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
+ Handle<Object> locales, Handle<Object> options);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ResolvedOptions(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format);
+
+ // ecma402/#sec-unwrapdatetimeformat
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSDateTimeFormat>
+ UnwrapDateTimeFormat(Isolate* isolate, Handle<JSReceiver> format_holder);
+
+ // Convert the options to ICU DateTimePatternGenerator skeleton.
+ static Maybe<std::string> OptionsToSkeleton(Isolate* isolate,
+ Handle<JSReceiver> options);
+
+  // Returns the time zone ID that matches ICU's expectation of title casing,
+  // or an empty string on error.
+ static std::string CanonicalizeTimeZoneID(Isolate* isolate,
+ const std::string& input);
+
+ // ecma402/#sec-datetime-format-functions
+ // DateTime Format Functions
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> DateTimeFormat(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
+ Handle<Object> date);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> FormatToParts(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
+ double date_value);
+
+ // ecma-402/#sec-todatetimeoptions
+ enum class RequiredOption { kDate, kTime, kAny };
+ enum class DefaultsOption { kDate, kTime, kAll };
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ToDateTimeOptions(
+ Isolate* isolate, Handle<Object> input_options, RequiredOption required,
+ DefaultsOption defaults);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToLocaleDateTime(
+ Isolate* isolate, Handle<Object> date, Handle<Object> locales,
+ Handle<Object> options, RequiredOption required, DefaultsOption defaults,
+ const char* service);
+
+ DECL_CAST(JSDateTimeFormat)
+
+// Layout description.
+#define JS_DATE_TIME_FORMAT_FIELDS(V) \
+ V(kICULocaleOffset, kPointerSize) \
+ V(kICUSimpleDateFormatOffset, kPointerSize) \
+ V(kBoundFormatOffset, kPointerSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_DATE_TIME_FORMAT_FIELDS)
+#undef JS_DATE_TIME_FORMAT_FIELDS
+
+ DECL_ACCESSORS(icu_locale, Managed<icu::Locale>)
+ DECL_ACCESSORS(icu_simple_date_format, Managed<icu::SimpleDateFormat>)
+ DECL_ACCESSORS(bound_format, Object)
+
+ DECL_PRINTER(JSDateTimeFormat)
+ DECL_VERIFIER(JSDateTimeFormat)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSDateTimeFormat);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_DATE_TIME_FORMAT_H_
diff --git a/deps/v8/src/objects/js-generator.h b/deps/v8/src/objects/js-generator.h
index 4d63d524ea..043b457cf0 100644
--- a/deps/v8/src/objects/js-generator.h
+++ b/deps/v8/src/objects/js-generator.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_JS_GENERATOR_H_
#define V8_OBJECTS_JS_GENERATOR_H_
-#include "src/objects.h"
+#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
index 554b3488b6..0f1395719e 100644
--- a/deps/v8/src/objects/js-list-format-inl.h
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -20,7 +20,8 @@ namespace internal {
// Base list format accessors.
ACCESSORS(JSListFormat, locale, String, kLocaleOffset)
-ACCESSORS(JSListFormat, formatter, Foreign, kFormatterOffset)
+ACCESSORS(JSListFormat, icu_formatter, Managed<icu::ListFormatter>,
+ kICUFormatterOffset)
SMI_ACCESSORS(JSListFormat, flags, kFlagsOffset)
inline void JSListFormat::set_style(Style style) {
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index 66dbe0bfd9..d2713d489f 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -119,7 +119,7 @@ JSListFormat::Type get_type(const char* str) {
UNREACHABLE();
}
-MaybeHandle<JSListFormat> JSListFormat::InitializeListFormat(
+MaybeHandle<JSListFormat> JSListFormat::Initialize(
Isolate* isolate, Handle<JSListFormat> list_format_holder,
Handle<Object> input_locales, Handle<Object> input_options) {
Factory* factory = isolate->factory();
@@ -199,7 +199,7 @@ MaybeHandle<JSListFormat> JSListFormat::InitializeListFormat(
Handle<Managed<icu::ListFormatter>> managed_formatter =
Managed<icu::ListFormatter>::FromRawPtr(isolate, 0, formatter);
- list_format_holder->set_formatter(*managed_formatter);
+ list_format_holder->set_icu_formatter(*managed_formatter);
return list_format_holder;
}
@@ -217,11 +217,6 @@ Handle<JSObject> JSListFormat::ResolvedOptions(
return result;
}
-icu::ListFormatter* JSListFormat::UnpackFormatter(Isolate* isolate,
- Handle<JSListFormat> holder) {
- return Managed<icu::ListFormatter>::cast(holder->formatter())->raw();
-}
-
Handle<String> JSListFormat::StyleAsString() const {
switch (style()) {
case Style::LONG:
@@ -352,8 +347,7 @@ Maybe<bool> FormatListCommon(Isolate* isolate,
std::unique_ptr<icu::UnicodeString[]>& array) {
DCHECK(!list->IsUndefined());
- icu::ListFormatter* formatter =
- JSListFormat::UnpackFormatter(isolate, format_holder);
+ icu::ListFormatter* formatter = format_holder->icu_formatter()->raw();
CHECK_NOT_NULL(formatter);
*length = list->GetElementsAccessor()->NumberOfElements(*list);
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index 22f8d20005..e9bfec7cc8 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -12,6 +12,7 @@
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/objects/managed.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -28,17 +29,13 @@ class JSListFormat : public JSObject {
public:
// Initializes relative time format object with properties derived from input
// locales and options.
- static MaybeHandle<JSListFormat> InitializeListFormat(
+ static MaybeHandle<JSListFormat> Initialize(
Isolate* isolate, Handle<JSListFormat> list_format_holder,
Handle<Object> locales, Handle<Object> options);
static Handle<JSObject> ResolvedOptions(Isolate* isolate,
Handle<JSListFormat> format_holder);
- // Unpacks formatter object from corresponding JavaScript object.
- static icu::ListFormatter* UnpackFormatter(
- Isolate* isolate, Handle<JSListFormat> list_format_holder);
-
// ecma402 #sec-formatlist
V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatList(
Isolate* isolate, Handle<JSListFormat> format_holder,
@@ -56,7 +53,7 @@ class JSListFormat : public JSObject {
// ListFormat accessors.
DECL_ACCESSORS(locale, String)
- DECL_ACCESSORS(formatter, Foreign)
+ DECL_ACCESSORS(icu_formatter, Managed<icu::ListFormatter>)
// Style: identifying the relative time format style used.
//
@@ -105,8 +102,8 @@ class JSListFormat : public JSObject {
// Layout description.
static const int kJSListFormatOffset = JSObject::kHeaderSize;
static const int kLocaleOffset = kJSListFormatOffset + kPointerSize;
- static const int kFormatterOffset = kLocaleOffset + kPointerSize;
- static const int kFlagsOffset = kFormatterOffset + kPointerSize;
+ static const int kICUFormatterOffset = kLocaleOffset + kPointerSize;
+ static const int kFlagsOffset = kICUFormatterOffset + kPointerSize;
static const int kSize = kFlagsOffset + kPointerSize;
private:
diff --git a/deps/v8/src/objects/js-locale-inl.h b/deps/v8/src/objects/js-locale-inl.h
index a70bef998e..ac0a7a914f 100644
--- a/deps/v8/src/objects/js-locale-inl.h
+++ b/deps/v8/src/objects/js-locale-inl.h
@@ -28,14 +28,45 @@ ACCESSORS(JSLocale, locale, String, kLocaleOffset);
// Unicode extension accessors.
ACCESSORS(JSLocale, calendar, Object, kCalendarOffset);
-ACCESSORS(JSLocale, case_first, Object, kCaseFirstOffset);
ACCESSORS(JSLocale, collation, Object, kCollationOffset);
-ACCESSORS(JSLocale, hour_cycle, Object, kHourCycleOffset);
-ACCESSORS(JSLocale, numeric, Object, kNumericOffset);
ACCESSORS(JSLocale, numbering_system, Object, kNumberingSystemOffset);
+SMI_ACCESSORS(JSLocale, flags, kFlagsOffset)
CAST_ACCESSOR(JSLocale);
+inline void JSLocale::set_case_first(CaseFirst case_first) {
+ DCHECK_GT(CaseFirst::COUNT, case_first);
+ int hints = flags();
+ hints = CaseFirstBits::update(hints, case_first);
+ set_flags(hints);
+}
+
+inline JSLocale::CaseFirst JSLocale::case_first() const {
+ return CaseFirstBits::decode(flags());
+}
+
+inline void JSLocale::set_hour_cycle(HourCycle hour_cycle) {
+ DCHECK_GT(HourCycle::COUNT, hour_cycle);
+ int hints = flags();
+ hints = HourCycleBits::update(hints, hour_cycle);
+ set_flags(hints);
+}
+
+inline JSLocale::HourCycle JSLocale::hour_cycle() const {
+ return HourCycleBits::decode(flags());
+}
+
+inline void JSLocale::set_numeric(Numeric numeric) {
+ DCHECK_GT(Numeric::COUNT, numeric);
+ int hints = flags();
+ hints = NumericBits::update(hints, numeric);
+ set_flags(hints);
+}
+
+inline JSLocale::Numeric JSLocale::numeric() const {
+ return NumericBits::decode(flags());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 8968aa58c9..78fb30fa41 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -35,6 +35,26 @@ namespace internal {
namespace {
+JSLocale::CaseFirst GetCaseFirst(const char* str) {
+ if (strcmp(str, "upper") == 0) return JSLocale::CaseFirst::UPPER;
+ if (strcmp(str, "lower") == 0) return JSLocale::CaseFirst::LOWER;
+ if (strcmp(str, "false") == 0) return JSLocale::CaseFirst::FALSE_VALUE;
+ UNREACHABLE();
+}
+
+JSLocale::HourCycle GetHourCycle(const char* str) {
+ if (strcmp(str, "h11") == 0) return JSLocale::HourCycle::H11;
+ if (strcmp(str, "h12") == 0) return JSLocale::HourCycle::H12;
+ if (strcmp(str, "h23") == 0) return JSLocale::HourCycle::H23;
+ if (strcmp(str, "h24") == 0) return JSLocale::HourCycle::H24;
+ UNREACHABLE();
+}
+
+JSLocale::Numeric GetNumeric(const char* str) {
+ return strcmp(str, "true") == 0 ? JSLocale::Numeric::TRUE_VALUE
+ : JSLocale::Numeric::FALSE_VALUE;
+}
+
struct OptionData {
const char* name;
const char* key;
@@ -49,12 +69,12 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
CHECK(isolate);
CHECK(icu_locale);
- static std::vector<const char*> hour_cycle_values = {"h11", "h12", "h23",
- "h24"};
- static std::vector<const char*> case_first_values = {"upper", "lower",
- "false"};
- static std::vector<const char*> empty_values = {};
- static const std::array<OptionData, 6> kOptionToUnicodeTagMap = {
+ const std::vector<const char*> hour_cycle_values = {"h11", "h12", "h23",
+ "h24"};
+ const std::vector<const char*> case_first_values = {"upper", "lower",
+ "false"};
+ const std::vector<const char*> empty_values = {};
+ const std::array<OptionData, 6> kOptionToUnicodeTagMap = {
{{"calendar", "ca", &empty_values, false},
{"collation", "co", &empty_values, false},
{"hourCycle", "hc", &hour_cycle_values, false},
@@ -75,7 +95,7 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
: Intl::GetStringOption(isolate, options, option_to_bcp47.name,
*(option_to_bcp47.possible_values),
"locale", &value_str);
- if (maybe_found.IsNothing()) return maybe_found;
+ MAYBE_RETURN(maybe_found, Nothing<bool>());
// TODO(cira): Use fallback value if value is not found to make
// this spec compliant.
@@ -138,19 +158,23 @@ bool PopulateLocaleWithUnicodeTags(Isolate* isolate, const char* icu_locale,
if (bcp47_key) {
const char* bcp47_value = uloc_toUnicodeLocaleType(bcp47_key, value);
if (bcp47_value) {
- Handle<String> bcp47_handle =
- factory->NewStringFromAsciiChecked(bcp47_value);
if (strcmp(bcp47_key, "kn") == 0) {
- locale_holder->set_numeric(*bcp47_handle);
+ locale_holder->set_numeric(GetNumeric(bcp47_value));
} else if (strcmp(bcp47_key, "ca") == 0) {
+ Handle<String> bcp47_handle =
+ factory->NewStringFromAsciiChecked(bcp47_value);
locale_holder->set_calendar(*bcp47_handle);
} else if (strcmp(bcp47_key, "kf") == 0) {
- locale_holder->set_case_first(*bcp47_handle);
+ locale_holder->set_case_first(GetCaseFirst(bcp47_value));
} else if (strcmp(bcp47_key, "co") == 0) {
+ Handle<String> bcp47_handle =
+ factory->NewStringFromAsciiChecked(bcp47_value);
locale_holder->set_collation(*bcp47_handle);
} else if (strcmp(bcp47_key, "hc") == 0) {
- locale_holder->set_hour_cycle(*bcp47_handle);
+ locale_holder->set_hour_cycle(GetHourCycle(bcp47_value));
} else if (strcmp(bcp47_key, "nu") == 0) {
+ Handle<String> bcp47_handle =
+ factory->NewStringFromAsciiChecked(bcp47_value);
locale_holder->set_numbering_system(*bcp47_handle);
}
}
@@ -163,17 +187,17 @@ bool PopulateLocaleWithUnicodeTags(Isolate* isolate, const char* icu_locale,
}
} // namespace
-MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
- Handle<JSLocale> locale_holder,
- Handle<String> locale,
- Handle<JSReceiver> options) {
+MaybeHandle<JSLocale> JSLocale::Initialize(Isolate* isolate,
+ Handle<JSLocale> locale_holder,
+ Handle<String> locale,
+ Handle<JSReceiver> options) {
+ locale_holder->set_flags(0);
static const char* const kMethod = "Intl.Locale";
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
UErrorCode status = U_ZERO_ERROR;
// Get ICU locale format, and canonicalize it.
char icu_result[ULOC_FULLNAME_CAPACITY];
- char icu_canonical[ULOC_FULLNAME_CAPACITY];
if (locale->length() == 0) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kLocaleNotEmpty),
@@ -184,18 +208,20 @@ MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
CHECK_LT(0, bcp47_locale.length());
CHECK_NOT_NULL(*bcp47_locale);
- int icu_length = uloc_forLanguageTag(
- *bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY, nullptr, &status);
+ int parsed_length = 0;
+ int icu_length =
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &parsed_length, &status);
- if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING ||
- icu_length == 0) {
+ if (U_FAILURE(status) ||
+ parsed_length < static_cast<int>(bcp47_locale.length()) ||
+ status == U_STRING_NOT_TERMINATED_WARNING || icu_length == 0) {
THROW_NEW_ERROR(
isolate,
NewRangeError(MessageTemplate::kLocaleBadParameters,
isolate->factory()->NewStringFromAsciiChecked(kMethod),
locale_holder),
JSLocale);
- return MaybeHandle<JSLocale>();
}
Maybe<bool> error = InsertOptionsIntoLocale(isolate, options, icu_result);
@@ -207,40 +233,26 @@ MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
isolate->factory()->NewStringFromAsciiChecked(kMethod),
locale_holder),
JSLocale);
- return MaybeHandle<JSLocale>();
- }
- DCHECK(error.FromJust());
-
- uloc_canonicalize(icu_result, icu_canonical, ULOC_FULLNAME_CAPACITY, &status);
- if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kLocaleBadParameters,
- isolate->factory()->NewStringFromAsciiChecked(kMethod),
- locale_holder),
- JSLocale);
- return MaybeHandle<JSLocale>();
}
- if (!PopulateLocaleWithUnicodeTags(isolate, icu_canonical, locale_holder)) {
+ if (!PopulateLocaleWithUnicodeTags(isolate, icu_result, locale_holder)) {
THROW_NEW_ERROR(
isolate,
NewRangeError(MessageTemplate::kLocaleBadParameters,
isolate->factory()->NewStringFromAsciiChecked(kMethod),
locale_holder),
JSLocale);
- return MaybeHandle<JSLocale>();
}
// Extract language, script and region parts.
char icu_language[ULOC_LANG_CAPACITY];
- uloc_getLanguage(icu_canonical, icu_language, ULOC_LANG_CAPACITY, &status);
+ uloc_getLanguage(icu_result, icu_language, ULOC_LANG_CAPACITY, &status);
char icu_script[ULOC_SCRIPT_CAPACITY];
- uloc_getScript(icu_canonical, icu_script, ULOC_SCRIPT_CAPACITY, &status);
+ uloc_getScript(icu_result, icu_script, ULOC_SCRIPT_CAPACITY, &status);
char icu_region[ULOC_COUNTRY_CAPACITY];
- uloc_getCountry(icu_canonical, icu_region, ULOC_COUNTRY_CAPACITY, &status);
+ uloc_getCountry(icu_result, icu_region, ULOC_COUNTRY_CAPACITY, &status);
if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
THROW_NEW_ERROR(
@@ -249,7 +261,6 @@ MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
isolate->factory()->NewStringFromAsciiChecked(kMethod),
locale_holder),
JSLocale);
- return MaybeHandle<JSLocale>();
}
Factory* factory = isolate->factory();
@@ -271,8 +282,7 @@ MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
}
char icu_base_name[ULOC_FULLNAME_CAPACITY];
- uloc_getBaseName(icu_canonical, icu_base_name, ULOC_FULLNAME_CAPACITY,
- &status);
+ uloc_getBaseName(icu_result, icu_base_name, ULOC_FULLNAME_CAPACITY, &status);
// We need to convert it back to BCP47.
char bcp47_result[ULOC_FULLNAME_CAPACITY];
uloc_toLanguageTag(icu_base_name, bcp47_result, ULOC_FULLNAME_CAPACITY, true,
@@ -284,13 +294,12 @@ MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
isolate->factory()->NewStringFromAsciiChecked(kMethod),
locale_holder),
JSLocale);
- return MaybeHandle<JSLocale>();
}
Handle<String> base_name = factory->NewStringFromAsciiChecked(bcp47_result);
locale_holder->set_base_name(*base_name);
// Produce final representation of the locale string, for toString().
- uloc_toLanguageTag(icu_canonical, bcp47_result, ULOC_FULLNAME_CAPACITY, true,
+ uloc_toLanguageTag(icu_result, bcp47_result, ULOC_FULLNAME_CAPACITY, true,
&status);
if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
THROW_NEW_ERROR(
@@ -299,7 +308,6 @@ MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
isolate->factory()->NewStringFromAsciiChecked(kMethod),
locale_holder),
JSLocale);
- return MaybeHandle<JSLocale>();
}
Handle<String> locale_handle =
factory->NewStringFromAsciiChecked(bcp47_result);
@@ -310,20 +318,37 @@ MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
namespace {
-Handle<String> MorphLocale(Isolate* isolate, String* input,
+Handle<String> MorphLocale(Isolate* isolate, String* language_tag,
int32_t (*morph_func)(const char*, char*, int32_t,
UErrorCode*)) {
Factory* factory = isolate->factory();
char localeBuffer[ULOC_FULLNAME_CAPACITY];
+ char morphBuffer[ULOC_FULLNAME_CAPACITY];
+
UErrorCode status = U_ZERO_ERROR;
+ // Convert from language id to locale.
+ int32_t parsed_length;
+ int32_t length =
+ uloc_forLanguageTag(language_tag->ToCString().get(), localeBuffer,
+ ULOC_FULLNAME_CAPACITY, &parsed_length, &status);
+ CHECK(parsed_length == language_tag->length());
+ DCHECK(U_SUCCESS(status));
+ DCHECK_GT(length, 0);
DCHECK_NOT_NULL(morph_func);
- int32_t length = (*morph_func)(input->ToCString().get(), localeBuffer,
- ULOC_FULLNAME_CAPACITY, &status);
+  // Add the likely subtags to, or minimize the subtags of, the locale ID.
+ length =
+ (*morph_func)(localeBuffer, morphBuffer, ULOC_FULLNAME_CAPACITY, &status);
+ DCHECK(U_SUCCESS(status));
+ DCHECK_GT(length, 0);
+ // Returns a well-formed language tag
+ length = uloc_toLanguageTag(morphBuffer, localeBuffer, ULOC_FULLNAME_CAPACITY,
+ false, &status);
DCHECK(U_SUCCESS(status));
DCHECK_GT(length, 0);
- std::string locale(localeBuffer, length);
- std::replace(locale.begin(), locale.end(), '_', '-');
- return factory->NewStringFromAsciiChecked(locale.c_str());
+ std::string lang(localeBuffer, length);
+ std::replace(lang.begin(), lang.end(), '_', '-');
+
+ return factory->NewStringFromAsciiChecked(lang.c_str());
}
} // namespace
@@ -336,5 +361,46 @@ Handle<String> JSLocale::Minimize(Isolate* isolate, String* locale) {
return MorphLocale(isolate, locale, uloc_minimizeSubtags);
}
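+// Illustrative examples (results depend on ICU's likely-subtags data):
+// Maximize("en") returns "en-Latn-US", and Minimize("en-Latn-US") returns
+// "en".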
+Handle<String> JSLocale::CaseFirstAsString() const {
+ switch (case_first()) {
+ case CaseFirst::UPPER:
+ return GetReadOnlyRoots().upper_string_handle();
+ case CaseFirst::LOWER:
+ return GetReadOnlyRoots().lower_string_handle();
+ case CaseFirst::FALSE_VALUE:
+ return GetReadOnlyRoots().false_string_handle();
+ case CaseFirst::COUNT:
+ UNREACHABLE();
+ }
+}
+
+Handle<String> JSLocale::HourCycleAsString() const {
+ switch (hour_cycle()) {
+ case HourCycle::H11:
+ return GetReadOnlyRoots().h11_string_handle();
+ case HourCycle::H12:
+ return GetReadOnlyRoots().h12_string_handle();
+ case HourCycle::H23:
+ return GetReadOnlyRoots().h23_string_handle();
+ case HourCycle::H24:
+ return GetReadOnlyRoots().h24_string_handle();
+ case HourCycle::COUNT:
+ UNREACHABLE();
+ }
+}
+
+Handle<String> JSLocale::NumericAsString() const {
+ switch (numeric()) {
+ case Numeric::NOTSET:
+ return GetReadOnlyRoots().undefined_string_handle();
+ case Numeric::TRUE_VALUE:
+ return GetReadOnlyRoots().true_string_handle();
+ case Numeric::FALSE_VALUE:
+ return GetReadOnlyRoots().false_string_handle();
+ case Numeric::COUNT:
+ UNREACHABLE();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index d111885d52..f42a4cdaee 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -25,13 +25,17 @@ class JSLocale : public JSObject {
public:
// Initializes locale object with properties derived from input locale string
// and options.
- static MaybeHandle<JSLocale> InitializeLocale(Isolate* isolate,
- Handle<JSLocale> locale_holder,
- Handle<String> locale,
- Handle<JSReceiver> options);
+ static MaybeHandle<JSLocale> Initialize(Isolate* isolate,
+ Handle<JSLocale> locale_holder,
+ Handle<String> locale,
+ Handle<JSReceiver> options);
static Handle<String> Maximize(Isolate* isolate, String* locale);
static Handle<String> Minimize(Isolate* isolate, String* locale);
+ Handle<String> CaseFirstAsString() const;
+ Handle<String> NumericAsString() const;
+ Handle<String> HourCycleAsString() const;
+
DECL_CAST(JSLocale)
// Locale accessors.
@@ -43,12 +47,64 @@ class JSLocale : public JSObject {
// Unicode extension accessors.
DECL_ACCESSORS(calendar, Object)
- DECL_ACCESSORS(case_first, Object)
DECL_ACCESSORS(collation, Object)
- DECL_ACCESSORS(hour_cycle, Object)
- DECL_ACCESSORS(numeric, Object)
DECL_ACCESSORS(numbering_system, Object)
+ // CaseFirst: "kf"
+ //
+ // ecma402 #sec-Intl.Locale.prototype.caseFirst
+ enum class CaseFirst {
+ UPPER, // upper case sorts before lower case
+ LOWER, // lower case sorts before upper case
+ // (compiler does not like FALSE so we have to name it FALSE_VALUE)
+ FALSE_VALUE, // Turn the feature off
+ COUNT
+ };
+ inline void set_case_first(CaseFirst case_first);
+ inline CaseFirst case_first() const;
+
+  // Numeric: "kn"
+ //
+ // ecma402 #sec-Intl.Locale.prototype.numeric
+ enum class Numeric { NOTSET, TRUE_VALUE, FALSE_VALUE, COUNT };
+ inline void set_numeric(Numeric numeric);
+ inline Numeric numeric() const;
+
+  // HourCycle: "hc"
+ //
+ // ecma402 #sec-Intl.Locale.prototype.hourCycle
+ enum class HourCycle {
+    H11,  // 12-hour format starting with hour 0 and going up to 11.
+    H12,  // 12-hour format starting with hour 1 and going up to 12.
+    H23,  // 24-hour format starting with hour 0 and going up to 23.
+    H24,  // 24-hour format starting with hour 1 and going up to 24.
+ COUNT
+ };
+ inline void set_hour_cycle(HourCycle hour_cycle);
+ inline HourCycle hour_cycle() const;
+
+// Bit positions in |flags|.
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(CaseFirstBits, CaseFirst, 2, _) \
+ V(NumericBits, Numeric, 2, _) \
+ V(HourCycleBits, HourCycle, 2, _)
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+
+ STATIC_ASSERT(CaseFirst::UPPER <= CaseFirstBits::kMax);
+ STATIC_ASSERT(CaseFirst::LOWER <= CaseFirstBits::kMax);
+ STATIC_ASSERT(CaseFirst::FALSE_VALUE <= CaseFirstBits::kMax);
+ STATIC_ASSERT(Numeric::NOTSET <= NumericBits::kMax);
+ STATIC_ASSERT(Numeric::FALSE_VALUE <= NumericBits::kMax);
+ STATIC_ASSERT(Numeric::TRUE_VALUE <= NumericBits::kMax);
+ STATIC_ASSERT(HourCycle::H11 <= HourCycleBits::kMax);
+ STATIC_ASSERT(HourCycle::H12 <= HourCycleBits::kMax);
+ STATIC_ASSERT(HourCycle::H23 <= HourCycleBits::kMax);
+ STATIC_ASSERT(HourCycle::H24 <= HourCycleBits::kMax);
+
+  // [flags] Bit field containing various flags about the locale.
+ DECL_INT_ACCESSORS(flags)
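+  // Illustrative sketch of how the hints pack into the Smi-sized |flags|
+  // field (mirrors the inline accessors in js-locale-inl.h):
+  //   int hints = 0;
+  //   hints = CaseFirstBits::update(hints, CaseFirst::LOWER);
+  //   hints = HourCycleBits::update(hints, HourCycle::H23);
+  //   DCHECK_EQ(HourCycle::H23, HourCycleBits::decode(hints));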
+
DECL_PRINTER(JSLocale)
DECL_VERIFIER(JSLocale)
@@ -61,12 +117,10 @@ class JSLocale : public JSObject {
static const int kBaseNameOffset = kRegionOffset + kPointerSize;
static const int kLocaleOffset = kBaseNameOffset + kPointerSize;
// Unicode extension fields.
- static const int kCalendarOffset = kLocaleOffset + kPointerSize;
- static const int kCaseFirstOffset = kCalendarOffset + kPointerSize;
- static const int kCollationOffset = kCaseFirstOffset + kPointerSize;
- static const int kHourCycleOffset = kCollationOffset + kPointerSize;
- static const int kNumericOffset = kHourCycleOffset + kPointerSize;
- static const int kNumberingSystemOffset = kNumericOffset + kPointerSize;
+ static const int kFlagsOffset = kLocaleOffset + kPointerSize;
+ static const int kCalendarOffset = kFlagsOffset + kPointerSize;
+ static const int kCollationOffset = kCalendarOffset + kPointerSize;
+ static const int kNumberingSystemOffset = kCollationOffset + kPointerSize;
// Final size.
static const int kSize = kNumberingSystemOffset + kPointerSize;
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
new file mode 100644
index 0000000000..880ef9344f
--- /dev/null
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -0,0 +1,58 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_NUMBER_FORMAT_INL_H_
+#define V8_OBJECTS_JS_NUMBER_FORMAT_INL_H_
+
+#include "src/objects-inl.h"
+#include "src/objects/js-number-format.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+ACCESSORS(JSNumberFormat, locale, String, kLocaleOffset)
+ACCESSORS(JSNumberFormat, icu_number_format, Managed<icu::NumberFormat>,
+ kICUNumberFormatOffset)
+ACCESSORS(JSNumberFormat, bound_format, Object, kBoundFormatOffset)
+SMI_ACCESSORS(JSNumberFormat, flags, kFlagsOffset)
+
+inline void JSNumberFormat::set_style(Style style) {
+ DCHECK_LT(style, Style::COUNT);
+ int hints = flags();
+ hints = StyleBits::update(hints, style);
+ set_flags(hints);
+}
+
+inline JSNumberFormat::Style JSNumberFormat::style() const {
+ return StyleBits::decode(flags());
+}
+
+inline void JSNumberFormat::set_currency_display(
+ CurrencyDisplay currency_display) {
+ DCHECK_LT(currency_display, CurrencyDisplay::COUNT);
+ int hints = flags();
+ hints = CurrencyDisplayBits::update(hints, currency_display);
+ set_flags(hints);
+}
+
+inline JSNumberFormat::CurrencyDisplay JSNumberFormat::currency_display()
+ const {
+ return CurrencyDisplayBits::decode(flags());
+}
+
+CAST_ACCESSOR(JSNumberFormat);
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_NUMBER_FORMAT_INL_H_
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
new file mode 100644
index 0000000000..9fe7c30a9d
--- /dev/null
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -0,0 +1,709 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/objects/js-number-format.h"
+
+#include <set>
+#include <string>
+
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/js-number-format-inl.h"
+#include "unicode/decimfmt.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/strenum.h"
+#include "unicode/ucurr.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// ecma-402/#sec-currencydigits
+// The currency is expected to be an all-uppercase string value.
+int CurrencyDigits(const icu::UnicodeString& currency) {
+ UErrorCode status = U_ZERO_ERROR;
+ uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
+ reinterpret_cast<const UChar*>(currency.getBuffer()), &status);
+  // For missing currency codes, default to the most common value, 2.
+ return U_SUCCESS(status) ? fraction_digits : 2;
+}
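+// Illustrative examples, per ISO 4217 / CLDR data: CurrencyDigits("JPY")
+// returns 0, CurrencyDigits("USD") returns 2, CurrencyDigits("BHD")
+// returns 3, and an unrecognized code falls back to 2.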
+
+bool IsAToZ(char ch) { return IsInRange(AsciiAlphaToLower(ch), 'a', 'z'); }
+
+// ecma402/#sec-iswellformedcurrencycode
+bool IsWellFormedCurrencyCode(const std::string& currency) {
+ // Verifies that the input is a well-formed ISO 4217 currency code.
+ // ecma402/#sec-currency-codes
+ // 2. If the number of elements in normalized is not 3, return false.
+ if (currency.length() != 3) return false;
+ // 1. Let normalized be the result of mapping currency to upper case as
+ // described in 6.1.
+ //
+ // 3. If normalized contains any character that is not in
+ // the range "A" to "Z" (U+0041 to U+005A), return false.
+ //
+ // 4. Return true.
+  // Don't uppercase before testing, since that could turn an invalid code
+  // into a valid one. For example \u00DFP (Eszett+P) becomes SSP.
+ return (IsAToZ(currency[0]) && IsAToZ(currency[1]) && IsAToZ(currency[2]));
+}
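+// Illustrative examples: "USD" and "usd" are well formed (IsAToZ accepts
+// both cases of ASCII letters), while "US", "USDX" and "U$D" are not.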
+
+} // anonymous namespace
+
+// static
+Handle<JSObject> JSNumberFormat::ResolvedOptions(
+ Isolate* isolate, Handle<JSNumberFormat> number_format_holder) {
+ Factory* factory = isolate->factory();
+ Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->style_string(),
+ number_format_holder->StyleAsString(), kDontThrow)
+ .FromJust());
+
+ icu::NumberFormat* number_format =
+ number_format_holder->icu_number_format()->raw();
+ CHECK_NOT_NULL(number_format);
+ icu::DecimalFormat* decimal_format =
+ static_cast<icu::DecimalFormat*>(number_format);
+ CHECK_NOT_NULL(decimal_format);
+
+ Handle<String> locale =
+ Handle<String>(number_format_holder->locale(), isolate);
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->locale_string(), locale, kDontThrow)
+ .FromJust());
+ UErrorCode error = U_ZERO_ERROR;
+ icu::Locale icu_locale = number_format->getLocale(ULOC_VALID_LOCALE, error);
+ DCHECK(U_SUCCESS(error));
+
+ std::string numbering_system = Intl::GetNumberingSystem(icu_locale);
+ if (!numbering_system.empty()) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->numberingSystem_string(),
+ factory->NewStringFromAsciiChecked(numbering_system.c_str()),
+ kDontThrow)
+ .FromJust());
+ }
+
+ if (number_format_holder->style() == Style::CURRENCY) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->currencyDisplay_string(),
+ number_format_holder->CurrencyDisplayAsString(), kDontThrow)
+ .FromJust());
+ icu::UnicodeString currency(number_format->getCurrency());
+ DCHECK(!currency.isEmpty());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->currency_string(),
+ factory
+ ->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(currency.getBuffer()),
+ currency.length()))
+ .ToHandleChecked(),
+ kDontThrow)
+ .FromJust());
+ }
+
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->minimumIntegerDigits_string(),
+ factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
+ kDontThrow)
+ .FromJust());
+ CHECK(
+ JSReceiver::CreateDataProperty(
+ isolate, options, factory->minimumFractionDigits_string(),
+ factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
+ kDontThrow)
+ .FromJust());
+ CHECK(
+ JSReceiver::CreateDataProperty(
+ isolate, options, factory->maximumFractionDigits_string(),
+ factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
+ kDontThrow)
+ .FromJust());
+ if (decimal_format->areSignificantDigitsUsed()) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->minimumSignificantDigits_string(),
+ factory->NewNumberFromInt(
+ decimal_format->getMinimumSignificantDigits()),
+ kDontThrow)
+ .FromJust());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->maximumSignificantDigits_string(),
+ factory->NewNumberFromInt(
+ decimal_format->getMaximumSignificantDigits()),
+ kDontThrow)
+ .FromJust());
+ }
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->useGrouping_string(),
+ factory->ToBoolean((number_format->isGroupingUsed() == TRUE)),
+ kDontThrow)
+ .FromJust());
+
+ return options;
+}
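+// Illustrative example of the resulting object (exact values depend on ICU
+// data):
+//   new Intl.NumberFormat("en-US",
+//                         {style: "currency", currency: "USD"})
+//       .resolvedOptions()
+//   // => { locale: "en-US", numberingSystem: "latn", style: "currency",
+//   //      currency: "USD", currencyDisplay: "symbol",
+//   //      minimumIntegerDigits: 1, minimumFractionDigits: 2,
+//   //      maximumFractionDigits: 2, useGrouping: true }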
+
+// ecma402/#sec-unwrapnumberformat
+MaybeHandle<JSNumberFormat> JSNumberFormat::UnwrapNumberFormat(
+ Isolate* isolate, Handle<JSReceiver> format_holder) {
+ // Old code copied from NumberFormat::Unwrap, which had no spec comments
+ // and compiled but failed unit tests.
+ Handle<Context> native_context =
+ Handle<Context>(isolate->context()->native_context(), isolate);
+ Handle<JSFunction> constructor = Handle<JSFunction>(
+ JSFunction::cast(native_context->intl_number_format_function()), isolate);
+ Handle<Object> object;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, object,
+ Intl::LegacyUnwrapReceiver(isolate, format_holder, constructor,
+ format_holder->IsJSNumberFormat()),
+ JSNumberFormat);
+ // 4. If ... or nf does not have an [[InitializedNumberFormat]] internal slot,
+ // then
+ if (!object->IsJSNumberFormat()) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "UnwrapNumberFormat")),
+ JSNumberFormat);
+ }
+ // 5. Return nf.
+ return Handle<JSNumberFormat>::cast(object);
+}
+
+// static
+MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
+ Isolate* isolate, Handle<JSNumberFormat> number_format,
+ Handle<Object> locales, Handle<Object> options_obj) {
+ // set the flags to 0 ASAP.
+ number_format->set_flags(0);
+ Factory* factory = isolate->factory();
+ // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
+ Handle<JSObject> requested_locales;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, requested_locales,
+ Intl::CanonicalizeLocaleListJS(isolate, locales),
+ JSNumberFormat);
+
+ // 2. If options is undefined, then
+ if (options_obj->IsUndefined(isolate)) {
+ // 2. a. Let options be ObjectCreate(null).
+ options_obj = isolate->factory()->NewJSObjectWithNullProto();
+ } else {
+ // 3. Else
+ // 3. a. Let options be ? ToObject(options).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options_obj,
+ Object::ToObject(isolate, options_obj, "Intl.NumberFormat"),
+ JSNumberFormat);
+ }
+
+ // At this point, options_obj can only be a JSObject or a JSProxy.
+ Handle<JSReceiver> options = Handle<JSReceiver>::cast(options_obj);
+
+ // 4. Let opt be a new Record.
+ //
+ // 5. Let matcher be ? GetOption(options, "localeMatcher", "string", «
+ // "lookup", "best fit" », "best fit").
+ //
+ // 6. Set opt.[[localeMatcher]] to matcher.
+ //
+ // 7. Let localeData be %NumberFormat%.[[LocaleData]].
+ //
+ // 8. Let r be ResolveLocale(%NumberFormat%.[[AvailableLocales]],
+ // requestedLocales, opt, %NumberFormat%.[[RelevantExtensionKeys]],
+ // localeData).
+ //
+ // 9. Set numberFormat.[[Locale]] to r.[[locale]].
+
+ Handle<JSObject> r;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, r,
+ Intl::ResolveLocale(isolate, "numberformat", requested_locales, options),
+ JSNumberFormat);
+
+ Handle<String> locale_with_extension_str =
+ isolate->factory()->NewStringFromStaticChars("localeWithExtension");
+ Handle<Object> locale_with_extension_obj =
+ JSObject::GetDataProperty(r, locale_with_extension_str);
+
+ // The locale_with_extension has to be a string. Either a user
+ // provided canonicalized string or the default locale.
+ CHECK(locale_with_extension_obj->IsString());
+ Handle<String> locale_with_extension =
+ Handle<String>::cast(locale_with_extension_obj);
+
+ icu::Locale icu_locale =
+ Intl::CreateICULocale(isolate, locale_with_extension);
+ number_format->set_locale(*locale_with_extension);
+ DCHECK(!icu_locale.isBogus());
+
+ std::set<std::string> relevant_extension_keys{"nu"};
+ std::map<std::string, std::string> extensions =
+ Intl::LookupUnicodeExtensions(icu_locale, relevant_extension_keys);
+
+ // The list that is the value of the "nu" field of any locale field of
+ // [[LocaleData]] must not include the values "native", "traditio", or
+ // "finance".
+ //
+ // See https://tc39.github.io/ecma402/#sec-intl.numberformat-internal-slots
+ if (extensions.find("nu") != extensions.end()) {
+ const std::string value = extensions.at("nu");
+ if (value == "native" || value == "traditio" || value == "finance") {
+ // Such a value is disallowed, so strip the "nu" keyword here; step 10
+ // (Set numberFormat.[[NumberingSystem]] to r.[[nu]]) then sees a locale
+ // without it.
+ UErrorCode status = U_ZERO_ERROR;
+ icu_locale.setKeywordValue("nu", nullptr, status);
+ CHECK(U_SUCCESS(status));
+ }
+ }
+
+ // 11. Let dataLocale be r.[[dataLocale]].
+ //
+ // 12. Let style be ? GetOption(options, "style", "string", « "decimal",
+ // "percent", "currency" », "decimal").
+ const char* service = "Intl.NumberFormat";
+ std::unique_ptr<char[]> style_cstr;
+ const std::vector<const char*> style_values = {"decimal", "percent",
+ "currency"};
+ Maybe<bool> found_style = Intl::GetStringOption(
+ isolate, options, "style", style_values, service, &style_cstr);
+ MAYBE_RETURN(found_style, MaybeHandle<JSNumberFormat>());
+ Style style = Style::DECIMAL;
+ if (found_style.FromJust()) {
+ DCHECK_NOT_NULL(style_cstr.get());
+ if (strcmp(style_cstr.get(), "percent") == 0) {
+ style = Style::PERCENT;
+ } else if (strcmp(style_cstr.get(), "currency") == 0) {
+ style = Style::CURRENCY;
+ }
+ }
+
+ // 13. Set numberFormat.[[Style]] to style.
+ number_format->set_style(style);
+
+ // 14. Let currency be ? GetOption(options, "currency", "string", undefined,
+ // undefined).
+ std::unique_ptr<char[]> currency_cstr;
+ const std::vector<const char*> empty_values = {};
+ Maybe<bool> found_currency = Intl::GetStringOption(
+ isolate, options, "currency", empty_values, service, &currency_cstr);
+ MAYBE_RETURN(found_currency, MaybeHandle<JSNumberFormat>());
+
+ std::string currency;
+ // 15. If currency is not undefined, then
+ if (found_currency.FromJust()) {
+ DCHECK_NOT_NULL(currency_cstr.get());
+ currency = currency_cstr.get();
+ // 15. a. If the result of IsWellFormedCurrencyCode(currency) is false,
+ // throw a RangeError exception.
+ if (!IsWellFormedCurrencyCode(currency)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidCurrencyCode,
+ factory->NewStringFromAsciiChecked(currency.c_str())),
+ JSNumberFormat);
+ }
+ }
+
+ // 16. If style is "currency" and currency is undefined, throw a TypeError
+ // exception.
+ if (style == Style::CURRENCY && !found_currency.FromJust()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kCurrencyCode),
+ JSNumberFormat);
+ }
+ // 17. If style is "currency", then
+ int c_digits = 0;
+ icu::UnicodeString currency_ustr;
+ if (style == Style::CURRENCY) {
+ // a. Let currency be the result of converting currency to upper case as
+ // specified in 6.1
+ std::transform(currency.begin(), currency.end(), currency.begin(),
+ toupper);
+ // c. Let cDigits be CurrencyDigits(currency).
+ currency_ustr = currency.c_str();
+ c_digits = CurrencyDigits(currency_ustr);
+ }
+
+ // 18. Let currencyDisplay be ? GetOption(options, "currencyDisplay",
+ // "string", « "code", "symbol", "name" », "symbol").
+ std::unique_ptr<char[]> currency_display_cstr;
+ const std::vector<const char*> currency_display_values = {"code", "name",
+ "symbol"};
+ Maybe<bool> found_currency_display = Intl::GetStringOption(
+ isolate, options, "currencyDisplay", currency_display_values, service,
+ &currency_display_cstr);
+ MAYBE_RETURN(found_currency_display, MaybeHandle<JSNumberFormat>());
+ CurrencyDisplay currency_display = CurrencyDisplay::SYMBOL;
+ UNumberFormatStyle format_style = UNUM_CURRENCY;
+
+ if (found_currency_display.FromJust()) {
+ DCHECK_NOT_NULL(currency_display_cstr.get());
+ if (strcmp(currency_display_cstr.get(), "code") == 0) {
+ currency_display = CurrencyDisplay::CODE;
+ format_style = UNUM_CURRENCY_ISO;
+ } else if (strcmp(currency_display_cstr.get(), "name") == 0) {
+ currency_display = CurrencyDisplay::NAME;
+ format_style = UNUM_CURRENCY_PLURAL;
+ }
+ }
+
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::NumberFormat> icu_number_format;
+ if (style == Style::DECIMAL) {
+ icu_number_format.reset(
+ icu::NumberFormat::createInstance(icu_locale, status));
+ } else if (style == Style::PERCENT) {
+ icu_number_format.reset(
+ icu::NumberFormat::createPercentInstance(icu_locale, status));
+ } else {
+ DCHECK_EQ(style, Style::CURRENCY);
+ icu_number_format.reset(
+ icu::NumberFormat::createInstance(icu_locale, format_style, status));
+ }
+
+ if (U_FAILURE(status) || icu_number_format.get() == nullptr) {
+ status = U_ZERO_ERROR;
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ icu_number_format.reset(
+ icu::NumberFormat::createInstance(no_extension_locale, status));
+
+ if (U_FAILURE(status) || icu_number_format.get() == nullptr) {
+ FATAL("Failed to create ICU number_format, are ICU data files missing?");
+ }
+ }
+ DCHECK(U_SUCCESS(status));
+ CHECK_NOT_NULL(icu_number_format.get());
+ if (style == Style::CURRENCY) {
+ // 19. If style is "currency", set numberFormat.[[CurrencyDisplay]] to
+ // currencyDisplay.
+ number_format->set_currency_display(currency_display);
+
+ // 17.b. Set numberFormat.[[Currency]] to currency.
+ if (!currency_ustr.isEmpty()) {
+ status = U_ZERO_ERROR;
+ icu_number_format->setCurrency(currency_ustr.getBuffer(), status);
+ CHECK(U_SUCCESS(status));
+ }
+ }
+
+ // 20. If style is "currency", then
+ int mnfd_default, mxfd_default;
+ if (style == Style::CURRENCY) {
+ // a. Let mnfdDefault be cDigits.
+ // b. Let mxfdDefault be cDigits.
+ mnfd_default = c_digits;
+ mxfd_default = c_digits;
+ } else {
+ // 21. Else,
+ // a. Let mnfdDefault be 0.
+ mnfd_default = 0;
+ // b. If style is "percent", then
+ if (style == Style::PERCENT) {
+ // i. Let mxfdDefault be 0.
+ mxfd_default = 0;
+ } else {
+ // c. Else,
+ // i. Let mxfdDefault be 3.
+ mxfd_default = 3;
+ }
+ }
+ // 22. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
+ // mnfdDefault, mxfdDefault).
+ icu::DecimalFormat* icu_decimal_format =
+ static_cast<icu::DecimalFormat*>(icu_number_format.get());
+ Maybe<bool> maybe_set_number_for_digit_options =
+ Intl::SetNumberFormatDigitOptions(isolate, icu_decimal_format, options,
+ mnfd_default, mxfd_default);
+ MAYBE_RETURN(maybe_set_number_for_digit_options,
+ MaybeHandle<JSNumberFormat>());
+
+ // 23. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
+ // undefined, true).
+ bool use_grouping = true;
+ Maybe<bool> found_use_grouping = Intl::GetBoolOption(
+ isolate, options, "useGrouping", service, &use_grouping);
+ MAYBE_RETURN(found_use_grouping, MaybeHandle<JSNumberFormat>());
+ // 24. Set numberFormat.[[UseGrouping]] to useGrouping.
+ icu_number_format->setGroupingUsed(use_grouping ? TRUE : FALSE);
+
+ // 25. Let dataLocaleData be localeData.[[<dataLocale>]].
+ //
+ // 26. Let patterns be dataLocaleData.[[patterns]].
+ //
+ // 27. Assert: patterns is a record (see 11.3.3).
+ //
+ // 28. Let stylePatterns be patterns.[[<style>]].
+ //
+ // 29. Set numberFormat.[[PositivePattern]] to
+ // stylePatterns.[[positivePattern]].
+ //
+ // 30. Set numberFormat.[[NegativePattern]] to
+ // stylePatterns.[[negativePattern]].
+
+ Handle<Managed<icu::NumberFormat>> managed_number_format =
+ Managed<icu::NumberFormat>::FromUniquePtr(isolate, 0,
+ std::move(icu_number_format));
+ number_format->set_icu_number_format(*managed_number_format);
+ number_format->set_bound_format(*factory->undefined_value());
+
+ // 31. Return numberFormat.
+ return number_format;
+}
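+
+// For reference, a JS-level call that exercises the steps above might be
+//   new Intl.NumberFormat('en-US', {style: 'currency', currency: 'usd'})
+// where a lowercase code such as "usd" is accepted by the well-formedness
+// check and uppercased in step 17.a before CurrencyDigits() is consulted.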
+
+Handle<String> JSNumberFormat::StyleAsString() const {
+ switch (style()) {
+ case Style::DECIMAL:
+ return GetReadOnlyRoots().decimal_string_handle();
+ case Style::PERCENT:
+ return GetReadOnlyRoots().percent_string_handle();
+ case Style::CURRENCY:
+ return GetReadOnlyRoots().currency_string_handle();
+ case Style::COUNT:
+ UNREACHABLE();
+ }
+}
+
+Handle<String> JSNumberFormat::CurrencyDisplayAsString() const {
+ switch (currency_display()) {
+ case CurrencyDisplay::CODE:
+ return GetReadOnlyRoots().code_string_handle();
+ case CurrencyDisplay::SYMBOL:
+ return GetReadOnlyRoots().symbol_string_handle();
+ case CurrencyDisplay::NAME:
+ return GetReadOnlyRoots().name_string_handle();
+ case CurrencyDisplay::COUNT:
+ UNREACHABLE();
+ }
+}
+
+MaybeHandle<String> JSNumberFormat::FormatNumber(
+ Isolate* isolate, Handle<JSNumberFormat> number_format_holder,
+ double number) {
+ icu::NumberFormat* number_format =
+ number_format_holder->icu_number_format()->raw();
+ CHECK_NOT_NULL(number_format);
+
+ icu::UnicodeString result;
+ number_format->format(number, result);
+
+ return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
+}
+
+namespace {
+
+bool cmp_NumberFormatSpan(const NumberFormatSpan& a,
+ const NumberFormatSpan& b) {
+ // Regions that start earlier should be encountered earlier.
+ if (a.begin_pos < b.begin_pos) return true;
+ if (a.begin_pos > b.begin_pos) return false;
+ // For regions that start in the same place, regions that last longer should
+ // be encountered earlier.
+ if (a.end_pos < b.end_pos) return false;
+ if (a.end_pos > b.end_pos) return true;
+ // For regions that are exactly the same, one of them must be the "literal"
+ // backdrop we added, which has a field_id of -1, so consider higher field_ids
+ // to be later.
+ return a.field_id < b.field_id;
+}
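+
+// For illustration, using (field_id, begin_pos, end_pos) triples: the
+// backdrop (-1, 0, 12) sorts before (0, 0, 7), which sorts before
+// (2, 7, 8); two spans covering exactly the same range order by ascending
+// field_id, so the -1 backdrop precedes any real field with equal bounds.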
+
+// The list comes from third_party/icu/source/i18n/unicode/unum.h.
+// They're mapped to NumberFormat part types mentioned throughout
+// https://tc39.github.io/ecma402/#sec-partitionnumberpattern .
+Handle<String> IcuNumberFieldIdToNumberType(int32_t field_id, double number,
+ Isolate* isolate) {
+ switch (static_cast<UNumberFormatFields>(field_id)) {
+ case UNUM_INTEGER_FIELD:
+ if (std::isfinite(number)) return isolate->factory()->integer_string();
+ if (std::isnan(number)) return isolate->factory()->nan_string();
+ return isolate->factory()->infinity_string();
+ case UNUM_FRACTION_FIELD:
+ return isolate->factory()->fraction_string();
+ case UNUM_DECIMAL_SEPARATOR_FIELD:
+ return isolate->factory()->decimal_string();
+ case UNUM_GROUPING_SEPARATOR_FIELD:
+ return isolate->factory()->group_string();
+ case UNUM_CURRENCY_FIELD:
+ return isolate->factory()->currency_string();
+ case UNUM_PERCENT_FIELD:
+ return isolate->factory()->percentSign_string();
+ case UNUM_SIGN_FIELD:
+ return number < 0 ? isolate->factory()->minusSign_string()
+ : isolate->factory()->plusSign_string();
+
+ case UNUM_EXPONENT_SYMBOL_FIELD:
+ case UNUM_EXPONENT_SIGN_FIELD:
+ case UNUM_EXPONENT_FIELD:
+ // We should never get these because we're not using any scientific
+ // formatter.
+ UNREACHABLE();
+ return Handle<String>();
+
+ case UNUM_PERMILL_FIELD:
+ // We're not creating any permill formatter, and it's not even clear how
+ // that would be possible with the ICU API.
+ UNREACHABLE();
+ return Handle<String>();
+
+ default:
+ UNREACHABLE();
+ return Handle<String>();
+ }
+}
+} // namespace
+
+// Flattens a list of possibly-overlapping "regions" to a list of
+// non-overlapping "parts". At least one of the input regions must span the
+// entire space of possible indexes. The regions parameter will be sorted
+// in-place according to some criteria; this is done for performance, to avoid
+// copying the input.
+std::vector<NumberFormatSpan> FlattenRegionsToParts(
+ std::vector<NumberFormatSpan>* regions) {
+ // The intention of this algorithm is that it's used to translate ICU "fields"
+ // to JavaScript "parts" of a formatted string. Each ICU field and JavaScript
+ // part has an integer field_id, which corresponds to something like "grouping
+ // separator", "fraction", or "percent sign", and has a begin and end
+ // position. Here's a diagram of:
+
+ // var nf = new Intl.NumberFormat(['de'], {style:'currency',currency:'EUR'});
+ // nf.formatToParts(123456.78);
+
+ // : 6
+ // input regions: 0000000211 7
+ // ('-' means -1): ------------
+ // formatted string: "123.456,78 €"
+ // output parts: 0006000211-7
+
+ // To illustrate the requirements of this algorithm, here's a contrived and
+ // convoluted example of inputs and expected outputs:
+
+ // : 4
+ // : 22 33 3
+ // : 11111 22
+ // input regions: 0000000 111
+ // : ------------
+ // formatted string: "abcdefghijkl"
+ // output parts: 0221340--231
+ // (The characters in the formatted string are irrelevant to this function.)
+
+ // We arrange the overlapping input regions like a mountain range where
+ // smaller regions are "on top" of larger regions, and we output a bird's-eye
+ // view of the mountains, so that smaller regions take priority over larger
+ // regions.
+ std::sort(regions->begin(), regions->end(), cmp_NumberFormatSpan);
+ std::vector<size_t> overlapping_region_index_stack;
+ // At least one item in regions must be a region spanning the entire string.
+ // Due to the sorting above, the first item in the vector will be one of them.
+ overlapping_region_index_stack.push_back(0);
+ NumberFormatSpan top_region = regions->at(0);
+ size_t region_iterator = 1;
+ int32_t entire_size = top_region.end_pos;
+
+ std::vector<NumberFormatSpan> out_parts;
+
+ // The "climber" is a cursor that advances from left to right climbing "up"
+ // and "down" the mountains. Whenever the climber moves to the right, that
+ // represents an item of output.
+ int32_t climber = 0;
+ while (climber < entire_size) {
+ int32_t next_region_begin_pos;
+ if (region_iterator < regions->size()) {
+ next_region_begin_pos = regions->at(region_iterator).begin_pos;
+ } else {
+ // finish off the rest of the input by proceeding to the end.
+ next_region_begin_pos = entire_size;
+ }
+
+ if (climber < next_region_begin_pos) {
+ while (top_region.end_pos < next_region_begin_pos) {
+ if (climber < top_region.end_pos) {
+ // step down
+ out_parts.push_back(NumberFormatSpan(top_region.field_id, climber,
+ top_region.end_pos));
+ climber = top_region.end_pos;
+ } else {
+ // drop down
+ }
+ overlapping_region_index_stack.pop_back();
+ top_region = regions->at(overlapping_region_index_stack.back());
+ }
+ if (climber < next_region_begin_pos) {
+ // cross a plateau/mesa/valley
+ out_parts.push_back(NumberFormatSpan(top_region.field_id, climber,
+ next_region_begin_pos));
+ climber = next_region_begin_pos;
+ }
+ }
+ if (region_iterator < regions->size()) {
+ overlapping_region_index_stack.push_back(region_iterator++);
+ top_region = regions->at(overlapping_region_index_stack.back());
+ }
+ }
+ return out_parts;
+}
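+
+// Tying the first diagram above back to the public API: the flattened parts
+// for "123.456,78 €" surface through formatToParts() roughly as
+//   [{type: "integer", value: "123"}, {type: "group", value: "."},
+//    {type: "integer", value: "456"}, {type: "decimal", value: ","},
+//    {type: "fraction", value: "78"}, {type: "literal", value: " "},
+//    {type: "currency", value: "€"}]
+// (illustrative; the separator characters come from ICU locale data).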
+
+MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
+ Isolate* isolate, Handle<JSNumberFormat> number_format, double number) {
+ Factory* factory = isolate->factory();
+ icu::NumberFormat* fmt = number_format->icu_number_format()->raw();
+ CHECK_NOT_NULL(fmt);
+
+ icu::UnicodeString formatted;
+ icu::FieldPositionIterator fp_iter;
+ UErrorCode status = U_ZERO_ERROR;
+ fmt->format(number, formatted, &fp_iter, status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), JSArray);
+ }
+
+ Handle<JSArray> result = factory->NewJSArray(0);
+ int32_t length = formatted.length();
+ if (length == 0) return result;
+
+ std::vector<NumberFormatSpan> regions;
+ // Add a "literal" backdrop for the entire string. This will be used if no
+ // other region covers some part of the formatted string. It's possible
+ // there's another field with exactly the same begin and end as this backdrop,
+ // in which case the backdrop's field_id of -1 will give it lower priority.
+ regions.push_back(NumberFormatSpan(-1, 0, formatted.length()));
+
+ {
+ icu::FieldPosition fp;
+ while (fp_iter.next(fp)) {
+ regions.push_back(NumberFormatSpan(fp.getField(), fp.getBeginIndex(),
+ fp.getEndIndex()));
+ }
+ }
+
+ std::vector<NumberFormatSpan> parts = FlattenRegionsToParts(&regions);
+
+ int index = 0;
+ for (auto it = parts.begin(); it < parts.end(); it++) {
+ NumberFormatSpan part = *it;
+ Handle<String> field_type_string =
+ part.field_id == -1
+ ? isolate->factory()->literal_string()
+ : IcuNumberFieldIdToNumberType(part.field_id, number, isolate);
+ Handle<String> substring;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, part.begin_pos, part.end_pos),
+ JSArray);
+ Intl::AddElement(isolate, result, index, field_type_string, substring);
+ ++index;
+ }
+ JSObject::ValidateElements(*result);
+
+ return result;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
new file mode 100644
index 0000000000..52443dc3d3
--- /dev/null
+++ b/deps/v8/src/objects/js-number-format.h
@@ -0,0 +1,135 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_NUMBER_FORMAT_H_
+#define V8_OBJECTS_JS_NUMBER_FORMAT_H_
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/managed.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace U_ICU_NAMESPACE {
+class NumberFormat;
+} // namespace U_ICU_NAMESPACE
+
+namespace v8 {
+namespace internal {
+
+class JSNumberFormat : public JSObject {
+ public:
+ // ecma402/#sec-initializenumberformat
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> Initialize(
+ Isolate* isolate, Handle<JSNumberFormat> number_format,
+ Handle<Object> locales, Handle<Object> options);
+
+ // ecma402/#sec-unwrapnumberformat
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> UnwrapNumberFormat(
+ Isolate* isolate, Handle<JSReceiver> format_holder);
+
+ // ecma402/#sec-intl.numberformat.prototype.resolvedoptions
+ static Handle<JSObject> ResolvedOptions(Isolate* isolate,
+ Handle<JSNumberFormat> number_format);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> FormatToParts(
+ Isolate* isolate, Handle<JSNumberFormat> number_format, double number);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatNumber(
+ Isolate* isolate, Handle<JSNumberFormat> number_format, double number);
+
+ Handle<String> StyleAsString() const;
+ Handle<String> CurrencyDisplayAsString() const;
+
+ DECL_CAST(JSNumberFormat)
+ DECL_PRINTER(JSNumberFormat)
+ DECL_VERIFIER(JSNumberFormat)
+
+ // [[Style]] is one of the values "decimal", "percent" or "currency",
+ // identifying the style of the number format.
+ enum class Style {
+ DECIMAL,
+ PERCENT,
+ CURRENCY,
+
+ COUNT
+ };
+ inline void set_style(Style style);
+ inline Style style() const;
+
+ // [[CurrencyDisplay]] is one of the values "code", "symbol" or "name",
+ // identifying the display of the currency number format.
+ enum class CurrencyDisplay {
+ CODE,
+ SYMBOL,
+ NAME,
+
+ COUNT
+ };
+ inline void set_currency_display(CurrencyDisplay currency_display);
+ inline CurrencyDisplay currency_display() const;
+
+// Layout description.
+#define JS_NUMBER_FORMAT_FIELDS(V) \
+ V(kLocaleOffset, kPointerSize) \
+ V(kICUNumberFormatOffset, kPointerSize) \
+ V(kBoundFormatOffset, kPointerSize) \
+ V(kFlagsOffset, kPointerSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_NUMBER_FORMAT_FIELDS)
+#undef JS_NUMBER_FORMAT_FIELDS
+
+// Bit positions in |flags|.
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(StyleBits, Style, 2, _) \
+ V(CurrencyDisplayBits, CurrencyDisplay, 2, _)
+
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+
+ STATIC_ASSERT(Style::DECIMAL <= StyleBits::kMax);
+ STATIC_ASSERT(Style::PERCENT <= StyleBits::kMax);
+ STATIC_ASSERT(Style::CURRENCY <= StyleBits::kMax);
+
+ STATIC_ASSERT(CurrencyDisplay::CODE <= CurrencyDisplayBits::kMax);
+ STATIC_ASSERT(CurrencyDisplay::SYMBOL <= CurrencyDisplayBits::kMax);
+ STATIC_ASSERT(CurrencyDisplay::NAME <= CurrencyDisplayBits::kMax);
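+
+ // A sketch of the resulting encoding, assuming the standard BitField
+ // layout in which StyleBits occupies bits 0-1 and CurrencyDisplayBits
+ // bits 2-3: a currency format displayed by name would store
+ //   flags == StyleBits::encode(Style::CURRENCY) |
+ //            CurrencyDisplayBits::encode(CurrencyDisplay::NAME),
+ // i.e. 0b1010.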
+
+ DECL_ACCESSORS(locale, String)
+ DECL_ACCESSORS(icu_number_format, Managed<icu::NumberFormat>)
+ DECL_ACCESSORS(bound_format, Object)
+ DECL_INT_ACCESSORS(flags)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSNumberFormat);
+};
+
+struct NumberFormatSpan {
+ int32_t field_id;
+ int32_t begin_pos;
+ int32_t end_pos;
+
+ NumberFormatSpan() = default;
+ NumberFormatSpan(int32_t field_id, int32_t begin_pos, int32_t end_pos)
+ : field_id(field_id), begin_pos(begin_pos), end_pos(end_pos) {}
+};
+
+std::vector<NumberFormatSpan> FlattenRegionsToParts(
+ std::vector<NumberFormatSpan>* regions);
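+
+// A minimal caller sketch, mirroring JSNumberFormat::FormatToParts (names
+// below are illustrative): seed |regions| with a field_id == -1 "literal"
+// backdrop spanning the whole formatted string, add one span per ICU field
+// position, and walk the flattened, non-overlapping result in order:
+//
+//   std::vector<NumberFormatSpan> regions;
+//   regions.push_back(NumberFormatSpan(-1, 0, formatted.length()));
+//   // ... one NumberFormatSpan per icu::FieldPosition ...
+//   for (const NumberFormatSpan& part : FlattenRegionsToParts(&regions)) {
+//     // part.begin_pos / part.end_pos index into the formatted string.
+//   }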
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_NUMBER_FORMAT_H_
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
new file mode 100644
index 0000000000..53483136d8
--- /dev/null
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -0,0 +1,904 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_OBJECTS_INL_H_
+#define V8_OBJECTS_JS_OBJECTS_INL_H_
+
+#include "src/objects/js-objects.h"
+
+#include "src/feedback-vector.h"
+#include "src/heap/heap-write-barrier.h"
+#include "src/keys.h"
+#include "src/lookup-inl.h"
+#include "src/objects/property-array-inl.h"
+#include "src/objects/shared-function-info.h"
+#include "src/prototype.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(JSAsyncFromSyncIterator)
+CAST_ACCESSOR(JSBoundFunction)
+CAST_ACCESSOR(JSDataView)
+CAST_ACCESSOR(JSDate)
+CAST_ACCESSOR(JSFunction)
+CAST_ACCESSOR(JSGlobalObject)
+CAST_ACCESSOR(JSGlobalProxy)
+CAST_ACCESSOR(JSMessageObject)
+CAST_ACCESSOR(JSObject)
+CAST_ACCESSOR(JSReceiver)
+CAST_ACCESSOR(JSStringIterator)
+CAST_ACCESSOR(JSValue)
+
+MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name) {
+ LookupIterator it(isolate, receiver, name, receiver);
+ if (!it.IsFound()) return it.factory()->undefined_value();
+ return Object::GetProperty(&it);
+}
+
+MaybeHandle<Object> JSReceiver::GetElement(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ uint32_t index) {
+ LookupIterator it(isolate, receiver, index, receiver);
+ if (!it.IsFound()) return it.factory()->undefined_value();
+ return Object::GetProperty(&it);
+}
+
+Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ LookupIterator it(object, name, object,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ if (!it.IsFound()) return it.factory()->undefined_value();
+ return GetDataProperty(&it);
+}
+
+MaybeHandle<Object> JSReceiver::GetPrototype(Isolate* isolate,
+ Handle<JSReceiver> receiver) {
+ // We don't expect access checks to be needed on JSProxy objects.
+ DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
+ PrototypeIterator iter(isolate, receiver, kStartAtReceiver,
+ PrototypeIterator::END_AT_NON_HIDDEN);
+ do {
+ if (!iter.AdvanceFollowingProxies()) return MaybeHandle<Object>();
+ } while (!iter.IsAtEnd());
+ return PrototypeIterator::GetCurrent(iter);
+}
+
+MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ const char* name) {
+ Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
+ return GetProperty(isolate, receiver, str);
+}
+
+// static
+V8_WARN_UNUSED_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
+ Handle<JSReceiver> object) {
+ return KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
+ ALL_PROPERTIES,
+ GetKeysConversion::kConvertToString);
+}
+
+bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
+ DisallowHeapAllocation no_gc;
+ HeapObject* prototype = HeapObject::cast(object->map()->prototype());
+ ReadOnlyRoots roots(isolate);
+ HeapObject* null = roots.null_value();
+ HeapObject* empty_fixed_array = roots.empty_fixed_array();
+ HeapObject* empty_slow_element_dictionary =
+ roots.empty_slow_element_dictionary();
+ while (prototype != null) {
+ Map* map = prototype->map();
+ if (map->IsCustomElementsReceiverMap()) return false;
+ HeapObject* elements = JSObject::cast(prototype)->elements();
+ if (elements != empty_fixed_array &&
+ elements != empty_slow_element_dictionary) {
+ return false;
+ }
+ prototype = HeapObject::cast(map->prototype());
+ }
+ return true;
+}
+
+ACCESSORS(JSReceiver, raw_properties_or_hash, Object, kPropertiesOrHashOffset)
+
+FixedArrayBase* JSObject::elements() const {
+ Object* array = READ_FIELD(this, kElementsOffset);
+ return static_cast<FixedArrayBase*>(array);
+}
+
+void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
+ JSObject::ValidateElements(*object);
+ ElementsKind elements_kind = object->map()->elements_kind();
+ if (!IsObjectElementsKind(elements_kind)) {
+ if (IsHoleyElementsKind(elements_kind)) {
+ TransitionElementsKind(object, HOLEY_ELEMENTS);
+ } else {
+ TransitionElementsKind(object, PACKED_ELEMENTS);
+ }
+ }
+}
+
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Object** objects, uint32_t count,
+ EnsureElementsMode mode) {
+ ElementsKind current_kind = object->GetElementsKind();
+ ElementsKind target_kind = current_kind;
+ {
+ DisallowHeapAllocation no_allocation;
+ DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
+ bool is_holey = IsHoleyElementsKind(current_kind);
+ if (current_kind == HOLEY_ELEMENTS) return;
+ Object* the_hole = object->GetReadOnlyRoots().the_hole_value();
+ for (uint32_t i = 0; i < count; ++i) {
+ Object* current = *objects++;
+ if (current == the_hole) {
+ is_holey = true;
+ target_kind = GetHoleyElementsKind(target_kind);
+ } else if (!current->IsSmi()) {
+ if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
+ if (IsSmiElementsKind(target_kind)) {
+ if (is_holey) {
+ target_kind = HOLEY_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = PACKED_DOUBLE_ELEMENTS;
+ }
+ }
+ } else if (is_holey) {
+ target_kind = HOLEY_ELEMENTS;
+ break;
+ } else {
+ target_kind = PACKED_ELEMENTS;
+ }
+ }
+ }
+ }
+ if (target_kind != current_kind) {
+ TransitionElementsKind(object, target_kind);
+ }
+}
+
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Handle<FixedArrayBase> elements,
+ uint32_t length,
+ EnsureElementsMode mode) {
+ ReadOnlyRoots roots = object->GetReadOnlyRoots();
+ if (elements->map() != roots.fixed_double_array_map()) {
+ DCHECK(elements->map() == roots.fixed_array_map() ||
+ elements->map() == roots.fixed_cow_array_map());
+ if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
+ mode = DONT_ALLOW_DOUBLE_ELEMENTS;
+ }
+ Object** objects =
+ Handle<FixedArray>::cast(elements)->GetFirstElementAddress();
+ EnsureCanContainElements(object, objects, length, mode);
+ return;
+ }
+
+ DCHECK(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
+ if (object->GetElementsKind() == HOLEY_SMI_ELEMENTS) {
+ TransitionElementsKind(object, HOLEY_DOUBLE_ELEMENTS);
+ } else if (object->GetElementsKind() == PACKED_SMI_ELEMENTS) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
+ for (uint32_t i = 0; i < length; ++i) {
+ if (double_array->is_the_hole(i)) {
+ TransitionElementsKind(object, HOLEY_DOUBLE_ELEMENTS);
+ return;
+ }
+ }
+ TransitionElementsKind(object, PACKED_DOUBLE_ELEMENTS);
+ }
+}
+
+void JSObject::SetMapAndElements(Handle<JSObject> object, Handle<Map> new_map,
+ Handle<FixedArrayBase> value) {
+ JSObject::MigrateToMap(object, new_map);
+ DCHECK((object->map()->has_fast_smi_or_object_elements() ||
+ (*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
+ object->map()->has_fast_string_wrapper_elements()) ==
+ (value->map() == object->GetReadOnlyRoots().fixed_array_map() ||
+ value->map() == object->GetReadOnlyRoots().fixed_cow_array_map()));
+ DCHECK((*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
+ (object->map()->has_fast_double_elements() ==
+ value->IsFixedDoubleArray()));
+ object->set_elements(*value);
+}
+
+void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
+ WRITE_FIELD(this, kElementsOffset, value);
+ CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, value, mode);
+}
+
+void JSObject::initialize_elements() {
+ FixedArrayBase* elements = map()->GetInitialElements();
+ WRITE_FIELD(this, kElementsOffset, elements);
+}
+
+InterceptorInfo* JSObject::GetIndexedInterceptor() {
+ return map()->GetIndexedInterceptor();
+}
+
+InterceptorInfo* JSObject::GetNamedInterceptor() {
+ return map()->GetNamedInterceptor();
+}
+
+int JSObject::GetHeaderSize() const { return GetHeaderSize(map()); }
+
+int JSObject::GetHeaderSize(const Map* map) {
+ // Check for the most common kind of JavaScript object before
+ // falling into the generic switch. This speeds up the internal
+ // field operations considerably on average.
+ InstanceType instance_type = map->instance_type();
+ return instance_type == JS_OBJECT_TYPE
+ ? JSObject::kHeaderSize
+ : GetHeaderSize(instance_type, map->has_prototype_slot());
+}
+
+// static
+int JSObject::GetEmbedderFieldCount(const Map* map) {
+ int instance_size = map->instance_size();
+ if (instance_size == kVariableSizeSentinel) return 0;
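+ // Embedder fields sit immediately after the header while in-object
+ // properties sit at the end of the object, so the embedder field count is
+ // the total number of pointer-sized slots after the header minus the
+ // in-object property count.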
+ return ((instance_size - GetHeaderSize(map)) >> kPointerSizeLog2) -
+ map->GetInObjectProperties();
+}
+
+int JSObject::GetEmbedderFieldCount() const {
+ return GetEmbedderFieldCount(map());
+}
+
+int JSObject::GetEmbedderFieldOffset(int index) {
+ DCHECK(index < GetEmbedderFieldCount() && index >= 0);
+ return GetHeaderSize() + (kPointerSize * index);
+}
+
+Object* JSObject::GetEmbedderField(int index) {
+ DCHECK(index < GetEmbedderFieldCount() && index >= 0);
+ // Internal objects do follow immediately after the header, whereas in-object
+ // properties are at the end of the object. Therefore there is no need
+ // to adjust the index here.
+ return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
+}
+
+void JSObject::SetEmbedderField(int index, Object* value) {
+ DCHECK(index < GetEmbedderFieldCount() && index >= 0);
+ // Internal objects do follow immediately after the header, whereas in-object
+ // properties are at the end of the object. Therefore there is no need
+ // to adjust the index here.
+ int offset = GetHeaderSize() + (kPointerSize * index);
+ WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(this, offset, value);
+}
+
+void JSObject::SetEmbedderField(int index, Smi* value) {
+ DCHECK(index < GetEmbedderFieldCount() && index >= 0);
+ // Internal objects do follow immediately after the header, whereas in-object
+ // properties are at the end of the object. Therefore there is no need
+ // to adjust the index here.
+ int offset = GetHeaderSize() + (kPointerSize * index);
+ WRITE_FIELD(this, offset, value);
+}
+
+bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
+ if (!FLAG_unbox_double_fields) return false;
+ return map()->IsUnboxedDoubleField(index);
+}
+
+// Access fast-case object properties at index. The use of these routines
+// is needed to correctly distinguish between properties stored in-object and
+// properties stored in the properties array.
+Object* JSObject::RawFastPropertyAt(FieldIndex index) {
+ DCHECK(!IsUnboxedDoubleField(index));
+ if (index.is_inobject()) {
+ return READ_FIELD(this, index.offset());
+ } else {
+ return property_array()->get(index.outobject_array_index());
+ }
+}
+
+double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
+ DCHECK(IsUnboxedDoubleField(index));
+ return READ_DOUBLE_FIELD(this, index.offset());
+}
+
+uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) {
+ DCHECK(IsUnboxedDoubleField(index));
+ return READ_UINT64_FIELD(this, index.offset());
+}
+
+void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
+ if (index.is_inobject()) {
+ int offset = index.offset();
+ WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(this, offset, value);
+ } else {
+ property_array()->set(index.outobject_array_index(), value);
+ }
+}
+
+void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
+ uint64_t bits) {
+ // Double unboxing is enabled only on 64-bit platforms.
+ DCHECK_EQ(kDoubleSize, kPointerSize);
+ Address field_addr = FIELD_ADDR(this, index.offset());
+ base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(field_addr),
+ static_cast<base::AtomicWord>(bits));
+}
+
+void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
+ if (IsUnboxedDoubleField(index)) {
+ DCHECK(value->IsMutableHeapNumber());
+ // Ensure that all bits of the double value are preserved.
+ RawFastDoublePropertyAsBitsAtPut(
+ index, MutableHeapNumber::cast(value)->value_as_bits());
+ } else {
+ RawFastPropertyAtPut(index, value);
+ }
+}
+
+void JSObject::WriteToField(int descriptor, PropertyDetails details,
+ Object* value) {
+ DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(kData, details.kind());
+ DisallowHeapAllocation no_gc;
+ FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
+ if (details.representation().IsDouble()) {
+ // Nothing more to be done.
+ if (value->IsUninitialized()) {
+ return;
+ }
+ // Manipulating the signaling NaN used for the hole and uninitialized
+ // double field sentinel in C++, e.g. with bit_cast or value()/set_value(),
+ // will change its value on ia32 (the x87 stack is used to return values
+ // and stores to the stack silently clear the signaling bit).
+ uint64_t bits;
+ if (value->IsSmi()) {
+ bits = bit_cast<uint64_t>(static_cast<double>(Smi::ToInt(value)));
+ } else {
+ DCHECK(value->IsHeapNumber());
+ bits = HeapNumber::cast(value)->value_as_bits();
+ }
+ if (IsUnboxedDoubleField(index)) {
+ RawFastDoublePropertyAsBitsAtPut(index, bits);
+ } else {
+ auto box = MutableHeapNumber::cast(RawFastPropertyAt(index));
+ box->set_value_as_bits(bits);
+ }
+ } else {
+ RawFastPropertyAtPut(index, value);
+ }
+}
+
+int JSObject::GetInObjectPropertyOffset(int index) {
+ return map()->GetInObjectPropertyOffset(index);
+}
+
+Object* JSObject::InObjectPropertyAt(int index) {
+ int offset = GetInObjectPropertyOffset(index);
+ return READ_FIELD(this, offset);
+}
+
+Object* JSObject::InObjectPropertyAtPut(int index, Object* value,
+ WriteBarrierMode mode) {
+ // Adjust for the number of properties stored in the object.
+ int offset = GetInObjectPropertyOffset(index);
+ WRITE_FIELD(this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
+ return value;
+}
+
+void JSObject::InitializeBody(Map* map, int start_offset,
+ Object* pre_allocated_value,
+ Object* filler_value) {
+ DCHECK(!filler_value->IsHeapObject() || !Heap::InNewSpace(filler_value));
+ DCHECK(!pre_allocated_value->IsHeapObject() ||
+ !Heap::InNewSpace(pre_allocated_value));
+ int size = map->instance_size();
+ int offset = start_offset;
+ if (filler_value != pre_allocated_value) {
+ int end_of_pre_allocated_offset =
+ size - (map->UnusedPropertyFields() * kPointerSize);
+ DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
+ while (offset < end_of_pre_allocated_offset) {
+ WRITE_FIELD(this, offset, pre_allocated_value);
+ offset += kPointerSize;
+ }
+ }
+ while (offset < size) {
+ WRITE_FIELD(this, offset, filler_value);
+ offset += kPointerSize;
+ }
+}
+
+Object* JSBoundFunction::raw_bound_target_function() const {
+ return READ_FIELD(this, kBoundTargetFunctionOffset);
+}
+
+ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
+ kBoundTargetFunctionOffset)
+ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
+ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
+
+ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
+ACCESSORS(JSFunction, feedback_cell, FeedbackCell, kFeedbackCellOffset)
+
+ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
+ACCESSORS(JSGlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
+
+ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
+
+FeedbackVector* JSFunction::feedback_vector() const {
+ DCHECK(has_feedback_vector());
+ return FeedbackVector::cast(feedback_cell()->value());
+}
+
+// Code objects that are marked for deoptimization are not considered to be
+// optimized. This is because the JSFunction might already have been
+// deoptimized but its code() still needs to be unlinked, which will happen on
+// its next activation.
+// TODO(jupvfranco): rename this function. Maybe RunOptimizedCode,
+// or IsValidOptimizedCode.
+bool JSFunction::IsOptimized() {
+ return code()->kind() == Code::OPTIMIZED_FUNCTION &&
+ !code()->marked_for_deoptimization();
+}
+
+bool JSFunction::HasOptimizedCode() {
+ return IsOptimized() ||
+ (has_feedback_vector() && feedback_vector()->has_optimized_code() &&
+ !feedback_vector()->optimized_code()->marked_for_deoptimization());
+}
+
+bool JSFunction::HasOptimizationMarker() {
+ return has_feedback_vector() && feedback_vector()->has_optimization_marker();
+}
+
+void JSFunction::ClearOptimizationMarker() {
+ DCHECK(has_feedback_vector());
+ feedback_vector()->ClearOptimizationMarker();
+}
+
+// Optimized code marked for deoptimization will tier back down to running
+// interpreted on its next activation, and already doesn't count as IsOptimized.
+bool JSFunction::IsInterpreted() {
+ return code()->is_interpreter_trampoline_builtin() ||
+ (code()->kind() == Code::OPTIMIZED_FUNCTION &&
+ code()->marked_for_deoptimization());
+}
+
+bool JSFunction::ChecksOptimizationMarker() {
+ return code()->checks_optimization_marker();
+}
+
+bool JSFunction::IsMarkedForOptimization() {
+ return has_feedback_vector() && feedback_vector()->optimization_marker() ==
+ OptimizationMarker::kCompileOptimized;
+}
+
+bool JSFunction::IsMarkedForConcurrentOptimization() {
+ return has_feedback_vector() &&
+ feedback_vector()->optimization_marker() ==
+ OptimizationMarker::kCompileOptimizedConcurrent;
+}
+
+bool JSFunction::IsInOptimizationQueue() {
+ return has_feedback_vector() && feedback_vector()->optimization_marker() ==
+ OptimizationMarker::kInOptimizationQueue;
+}
+
+void JSFunction::CompleteInobjectSlackTrackingIfActive() {
+ if (!has_prototype_slot()) return;
+ if (has_initial_map() && initial_map()->IsInobjectSlackTrackingInProgress()) {
+ initial_map()->CompleteInobjectSlackTracking(GetIsolate());
+ }
+}
+
+AbstractCode* JSFunction::abstract_code() {
+ if (IsInterpreted()) {
+ return AbstractCode::cast(shared()->GetBytecodeArray());
+ } else {
+ return AbstractCode::cast(code());
+ }
+}
+
+Code* JSFunction::code() { return Code::cast(READ_FIELD(this, kCodeOffset)); }
+
+void JSFunction::set_code(Code* value) {
+ DCHECK(!Heap::InNewSpace(value));
+ WRITE_FIELD(this, kCodeOffset, value);
+ MarkingBarrier(this, HeapObject::RawField(this, kCodeOffset), value);
+}
+
+void JSFunction::set_code_no_write_barrier(Code* value) {
+ DCHECK(!Heap::InNewSpace(value));
+ WRITE_FIELD(this, kCodeOffset, value);
+}
+
+void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
+ if (has_feedback_vector() && feedback_vector()->has_optimized_code()) {
+ if (FLAG_trace_opt) {
+ PrintF("[evicting entry from optimizing code feedback slot (%s) for ",
+ reason);
+ ShortPrint();
+ PrintF("]\n");
+ }
+ feedback_vector()->ClearOptimizedCode();
+ }
+}
+
+void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
+ DCHECK(has_feedback_vector());
+ DCHECK(ChecksOptimizationMarker());
+ DCHECK(!HasOptimizedCode());
+
+ feedback_vector()->SetOptimizationMarker(marker);
+}
+
+bool JSFunction::has_feedback_vector() const {
+ return !feedback_cell()->value()->IsUndefined();
+}
+
+Context* JSFunction::context() {
+ return Context::cast(READ_FIELD(this, kContextOffset));
+}
+
+bool JSFunction::has_context() const {
+ return READ_FIELD(this, kContextOffset)->IsContext();
+}
+
+JSGlobalProxy* JSFunction::global_proxy() { return context()->global_proxy(); }
+
+Context* JSFunction::native_context() { return context()->native_context(); }
+
+void JSFunction::set_context(Object* value) {
+ DCHECK(value->IsUndefined() || value->IsContext());
+ WRITE_FIELD(this, kContextOffset, value);
+ WRITE_BARRIER(this, kContextOffset, value);
+}
+
+ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
+ kPrototypeOrInitialMapOffset, map()->has_prototype_slot())
+
+bool JSFunction::has_prototype_slot() const {
+ return map()->has_prototype_slot();
+}
+
+Map* JSFunction::initial_map() { return Map::cast(prototype_or_initial_map()); }
+
+bool JSFunction::has_initial_map() {
+ DCHECK(has_prototype_slot());
+ return prototype_or_initial_map()->IsMap();
+}
+
+bool JSFunction::has_instance_prototype() {
+ DCHECK(has_prototype_slot());
+ return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
+}
+
+bool JSFunction::has_prototype() {
+ DCHECK(has_prototype_slot());
+ return map()->has_non_instance_prototype() || has_instance_prototype();
+}
+
+bool JSFunction::has_prototype_property() {
+ return (has_prototype_slot() && IsConstructor()) ||
+ IsGeneratorFunction(shared()->kind());
+}
+
+bool JSFunction::PrototypeRequiresRuntimeLookup() {
+ return !has_prototype_property() || map()->has_non_instance_prototype();
+}
+
+Object* JSFunction::instance_prototype() {
+ DCHECK(has_instance_prototype());
+ if (has_initial_map()) return initial_map()->prototype();
+ // When there is no initial map and the prototype is a JSReceiver, the
+ // initial map field is used for the prototype field.
+ return prototype_or_initial_map();
+}
+
+Object* JSFunction::prototype() {
+ DCHECK(has_prototype());
+ // If the function's prototype property has been set to a non-JSReceiver
+ // value, that value is stored in the constructor field of the map.
+ if (map()->has_non_instance_prototype()) {
+ Object* prototype = map()->GetConstructor();
+ // The map must have a prototype in that field, not a back pointer.
+ DCHECK(!prototype->IsMap());
+ DCHECK(!prototype->IsFunctionTemplateInfo());
+ return prototype;
+ }
+ return instance_prototype();
+}
+
+bool JSFunction::is_compiled() {
+ return code()->builtin_index() != Builtins::kCompileLazy;
+}
+
+ACCESSORS(JSValue, value, Object, kValueOffset)
+
+ACCESSORS(JSDate, value, Object, kValueOffset)
+ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
+ACCESSORS(JSDate, year, Object, kYearOffset)
+ACCESSORS(JSDate, month, Object, kMonthOffset)
+ACCESSORS(JSDate, day, Object, kDayOffset)
+ACCESSORS(JSDate, weekday, Object, kWeekdayOffset)
+ACCESSORS(JSDate, hour, Object, kHourOffset)
+ACCESSORS(JSDate, min, Object, kMinOffset)
+ACCESSORS(JSDate, sec, Object, kSecOffset)
+
+SMI_ACCESSORS(JSMessageObject, type, kTypeOffset)
+ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
+ACCESSORS(JSMessageObject, script, Script, kScriptOffset)
+ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
+SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
+SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
+SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
+
+ElementsKind JSObject::GetElementsKind() const {
+ ElementsKind kind = map()->elements_kind();
+#if VERIFY_HEAP && DEBUG
+ FixedArrayBase* fixed_array =
+ reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
+
+ // If a GC was caused while constructing this object, the elements
+ // pointer may point to a one pointer filler map.
+ if (ElementsAreSafeToExamine()) {
+ Map* map = fixed_array->map();
+ if (IsSmiOrObjectElementsKind(kind)) {
+ DCHECK(map == GetReadOnlyRoots().fixed_array_map() ||
+ map == GetReadOnlyRoots().fixed_cow_array_map());
+ } else if (IsDoubleElementsKind(kind)) {
+ DCHECK(fixed_array->IsFixedDoubleArray() ||
+ fixed_array == GetReadOnlyRoots().empty_fixed_array());
+ } else if (kind == DICTIONARY_ELEMENTS) {
+ DCHECK(fixed_array->IsFixedArray());
+ DCHECK(fixed_array->IsDictionary());
+ } else {
+ DCHECK(kind > DICTIONARY_ELEMENTS);
+ }
+ DCHECK(!IsSloppyArgumentsElementsKind(kind) ||
+ (elements()->IsFixedArray() && elements()->length() >= 2));
+ }
+#endif
+ return kind;
+}
+
+bool JSObject::HasObjectElements() {
+ return IsObjectElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasSmiElements() { return IsSmiElementsKind(GetElementsKind()); }
+
+bool JSObject::HasSmiOrObjectElements() {
+ return IsSmiOrObjectElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasDoubleElements() {
+ return IsDoubleElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasHoleyElements() {
+ return IsHoleyElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasFastElements() {
+ return IsFastElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasFastPackedElements() {
+ return IsFastPackedElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasDictionaryElements() {
+ return GetElementsKind() == DICTIONARY_ELEMENTS;
+}
+
+bool JSObject::HasFastArgumentsElements() {
+ return GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
+bool JSObject::HasSlowArgumentsElements() {
+ return GetElementsKind() == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
+bool JSObject::HasSloppyArgumentsElements() {
+ return IsSloppyArgumentsElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasStringWrapperElements() {
+ return IsStringWrapperElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasFastStringWrapperElements() {
+ return GetElementsKind() == FAST_STRING_WRAPPER_ELEMENTS;
+}
+
+bool JSObject::HasSlowStringWrapperElements() {
+ return GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS;
+}
+
+bool JSObject::HasFixedTypedArrayElements() {
+ DCHECK_NOT_NULL(elements());
+ return map()->has_fixed_typed_array_elements();
+}
+
+#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
+ bool JSObject::HasFixed##Type##Elements() { \
+ HeapObject* array = elements(); \
+ DCHECK_NOT_NULL(array); \
+ if (!array->IsHeapObject()) return false; \
+ return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
+ }
+
+TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
+
+#undef FIXED_TYPED_ELEMENTS_CHECK
+
+bool JSObject::HasNamedInterceptor() { return map()->has_named_interceptor(); }
+
+bool JSObject::HasIndexedInterceptor() {
+ return map()->has_indexed_interceptor();
+}
+
+void JSGlobalObject::set_global_dictionary(GlobalDictionary* dictionary) {
+ DCHECK(IsJSGlobalObject());
+ set_raw_properties_or_hash(dictionary);
+}
+
+GlobalDictionary* JSGlobalObject::global_dictionary() {
+ DCHECK(!HasFastProperties());
+ DCHECK(IsJSGlobalObject());
+ return GlobalDictionary::cast(raw_properties_or_hash());
+}
+
+NumberDictionary* JSObject::element_dictionary() {
+ DCHECK(HasDictionaryElements() || HasSlowStringWrapperElements());
+ return NumberDictionary::cast(elements());
+}
+
+void JSReceiver::initialize_properties() {
+ Heap* heap = GetHeap();
+ ReadOnlyRoots roots(heap);
+ DCHECK(!Heap::InNewSpace(roots.empty_fixed_array()));
+ DCHECK(!Heap::InNewSpace(heap->empty_property_dictionary()));
+ if (map()->is_dictionary_map()) {
+ WRITE_FIELD(this, kPropertiesOrHashOffset,
+ heap->empty_property_dictionary());
+ } else {
+ WRITE_FIELD(this, kPropertiesOrHashOffset, roots.empty_fixed_array());
+ }
+}
+
+bool JSReceiver::HasFastProperties() const {
+ DCHECK(
+ raw_properties_or_hash()->IsSmi() ||
+ (raw_properties_or_hash()->IsDictionary() == map()->is_dictionary_map()));
+ return !map()->is_dictionary_map();
+}
+
+NameDictionary* JSReceiver::property_dictionary() const {
+ DCHECK(!IsJSGlobalObject());
+ DCHECK(!HasFastProperties());
+
+ Object* prop = raw_properties_or_hash();
+ if (prop->IsSmi()) {
+ return GetHeap()->empty_property_dictionary();
+ }
+
+ return NameDictionary::cast(prop);
+}
+
+// TODO(gsathya): Pass isolate directly to this function and access
+// the heap from this.
+PropertyArray* JSReceiver::property_array() const {
+ DCHECK(HasFastProperties());
+
+ Object* prop = raw_properties_or_hash();
+ if (prop->IsSmi() || prop == GetReadOnlyRoots().empty_fixed_array()) {
+ return GetReadOnlyRoots().empty_property_array();
+ }
+
+ return PropertyArray::cast(prop);
+}
+
+Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ LookupIterator it = LookupIterator::PropertyOrElement(object->GetIsolate(),
+ object, name, object);
+ return HasProperty(&it);
+}
+
+Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
+ uint32_t index) {
+ if (object->IsJSModuleNamespace()) return Just(false);
+
+ if (object->IsJSObject()) { // Shortcut.
+ LookupIterator it(object->GetIsolate(), object, index, object,
+ LookupIterator::OWN);
+ return HasProperty(&it);
+ }
+
+ Maybe<PropertyAttributes> attributes =
+ JSReceiver::GetOwnPropertyAttributes(object, index);
+ MAYBE_RETURN(attributes, Nothing<bool>());
+ return Just(attributes.FromJust() != ABSENT);
+}
+
+Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
+ Handle<JSReceiver> object, Handle<Name> name) {
+ LookupIterator it = LookupIterator::PropertyOrElement(object->GetIsolate(),
+ object, name, object);
+ return GetPropertyAttributes(&it);
+}
+
+Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
+ Handle<JSReceiver> object, Handle<Name> name) {
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ object->GetIsolate(), object, name, object, LookupIterator::OWN);
+ return GetPropertyAttributes(&it);
+}
+
+Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
+ Handle<JSReceiver> object, uint32_t index) {
+ LookupIterator it(object->GetIsolate(), object, index, object,
+ LookupIterator::OWN);
+ return GetPropertyAttributes(&it);
+}
+
+Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
+ LookupIterator it(object->GetIsolate(), object, index, object);
+ return HasProperty(&it);
+}
+
+Maybe<PropertyAttributes> JSReceiver::GetElementAttributes(
+ Handle<JSReceiver> object, uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ LookupIterator it(isolate, object, index, object);
+ return GetPropertyAttributes(&it);
+}
+
+Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttributes(
+ Handle<JSReceiver> object, uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ LookupIterator it(isolate, object, index, object, LookupIterator::OWN);
+ return GetPropertyAttributes(&it);
+}
+
+bool JSGlobalObject::IsDetached() {
+ return JSGlobalProxy::cast(global_proxy())->IsDetachedFrom(this);
+}
+
+bool JSGlobalProxy::IsDetachedFrom(JSGlobalObject* global) const {
+ const PrototypeIterator iter(this->GetIsolate(),
+ const_cast<JSGlobalProxy*>(this));
+ return iter.GetCurrent() != global;
+}
+
+inline int JSGlobalProxy::SizeWithEmbedderFields(int embedder_field_count) {
+ DCHECK_GE(embedder_field_count, 0);
+ return kSize + embedder_field_count * kPointerSize;
+}
+
+ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
+ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
+
+ACCESSORS(JSAsyncFromSyncIterator, sync_iterator, JSReceiver,
+ kSyncIteratorOffset)
+ACCESSORS(JSAsyncFromSyncIterator, next, Object, kNextOffset)
+
+ACCESSORS(JSStringIterator, string, String, kStringOffset)
+SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
new file mode 100644
index 0000000000..586fe757db
--- /dev/null
+++ b/deps/v8/src/objects/js-objects.h
@@ -0,0 +1,1408 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_OBJECTS_H_
+#define V8_OBJECTS_JS_OBJECTS_H_
+
+#include "src/objects.h"
+#include "src/objects/property-array.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class JSGlobalObject;
+class JSGlobalProxy;
+
+// JSReceiver includes types on which properties can be defined, i.e.,
+// JSObject and JSProxy.
+class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
+ public:
+ // Returns true if there is no slow (i.e., dictionary) backing store.
+ inline bool HasFastProperties() const;
+
+ // Returns the properties array backing store if it
+ // exists. Otherwise, returns an empty_property_array when there's a
+ // Smi (hash code) or an empty_fixed_array for a fast properties
+ // map.
+ inline PropertyArray* property_array() const;
+
+ // Gets slow properties for non-global objects.
+ inline NameDictionary* property_dictionary() const;
+
+ // Sets the properties backing store and makes sure any existing hash is moved
+ // to the new properties store. To clear out the properties store, pass in the
+ // empty_fixed_array(); the hash will be maintained in this case as well.
+ void SetProperties(HeapObject* properties);
+
+ // There are five possible values for the properties offset.
+ // 1) EmptyFixedArray/EmptyPropertyDictionary - This is the standard
+ // placeholder.
+ //
+ // 2) Smi - This is the hash code of the object.
+ //
+ // 3) PropertyArray - This is similar to a FixedArray but stores
+ // the hash code of the object in its length field. This is a fast
+ // backing store.
+ //
+ // 4) NameDictionary - This is the dictionary-mode backing store.
+ //
+ // 5) GlobalDictionary - This is the backing store for the
+ // GlobalObject.
+ //
+ // This is used only in the deoptimizer and heap. Please use the
+ // above typed getters and setters to access the properties.
+ DECL_ACCESSORS(raw_properties_or_hash, Object)
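+
+ // Illustrative sketch (comments only; not part of this header): typical
+ // dispatch over the states listed above, for a hypothetical JSReceiver*
+ // |receiver|:
+ //
+ //   Object* raw = receiver->raw_properties_or_hash();
+ //   if (raw->IsSmi()) {
+ //     // Case 2: the Smi is the identity hash; no properties store yet.
+ //   } else if (receiver->HasFastProperties()) {
+ //     PropertyArray* store = receiver->property_array();  // Cases 1 and 3.
+ //   } else {
+ //     NameDictionary* dict = receiver->property_dictionary();  // Case 4.
+ //   }
+ //
+ // Global objects use a GlobalDictionary instead (case 5) and are not
+ // covered by property_dictionary().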
+
+ inline void initialize_properties();
+
+ // Deletes an existing named property in a normalized object.
+ static void DeleteNormalizedProperty(Handle<JSReceiver> object, int entry);
+
+ DECL_CAST(JSReceiver)
+
+ // ES6 section 7.1.1 ToPrimitive
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ToPrimitive(
+ Handle<JSReceiver> receiver,
+ ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
+
+ // ES6 section 7.1.1.1 OrdinaryToPrimitive
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> OrdinaryToPrimitive(
+ Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint);
+
+ static MaybeHandle<Context> GetFunctionRealm(Handle<JSReceiver> receiver);
+
+ // Get the first non-hidden prototype.
+ static inline MaybeHandle<Object> GetPrototype(Isolate* isolate,
+ Handle<JSReceiver> receiver);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasInPrototypeChain(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Object> proto);
+
+ // Reads all enumerable own properties of source and adds them to
+ // target, using either Set or CreateDataProperty depending on the
+ // use_set argument. This only copies values not present in the
+ // excluded_properties list.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetOrCopyDataProperties(
+ Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
+ const ScopedVector<Handle<Object>>* excluded_properties = nullptr,
+ bool use_set = true);
+
+ // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasProperty(
+ Handle<JSReceiver> object, Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasElement(
+ Handle<JSReceiver> object, uint32_t index);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasOwnProperty(
+ Handle<JSReceiver> object, Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasOwnProperty(
+ Handle<JSReceiver> object, uint32_t index);
+
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
+ Isolate* isolate, Handle<JSReceiver> receiver, const char* key);
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
+ Isolate* isolate, Handle<JSReceiver> receiver, Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetElement(
+ Isolate* isolate, Handle<JSReceiver> receiver, uint32_t index);
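+
+ // Illustrative sketch (comments only; |isolate|, |receiver| and |name| are
+ // hypothetical): consuming the Maybe/MaybeHandle protocol used above; an
+ // empty result means an exception is already pending on the isolate:
+ //
+ //   Handle<Object> value;
+ //   if (!JSReceiver::GetProperty(isolate, receiver, name).ToHandle(&value)) {
+ //     return MaybeHandle<Object>();  // Propagate the pending exception.
+ //   }
+ //   Maybe<bool> found = JSReceiver::HasProperty(receiver, name);
+ //   if (found.IsNothing()) return MaybeHandle<Object>();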
+
+ // Implementation of ES6 [[Delete]]
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyOrElement(
+ Handle<JSReceiver> object, Handle<Name> name,
+ LanguageMode language_mode = LanguageMode::kSloppy);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteProperty(
+ Handle<JSReceiver> object, Handle<Name> name,
+ LanguageMode language_mode = LanguageMode::kSloppy);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteProperty(
+ LookupIterator* it, LanguageMode language_mode);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteElement(
+ Handle<JSReceiver> object, uint32_t index,
+ LanguageMode language_mode = LanguageMode::kSloppy);
+
+ V8_WARN_UNUSED_RESULT static Object* DefineProperty(
+ Isolate* isolate, Handle<Object> object, Handle<Object> name,
+ Handle<Object> attributes);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> DefineProperties(
+ Isolate* isolate, Handle<Object> object, Handle<Object> properties);
+
+ // "virtual" dispatcher to the correct [[DefineOwnProperty]] implementation.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+
+ // ES6 7.3.4 (when passed kDontThrow)
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Name> key,
+ Handle<Object> value, ShouldThrow should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
+
+ // ES6 9.1.6.1
+ V8_WARN_UNUSED_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
+ Isolate* isolate, Handle<JSObject> object, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
+ LookupIterator* it, PropertyDescriptor* desc, ShouldThrow should_throw);
+ // ES6 9.1.6.2
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsCompatiblePropertyDescriptor(
+ Isolate* isolate, bool extensible, PropertyDescriptor* desc,
+ PropertyDescriptor* current, Handle<Name> property_name,
+ ShouldThrow should_throw);
+ // ES6 9.1.6.3
+ // |it| can be NULL in cases where the ES spec passes |undefined| as the
+ // receiver. Exactly one of |it| and |property_name| must be provided.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> ValidateAndApplyPropertyDescriptor(
+ Isolate* isolate, LookupIterator* it, bool extensible,
+ PropertyDescriptor* desc, PropertyDescriptor* current,
+ ShouldThrow should_throw, Handle<Name> property_name);
+
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool>
+ GetOwnPropertyDescriptor(Isolate* isolate, Handle<JSReceiver> object,
+ Handle<Object> key, PropertyDescriptor* desc);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ LookupIterator* it, PropertyDescriptor* desc);
+
+ typedef PropertyAttributes IntegrityLevel;
+
+ // ES6 7.3.14 (when passed kDontThrow)
+ // 'level' must be SEALED or FROZEN.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetIntegrityLevel(
+ Handle<JSReceiver> object, IntegrityLevel lvl, ShouldThrow should_throw);
+
+ // ES6 7.3.15
+ // 'level' must be SEALED or FROZEN.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> TestIntegrityLevel(
+ Handle<JSReceiver> object, IntegrityLevel lvl);
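+
+ // Illustrative sketch (comments only; |object| is hypothetical):
+ // Object.freeze roughly corresponds to SetIntegrityLevel with FROZEN, and
+ // Object.isFrozen to TestIntegrityLevel with FROZEN:
+ //
+ //   Maybe<bool> frozen =
+ //       JSReceiver::SetIntegrityLevel(object, FROZEN, kThrowOnError);
+ //   Maybe<bool> is_frozen = JSReceiver::TestIntegrityLevel(object, FROZEN);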
+
+ // ES6 [[PreventExtensions]] (when passed kDontThrow)
+ V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
+ Handle<JSReceiver> object, ShouldThrow should_throw);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsExtensible(
+ Handle<JSReceiver> object);
+
+ // Returns the class name ([[Class]] property in the specification).
+ V8_EXPORT_PRIVATE String* class_name();
+
+ // Returns the constructor (the function that was used to instantiate the
+ // object).
+ static MaybeHandle<JSFunction> GetConstructor(Handle<JSReceiver> receiver);
+
+ // Returns the constructor name (the name, possibly inferred, of the
+ // function that was used to instantiate the object).
+ static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
+
+ Handle<Context> GetCreationContext();
+
+ V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
+ GetPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
+ GetOwnPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
+ GetOwnPropertyAttributes(Handle<JSReceiver> object, uint32_t index);
+
+ V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
+ GetElementAttributes(Handle<JSReceiver> object, uint32_t index);
+ V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
+ GetOwnElementAttributes(Handle<JSReceiver> object, uint32_t index);
+
+ V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
+ LookupIterator* it);
+
+ // Set the object's prototype (only JSReceiver and null are allowed values).
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
+ Handle<JSReceiver> object, Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw);
+
+ inline static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
+ Handle<Name> name);
+ static Handle<Object> GetDataProperty(LookupIterator* it);
+
+ // Retrieves a permanent object identity hash code. The undefined value may
+ // be returned if no hash has been created yet.
+ Object* GetIdentityHash(Isolate* isolate);
+
+ // Retrieves a permanent object identity hash code. May create and store a
+ // hash code if needed and none exists.
+ static Smi* CreateIdentityHash(Isolate* isolate, JSReceiver* key);
+ Smi* GetOrCreateIdentityHash(Isolate* isolate);
+
+ // Stores the hash code. The hash passed in must be masked with
+ // JSReceiver::kHashMask.
+ void SetIdentityHash(int masked_hash);
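+
+ // Illustrative sketch (comments only; |receiver| and |capacity| are
+ // hypothetical): the identity hash is what hash tables keyed on objects
+ // (e.g. Maps, Sets, WeakMaps) use for bucket selection:
+ //
+ //   Smi* hash = receiver->GetOrCreateIdentityHash(isolate);
+ //   int bucket = hash->value() & (capacity - 1);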
+
+ // ES6 [[OwnPropertyKeys]] (modulo return type)
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<FixedArray> OwnPropertyKeys(
+ Handle<JSReceiver> object);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetOwnValues(
+ Handle<JSReceiver> object, PropertyFilter filter,
+ bool try_fast_path = true);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
+ Handle<JSReceiver> object, PropertyFilter filter,
+ bool try_fast_path = true);
+
+ V8_WARN_UNUSED_RESULT static Handle<FixedArray> GetOwnElementIndices(
+ Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSObject> object);
+
+ static const int kHashMask = PropertyArray::HashField::kMask;
+
+ // Layout description.
+ static const int kPropertiesOrHashOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = HeapObject::kHeaderSize + kPointerSize;
+
+ bool HasProxyInPrototype(Isolate* isolate);
+
+ bool HasComplexElements();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
+};
+
+// The JSObject describes real heap-allocated JavaScript objects with
+// properties.
+// Note that the map of a JSObject changes during execution to enable inline
+// caching.
+class JSObject : public JSReceiver {
+ public:
+ static bool IsUnmodifiedApiObject(Object** o);
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> New(
+ Handle<JSFunction> constructor, Handle<JSReceiver> new_target,
+ Handle<AllocationSite> site);
+
+ static MaybeHandle<Context> GetFunctionRealm(Handle<JSObject> object);
+
+ // 9.1.12 ObjectCreate ( proto [ , internalSlotsList ] )
+ // Notice: This is NOT 19.1.2.2 Object.create ( O, Properties )
+ static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> ObjectCreate(
+ Isolate* isolate, Handle<Object> prototype);
+
+ // [elements]: The elements (properties with names that are integers).
+ //
+ // Elements can be in two general modes: fast and slow. Each mode
+ // corresponds to a set of object representations of elements that
+ // have something in common.
+ //
+ // In the fast mode elements is a FixedArray and so each element can
+ // be quickly accessed. This fact is used in the generated code. The
+ // elements array can have one of three maps in this mode:
+ // fixed_array_map, sloppy_arguments_elements_map or
+ // fixed_cow_array_map (for copy-on-write arrays). In the latter case
+ // the elements array may be shared by a few objects and so before
+ // writing to any element the array must be copied. Use
+ // EnsureWritableFastElements in this case.
+ //
+ // In the slow mode the elements store is either a NumberDictionary or a
+ // FixedArray parameter map for a (sloppy) arguments object.
+ DECL_ACCESSORS(elements, FixedArrayBase)
+ inline void initialize_elements();
+ static inline void SetMapAndElements(Handle<JSObject> object, Handle<Map> map,
+ Handle<FixedArrayBase> elements);
+ inline ElementsKind GetElementsKind() const;
+ ElementsAccessor* GetElementsAccessor();
+ // Returns true if an object has elements of PACKED_SMI_ELEMENTS or
+ // HOLEY_SMI_ELEMENTS ElementsKind.
+ inline bool HasSmiElements();
+ // Returns true if an object has elements of PACKED_ELEMENTS or
+ // HOLEY_ELEMENTS ElementsKind.
+ inline bool HasObjectElements();
+ // Returns true if an object has elements of PACKED_SMI_ELEMENTS,
+ // HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, or HOLEY_ELEMENTS.
+ inline bool HasSmiOrObjectElements();
+ // Returns true if an object has any of the "fast" elements kinds.
+ inline bool HasFastElements();
+ // Returns true if an object has any of the PACKED elements kinds.
+ inline bool HasFastPackedElements();
+ // Returns true if an object has elements of PACKED_DOUBLE_ELEMENTS or
+ // HOLEY_DOUBLE_ELEMENTS ElementsKind.
+ inline bool HasDoubleElements();
+ // Returns true if an object has elements of HOLEY_SMI_ELEMENTS,
+ // HOLEY_DOUBLE_ELEMENTS, or HOLEY_ELEMENTS ElementsKind.
+ inline bool HasHoleyElements();
+ inline bool HasSloppyArgumentsElements();
+ inline bool HasStringWrapperElements();
+ inline bool HasDictionaryElements();
+
+ inline bool HasFixedTypedArrayElements();
+
+ inline bool HasFixedUint8ClampedElements();
+ inline bool HasFixedArrayElements();
+ inline bool HasFixedInt8Elements();
+ inline bool HasFixedUint8Elements();
+ inline bool HasFixedInt16Elements();
+ inline bool HasFixedUint16Elements();
+ inline bool HasFixedInt32Elements();
+ inline bool HasFixedUint32Elements();
+ inline bool HasFixedFloat32Elements();
+ inline bool HasFixedFloat64Elements();
+ inline bool HasFixedBigInt64Elements();
+ inline bool HasFixedBigUint64Elements();
+
+ inline bool HasFastArgumentsElements();
+ inline bool HasSlowArgumentsElements();
+ inline bool HasFastStringWrapperElements();
+ inline bool HasSlowStringWrapperElements();
+ bool HasEnumerableElements();
+
+ inline NumberDictionary* element_dictionary(); // Gets slow elements.
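+
+ // Illustrative sketch (comments only; |object| is hypothetical): the
+ // predicates above gate how the elements store may be viewed. For SMI and
+ // object kinds it is a plain FixedArray; dictionary mode uses a
+ // NumberDictionary:
+ //
+ //   if (object->HasSmiOrObjectElements()) {
+ //     FixedArray* elems = FixedArray::cast(object->elements());
+ //   } else if (object->HasDictionaryElements()) {
+ //     NumberDictionary* dict = object->element_dictionary();
+ //   }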
+
+ // Requires: HasFastElements().
+ static void EnsureWritableFastElements(Handle<JSObject> object);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithInterceptor(
+ LookupIterator* it, ShouldThrow should_throw, Handle<Object> value);
+
+ // The API currently still wants DefineOwnPropertyIgnoreAttributes to convert
+ // AccessorInfo objects to data fields. We allow FORCE_FIELD as an exception
+ // to the default behavior that calls the setter.
+ enum AccessorInfoHandling { FORCE_FIELD, DONT_FORCE_FIELD };
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ DefineOwnPropertyIgnoreAttributes(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ AccessorInfoHandling handling = DONT_FORCE_FIELD);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnPropertyIgnoreAttributes(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ ShouldThrow should_throw,
+ AccessorInfoHandling handling = DONT_FORCE_FIELD);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ SetOwnPropertyIgnoreAttributes(Handle<JSObject> object, Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ SetOwnElementIgnoreAttributes(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+ // Equivalent to one of the above depending on whether |name| can be converted
+ // to an array index.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ DefinePropertyOrElementIgnoreAttributes(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes = NONE);
+
+ // Adds or reconfigures a property to attributes NONE. It fails when the
+ // property cannot be added or reconfigured.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
+ LookupIterator* it, Handle<Object> value,
+ ShouldThrow should_throw = kDontThrow);
+
+ static void AddProperty(Isolate* isolate, Handle<JSObject> object,
+ Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attributes);
+
+ static void AddDataElement(Handle<JSObject> receiver, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+ // Extends the receiver with the single fast property that appears first in
+ // the passed map. This also extends the property backing store if necessary.
+ static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map);
+
+ // Migrates the given object to a map whose field representations are the
+ // lowest upper bound of all known representations for that field.
+ static void MigrateInstance(Handle<JSObject> instance);
+
+ // Migrates the given object only if the target map is already available,
+ // or returns false if such a map is not yet available.
+ static bool TryMigrateInstance(Handle<JSObject> instance);
+
+ // Sets the property value in a normalized object given (key, value, details).
+ // Handles the special representation of JS global objects.
+ static void SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details);
+ static void SetDictionaryElement(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+ static void SetDictionaryArgumentsElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+ static void OptimizeAsPrototype(Handle<JSObject> object,
+ bool enable_setup_mode = true);
+ static void ReoptimizeIfPrototype(Handle<JSObject> object);
+ static void MakePrototypesFast(Handle<Object> receiver,
+ WhereToStart where_to_start, Isolate* isolate);
+ static void LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate);
+ static void UpdatePrototypeUserRegistration(Handle<Map> old_map,
+ Handle<Map> new_map,
+ Isolate* isolate);
+ static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
+ static Map* InvalidatePrototypeChains(Map* map);
+ static void InvalidatePrototypeValidityCell(JSGlobalObject* global);
+
+ // Updates prototype chain tracking information when an object changes its
+ // map from |old_map| to |new_map|.
+ static void NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
+ Isolate* isolate);
+
+ // Utility used by many Array builtins and runtime functions
+ static inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object);
+
+ // To be passed to PrototypeUsers::Compact.
+ static void PrototypeRegistryCompactionCallback(HeapObject* value,
+ int old_index, int new_index);
+
+ // Retrieve interceptors.
+ inline InterceptorInfo* GetNamedInterceptor();
+ inline InterceptorInfo* GetIndexedInterceptor();
+
+ // Used from JSReceiver.
+ V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes>
+ GetPropertyAttributesWithInterceptor(LookupIterator* it);
+ V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes>
+ GetPropertyAttributesWithFailedAccessCheck(LookupIterator* it);
+
+ // Defines an AccessorPair property on the given object.
+ // TODO(mstarzinger): Rename to SetAccessor().
+ static MaybeHandle<Object> DefineAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes);
+ static MaybeHandle<Object> DefineAccessor(LookupIterator* it,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes);
+
+ // Defines an AccessorInfo property on the given object.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SetAccessor(
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> info,
+ PropertyAttributes attributes);
+
+ // The result must be checked first for exceptions. If there's no exception,
+ // the output parameter |done| indicates whether the interceptor has a result
+ // or not.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor(
+ LookupIterator* it, bool* done);
+
+ static void ValidateElements(JSObject* object);
+
+ // Makes sure that this object can contain HeapObject as elements.
+ static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
+
+ // Makes sure that this object can contain the specified elements.
+ static inline void EnsureCanContainElements(Handle<JSObject> object,
+ Object** elements, uint32_t count,
+ EnsureElementsMode mode);
+ static inline void EnsureCanContainElements(Handle<JSObject> object,
+ Handle<FixedArrayBase> elements,
+ uint32_t length,
+ EnsureElementsMode mode);
+ static void EnsureCanContainElements(Handle<JSObject> object,
+ Arguments* arguments, uint32_t first_arg,
+ uint32_t arg_count,
+ EnsureElementsMode mode);
+
+ // Would we convert a fast elements array to dictionary mode given
+ // an access at |index|?
+ bool WouldConvertToSlowElements(uint32_t index);
+
+ static const uint32_t kMinAddedElementsCapacity = 16;
+
+ // Computes the new capacity when expanding the elements of a JSObject.
+ static uint32_t NewElementsCapacity(uint32_t old_capacity) {
+ // (old_capacity + 50%) + kMinAddedElementsCapacity
+ return old_capacity + (old_capacity >> 1) + kMinAddedElementsCapacity;
+ }
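+
+ // For example, growing from an old capacity of 100 yields
+ // 100 + (100 >> 1) + 16 == 166: roughly 1.5x growth plus a fixed minimum,
+ // so small arrays do not have to regrow on every append.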
+
+ // These methods do not perform access checks!
+ template <AllocationSiteUpdateMode update_or_check =
+ AllocationSiteUpdateMode::kUpdate>
+ static bool UpdateAllocationSite(Handle<JSObject> object,
+ ElementsKind to_kind);
+
+ // Lookup interceptors are used for handling properties controlled by host
+ // objects.
+ inline bool HasNamedInterceptor();
+ inline bool HasIndexedInterceptor();
+
+ // Support functions for v8 api (needed for correct interceptor behavior).
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealNamedProperty(
+ Handle<JSObject> object, Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealElementProperty(
+ Handle<JSObject> object, uint32_t index);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealNamedCallbackProperty(
+ Handle<JSObject> object, Handle<Name> name);
+
+ // Get the header size for a JSObject. Used to compute the index of
+ // embedder fields as well as the number of embedder fields.
+ // The |function_has_prototype_slot| parameter is needed only for
+ // JSFunction objects.
+ static int GetHeaderSize(InstanceType instance_type,
+ bool function_has_prototype_slot = false);
+ static inline int GetHeaderSize(const Map* map);
+ inline int GetHeaderSize() const;
+
+ static inline int GetEmbedderFieldCount(const Map* map);
+ inline int GetEmbedderFieldCount() const;
+ inline int GetEmbedderFieldOffset(int index);
+ inline Object* GetEmbedderField(int index);
+ inline void SetEmbedderField(int index, Object* value);
+ inline void SetEmbedderField(int index, Smi* value);
+
+ // Returns true when the object is potentially a wrapper that gets special
+ // garbage collection treatment.
+ // TODO(mlippautz): Make check exact and replace the pattern match in
+ // Heap::TracePossibleWrapper.
+ bool IsApiWrapper();
+
+ // Same as IsApiWrapper() but also allow dropping the wrapper on minor GCs.
+ bool IsDroppableApiWrapper();
+
+ // Returns a new map with all transitions dropped from the object's current
+ // map and the ElementsKind set.
+ static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
+ ElementsKind to_kind);
+ static void TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind);
+
+ // Always use this to migrate an object to a new map.
+ // |expected_additional_properties| is only used for fast-to-slow transitions
+ // and ignored otherwise.
+ static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
+ int expected_additional_properties = 0);
+
+ // Forces a prototype without any of the checks that the regular SetPrototype
+ // would do.
+ static void ForceSetPrototype(Handle<JSObject> object, Handle<Object> proto);
+
+ // Converts the object to use the canonical dictionary representation. If
+ // the object is expected to have additional properties added, this number
+ // can be indicated so that the backing store is allocated with an initial
+ // capacity large enough to hold them.
+ static void NormalizeProperties(Handle<JSObject> object,
+ PropertyNormalizationMode mode,
+ int expected_additional_properties,
+ const char* reason);
+
+ // Converts and updates the elements backing store to be a
+ // NumberDictionary. Returns the backing store after conversion.
+ static Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object);
+
+ void RequireSlowElements(NumberDictionary* dictionary);
+
+ // Transform slow named properties to fast variants.
+ static void MigrateSlowToFast(Handle<JSObject> object,
+ int unused_property_fields, const char* reason);
+
+ inline bool IsUnboxedDoubleField(FieldIndex index);
+
+ // Access fast-case object properties at index.
+ static Handle<Object> FastPropertyAt(Handle<JSObject> object,
+ Representation representation,
+ FieldIndex index);
+ inline Object* RawFastPropertyAt(FieldIndex index);
+ inline double RawFastDoublePropertyAt(FieldIndex index);
+ inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index);
+
+ inline void FastPropertyAtPut(FieldIndex index, Object* value);
+ inline void RawFastPropertyAtPut(FieldIndex index, Object* value);
+ inline void RawFastDoublePropertyAsBitsAtPut(FieldIndex index, uint64_t bits);
+ inline void WriteToField(int descriptor, PropertyDetails details,
+ Object* value);
+
+ // Access to in-object properties.
+ inline int GetInObjectPropertyOffset(int index);
+ inline Object* InObjectPropertyAt(int index);
+ inline Object* InObjectPropertyAtPut(
+ int index, Object* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ // Set the object's prototype (only JSReceiver and null are allowed values).
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
+ Handle<JSObject> object, Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw);
+
+ // Makes the object prototype immutable.
+ // Never called from JavaScript.
+ static void SetImmutableProto(Handle<JSObject> object);
+
+ // Initializes the body starting at |start_offset|. It is the responsibility
+ // of the caller to initialize the object header. Fills the pre-allocated
+ // fields with |pre_allocated_value| and the rest with |filler_value|.
+ // Note: this call does not update the write barrier; the caller is
+ // responsible for ensuring that |filler_value| can be collected without a
+ // write barrier here.
+ inline void InitializeBody(Map* map, int start_offset,
+ Object* pre_allocated_value, Object* filler_value);
+
+ // Checks whether this object references another object.
+ bool ReferencesObject(Object* obj);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> TestIntegrityLevel(
+ Handle<JSObject> object, IntegrityLevel lvl);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
+ Handle<JSObject> object, ShouldThrow should_throw);
+
+ static bool IsExtensible(Handle<JSObject> object);
+
+ DECL_CAST(JSObject)
+
+ // Dispatched behavior.
+ void JSObjectShortPrint(StringStream* accumulator);
+ DECL_PRINTER(JSObject)
+ DECL_VERIFIER(JSObject)
+#ifdef OBJECT_PRINT
+ bool PrintProperties(std::ostream& os); // NOLINT
+ void PrintElements(std::ostream& os); // NOLINT
+#endif
+#if defined(DEBUG) || defined(OBJECT_PRINT)
+ void PrintTransitions(std::ostream& os); // NOLINT
+#endif
+
+ static void PrintElementsTransition(FILE* file, Handle<JSObject> object,
+ ElementsKind from_kind,
+ Handle<FixedArrayBase> from_elements,
+ ElementsKind to_kind,
+ Handle<FixedArrayBase> to_elements);
+
+ void PrintInstanceMigration(FILE* file, Map* original_map, Map* new_map);
+
+#ifdef DEBUG
+ // Structure for collecting spill information about JSObjects.
+ class SpillInformation {
+ public:
+ void Clear();
+ void Print();
+ int number_of_objects_;
+ int number_of_objects_with_fast_properties_;
+ int number_of_objects_with_fast_elements_;
+ int number_of_fast_used_fields_;
+ int number_of_fast_unused_fields_;
+ int number_of_slow_used_properties_;
+ int number_of_slow_unused_properties_;
+ int number_of_fast_used_elements_;
+ int number_of_fast_unused_elements_;
+ int number_of_slow_used_elements_;
+ int number_of_slow_unused_elements_;
+ };
+
+ void IncrementSpillStatistics(Isolate* isolate, SpillInformation* info);
+#endif
+
+#ifdef VERIFY_HEAP
+ // If a GC was caused while constructing this object, the elements pointer
+ // may point to a one pointer filler map. The object won't be rooted, but
+ // our heap verification code could stumble across it.
+ bool ElementsAreSafeToExamine() const;
+#endif
+
+ Object* SlowReverseLookup(Object* value);
+
+ // Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
+ // Also maximal value of JSArray's length property.
+ static const uint32_t kMaxElementCount = 0xffffffffu;
+
+ // Constants for heuristics controlling conversion of fast elements
+ // to slow elements.
+
+ // Maximal gap that can be introduced by adding an element beyond
+ // the current elements length.
+ static const uint32_t kMaxGap = 1024;
+
+ // Maximal length of fast elements array that won't be checked for
+ // being dense enough on expansion.
+ static const int kMaxUncheckedFastElementsLength = 5000;
+
+ // Same as above but for old arrays. This limit is more strict. We
+ // don't want to be wasteful with long lived objects.
+ static const int kMaxUncheckedOldFastElementsLength = 500;
+
+ // This constant applies only to the initial map of "global.Object" and
+ // not to arbitrary other JSObject maps.
+ static const int kInitialGlobalObjectUnusedPropertiesCount = 4;
+
+ static const int kMaxInstanceSize = 255 * kPointerSize;
+
+ // When extending the backing storage for property values, we increase
+ // its size by more than the 1 entry necessary, so sequentially adding fields
+ // to the same object requires fewer allocations and copies.
+ static const int kFieldsAdded = 3;
+ STATIC_ASSERT(kMaxNumberOfDescriptors + kFieldsAdded <=
+ PropertyArray::kMaxLength);
+
+ // Layout description.
+ static const int kElementsOffset = JSReceiver::kHeaderSize;
+ static const int kHeaderSize = kElementsOffset + kPointerSize;
+
+ STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
+ static const int kMaxInObjectProperties =
+ (kMaxInstanceSize - kHeaderSize) >> kPointerSizeLog2;
+ STATIC_ASSERT(kMaxInObjectProperties <= kMaxNumberOfDescriptors);
+ // TODO(cbruni): Revisit calculation of the max supported embedder fields.
+ static const int kMaxEmbedderFields =
+ ((1 << kFirstInobjectPropertyOffsetBitCount) - 1 - kHeaderSize) >>
+ kPointerSizeLog2;
+ STATIC_ASSERT(kMaxEmbedderFields <= kMaxInObjectProperties);
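+
+ // For example, on a 64-bit build (kPointerSize == 8): kMaxInstanceSize is
+ // 255 * 8 == 2040 bytes and kHeaderSize is 24 bytes, so
+ // kMaxInObjectProperties is (2040 - 24) >> 3 == 252. Illustrative
+ // arithmetic; the values differ on 32-bit builds.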
+
+ class BodyDescriptor;
+
+ class FastBodyDescriptor;
+
+ // Gets the number of currently used elements.
+ int GetFastElementsUsage();
+
+ static bool AllCanRead(LookupIterator* it);
+ static bool AllCanWrite(LookupIterator* it);
+
+ private:
+ friend class JSReceiver;
+ friend class Object;
+
+ // Used from Object::GetProperty().
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ GetPropertyWithFailedAccessCheck(LookupIterator* it);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithFailedAccessCheck(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
+
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
+ LookupIterator* it, ShouldThrow should_throw);
+
+ bool ReferencesObjectFromElements(FixedArray* elements, ElementsKind kind,
+ Object* object);
+
+ // Helper for fast versions of preventExtensions, seal, and freeze.
+ // attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
+ template <PropertyAttributes attrs>
+ V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensionsWithTransition(
+ Handle<JSObject> object, ShouldThrow should_throw);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
+};
+
+// JSAccessorPropertyDescriptor is just a JSObject with a specific initial
+// map. This initial map adds in-object properties for "get", "set",
+// "enumerable" and "configurable" properties, as assigned by the
+// FromPropertyDescriptor function for regular accessor properties.
+class JSAccessorPropertyDescriptor : public JSObject {
+ public:
+ // Offsets of object fields.
+ static const int kGetOffset = JSObject::kHeaderSize;
+ static const int kSetOffset = kGetOffset + kPointerSize;
+ static const int kEnumerableOffset = kSetOffset + kPointerSize;
+ static const int kConfigurableOffset = kEnumerableOffset + kPointerSize;
+ static const int kSize = kConfigurableOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kGetIndex = 0;
+ static const int kSetIndex = 1;
+ static const int kEnumerableIndex = 2;
+ static const int kConfigurableIndex = 3;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSAccessorPropertyDescriptor);
+};
+
+// JSDataPropertyDescriptor is just a JSObject with a specific initial map.
+// This initial map adds in-object properties for "value", "writable",
+// "enumerable" and "configurable" properties, as assigned by the
+// FromPropertyDescriptor function for regular data properties.
+class JSDataPropertyDescriptor : public JSObject {
+ public:
+ // Offsets of object fields.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kWritableOffset = kValueOffset + kPointerSize;
+ static const int kEnumerableOffset = kWritableOffset + kPointerSize;
+ static const int kConfigurableOffset = kEnumerableOffset + kPointerSize;
+ static const int kSize = kConfigurableOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kValueIndex = 0;
+ static const int kWritableIndex = 1;
+ static const int kEnumerableIndex = 2;
+ static const int kConfigurableIndex = 3;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataPropertyDescriptor);
+};
+
+// JSIteratorResult is just a JSObject with a specific initial map.
+// This initial map adds in-object properties for "done" and "value",
+// as specified by ES6 section 25.1.1.3 The IteratorResult Interface
+class JSIteratorResult : public JSObject {
+ public:
+ DECL_ACCESSORS(value, Object)
+
+ DECL_ACCESSORS(done, Object)
+
+ // Offsets of object fields.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kDoneOffset = kValueOffset + kPointerSize;
+ static const int kSize = kDoneOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kValueIndex = 0;
+ static const int kDoneIndex = 1;
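+
+ // Illustrative sketch (comments only; |result| is hypothetical): such an
+ // object is what an iterator's next() call returns, with "value" and
+ // "done" stored in the in-object slots declared above:
+ //
+ //   Handle<JSIteratorResult> result = ...;
+ //   Object* v = result->value();
+ //   Object* d = result->done();  // The true or false heap object.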
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSIteratorResult);
+};
+
+// JSBoundFunction describes a bound function exotic object.
+class JSBoundFunction : public JSObject {
+ public:
+ // [bound_target_function]: The wrapped function object.
+ inline Object* raw_bound_target_function() const;
+ DECL_ACCESSORS(bound_target_function, JSReceiver)
+
+ // [bound_this]: The value that is always passed as the this value when
+ // calling the wrapped function.
+ DECL_ACCESSORS(bound_this, Object)
+
+ // [bound_arguments]: A list of values whose elements are used as the first
+ // arguments to any call to the wrapped function.
+ DECL_ACCESSORS(bound_arguments, FixedArray)
+
+ static MaybeHandle<String> GetName(Isolate* isolate,
+ Handle<JSBoundFunction> function);
+ static Maybe<int> GetLength(Isolate* isolate,
+ Handle<JSBoundFunction> function);
+ static MaybeHandle<Context> GetFunctionRealm(
+ Handle<JSBoundFunction> function);
+
+ DECL_CAST(JSBoundFunction)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSBoundFunction)
+ DECL_VERIFIER(JSBoundFunction)
+
+ // The bound function's string representation implemented according
+ // to ES6 section 19.2.3.5 Function.prototype.toString ( ).
+ static Handle<String> ToString(Handle<JSBoundFunction> function);
+
+ // Layout description.
+ static const int kBoundTargetFunctionOffset = JSObject::kHeaderSize;
+ static const int kBoundThisOffset = kBoundTargetFunctionOffset + kPointerSize;
+ static const int kBoundArgumentsOffset = kBoundThisOffset + kPointerSize;
+ static const int kSize = kBoundArgumentsOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSBoundFunction);
+};
+
+// JSFunction describes JavaScript functions.
+class JSFunction : public JSObject {
+ public:
+ // [prototype_or_initial_map]:
+ DECL_ACCESSORS(prototype_or_initial_map, Object)
+
+ // [shared]: The information about the function that
+ // can be shared by instances.
+ DECL_ACCESSORS(shared, SharedFunctionInfo)
+
+ static const int kLengthDescriptorIndex = 0;
+ static const int kNameDescriptorIndex = 1;
+ // Home object descriptor index when function has a [[HomeObject]] slot.
+ static const int kMaybeHomeObjectDescriptorIndex = 2;
+
+ // [context]: The context for this function.
+ inline Context* context();
+ inline bool has_context() const;
+ inline void set_context(Object* context);
+ inline JSGlobalProxy* global_proxy();
+ inline Context* native_context();
+
+ static Handle<Object> GetName(Isolate* isolate, Handle<JSFunction> function);
+ static Maybe<int> GetLength(Isolate* isolate, Handle<JSFunction> function);
+ static Handle<Context> GetFunctionRealm(Handle<JSFunction> function);
+
+ // [code]: The generated code object for this function. Executed
+ // when the function is invoked, e.g. foo() or new foo(). See
+ // [[Call]] and [[Construct]] description in ECMA-262, section
+ // 8.6.2, page 27.
+ inline Code* code();
+ inline void set_code(Code* code);
+ inline void set_code_no_write_barrier(Code* code);
+
+ // Get the abstract code associated with the function, which will either be
+ // a Code object or a BytecodeArray.
+ inline AbstractCode* abstract_code();
+
+ // Tells whether or not this function is interpreted.
+ //
+ // Note: function->IsInterpreted() does not necessarily return the same value
+ // as function->shared()->IsInterpreted() because the closure might have been
+ // optimized.
+ inline bool IsInterpreted();
+
+ // Tells whether or not this function checks its optimization marker in its
+ // feedback vector.
+ inline bool ChecksOptimizationMarker();
+
+ // Tells whether or not this function holds optimized code.
+ //
+ // Note: Returning false does not necessarily mean that this function hasn't
+ // been optimized, as it may have optimized code on its feedback vector.
+ inline bool IsOptimized();
+
+ // Tells whether or not this function has optimized code available to it,
+ // either because it is optimized or because it has optimized code in its
+ // feedback vector.
+ inline bool HasOptimizedCode();
+
+ // Tells whether or not this function has a (non-zero) optimization marker.
+ inline bool HasOptimizationMarker();
+
+ // Mark this function for lazy recompilation. The function will be recompiled
+ // the next time it is executed.
+ void MarkForOptimization(ConcurrencyMode mode);
+
+ // Tells whether or not the function is already marked for lazy recompilation.
+ inline bool IsMarkedForOptimization();
+ inline bool IsMarkedForConcurrentOptimization();
+
+ // Tells whether or not the function is on the concurrent recompilation queue.
+ inline bool IsInOptimizationQueue();
+
+ // Clears the optimized code slot in the function's feedback vector.
+ inline void ClearOptimizedCodeSlot(const char* reason);
+
+ // Sets the optimization marker in the function's feedback vector.
+ inline void SetOptimizationMarker(OptimizationMarker marker);
+
+ // Clears the optimization marker in the function's feedback vector.
+ inline void ClearOptimizationMarker();
+
+ // If slack tracking is active, it computes the instance size of the initial
+ // map with the minimum permissible object slack. If it is not active, it
+ // simply returns the initial map's instance size.
+ int ComputeInstanceSizeWithMinSlack(Isolate* isolate);
+
+ // Completes inobject slack tracking on initial map if it is active.
+ inline void CompleteInobjectSlackTrackingIfActive();
+
+ // [feedback_cell]: The FeedbackCell used to hold the FeedbackVector
+ // eventually.
+ DECL_ACCESSORS(feedback_cell, FeedbackCell)
+
+ // feedback_vector() can be used once the function is compiled.
+ inline FeedbackVector* feedback_vector() const;
+ inline bool has_feedback_vector() const;
+ static void EnsureFeedbackVector(Handle<JSFunction> function);
+
+ // Unconditionally clear the type feedback vector.
+ void ClearTypeFeedbackInfo();
+
+ inline bool has_prototype_slot() const;
+
+ // The initial map for an object created by this constructor.
+ inline Map* initial_map();
+ static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
+ Handle<Object> prototype);
+ inline bool has_initial_map();
+ static void EnsureHasInitialMap(Handle<JSFunction> function);
+
+ // Creates a map that matches the constructor's initial map, but with
+ // [[prototype]] being new.target.prototype. Because new.target can be a
+ // JSProxy, this can call back into JavaScript.
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Map> GetDerivedMap(
+ Isolate* isolate, Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target);
+
+ // Get and set the prototype property on a JSFunction. If the
+ // function has an initial map the prototype is set on the initial
+ // map. Otherwise, the prototype is put in the initial map field
+ // until an initial map is needed.
+ inline bool has_prototype();
+ inline bool has_instance_prototype();
+ inline Object* prototype();
+ inline Object* instance_prototype();
+ inline bool has_prototype_property();
+ inline bool PrototypeRequiresRuntimeLookup();
+ static void SetPrototype(Handle<JSFunction> function, Handle<Object> value);
+
+ // Returns whether this function has been compiled to native code yet.
+ inline bool is_compiled();
+
+ static int GetHeaderSize(bool function_has_prototype_slot) {
+ return function_has_prototype_slot ? JSFunction::kSizeWithPrototype
+ : JSFunction::kSizeWithoutPrototype;
+ }
+
+ // Prints the name of the function using PrintF.
+ void PrintName(FILE* out = stdout);
+
+ DECL_CAST(JSFunction)
+
+ // Calculate the instance size and in-object properties count.
+ static bool CalculateInstanceSizeForDerivedClass(
+ Handle<JSFunction> function, InstanceType instance_type,
+ int requested_embedder_fields, int* instance_size,
+ int* in_object_properties);
+ static void CalculateInstanceSizeHelper(InstanceType instance_type,
+ bool has_prototype_slot,
+ int requested_embedder_fields,
+ int requested_in_object_properties,
+ int* instance_size,
+ int* in_object_properties);
+
+ class BodyDescriptor;
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSFunction)
+ DECL_VERIFIER(JSFunction)
+
+ // The function's name if it is configured, otherwise the shared function
+ // info's debug name.
+ static Handle<String> GetName(Handle<JSFunction> function);
+
+ // ES6 section 9.2.11 SetFunctionName
+ // Because of the way this abstract operation is used in the spec,
+ // it should never fail, but in practice it will fail if the generated
+ // function name's length exceeds String::kMaxLength.
+ static V8_WARN_UNUSED_RESULT bool SetName(Handle<JSFunction> function,
+ Handle<Name> name,
+ Handle<String> prefix);
+
+ // The function's displayName if it is set, otherwise its name if it is
+ // configured, otherwise the shared function info's debug name.
+ static Handle<String> GetDebugName(Handle<JSFunction> function);
+
+ // The function's string representation implemented according to
+ // ES6 section 19.2.3.5 Function.prototype.toString ( ).
+ static Handle<String> ToString(Handle<JSFunction> function);
+
+// Layout description.
+#define JS_FUNCTION_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kSharedFunctionInfoOffset, kPointerSize) \
+ V(kContextOffset, kPointerSize) \
+ V(kFeedbackCellOffset, kPointerSize) \
+ V(kEndOfStrongFieldsOffset, 0) \
+ V(kCodeOffset, kPointerSize) \
+ /* Size of JSFunction object without prototype field. */ \
+ V(kSizeWithoutPrototype, 0) \
+ V(kPrototypeOrInitialMapOffset, kPointerSize) \
+ /* Size of JSFunction object with prototype field. */ \
+ V(kSizeWithPrototype, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_FUNCTION_FIELDS)
+#undef JS_FUNCTION_FIELDS
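+
+ // For example, on a 64-bit build the field list above places four pointer
+ // fields (shared function info, context, feedback cell, code) after
+ // JSObject::kHeaderSize (24 bytes), giving kSizeWithoutPrototype == 56 and,
+ // with the optional prototype slot, kSizeWithPrototype == 64. Illustrative
+ // arithmetic only.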
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
+};
+
+// JSGlobalProxy's prototype must be a JSGlobalObject or null,
+// and the prototype is hidden. JSGlobalProxy always delegates
+// property accesses to its prototype if the prototype is not null.
+//
+// A JSGlobalProxy can be reinitialized which will preserve its identity.
+//
+// Accessing a JSGlobalProxy requires a security check.
+
+class JSGlobalProxy : public JSObject {
+ public:
+ // [native_context]: the owning native context of this global proxy object.
+ // It is the null value if this object is not used by any context.
+ DECL_ACCESSORS(native_context, Object)
+
+ DECL_CAST(JSGlobalProxy)
+
+ inline bool IsDetachedFrom(JSGlobalObject* global) const;
+
+ static int SizeWithEmbedderFields(int embedder_field_count);
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSGlobalProxy)
+ DECL_VERIFIER(JSGlobalProxy)
+
+ // Layout description.
+ static const int kNativeContextOffset = JSObject::kHeaderSize;
+ static const int kSize = kNativeContextOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
+};
+
+// JavaScript global object.
+class JSGlobalObject : public JSObject {
+ public:
+ // [native_context]: the native context corresponding to this global object.
+ DECL_ACCESSORS(native_context, Context)
+
+ // [global_proxy]: the global proxy object of the context.
+ DECL_ACCESSORS(global_proxy, JSObject)
+
+ // Gets global object properties.
+ inline GlobalDictionary* global_dictionary();
+ inline void set_global_dictionary(GlobalDictionary* dictionary);
+
+ static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
+ Handle<Name> name);
+ // Ensure that the global object has a cell for the given property name.
+ static Handle<PropertyCell> EnsureEmptyPropertyCell(
+ Handle<JSGlobalObject> global, Handle<Name> name,
+ PropertyCellType cell_type, int* entry_out = nullptr);
+
+ DECL_CAST(JSGlobalObject)
+
+ inline bool IsDetached();
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSGlobalObject)
+ DECL_VERIFIER(JSGlobalObject)
+
+ // Layout description.
+ static const int kNativeContextOffset = JSObject::kHeaderSize;
+ static const int kGlobalProxyOffset = kNativeContextOffset + kPointerSize;
+ static const int kHeaderSize = kGlobalProxyOffset + kPointerSize;
+ static const int kSize = kHeaderSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalObject);
+};
+
+// Representation for JS Wrapper objects, String, Number, Boolean, etc.
+class JSValue : public JSObject {
+ public:
+ // [value]: the object being wrapped.
+ DECL_ACCESSORS(value, Object)
+
+ DECL_CAST(JSValue)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSValue)
+ DECL_VERIFIER(JSValue)
+
+ // Layout description.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSValue);
+};
+
+class DateCache;
+
+// Representation for JS date objects.
+class JSDate : public JSObject {
+ public:
+ static V8_WARN_UNUSED_RESULT MaybeHandle<JSDate> New(
+ Handle<JSFunction> constructor, Handle<JSReceiver> new_target, double tv);
+
+ // If one component is NaN, all of them are, indicating a NaN time value.
+ // [value]: the time value.
+ DECL_ACCESSORS(value, Object)
+ // [year]: caches year. Either undefined, smi, or NaN.
+ DECL_ACCESSORS(year, Object)
+ // [month]: caches month. Either undefined, smi, or NaN.
+ DECL_ACCESSORS(month, Object)
+ // [day]: caches day. Either undefined, smi, or NaN.
+ DECL_ACCESSORS(day, Object)
+ // [weekday]: caches day of week. Either undefined, smi, or NaN.
+ DECL_ACCESSORS(weekday, Object)
+ // [hour]: caches hours. Either undefined, smi, or NaN.
+ DECL_ACCESSORS(hour, Object)
+ // [min]: caches minutes. Either undefined, smi, or NaN.
+ DECL_ACCESSORS(min, Object)
+ // [sec]: caches seconds. Either undefined, smi, or NaN.
+ DECL_ACCESSORS(sec, Object)
+ // [cache_stamp]: sample of the date cache stamp at the
+ // moment when the cached fields were cached.
+ DECL_ACCESSORS(cache_stamp, Object)
+
+ DECL_CAST(JSDate)
+
+ // Returns the time value (UTC) identifying the current time.
+ static double CurrentTimeValue(Isolate* isolate);
+
+ // Returns the date field with the specified index.
+ // See FieldIndex for the list of date fields.
+ static Object* GetField(Object* date, Smi* index);
+
+ static Handle<Object> SetValue(Handle<JSDate> date, double v);
+
+ void SetValue(Object* value, bool is_value_nan);
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSDate)
+ DECL_VERIFIER(JSDate)
+
+ // The order is important. It must be kept in sync with date macros
+ // in macros.py.
+ enum FieldIndex {
+ kDateValue,
+ kYear,
+ kMonth,
+ kDay,
+ kWeekday,
+ kHour,
+ kMinute,
+ kSecond,
+ kFirstUncachedField,
+ kMillisecond = kFirstUncachedField,
+ kDays,
+ kTimeInDay,
+ kFirstUTCField,
+ kYearUTC = kFirstUTCField,
+ kMonthUTC,
+ kDayUTC,
+ kWeekdayUTC,
+ kHourUTC,
+ kMinuteUTC,
+ kSecondUTC,
+ kMillisecondUTC,
+ kDaysUTC,
+ kTimeInDayUTC,
+ kTimezoneOffset
+ };
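+
+ // Illustrative sketch (comments only; |date| is hypothetical): reading a
+ // (possibly cached) component through the enum above:
+ //
+ //   Object* year = JSDate::GetField(date, Smi::FromInt(JSDate::kYear));
+ //
+ // Fields at or beyond kFirstUncachedField are always recomputed.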
+
+ // Layout description.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kYearOffset = kValueOffset + kPointerSize;
+ static const int kMonthOffset = kYearOffset + kPointerSize;
+ static const int kDayOffset = kMonthOffset + kPointerSize;
+ static const int kWeekdayOffset = kDayOffset + kPointerSize;
+ static const int kHourOffset = kWeekdayOffset + kPointerSize;
+ static const int kMinOffset = kHourOffset + kPointerSize;
+ static const int kSecOffset = kMinOffset + kPointerSize;
+ static const int kCacheStampOffset = kSecOffset + kPointerSize;
+ static const int kSize = kCacheStampOffset + kPointerSize;
+
+ private:
+ inline Object* DoGetField(FieldIndex index);
+
+ Object* GetUTCField(FieldIndex index, double value, DateCache* date_cache);
+
+ // Computes and caches the cacheable fields of the date.
+ inline void SetCachedFields(int64_t local_time_ms, DateCache* date_cache);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSDate);
+};
+
+// Representation of message objects used for error reporting through
+// the API. The messages are formatted in JavaScript so this object is
+// a real JavaScript object. The information used for formatting the
+// error messages are not directly accessible from JavaScript to
+// prevent leaking information to user code called during error
+// formatting.
+class JSMessageObject : public JSObject {
+ public:
+ // [type]: the type of error message.
+ inline int type() const;
+ inline void set_type(int value);
+
+ // [arguments]: the arguments for formatting the error message.
+ DECL_ACCESSORS(argument, Object)
+
+ // [script]: the script from which the error message originated.
+ DECL_ACCESSORS(script, Script)
+
+ // [stack_frames]: an array of stack frames for this error object.
+ DECL_ACCESSORS(stack_frames, Object)
+
+ // [start_position]: the start position in the script for the error message.
+ inline int start_position() const;
+ inline void set_start_position(int value);
+
+ // [end_position]: the end position in the script for the error message.
+ inline int end_position() const;
+ inline void set_end_position(int value);
+
+ // Returns the line number for the error message (1-based), or
+ // Message::kNoLineNumberInfo if the line cannot be determined.
+ int GetLineNumber() const;
+
+ // Returns the offset of the given position within the containing line.
+ int GetColumnNumber() const;
+
+ // Returns the source code line containing the given source
+ // position, or the empty string if the position is invalid.
+ Handle<String> GetSourceLine() const;
+
+ inline int error_level() const;
+ inline void set_error_level(int level);
+
+ DECL_CAST(JSMessageObject)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSMessageObject)
+ DECL_VERIFIER(JSMessageObject)
+
+ // Layout description.
+ static const int kTypeOffset = JSObject::kHeaderSize;
+ static const int kArgumentsOffset = kTypeOffset + kPointerSize;
+ static const int kScriptOffset = kArgumentsOffset + kPointerSize;
+ static const int kStackFramesOffset = kScriptOffset + kPointerSize;
+ static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
+ static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
+ static const int kErrorLevelOffset = kEndPositionOffset + kPointerSize;
+ static const int kSize = kErrorLevelOffset + kPointerSize;
+
+ typedef FixedBodyDescriptor<HeapObject::kMapOffset,
+ kStackFramesOffset + kPointerSize, kSize>
+ BodyDescriptor;
+};
+
+// The [Async-from-Sync Iterator] object
+// (proposal-async-iteration/#sec-async-from-sync-iterator-objects)
+// An object which wraps an ordinary Iterator and converts it to behave
+// according to the Async Iterator protocol.
+// (See https://tc39.github.io/proposal-async-iteration/#sec-iteration)
+class JSAsyncFromSyncIterator : public JSObject {
+ public:
+ DECL_CAST(JSAsyncFromSyncIterator)
+ DECL_PRINTER(JSAsyncFromSyncIterator)
+ DECL_VERIFIER(JSAsyncFromSyncIterator)
+
+ // Async-from-Sync Iterator instances are ordinary objects that inherit
+ // properties from the %AsyncFromSyncIteratorPrototype% intrinsic object.
+ // Async-from-Sync Iterator instances are initially created with the internal
+ // slots listed in Table 4.
+ // (proposal-async-iteration/#table-async-from-sync-iterator-internal-slots)
+ DECL_ACCESSORS(sync_iterator, JSReceiver)
+
+ // The "next" method is loaded during GetIterator, and is not reloaded for
+ // subsequent "next" invocations.
+ DECL_ACCESSORS(next, Object)
+
+ // Offsets of object fields.
+ static const int kSyncIteratorOffset = JSObject::kHeaderSize;
+ static const int kNextOffset = kSyncIteratorOffset + kPointerSize;
+ static const int kSize = kNextOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncFromSyncIterator);
+};
+
+class JSStringIterator : public JSObject {
+ public:
+ // Dispatched behavior.
+ DECL_PRINTER(JSStringIterator)
+ DECL_VERIFIER(JSStringIterator)
+
+ DECL_CAST(JSStringIterator)
+
+ // [string]: the [[IteratedString]] inobject property.
+ DECL_ACCESSORS(string, String)
+
+ // [index]: The [[StringIteratorNextIndex]] inobject property.
+ inline int index() const;
+ inline void set_index(int value);
+
+ static const int kStringOffset = JSObject::kHeaderSize;
+ static const int kNextIndexOffset = kStringOffset + kPointerSize;
+ static const int kSize = kNextIndexOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_OBJECTS_H_
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index 07cc62a41e..f76692c501 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -85,7 +85,7 @@ void InitializeICUPluralRules(
} // namespace
// static
-MaybeHandle<JSPluralRules> JSPluralRules::InitializePluralRules(
+MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
Isolate* isolate, Handle<JSPluralRules> plural_rules,
Handle<Object> locales, Handle<Object> options_obj) {
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
@@ -190,8 +190,7 @@ MaybeHandle<JSPluralRules> JSPluralRules::InitializePluralRules(
}
MaybeHandle<String> JSPluralRules::ResolvePlural(
- Isolate* isolate, Handle<JSPluralRules> plural_rules,
- Handle<Object> number) {
+ Isolate* isolate, Handle<JSPluralRules> plural_rules, double number) {
icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules()->raw();
CHECK_NOT_NULL(icu_plural_rules);
@@ -207,7 +206,7 @@ MaybeHandle<String> JSPluralRules::ResolvePlural(
// this step, then switch to that API. Bug thread:
// http://bugs.icu-project.org/trac/ticket/12763
icu::UnicodeString rounded_string;
- icu_decimal_format->format(number->Number(), rounded_string);
+ icu_decimal_format->format(number, rounded_string);
icu::Formattable formattable;
UErrorCode status = U_ZERO_ERROR;
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index 9d5da795ab..f262457acb 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -18,12 +18,16 @@
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
+namespace U_ICU_NAMESPACE {
+class PluralRules;
+} // namespace U_ICU_NAMESPACE
+
namespace v8 {
namespace internal {
class JSPluralRules : public JSObject {
public:
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSPluralRules> InitializePluralRules(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSPluralRules> Initialize(
Isolate* isolate, Handle<JSPluralRules> plural_rules,
Handle<Object> locales, Handle<Object> options);
@@ -31,8 +35,7 @@ class JSPluralRules : public JSObject {
Handle<JSPluralRules> plural_rules);
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ResolvePlural(
- Isolate* isolate, Handle<JSPluralRules> plural_rules,
- Handle<Object> number);
+ Isolate* isolate, Handle<JSPluralRules> plural_rules, double number);
DECL_CAST(JSPluralRules)
DECL_PRINTER(JSPluralRules)
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index c52e19ce49..b395ac9b6d 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_JS_PROMISE_H_
#define V8_OBJECTS_JS_PROMISE_H_
-#include "src/objects.h"
+#include "src/objects/js-objects.h"
#include "src/objects/promise.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index 45e27473fe..2a7c518be4 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_JS_PROXY_H_
#define V8_OBJECTS_JS_PROXY_H_
-#include "src/objects.h"
+#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -118,8 +118,6 @@ class JSProxy : public JSReceiver {
typedef FixedBodyDescriptor<JSReceiver::kPropertiesOrHashOffset, kSize, kSize>
BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
static Maybe<bool> SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Symbol> private_name,
diff --git a/deps/v8/src/objects/js-regexp-string-iterator.h b/deps/v8/src/objects/js-regexp-string-iterator.h
index 9821e33efb..9ad2851c7a 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_H_
#define V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_H_
-#include "src/objects.h"
+#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 6dc984e252..a4ee3ee7f3 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -20,7 +20,8 @@ namespace internal {
// Base relative time format accessors.
ACCESSORS(JSRelativeTimeFormat, locale, String, kLocaleOffset)
-ACCESSORS(JSRelativeTimeFormat, formatter, Foreign, kFormatterOffset)
+ACCESSORS(JSRelativeTimeFormat, icu_formatter,
+ Managed<icu::RelativeDateTimeFormatter>, kICUFormatterOffset)
SMI_ACCESSORS(JSRelativeTimeFormat, flags, kFlagsOffset)
// TODO(ftang): Use bit field accessor for style and numeric later.
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 56130f7311..b3aa996d64 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -17,9 +17,9 @@
#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-relative-time-format-inl.h"
-#include "src/objects/managed.h"
#include "unicode/numfmt.h"
#include "unicode/reldatefmt.h"
+#include "unicode/uvernum.h" // for U_ICU_VERSION_MAJOR_NUM
namespace v8 {
namespace internal {
@@ -54,8 +54,7 @@ JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::getNumeric(
UNREACHABLE();
}
-MaybeHandle<JSRelativeTimeFormat>
-JSRelativeTimeFormat::InitializeRelativeTimeFormat(
+MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
Isolate* isolate, Handle<JSRelativeTimeFormat> relative_time_format_holder,
Handle<Object> input_locales, Handle<Object> input_options) {
Factory* factory = isolate->factory();
@@ -161,7 +160,7 @@ JSRelativeTimeFormat::InitializeRelativeTimeFormat(
icu_formatter);
// 30. Set relativeTimeFormat.[[InitializedRelativeTimeFormat]] to true.
- relative_time_format_holder->set_formatter(*managed_formatter);
+ relative_time_format_holder->set_icu_formatter(*managed_formatter);
// 31. Return relativeTimeFormat.
return relative_time_format_holder;
}
@@ -180,12 +179,6 @@ Handle<JSObject> JSRelativeTimeFormat::ResolvedOptions(
return result;
}
-icu::RelativeDateTimeFormatter* JSRelativeTimeFormat::UnpackFormatter(
- Handle<JSRelativeTimeFormat> holder) {
- return Managed<icu::RelativeDateTimeFormatter>::cast(holder->formatter())
- ->raw();
-}
-
Handle<String> JSRelativeTimeFormat::StyleAsString() const {
switch (style()) {
case Style::LONG:
@@ -210,5 +203,212 @@ Handle<String> JSRelativeTimeFormat::NumericAsString() const {
}
}
+namespace {
+
+Handle<String> UnitAsString(Isolate* isolate, URelativeDateTimeUnit unit_enum) {
+ Factory* factory = isolate->factory();
+ switch (unit_enum) {
+ case UDAT_REL_UNIT_SECOND:
+ return factory->second_string();
+ case UDAT_REL_UNIT_MINUTE:
+ return factory->minute_string();
+ case UDAT_REL_UNIT_HOUR:
+ return factory->hour_string();
+ case UDAT_REL_UNIT_DAY:
+ return factory->day_string();
+ case UDAT_REL_UNIT_WEEK:
+ return factory->week_string();
+ case UDAT_REL_UNIT_MONTH:
+ return factory->month_string();
+ case UDAT_REL_UNIT_QUARTER:
+ return factory->quarter_string();
+ case UDAT_REL_UNIT_YEAR:
+ return factory->year_string();
+ default:
+ UNREACHABLE();
+ }
+}
+
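+// Splits |formatted| into formatToParts()-style records by locating
+// |integer_part| inside it. For example (en locale, illustrative values),
+// formatted "in 3 days" with integer_part "3" produces:
+//   [{type: "literal", value: "in "},
+//    {type: "integer", value: "3", unit: "day"},
+//    {type: "literal", value: " days"}]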
+MaybeHandle<JSArray> GenerateRelativeTimeFormatParts(
+ Isolate* isolate, const icu::UnicodeString& formatted,
+ const icu::UnicodeString& integer_part, URelativeDateTimeUnit unit_enum) {
+ Factory* factory = isolate->factory();
+ Handle<JSArray> array = factory->NewJSArray(0);
+ int32_t found = formatted.indexOf(integer_part);
+
+ Handle<String> substring;
+ if (found < 0) {
+    // Could not find integer_part in the formatted string.
+ // Return [{'type': 'literal', 'value': formatted}]
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
+ Intl::ToString(isolate, formatted), JSArray);
+ Intl::AddElement(isolate, array,
+ 0, // index
+ factory->literal_string(), // field_type_string
+ substring);
+ } else {
+ // Found the formatted integer in the result.
+ int index = 0;
+
+ // array.push({
+ // 'type': 'literal',
+ // 'value': formatted.substring(0, found)})
+ if (found > 0) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
+ Intl::ToString(isolate, formatted, 0, found),
+ JSArray);
+ Intl::AddElement(isolate, array, index++,
+ factory->literal_string(), // field_type_string
+ substring);
+ }
+
+ // array.push({
+ // 'type': 'integer',
+ // 'value': formatted.substring(found, found + integer_part.length),
+ // 'unit': unit})
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
+ Intl::ToString(isolate, formatted, found,
+ found + integer_part.length()),
+ JSArray);
+ Handle<String> unit = UnitAsString(isolate, unit_enum);
+ Intl::AddElement(isolate, array, index++,
+ factory->integer_string(), // field_type_string
+ substring, factory->unit_string(), unit);
+
+ // array.push({
+ // 'type': 'literal',
+ // 'value': formatted.substring(
+ // found + integer_part.length, formatted.length)})
+ if (found + integer_part.length() < formatted.length()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, found + integer_part.length(),
+ formatted.length()),
+ JSArray);
+ Intl::AddElement(isolate, array, index,
+ factory->literal_string(), // field_type_string
+ substring);
+ }
+ }
+ return array;
+}
+
+bool GetURelativeDateTimeUnit(Handle<String> unit,
+ URelativeDateTimeUnit* unit_enum) {
+ std::unique_ptr<char[]> unit_str = unit->ToCString();
+ if ((strcmp("second", unit_str.get()) == 0) ||
+ (strcmp("seconds", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_SECOND;
+ } else if ((strcmp("minute", unit_str.get()) == 0) ||
+ (strcmp("minutes", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_MINUTE;
+ } else if ((strcmp("hour", unit_str.get()) == 0) ||
+ (strcmp("hours", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_HOUR;
+ } else if ((strcmp("day", unit_str.get()) == 0) ||
+ (strcmp("days", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_DAY;
+ } else if ((strcmp("week", unit_str.get()) == 0) ||
+ (strcmp("weeks", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_WEEK;
+ } else if ((strcmp("month", unit_str.get()) == 0) ||
+ (strcmp("months", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_MONTH;
+ } else if ((strcmp("quarter", unit_str.get()) == 0) ||
+ (strcmp("quarters", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_QUARTER;
+ } else if ((strcmp("year", unit_str.get()) == 0) ||
+ (strcmp("years", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_YEAR;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+MaybeHandle<Object> JSRelativeTimeFormat::Format(
+ Isolate* isolate, Handle<Object> value_obj, Handle<Object> unit_obj,
+ Handle<JSRelativeTimeFormat> format_holder, const char* func_name,
+ bool to_parts) {
+ Factory* factory = isolate->factory();
+
+ // 3. Let value be ? ToNumber(value).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+ Object::ToNumber(isolate, value_obj), Object);
+ double number = value->Number();
+ // 4. Let unit be ? ToString(unit).
+ Handle<String> unit;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, unit, Object::ToString(isolate, unit_obj),
+ Object);
+
+ // 4. If isFinite(value) is false, then throw a RangeError exception.
+ if (!std::isfinite(number)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kNotFiniteNumber,
+ isolate->factory()->NewStringFromAsciiChecked(func_name)),
+ Object);
+ }
+
+ icu::RelativeDateTimeFormatter* formatter =
+ format_holder->icu_formatter()->raw();
+ CHECK_NOT_NULL(formatter);
+
+ URelativeDateTimeUnit unit_enum;
+ if (!GetURelativeDateTimeUnit(unit, &unit_enum)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidUnit,
+ isolate->factory()->NewStringFromAsciiChecked(func_name),
+ unit),
+ Object);
+ }
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString formatted;
+
+#if USE_CHROMIUM_ICU != 1 && U_ICU_VERSION_MAJOR_NUM < 63
+  if (unit_enum != UDAT_REL_UNIT_QUARTER) {  // ICU versions before 63 do not
+                                             // implement UDAT_REL_UNIT_QUARTER.
+#endif // USE_CHROMIUM_ICU != 1 && U_ICU_VERSION_MAJOR_NUM < 63
+ if (format_holder->numeric() == JSRelativeTimeFormat::Numeric::ALWAYS) {
+ formatter->formatNumeric(number, unit_enum, formatted, status);
+ } else {
+ DCHECK_EQ(JSRelativeTimeFormat::Numeric::AUTO, format_holder->numeric());
+ formatter->format(number, unit_enum, formatted, status);
+ }
+#if USE_CHROMIUM_ICU != 1 && U_ICU_VERSION_MAJOR_NUM < 63
+ }
+#endif // USE_CHROMIUM_ICU != 1 && U_ICU_VERSION_MAJOR_NUM < 63
+
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object);
+ }
+
+ if (to_parts) {
+ icu::UnicodeString integer;
+ icu::FieldPosition pos;
+ formatter->getNumberFormat().format(std::abs(number), integer, pos, status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError),
+ Object);
+ }
+
+ Handle<JSArray> elements;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, elements,
+ GenerateRelativeTimeFormatParts(isolate, formatted, integer, unit_enum),
+ Object);
+ return elements;
+ }
+
+ return factory->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(formatted.getBuffer()),
+ formatted.length()));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 397c6fe287..eaaeb0e05f 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -12,6 +12,7 @@
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/objects/managed.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -28,26 +29,30 @@ class JSRelativeTimeFormat : public JSObject {
public:
// Initializes relative time format object with properties derived from input
// locales and options.
- static MaybeHandle<JSRelativeTimeFormat> InitializeRelativeTimeFormat(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSRelativeTimeFormat> Initialize(
Isolate* isolate,
Handle<JSRelativeTimeFormat> relative_time_format_holder,
Handle<Object> locales, Handle<Object> options);
- static Handle<JSObject> ResolvedOptions(
+ V8_WARN_UNUSED_RESULT static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSRelativeTimeFormat> format_holder);
- // Unpacks formatter object from corresponding JavaScript object.
- static icu::RelativeDateTimeFormatter* UnpackFormatter(
- Handle<JSRelativeTimeFormat> relative_time_format_holder);
Handle<String> StyleAsString() const;
Handle<String> NumericAsString() const;
+ // ecma402/#sec-Intl.RelativeTimeFormat.prototype.format
+ // ecma402/#sec-Intl.RelativeTimeFormat.prototype.formatToParts
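+  // Formats |value_obj| in the given |unit| relative to now; when to_parts is
+  // true the formatToParts() array is returned instead of a string (e.g.
+  // format(-1, "day") yields "1 day ago" in the en locale).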
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Format(
+ Isolate* isolate, Handle<Object> value_obj, Handle<Object> unit_obj,
+ Handle<JSRelativeTimeFormat> format_holder, const char* func_name,
+ bool to_parts);
+
DECL_CAST(JSRelativeTimeFormat)
// RelativeTimeFormat accessors.
DECL_ACCESSORS(locale, String)
- DECL_ACCESSORS(formatter, Foreign)
+ DECL_ACCESSORS(icu_formatter, Managed<icu::RelativeDateTimeFormatter>)
// Style: identifying the relative time format style used.
//
@@ -98,8 +103,8 @@ class JSRelativeTimeFormat : public JSObject {
// Layout description.
static const int kJSRelativeTimeFormatOffset = JSObject::kHeaderSize;
static const int kLocaleOffset = kJSRelativeTimeFormatOffset + kPointerSize;
- static const int kFormatterOffset = kLocaleOffset + kPointerSize;
- static const int kFlagsOffset = kFormatterOffset + kPointerSize;
+ static const int kICUFormatterOffset = kLocaleOffset + kPointerSize;
+ static const int kFlagsOffset = kICUFormatterOffset + kPointerSize;
static const int kSize = kFlagsOffset + kPointerSize;
private:
diff --git a/deps/v8/src/objects/js-segmenter-inl.h b/deps/v8/src/objects/js-segmenter-inl.h
new file mode 100644
index 0000000000..1aac2b1d63
--- /dev/null
+++ b/deps/v8/src/objects/js-segmenter-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_SEGMENTER_INL_H_
+#define V8_OBJECTS_JS_SEGMENTER_INL_H_
+
+#include "src/objects-inl.h"
+#include "src/objects/js-segmenter.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Base segmenter accessors.
+ACCESSORS(JSSegmenter, locale, String, kLocaleOffset)
+ACCESSORS(JSSegmenter, icu_break_iterator, Managed<icu::BreakIterator>,
+ kICUBreakIteratorOffset)
+SMI_ACCESSORS(JSSegmenter, flags, kFlagsOffset)
+
+inline void JSSegmenter::set_line_break_style(LineBreakStyle line_break_style) {
+ DCHECK_GT(LineBreakStyle::COUNT, line_break_style);
+ int hints = flags();
+ hints = LineBreakStyleBits::update(hints, line_break_style);
+ set_flags(hints);
+}
+
+inline JSSegmenter::LineBreakStyle JSSegmenter::line_break_style() const {
+ return LineBreakStyleBits::decode(flags());
+}
+
+inline void JSSegmenter::set_granularity(Granularity granularity) {
+ DCHECK_GT(Granularity::COUNT, granularity);
+ int hints = flags();
+ hints = GranularityBits::update(hints, granularity);
+ set_flags(hints);
+}
+
+inline JSSegmenter::Granularity JSSegmenter::granularity() const {
+ return GranularityBits::decode(flags());
+}
+
+CAST_ACCESSOR(JSSegmenter);
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_SEGMENTER_INL_H_
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
new file mode 100644
index 0000000000..62d9bd508a
--- /dev/null
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -0,0 +1,214 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/objects/js-segmenter.h"
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/js-segmenter-inl.h"
+#include "src/objects/managed.h"
+#include "unicode/brkiter.h"
+
+namespace v8 {
+namespace internal {
+
+JSSegmenter::LineBreakStyle JSSegmenter::GetLineBreakStyle(const char* str) {
+ if (strcmp(str, "strict") == 0) return JSSegmenter::LineBreakStyle::STRICT;
+ if (strcmp(str, "normal") == 0) return JSSegmenter::LineBreakStyle::NORMAL;
+ if (strcmp(str, "loose") == 0) return JSSegmenter::LineBreakStyle::LOOSE;
+ UNREACHABLE();
+}
+
+JSSegmenter::Granularity JSSegmenter::GetGranularity(const char* str) {
+ if (strcmp(str, "grapheme") == 0) return JSSegmenter::Granularity::GRAPHEME;
+ if (strcmp(str, "word") == 0) return JSSegmenter::Granularity::WORD;
+ if (strcmp(str, "sentence") == 0) return JSSegmenter::Granularity::SENTENCE;
+ if (strcmp(str, "line") == 0) return JSSegmenter::Granularity::LINE;
+ UNREACHABLE();
+}
+
+MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
+ Isolate* isolate, Handle<JSSegmenter> segmenter_holder,
+ Handle<Object> input_locales, Handle<Object> input_options) {
+ Factory* factory = isolate->factory();
+ segmenter_holder->set_flags(0);
+ // 3. Let requestedLocales be ? CanonicalizeLocaleList(locales).
+ Handle<JSObject> requested_locales;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, requested_locales,
+ Intl::CanonicalizeLocaleListJS(isolate, input_locales), JSSegmenter);
+
+ // 11. If options is undefined, then
+ Handle<JSReceiver> options;
+ if (input_options->IsUndefined(isolate)) {
+ // a. Let options be ObjectCreate(null).
+ options = isolate->factory()->NewJSObjectWithNullProto();
+ // 12. Else
+ } else {
+ // a. Let options be ? ToObject(options).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ Object::ToObject(isolate, input_options),
+ JSSegmenter);
+ }
+
+ // 8. Set opt.[[lb]] to lineBreakStyle.
+
+  // Because localeMatcher is currently read inside ResolveLocale, we have to
+  // run ResolveLocale before fetching lineBreakStyle.
+ // 9. Let r be ResolveLocale(%Segmenter%.[[AvailableLocales]],
+ // requestedLocales, opt, %Segmenter%.[[RelevantExtensionKeys]]).
+ Handle<JSObject> r;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, r,
+ Intl::ResolveLocale(isolate, "segmenter", requested_locales, options),
+ JSSegmenter);
+ Handle<Object> locale_obj =
+ JSObject::GetDataProperty(r, factory->locale_string());
+ Handle<String> locale;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, locale, Object::ToString(isolate, locale_obj), JSSegmenter);
+
+ // 7. Let lineBreakStyle be ? GetOption(options, "lineBreakStyle", "string", «
+ // "strict", "normal", "loose" », "normal").
+ std::unique_ptr<char[]> line_break_style_str = nullptr;
+ const std::vector<const char*> line_break_style_values = {"strict", "normal",
+ "loose"};
+ Maybe<bool> maybe_found_line_break_style = Intl::GetStringOption(
+ isolate, options, "lineBreakStyle", line_break_style_values,
+ "Intl.Segmenter", &line_break_style_str);
+ LineBreakStyle line_break_style_enum = LineBreakStyle::NORMAL;
+ MAYBE_RETURN(maybe_found_line_break_style, MaybeHandle<JSSegmenter>());
+ if (maybe_found_line_break_style.FromJust()) {
+ DCHECK_NOT_NULL(line_break_style_str.get());
+ line_break_style_enum = GetLineBreakStyle(line_break_style_str.get());
+ }
+
+ // 10. Set segmenter.[[Locale]] to the value of r.[[Locale]].
+ segmenter_holder->set_locale(*locale);
+
+ // 13. Let granularity be ? GetOption(options, "granularity", "string", «
+ // "grapheme", "word", "sentence", "line" », "grapheme").
+
+ std::unique_ptr<char[]> granularity_str = nullptr;
+ const std::vector<const char*> granularity_values = {"grapheme", "word",
+ "sentence", "line"};
+ Maybe<bool> maybe_found_granularity =
+ Intl::GetStringOption(isolate, options, "granularity", granularity_values,
+ "Intl.Segmenter", &granularity_str);
+ Granularity granularity_enum = Granularity::GRAPHEME;
+ MAYBE_RETURN(maybe_found_granularity, MaybeHandle<JSSegmenter>());
+ if (maybe_found_granularity.FromJust()) {
+ DCHECK_NOT_NULL(granularity_str.get());
+ granularity_enum = GetGranularity(granularity_str.get());
+ }
+
+ // 14. Set segmenter.[[SegmenterGranularity]] to granularity.
+ segmenter_holder->set_granularity(granularity_enum);
+
+ // 15. If granularity is "line",
+ if (granularity_enum == Granularity::LINE) {
+ // a. Set segmenter.[[SegmenterLineBreakStyle]] to r.[[lb]].
+ segmenter_holder->set_line_break_style(line_break_style_enum);
+ } else {
+ segmenter_holder->set_line_break_style(LineBreakStyle::NOTSET);
+ }
+
+ icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
+ DCHECK(!icu_locale.isBogus());
+
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::BreakIterator> icu_break_iterator;
+
+ switch (granularity_enum) {
+ case Granularity::GRAPHEME:
+ icu_break_iterator.reset(
+ icu::BreakIterator::createCharacterInstance(icu_locale, status));
+ break;
+ case Granularity::WORD:
+ icu_break_iterator.reset(
+ icu::BreakIterator::createWordInstance(icu_locale, status));
+ break;
+ case Granularity::SENTENCE:
+ icu_break_iterator.reset(
+ icu::BreakIterator::createSentenceInstance(icu_locale, status));
+ break;
+ case Granularity::LINE:
+ icu_break_iterator.reset(
+ icu::BreakIterator::createLineInstance(icu_locale, status));
+ // 15. If granularity is "line",
+ // a. Set segmenter.[[SegmenterLineBreakStyle]] to r.[[lb]].
+      // TBW: applying the selected line break style here is still to be
+      // written.
+ break;
+ case Granularity::COUNT:
+ UNREACHABLE();
+ }
+
+ CHECK(U_SUCCESS(status));
+ CHECK_NOT_NULL(icu_break_iterator.get());
+
+ Handle<Managed<icu::BreakIterator>> managed_break_iterator =
+ Managed<icu::BreakIterator>::FromUniquePtr(isolate, 0,
+ std::move(icu_break_iterator));
+
+ segmenter_holder->set_icu_break_iterator(*managed_break_iterator);
+ return segmenter_holder;
+}
+
+Handle<JSObject> JSSegmenter::ResolvedOptions(
+ Isolate* isolate, Handle<JSSegmenter> segmenter_holder) {
+ Factory* factory = isolate->factory();
+ Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+ Handle<String> locale(segmenter_holder->locale(), isolate);
+ JSObject::AddProperty(isolate, result, factory->locale_string(), locale,
+ NONE);
+ if (segmenter_holder->line_break_style() != LineBreakStyle::NOTSET) {
+ JSObject::AddProperty(isolate, result, factory->lineBreakStyle_string(),
+ segmenter_holder->LineBreakStyleAsString(), NONE);
+ }
+ JSObject::AddProperty(isolate, result, factory->granularity_string(),
+ segmenter_holder->GranularityAsString(), NONE);
+ return result;
+}
+
+Handle<String> JSSegmenter::LineBreakStyleAsString() const {
+ switch (line_break_style()) {
+ case LineBreakStyle::STRICT:
+ return GetReadOnlyRoots().strict_string_handle();
+ case LineBreakStyle::NORMAL:
+ return GetReadOnlyRoots().normal_string_handle();
+ case LineBreakStyle::LOOSE:
+ return GetReadOnlyRoots().loose_string_handle();
+ case LineBreakStyle::COUNT:
+ case LineBreakStyle::NOTSET:
+ UNREACHABLE();
+ }
+}
+
+Handle<String> JSSegmenter::GranularityAsString() const {
+ switch (granularity()) {
+ case Granularity::GRAPHEME:
+ return GetReadOnlyRoots().grapheme_string_handle();
+ case Granularity::WORD:
+ return GetReadOnlyRoots().word_string_handle();
+ case Granularity::SENTENCE:
+ return GetReadOnlyRoots().sentence_string_handle();
+ case Granularity::LINE:
+ return GetReadOnlyRoots().line_string_handle();
+ case Granularity::COUNT:
+ UNREACHABLE();
+ }
+}
+
+} // namespace internal
+} // namespace v8
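
Initialize maps the chosen granularity onto the matching ICU BreakIterator
factory and keeps the iterator alive in a Managed slot. A hedged sketch of
driving it from C++ (allocation of segmenter_holder and the locales/options
handles is elided; the names are illustrative):

  Handle<JSSegmenter> segmenter;
  ASSIGN_RETURN_ON_EXCEPTION(
      isolate, segmenter,
      JSSegmenter::Initialize(isolate, segmenter_holder, locales, options),
      JSSegmenter);
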
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
new file mode 100644
index 0000000000..167d70c210
--- /dev/null
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -0,0 +1,118 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_SEGMENTER_H_
+#define V8_OBJECTS_JS_SEGMENTER_H_
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/objects/managed.h"
+#include "unicode/uversion.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace U_ICU_NAMESPACE {
+class BreakIterator;
+}
+
+namespace v8 {
+namespace internal {
+
+class JSSegmenter : public JSObject {
+ public:
+ // Initializes segmenter object with properties derived from input
+ // locales and options.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSSegmenter> Initialize(
+ Isolate* isolate, Handle<JSSegmenter> segmenter_holder,
+ Handle<Object> locales, Handle<Object> options);
+
+ V8_WARN_UNUSED_RESULT static Handle<JSObject> ResolvedOptions(
+ Isolate* isolate, Handle<JSSegmenter> segmenter_holder);
+
+ Handle<String> LineBreakStyleAsString() const;
+ Handle<String> GranularityAsString() const;
+
+ DECL_CAST(JSSegmenter)
+
+ // Segmenter accessors.
+ DECL_ACCESSORS(locale, String)
+
+ DECL_ACCESSORS(icu_break_iterator, Managed<icu::BreakIterator>)
+
+ // LineBreakStyle: identifying the style used for line break.
+ //
+ // ecma402 #sec-segmenter-internal-slots
+
+ enum class LineBreakStyle {
+    NOTSET,  // Used when the granularity is not LINE
+ STRICT, // CSS level 3 line-break=strict, e.g. treat CJ as NS
+ NORMAL, // CSS level 3 line-break=normal, e.g. treat CJ as ID, break before
+ // hyphens for ja,zh
+ LOOSE, // CSS level 3 line-break=loose
+ COUNT
+ };
+ inline void set_line_break_style(LineBreakStyle line_break_style);
+ inline LineBreakStyle line_break_style() const;
+
+ // Granularity: identifying the segmenter used.
+ //
+ // ecma402 #sec-segmenter-internal-slots
+ enum class Granularity {
+ GRAPHEME, // for character-breaks
+ WORD, // for word-breaks
+ SENTENCE, // for sentence-breaks
+ LINE, // for line-breaks
+ COUNT
+ };
+ inline void set_granularity(Granularity granularity);
+ inline Granularity granularity() const;
+
+// Bit positions in |flags|.
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(LineBreakStyleBits, LineBreakStyle, 3, _) \
+ V(GranularityBits, Granularity, 3, _)
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
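+  // With this layout, bits 0-2 hold the LineBreakStyle and bits 3-5 the
+  // Granularity; e.g. STRICT (1) with LINE (3) is stored as 1 | (3 << 3).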
+
+ STATIC_ASSERT(LineBreakStyle::NOTSET <= LineBreakStyleBits::kMax);
+ STATIC_ASSERT(LineBreakStyle::STRICT <= LineBreakStyleBits::kMax);
+ STATIC_ASSERT(LineBreakStyle::NORMAL <= LineBreakStyleBits::kMax);
+ STATIC_ASSERT(LineBreakStyle::LOOSE <= LineBreakStyleBits::kMax);
+ STATIC_ASSERT(Granularity::GRAPHEME <= GranularityBits::kMax);
+ STATIC_ASSERT(Granularity::WORD <= GranularityBits::kMax);
+ STATIC_ASSERT(Granularity::SENTENCE <= GranularityBits::kMax);
+ STATIC_ASSERT(Granularity::LINE <= GranularityBits::kMax);
+
+  // [flags] Bit field containing various flags about the segmenter.
+ DECL_INT_ACCESSORS(flags)
+
+ DECL_PRINTER(JSSegmenter)
+ DECL_VERIFIER(JSSegmenter)
+
+ // Layout description.
+ static const int kJSSegmenterOffset = JSObject::kHeaderSize;
+ static const int kLocaleOffset = kJSSegmenterOffset + kPointerSize;
+ static const int kICUBreakIteratorOffset = kLocaleOffset + kPointerSize;
+ static const int kFlagsOffset = kICUBreakIteratorOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
+
+ private:
+ static LineBreakStyle GetLineBreakStyle(const char* str);
+ static Granularity GetGranularity(const char* str);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSSegmenter);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_SEGMENTER_H_
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 59f061dc05..0ec4113d4d 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -136,10 +136,10 @@ bool Map::IsUnboxedDoubleField(FieldIndex index) const {
return !layout_descriptor()->IsTagged(index.property_index());
}
-bool Map::TooManyFastProperties(StoreFromKeyed store_mode) const {
+bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
if (UnusedPropertyFields() != 0) return false;
if (is_prototype_map()) return false;
- int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
+ int minimum = store_origin == StoreOrigin::kNamed ? 128 : 12;
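+  // A named store tolerates up to 128 out-of-object fields before the map is
+  // considered too large; a keyed store gives up after 12.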
int limit = Max(minimum, GetInObjectProperties());
int external = NumberOfFields() - GetInObjectProperties();
return external > limit;
@@ -511,57 +511,30 @@ void Map::NotifyLeafMapLayoutChange(Isolate* isolate) {
}
}
-bool Map::IsJSObject(InstanceType type) {
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- return type >= FIRST_JS_OBJECT_TYPE;
-}
-
bool Map::CanTransition() const {
// Only JSObject and subtypes have map transitions and back pointers.
- return IsJSObject(instance_type());
+ return InstanceTypeChecker::IsJSObject(instance_type());
}
+#define DEF_TESTER(Type, ...) \
+ bool Map::Is##Type##Map() const { \
+ return InstanceTypeChecker::Is##Type(instance_type()); \
+ }
+INSTANCE_TYPE_CHECKERS(DEF_TESTER)
+#undef DEF_TESTER
+
bool Map::IsBooleanMap() const {
return this == GetReadOnlyRoots().boolean_map();
}
-bool Map::IsNullMap() const { return this == GetReadOnlyRoots().null_map(); }
-
-bool Map::IsUndefinedMap() const {
- return this == GetReadOnlyRoots().undefined_map();
-}
-
bool Map::IsNullOrUndefinedMap() const {
- return IsNullMap() || IsUndefinedMap();
+ return this == GetReadOnlyRoots().null_map() ||
+ this == GetReadOnlyRoots().undefined_map();
}
bool Map::IsPrimitiveMap() const {
return instance_type() <= LAST_PRIMITIVE_TYPE;
}
-bool Map::IsJSReceiverMap() const {
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- return instance_type() >= FIRST_JS_RECEIVER_TYPE;
-}
-bool Map::IsJSObjectMap() const { return IsJSObject(instance_type()); }
-bool Map::IsJSPromiseMap() const { return instance_type() == JS_PROMISE_TYPE; }
-bool Map::IsJSArrayMap() const { return instance_type() == JS_ARRAY_TYPE; }
-bool Map::IsJSFunctionMap() const {
- return instance_type() == JS_FUNCTION_TYPE;
-}
-bool Map::IsStringMap() const { return instance_type() < FIRST_NONSTRING_TYPE; }
-bool Map::IsJSProxyMap() const { return instance_type() == JS_PROXY_TYPE; }
-bool Map::IsJSGlobalProxyMap() const {
- return instance_type() == JS_GLOBAL_PROXY_TYPE;
-}
-bool Map::IsJSGlobalObjectMap() const {
- return instance_type() == JS_GLOBAL_OBJECT_TYPE;
-}
-bool Map::IsJSTypedArrayMap() const {
- return instance_type() == JS_TYPED_ARRAY_TYPE;
-}
-bool Map::IsJSDataViewMap() const {
- return instance_type() == JS_DATA_VIEW_TYPE;
-}
Object* Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); }
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 397f874610..5f6b173cd3 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -37,9 +37,10 @@ namespace internal {
V(FreeSpace) \
V(JSApiObject) \
V(JSArrayBuffer) \
- V(JSFunction) \
+ V(JSDataView) \
V(JSObject) \
V(JSObjectFast) \
+ V(JSTypedArray) \
V(JSWeakCollection) \
V(Map) \
V(NativeContext) \
@@ -403,9 +404,6 @@ class Map : public HeapObject {
inline bool has_fixed_typed_array_elements() const;
inline bool has_dictionary_elements() const;
- static bool IsValidElementsTransition(ElementsKind from_kind,
- ElementsKind to_kind);
-
// Returns true if the current map doesn't have DICTIONARY_ELEMENTS but if a
// map with DICTIONARY_ELEMENTS was found in the prototype chain.
bool DictionaryElementsInPrototypeChainOnly(Isolate* isolate);
@@ -471,8 +469,6 @@ class Map : public HeapObject {
bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
int target_inobject, int target_unused,
int* old_number_of_fields) const;
- // TODO(ishell): moveit!
- static Handle<Map> GeneralizeAllFields(Isolate* isolate, Handle<Map> map);
V8_WARN_UNUSED_RESULT static Handle<FieldType> GeneralizeFieldType(
Representation rep1, Handle<FieldType> type1, Representation rep2,
Handle<FieldType> type2, Isolate* isolate);
@@ -693,13 +689,13 @@ class Map : public HeapObject {
// Maximal number of fast properties. Used to restrict the number of map
// transitions to avoid an explosion in the number of maps for objects used as
// dictionaries.
- inline bool TooManyFastProperties(StoreFromKeyed store_mode) const;
+ inline bool TooManyFastProperties(StoreOrigin store_origin) const;
static Handle<Map> TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
PropertyConstness constness,
- StoreFromKeyed store_mode);
+ StoreOrigin store_origin);
static Handle<Map> TransitionToAccessorProperty(
Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor,
Handle<Object> getter, Handle<Object> setter,
@@ -756,27 +752,14 @@ class Map : public HeapObject {
Map* FindElementsKindTransitionedMap(Isolate* isolate,
MapHandles const& candidates);
- inline static bool IsJSObject(InstanceType type);
-
inline bool CanTransition() const;
+#define DECL_TESTER(Type, ...) inline bool Is##Type##Map() const;
+ INSTANCE_TYPE_CHECKERS(DECL_TESTER)
+#undef DECL_TESTER
inline bool IsBooleanMap() const;
- inline bool IsNullMap() const;
- inline bool IsUndefinedMap() const;
inline bool IsNullOrUndefinedMap() const;
inline bool IsPrimitiveMap() const;
- inline bool IsJSReceiverMap() const;
- inline bool IsJSObjectMap() const;
- inline bool IsJSPromiseMap() const;
- inline bool IsJSArrayMap() const;
- inline bool IsJSFunctionMap() const;
- inline bool IsStringMap() const;
- inline bool IsJSProxyMap() const;
- inline bool IsModuleMap() const;
- inline bool IsJSGlobalProxyMap() const;
- inline bool IsJSGlobalObjectMap() const;
- inline bool IsJSTypedArrayMap() const;
- inline bool IsJSDataViewMap() const;
inline bool IsSpecialReceiverMap() const;
inline bool IsCustomElementsReceiverMap() const;
@@ -945,7 +928,7 @@ class Map : public HeapObject {
void UpdateFieldType(Isolate* isolate, int descriptor_number,
Handle<Name> name, PropertyConstness new_constness,
Representation new_representation,
- MaybeObjectHandle new_wrapped_type);
+ const MaybeObjectHandle& new_wrapped_type);
// TODO(ishell): Move to MapUpdater.
void PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
@@ -971,9 +954,6 @@ class Map : public HeapObject {
class NormalizedMapCache : public WeakFixedArray,
public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
static Handle<NormalizedMapCache> New(Isolate* isolate);
V8_WARN_UNUSED_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index fa3cd8c14f..6d2bc6a9ab 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -20,29 +20,24 @@ bool MaybeObject::ToSmi(Smi** value) {
return false;
}
-Smi* MaybeObject::ToSmi() {
- DCHECK(HAS_SMI_TAG(this));
- return Smi::cast(reinterpret_cast<Object*>(this));
-}
-
-bool MaybeObject::IsStrongOrWeakHeapObject() const {
- if (IsSmi() || IsClearedWeakHeapObject()) {
+bool MaybeObject::IsStrongOrWeak() const {
+ if (IsSmi() || IsCleared()) {
return false;
}
return true;
}
-bool MaybeObject::ToStrongOrWeakHeapObject(HeapObject** result) {
- if (IsSmi() || IsClearedWeakHeapObject()) {
+bool MaybeObject::GetHeapObject(HeapObject** result) {
+ if (IsSmi() || IsCleared()) {
return false;
}
*result = GetHeapObject();
return true;
}
-bool MaybeObject::ToStrongOrWeakHeapObject(
- HeapObject** result, HeapObjectReferenceType* reference_type) {
- if (IsSmi() || IsClearedWeakHeapObject()) {
+bool MaybeObject::GetHeapObject(HeapObject** result,
+ HeapObjectReferenceType* reference_type) {
+ if (IsSmi() || IsCleared()) {
return false;
}
*reference_type = HasWeakHeapObjectTag(this)
@@ -52,11 +47,11 @@ bool MaybeObject::ToStrongOrWeakHeapObject(
return true;
}
-bool MaybeObject::IsStrongHeapObject() const {
+bool MaybeObject::IsStrong() const {
return !HasWeakHeapObjectTag(this) && !IsSmi();
}
-bool MaybeObject::ToStrongHeapObject(HeapObject** result) {
+bool MaybeObject::GetHeapObjectIfStrong(HeapObject** result) {
if (!HasWeakHeapObjectTag(this) && !IsSmi()) {
*result = reinterpret_cast<HeapObject*>(this);
return true;
@@ -64,35 +59,33 @@ bool MaybeObject::ToStrongHeapObject(HeapObject** result) {
return false;
}
-HeapObject* MaybeObject::ToStrongHeapObject() {
- DCHECK(IsStrongHeapObject());
+HeapObject* MaybeObject::GetHeapObjectAssumeStrong() {
+ DCHECK(IsStrong());
return reinterpret_cast<HeapObject*>(this);
}
-bool MaybeObject::IsWeakHeapObject() const {
- return HasWeakHeapObjectTag(this) && !IsClearedWeakHeapObject();
+bool MaybeObject::IsWeak() const {
+ return HasWeakHeapObjectTag(this) && !IsCleared();
}
-bool MaybeObject::IsWeakOrClearedHeapObject() const {
- return HasWeakHeapObjectTag(this);
-}
+bool MaybeObject::IsWeakOrCleared() const { return HasWeakHeapObjectTag(this); }
-bool MaybeObject::ToWeakHeapObject(HeapObject** result) {
- if (HasWeakHeapObjectTag(this) && !IsClearedWeakHeapObject()) {
+bool MaybeObject::GetHeapObjectIfWeak(HeapObject** result) {
+ if (IsWeak()) {
*result = GetHeapObject();
return true;
}
return false;
}
-HeapObject* MaybeObject::ToWeakHeapObject() {
- DCHECK(IsWeakHeapObject());
+HeapObject* MaybeObject::GetHeapObjectAssumeWeak() {
+ DCHECK(IsWeak());
return GetHeapObject();
}
HeapObject* MaybeObject::GetHeapObject() {
DCHECK(!IsSmi());
- DCHECK(!IsClearedWeakHeapObject());
+ DCHECK(!IsCleared());
return RemoveWeakHeapObjectMask(reinterpret_cast<HeapObjectReference*>(this));
}
@@ -103,15 +96,10 @@ Object* MaybeObject::GetHeapObjectOrSmi() {
return GetHeapObject();
}
-bool MaybeObject::IsObject() const { return IsSmi() || IsStrongHeapObject(); }
-
-Object* MaybeObject::ToObject() {
- DCHECK(!HasWeakHeapObjectTag(this));
- return reinterpret_cast<Object*>(this);
-}
+bool MaybeObject::IsObject() const { return IsSmi() || IsStrong(); }
MaybeObject* MaybeObject::MakeWeak(MaybeObject* object) {
- DCHECK(object->IsStrongOrWeakHeapObject());
+ DCHECK(object->IsStrongOrWeak());
return AddWeakHeapObjectMask(object);
}
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index 84c8538224..0d55ff859c 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_MAYBE_OBJECT_H_
#define V8_OBJECTS_MAYBE_OBJECT_H_
+#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/globals.h"
#include "src/objects.h"
@@ -23,30 +24,53 @@ class MaybeObject {
public:
bool IsSmi() const { return HAS_SMI_TAG(this); }
inline bool ToSmi(Smi** value);
- inline Smi* ToSmi();
- bool IsClearedWeakHeapObject() const {
+ bool IsCleared() const {
return ::v8::internal::IsClearedWeakHeapObject(this);
}
- inline bool IsStrongOrWeakHeapObject() const;
- inline bool ToStrongOrWeakHeapObject(HeapObject** result);
- inline bool ToStrongOrWeakHeapObject(HeapObject** result,
- HeapObjectReferenceType* reference_type);
- inline bool IsStrongHeapObject() const;
- inline bool ToStrongHeapObject(HeapObject** result);
- inline HeapObject* ToStrongHeapObject();
- inline bool IsWeakHeapObject() const;
- inline bool IsWeakOrClearedHeapObject() const;
- inline bool ToWeakHeapObject(HeapObject** result);
- inline HeapObject* ToWeakHeapObject();
-
- // Returns the HeapObject pointed to (either strongly or weakly).
+ inline bool IsStrongOrWeak() const;
+ inline bool IsStrong() const;
+
+ // If this MaybeObject is a strong pointer to a HeapObject, returns true and
+ // sets *result. Otherwise returns false.
+ inline bool GetHeapObjectIfStrong(HeapObject** result);
+
+ // DCHECKs that this MaybeObject is a strong pointer to a HeapObject and
+ // returns the HeapObject.
+ inline HeapObject* GetHeapObjectAssumeStrong();
+
+ inline bool IsWeak() const;
+ inline bool IsWeakOrCleared() const;
+
+ // If this MaybeObject is a weak pointer to a HeapObject, returns true and
+ // sets *result. Otherwise returns false.
+ inline bool GetHeapObjectIfWeak(HeapObject** result);
+
+ // DCHECKs that this MaybeObject is a weak pointer to a HeapObject and
+ // returns the HeapObject.
+ inline HeapObject* GetHeapObjectAssumeWeak();
+
+ // If this MaybeObject is a strong or weak pointer to a HeapObject, returns
+ // true and sets *result. Otherwise returns false.
+ inline bool GetHeapObject(HeapObject** result);
+ inline bool GetHeapObject(HeapObject** result,
+ HeapObjectReferenceType* reference_type);
+
+ // DCHECKs that this MaybeObject is a strong or a weak pointer to a HeapObject
+ // and returns the HeapObject.
inline HeapObject* GetHeapObject();
+
+ // DCHECKs that this MaybeObject is a strong or a weak pointer to a HeapObject
+ // or a SMI and returns the HeapObject or SMI.
inline Object* GetHeapObjectOrSmi();
inline bool IsObject() const;
- inline Object* ToObject();
+ template <typename T>
+ T* cast() {
+ DCHECK(!HasWeakHeapObjectTag(this));
+ return T::cast(reinterpret_cast<Object*>(this));
+ }
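+
+  // Typical use when scanning a slot that may hold a weak reference
+  // (illustrative):
+  //   HeapObject* heap_object;
+  //   if (maybe_object->GetHeapObjectIfWeak(&heap_object)) Use(heap_object);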
static MaybeObject* FromSmi(Smi* smi) {
DCHECK(HAS_SMI_TAG(smi));
diff --git a/deps/v8/src/objects/microtask-queue-inl.h b/deps/v8/src/objects/microtask-queue-inl.h
new file mode 100644
index 0000000000..8d93ee5226
--- /dev/null
+++ b/deps/v8/src/objects/microtask-queue-inl.h
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MICROTASK_QUEUE_INL_H_
+#define V8_OBJECTS_MICROTASK_QUEUE_INL_H_
+
+#include "src/objects/microtask-queue.h"
+
+#include "src/objects-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(MicrotaskQueue)
+ACCESSORS(MicrotaskQueue, queue, FixedArray, kQueueOffset)
+SMI_ACCESSORS(MicrotaskQueue, pending_microtask_count,
+ kPendingMicrotaskCountOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MICROTASK_QUEUE_INL_H_
diff --git a/deps/v8/src/objects/microtask-queue.cc b/deps/v8/src/objects/microtask-queue.cc
new file mode 100644
index 0000000000..a8905acd36
--- /dev/null
+++ b/deps/v8/src/objects/microtask-queue.cc
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/microtask-queue.h"
+
+#include "src/objects/microtask-queue-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// An out-of-line definition is required because DCHECK takes a reference to
+// this constant.
+constexpr int MicrotaskQueue::kMinimumQueueCapacity;
+
+// static
+void MicrotaskQueue::EnqueueMicrotask(Isolate* isolate,
+ Handle<MicrotaskQueue> microtask_queue,
+ Handle<Microtask> microtask) {
+ Handle<FixedArray> queue(microtask_queue->queue(), isolate);
+ int num_tasks = microtask_queue->pending_microtask_count();
+ DCHECK_LE(num_tasks, queue->length());
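+  // If the backing store is full, grow it by at least kMinimumQueueCapacity
+  // slots; past the minimum this doubles the capacity, giving amortized O(1)
+  // appends.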
+ if (num_tasks == queue->length()) {
+ queue = isolate->factory()->CopyFixedArrayAndGrow(
+ queue, std::max(num_tasks, kMinimumQueueCapacity));
+ microtask_queue->set_queue(*queue);
+ }
+ DCHECK_LE(kMinimumQueueCapacity, queue->length());
+ DCHECK_LT(num_tasks, queue->length());
+ DCHECK(queue->get(num_tasks)->IsUndefined(isolate));
+ queue->set(num_tasks, *microtask);
+ microtask_queue->set_pending_microtask_count(num_tasks + 1);
+}
+
+// static
+void MicrotaskQueue::RunMicrotasks(Handle<MicrotaskQueue> microtask_queue) {
+ UNIMPLEMENTED();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/microtask-queue.h b/deps/v8/src/objects/microtask-queue.h
new file mode 100644
index 0000000000..bb14cfb498
--- /dev/null
+++ b/deps/v8/src/objects/microtask-queue.h
@@ -0,0 +1,55 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MICROTASK_QUEUE_H_
+#define V8_OBJECTS_MICROTASK_QUEUE_H_
+
+#include "src/objects.h"
+#include "src/objects/microtask.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE MicrotaskQueue : public Struct {
+ public:
+ DECL_CAST(MicrotaskQueue)
+ DECL_VERIFIER(MicrotaskQueue)
+ DECL_PRINTER(MicrotaskQueue)
+
+  // A FixedArray in which the queued microtasks are stored.
+  // The first |pending_microtask_count| slots each hold a Microtask instance;
+  // any remaining slots hold undefined_value.
+ DECL_ACCESSORS(queue, FixedArray)
+
+  // The number of microtasks queued in |queue|. This must be less than or
+  // equal to the length of |queue|.
+ DECL_INT_ACCESSORS(pending_microtask_count)
+
+ // Enqueues |microtask| to |microtask_queue|.
+ static void EnqueueMicrotask(Isolate* isolate,
+ Handle<MicrotaskQueue> microtask_queue,
+ Handle<Microtask> microtask);
+
+ // Runs all enqueued microtasks.
+ static void RunMicrotasks(Handle<MicrotaskQueue> microtask_queue);
+
+ static constexpr int kMinimumQueueCapacity = 8;
+
+ static const int kQueueOffset = HeapObject::kHeaderSize;
+ static const int kPendingMicrotaskCountOffset = kQueueOffset + kPointerSize;
+ static const int kSize = kPendingMicrotaskCountOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MicrotaskQueue);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MICROTASK_QUEUE_H_
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 02a94c446b..c4d2626e60 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -18,8 +18,6 @@
namespace v8 {
namespace internal {
-namespace {
-
struct ModuleHandleHash {
V8_INLINE size_t operator()(Handle<Module> module) const {
return module->hash();
@@ -82,8 +80,6 @@ class UnorderedStringMap
zone)) {}
};
-} // anonymous namespace
-
class Module::ResolveSet
: public std::unordered_map<
Handle<Module>, UnorderedStringSet*, ModuleHandleHash,
@@ -106,22 +102,18 @@ class Module::ResolveSet
Zone* zone_;
};
-namespace {
-
-int ExportIndex(int cell_index) {
+int Module::ExportIndex(int cell_index) {
DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
ModuleDescriptor::kExport);
return cell_index - 1;
}
-int ImportIndex(int cell_index) {
+int Module::ImportIndex(int cell_index) {
DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
ModuleDescriptor::kImport);
return -cell_index - 1;
}
-} // anonymous namespace
-
void Module::CreateIndirectExport(Isolate* isolate, Handle<Module> module,
Handle<String> name,
Handle<ModuleInfoEntry> entry) {
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 4612d73c89..fd9f9ace80 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -7,6 +7,7 @@
#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -27,9 +28,6 @@ class Zone;
// The runtime representation of an ECMAScript module.
class Module : public Struct, public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
DECL_CAST(Module)
DECL_VERIFIER(Module)
DECL_PRINTER(Module)
@@ -111,6 +109,9 @@ class Module : public Struct, public NeverReadOnlySpaceObject {
static void StoreVariable(Handle<Module> module, int cell_index,
Handle<Object> value);
+ static int ImportIndex(int cell_index);
+ static int ExportIndex(int cell_index);
+
// Get the namespace object for [module_request] of [module]. If it doesn't
// exist yet, it is created.
static Handle<JSModuleNamespace> GetModuleNamespace(Isolate* isolate,
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index e768a40ec2..512e47875c 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -19,24 +19,25 @@ CAST_ACCESSOR(Name)
CAST_ACCESSOR(Symbol)
ACCESSORS(Symbol, name, Object, kNameOffset)
-SMI_ACCESSORS(Symbol, flags, kFlagsOffset)
-BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
-BOOL_ACCESSORS(Symbol, flags, is_well_known_symbol, kWellKnownSymbolBit)
-BOOL_ACCESSORS(Symbol, flags, is_public, kPublicBit)
-BOOL_ACCESSORS(Symbol, flags, is_interesting_symbol, kInterestingSymbolBit)
+INT_ACCESSORS(Symbol, flags, kFlagsOffset)
+BIT_FIELD_ACCESSORS(Symbol, flags, is_private, Symbol::IsPrivateBit)
+BIT_FIELD_ACCESSORS(Symbol, flags, is_well_known_symbol,
+ Symbol::IsWellKnownSymbolBit)
+BIT_FIELD_ACCESSORS(Symbol, flags, is_public, Symbol::IsPublicBit)
+BIT_FIELD_ACCESSORS(Symbol, flags, is_interesting_symbol,
+ Symbol::IsInterestingSymbolBit)
bool Symbol::is_private_field() const {
- bool value = BooleanBit::get(flags(), kPrivateFieldBit);
+ bool value = Symbol::IsPrivateFieldBit::decode(flags());
DCHECK_IMPLIES(value, is_private());
return value;
}
void Symbol::set_is_private_field() {
- int old_value = flags();
// TODO(gsathya): Re-order the bits to have these next to each other
// and just do the bit shifts once.
- set_flags(BooleanBit::set(old_value, kPrivateBit, true) |
- BooleanBit::set(old_value, kPrivateFieldBit, true));
+ set_flags(Symbol::IsPrivateBit::update(flags(), true));
+ set_flags(Symbol::IsPrivateFieldBit::update(flags(), true));
}
bool Name::IsUniqueName() const {
@@ -51,13 +52,6 @@ uint32_t Name::hash_field() {
void Name::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
-#if V8_HOST_ARCH_64_BIT
-#if V8_TARGET_LITTLE_ENDIAN
- WRITE_UINT32_FIELD(this, kHashFieldSlot + kInt32Size, 0);
-#else
- WRITE_UINT32_FIELD(this, kHashFieldSlot, 0);
-#endif
-#endif
}
bool Name::Equals(Name* other) {
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index 06e08deb82..bcc1f2c27d 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -67,13 +67,8 @@ class Name : public HeapObject {
int NameShortPrint(Vector<char> str);
// Layout description.
- static const int kHashFieldSlot = HeapObject::kHeaderSize;
-#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
- static const int kHashFieldOffset = kHashFieldSlot;
-#else
- static const int kHashFieldOffset = kHashFieldSlot + kInt32Size;
-#endif
- static const int kSize = kHashFieldSlot + kPointerSize;
+ static const int kHashFieldOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kHashFieldOffset + kInt32Size;
// Mask constant for checking if a name has a computed hash code
// and if it is a string that is an array index. The least significant bit
@@ -181,20 +176,22 @@ class Symbol : public Name {
DECL_VERIFIER(Symbol)
// Layout description.
- static const int kNameOffset = Name::kSize;
- static const int kFlagsOffset = kNameOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
-
- // Flags layout.
- static const int kPrivateBit = 0;
- static const int kWellKnownSymbolBit = 1;
- static const int kPublicBit = 2;
- static const int kInterestingSymbolBit = 3;
- static const int kPrivateFieldBit = 4;
-
- typedef FixedBodyDescriptor<kNameOffset, kFlagsOffset, kSize> BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
+ static const int kFlagsOffset = Name::kHeaderSize;
+ static const int kNameOffset = kFlagsOffset + kInt32Size;
+ static const int kSize = kNameOffset + kPointerSize;
+
+// Flags layout.
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(IsPrivateBit, bool, 1, _) \
+ V(IsWellKnownSymbolBit, bool, 1, _) \
+ V(IsPublicBit, bool, 1, _) \
+ V(IsInterestingSymbolBit, bool, 1, _) \
+ V(IsPrivateFieldBit, bool, 1, _)
+
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
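+  // Each flag is a single bit, assigned in declaration order: IsPrivateBit is
+  // bit 0 and IsPrivateFieldBit is bit 4.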
+
+ typedef FixedBodyDescriptor<kNameOffset, kSize, kSize> BodyDescriptor;
void SymbolShortPrint(std::ostream& os);
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index 8176bb0324..a0c19cab5c 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -46,6 +46,8 @@
#undef WRITE_INTPTR_FIELD
#undef RELAXED_READ_INTPTR_FIELD
#undef RELAXED_WRITE_INTPTR_FIELD
+#undef READ_UINTPTR_FIELD
+#undef WRITE_UINTPTR_FIELD
#undef READ_UINT8_FIELD
#undef WRITE_UINT8_FIELD
#undef READ_INT8_FIELD
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 9ec24a62f7..c97f59f9c0 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -290,6 +290,12 @@
#define WRITE_INTPTR_FIELD(p, offset, value) \
(*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
+#define READ_UINTPTR_FIELD(p, offset) \
+ (*reinterpret_cast<const uintptr_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_UINTPTR_FIELD(p, offset, value) \
+ (*reinterpret_cast<uintptr_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_UINT8_FIELD(p, offset) \
(*reinterpret_cast<const uint8_t*>(FIELD_ADDR(p, offset)))
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 76b0692c46..76343c21ed 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -13,20 +13,20 @@
namespace v8 {
namespace internal {
-int OrderedHashSet::GetMapRootIndex() {
- return Heap::kOrderedHashSetMapRootIndex;
+RootIndex OrderedHashSet::GetMapRootIndex() {
+ return RootIndex::kOrderedHashSetMap;
}
-int OrderedHashMap::GetMapRootIndex() {
- return Heap::kOrderedHashMapMapRootIndex;
+RootIndex OrderedHashMap::GetMapRootIndex() {
+ return RootIndex::kOrderedHashMapMap;
}
-int SmallOrderedHashMap::GetMapRootIndex() {
- return Heap::kSmallOrderedHashMapMapRootIndex;
+RootIndex SmallOrderedHashMap::GetMapRootIndex() {
+ return RootIndex::kSmallOrderedHashMapMap;
}
-int SmallOrderedHashSet::GetMapRootIndex() {
- return Heap::kSmallOrderedHashSetMapRootIndex;
+RootIndex SmallOrderedHashSet::GetMapRootIndex() {
+ return RootIndex::kSmallOrderedHashSetMap;
}
inline Object* OrderedHashMap::ValueAt(int entry) {
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index fdafce56ae..171e4dfae3 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -26,7 +26,7 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
}
int num_buckets = capacity / kLoadFactor;
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArrayWithMap(
- static_cast<Heap::RootListIndex>(Derived::GetMapRootIndex()),
+ Derived::GetMapRootIndex(),
kHashTableStartIndex + num_buckets + (capacity * kEntrySize), pretenure);
Handle<Derived> table = Handle<Derived>::cast(backing_store);
for (int i = 0; i < num_buckets; ++i) {
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 0ee0f71c5c..6c606efc75 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -7,6 +7,7 @@
#include "src/globals.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -231,7 +232,7 @@ class OrderedHashSet : public OrderedHashTable<OrderedHashSet, 1> {
Handle<OrderedHashSet> table,
GetKeysConversion convert);
static HeapObject* GetEmpty(ReadOnlyRoots ro_roots);
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
static inline bool Is(Handle<HeapObject> table);
};
@@ -249,7 +250,7 @@ class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
static Object* GetHash(Isolate* isolate, Object* key);
static HeapObject* GetEmpty(ReadOnlyRoots ro_roots);
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
static inline bool Is(Handle<HeapObject> table);
static const int kValueOffset = 1;
@@ -326,9 +327,6 @@ class SmallOrderedHashTable : public HeapObject {
// Iterates only fields in the DataTable.
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
// Returns total size in bytes required for a table of given
// capacity.
static int SizeFor(int capacity) {
@@ -554,7 +552,7 @@ class SmallOrderedHashSet : public SmallOrderedHashTable<SmallOrderedHashSet> {
Handle<SmallOrderedHashSet> table,
Handle<Object> key);
static inline bool Is(Handle<HeapObject> table);
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
};
class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
@@ -575,7 +573,7 @@ class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
Handle<Object> key,
Handle<Object> value);
static inline bool Is(Handle<HeapObject> table);
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
};
// TODO(gsathya): Rename this to OrderedHashTable, after we rename
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
index 5ff5dac6f3..0f7b4f23ce 100644
--- a/deps/v8/src/objects/promise.h
+++ b/deps/v8/src/objects/promise.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+class JSPromise;
+
// Struct to hold state required for PromiseReactionJob. See the comment on the
// PromiseReaction below for details on how this is being managed to reduce the
// memory and allocation overhead. This is the base class for the concrete
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
new file mode 100644
index 0000000000..cb157db5d6
--- /dev/null
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -0,0 +1,83 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROPERTY_ARRAY_INL_H_
+#define V8_OBJECTS_PROPERTY_ARRAY_INL_H_
+
+#include "src/objects/property-array.h"
+
+#include "src/heap/heap-write-barrier-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(PropertyArray)
+
+Object* PropertyArray::get(int index) const {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
+}
+
+void PropertyArray::set(int index, Object* value) {
+ DCHECK(IsPropertyArray());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(this, offset, value);
+}
+
+void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
+}
+
+Object** PropertyArray::data_start() {
+ return HeapObject::RawField(this, kHeaderSize);
+}
+
+int PropertyArray::length() const {
+ Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
+ int value = Smi::ToInt(value_obj);
+ return LengthField::decode(value);
+}
+
+void PropertyArray::initialize_length(int len) {
+ SLOW_DCHECK(len >= 0);
+ SLOW_DCHECK(len < LengthField::kMax);
+ WRITE_FIELD(this, kLengthAndHashOffset, Smi::FromInt(len));
+}
+
+int PropertyArray::synchronized_length() const {
+ Object* value_obj = ACQUIRE_READ_FIELD(this, kLengthAndHashOffset);
+ int value = Smi::ToInt(value_obj);
+ return LengthField::decode(value);
+}
+
+int PropertyArray::Hash() const {
+ Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
+ int value = Smi::ToInt(value_obj);
+ return HashField::decode(value);
+}
+
+void PropertyArray::SetHash(int hash) {
+ Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
+ int value = Smi::ToInt(value_obj);
+ value = HashField::update(value, hash);
+ WRITE_FIELD(this, kLengthAndHashOffset, Smi::FromInt(value));
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROPERTY_ARRAY_INL_H_
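
The setters in this new header pair a relaxed store with a write barrier that the caller may skip. A hedged sketch of that shape, with std::atomic standing in for the RELAXED_WRITE_FIELD macro and a stubbed-out barrier (the real one does remembered-set bookkeeping):

    #include <atomic>

    struct Object;  // opaque heap value, pointers only

    enum class WriteBarrierMode { UPDATE_WRITE_BARRIER, SKIP_WRITE_BARRIER };

    // Placeholder: V8's real barrier records old-to-new pointers so the
    // scavenger can find them without scanning old space.
    void RecordSlot(void* host, int offset, Object* value) {
      (void)host; (void)offset; (void)value;
    }

    // Mirrors PropertyArray::set(index, value, mode): relaxed store first,
    // then the barrier, unless the caller knows the value needs none.
    void SetSlot(void* host, std::atomic<Object*>* slot, int offset,
                 Object* value, WriteBarrierMode mode) {
      slot->store(value, std::memory_order_relaxed);
      if (mode == WriteBarrierMode::UPDATE_WRITE_BARRIER) {
        RecordSlot(host, offset, value);
      }
    }

    int main() {
      std::atomic<Object*> slot{nullptr};
      SetSlot(nullptr, &slot, 0, nullptr,
              WriteBarrierMode::SKIP_WRITE_BARRIER);
    }
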
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
new file mode 100644
index 0000000000..70f535a8f0
--- /dev/null
+++ b/deps/v8/src/objects/property-array.h
@@ -0,0 +1,73 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROPERTY_ARRAY_H_
+#define V8_OBJECTS_PROPERTY_ARRAY_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class PropertyArray : public HeapObject {
+ public:
+ // [length]: length of the array.
+ inline int length() const;
+
+ // Get the length using acquire loads.
+ inline int synchronized_length() const;
+
+ // This is only used on a newly allocated PropertyArray which
+ // doesn't have an existing hash.
+ inline void initialize_length(int length);
+
+ inline void SetHash(int hash);
+ inline int Hash() const;
+
+ inline Object* get(int index) const;
+
+ inline void set(int index, Object* value);
+ // Setter with explicit barrier mode.
+ inline void set(int index, Object* value, WriteBarrierMode mode);
+
+ // Gives access to raw memory which stores the array's data.
+ inline Object** data_start();
+
+ // Garbage collection support.
+ static constexpr int SizeFor(int length) {
+ return kHeaderSize + length * kPointerSize;
+ }
+
+ DECL_CAST(PropertyArray)
+ DECL_PRINTER(PropertyArray)
+ DECL_VERIFIER(PropertyArray)
+
+ // Layout description.
+ static const int kLengthAndHashOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthAndHashOffset + kPointerSize;
+
+ // Garbage collection support.
+ typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
+
+ static const int kLengthFieldSize = 10;
+ class LengthField : public BitField<int, 0, kLengthFieldSize> {};
+ static const int kMaxLength = LengthField::kMax;
+ class HashField : public BitField<int, kLengthFieldSize,
+ kSmiValueSize - kLengthFieldSize - 1> {};
+
+ static const int kNoHashSentinel = 0;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyArray);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROPERTY_ARRAY_H_
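
PropertyArray stores its length and hash in a single Smi-sized word, which is why initialize_length, Hash and SetHash in the -inl.h above all read and rewrite the same kLengthAndHashOffset. A self-contained sketch of the encode/decode scheme; the BitField template below is a simplified stand-in and kSmiValueSize = 31 is illustrative (it is platform-dependent in V8):

    #include <cassert>
    #include <cstdint>

    // Simplified BitField: |shift| and |size| carve a sub-range out of a
    // 31-bit Smi payload.
    template <typename T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
      static constexpr uint32_t update(uint32_t word, T value) {
        return (word & ~kMask) | (static_cast<uint32_t>(value) << shift);
      }
    };

    constexpr int kLengthFieldSize = 10;
    constexpr int kSmiValueSize = 31;  // illustrative
    using LengthField = BitField<int, 0, kLengthFieldSize>;
    using HashField =
        BitField<int, kLengthFieldSize, kSmiValueSize - kLengthFieldSize - 1>;

    int main() {
      uint32_t word = LengthField::update(0, 37);  // initialize_length(37)
      word = HashField::update(word, 0x1234);      // SetHash(0x1234)
      assert(LengthField::decode(word) == 37);     // length() is unchanged
      assert(HashField::decode(word) == 0x1234);   // Hash()
    }
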
diff --git a/deps/v8/src/objects/prototype-info-inl.h b/deps/v8/src/objects/prototype-info-inl.h
index 298674eb10..24e219d46c 100644
--- a/deps/v8/src/objects/prototype-info-inl.h
+++ b/deps/v8/src/objects/prototype-info-inl.h
@@ -20,7 +20,7 @@ namespace internal {
CAST_ACCESSOR(PrototypeInfo)
Map* PrototypeInfo::ObjectCreateMap() {
- return Map::cast(object_create_map()->ToWeakHeapObject());
+ return Map::cast(object_create_map()->GetHeapObjectAssumeWeak());
}
// static
@@ -31,7 +31,7 @@ void PrototypeInfo::SetObjectCreateMap(Handle<PrototypeInfo> info,
bool PrototypeInfo::HasObjectCreateMap() {
MaybeObject* cache = object_create_map();
- return cache->IsWeakHeapObject();
+ return cache->IsWeak();
}
ACCESSORS(PrototypeInfo, module_namespace, Object, kJSModuleNamespaceOffset)
@@ -51,7 +51,7 @@ void PrototypeUsers::MarkSlotEmpty(WeakArrayList* array, int index) {
}
Smi* PrototypeUsers::empty_slot_index(WeakArrayList* array) {
- return array->Get(kEmptySlotIndex)->ToSmi();
+ return array->Get(kEmptySlotIndex)->cast<Smi>();
}
void PrototypeUsers::set_empty_slot_index(WeakArrayList* array, int index) {
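
This file tracks a MaybeObject API rename: ToWeakHeapObject becomes GetHeapObjectAssumeWeak (the caller asserts weakness rather than testing it) and IsWeakHeapObject becomes IsWeak. A reduced sketch of the tag-bit idea behind weak references; the 0x3 encoding below is illustrative, and the authoritative layout lives in V8's object headers:

    #include <cassert>
    #include <cstdint>

    struct HeapObject { int payload; };

    class MaybeObjectSketch {
     public:
      static MaybeObjectSketch MakeWeak(HeapObject* obj) {
        return MaybeObjectSketch(reinterpret_cast<uintptr_t>(obj) | 0x3);
      }
      bool IsWeak() const { return (bits_ & 0x3) == 0x3; }
      // "AssumeWeak": callers promise the reference is weak, so the
      // accessor can assert instead of returning a maybe-null result.
      HeapObject* GetHeapObjectAssumeWeak() const {
        assert(IsWeak());
        return reinterpret_cast<HeapObject*>(bits_ & ~uintptr_t{0x3});
      }

     private:
      explicit MaybeObjectSketch(uintptr_t bits) : bits_(bits) {}
      uintptr_t bits_;
    };

    int main() {
      alignas(8) static HeapObject obj{0};
      MaybeObjectSketch m = MaybeObjectSketch::MakeWeak(&obj);
      assert(m.IsWeak() && m.GetHeapObjectAssumeWeak() == &obj);
    }
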
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 9ec87dcb92..0fa5557e8c 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -369,7 +369,6 @@ Handle<ScopeInfo> ScopeInfo::CreateForEmptyFunction(Isolate* isolate) {
// static
Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
ScopeType type) {
- DCHECK(isolate->bootstrapper()->IsActive());
DCHECK(type == SCRIPT_SCOPE || type == FUNCTION_SCOPE);
const int parameter_count = 0;
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index ac0664f7fb..622c51210b 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -177,7 +177,8 @@ class ScopeInfo : public FixedArray {
MaybeHandle<ScopeInfo> outer_scope);
static Handle<ScopeInfo> CreateForWithScope(
Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope);
- static Handle<ScopeInfo> CreateForEmptyFunction(Isolate* isolate);
+ V8_EXPORT_PRIVATE static Handle<ScopeInfo> CreateForEmptyFunction(
+ Isolate* isolate);
static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
// Serializes empty scope info.
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 3420b71754..bd789ba2ff 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -17,9 +17,6 @@ namespace internal {
// Script describes a script which has been added to the VM.
class Script : public Struct, public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
// Script types.
enum Type {
TYPE_NATIVE = 0,
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 0b4a7effb9..cf057e9ca0 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -282,60 +282,6 @@ void SharedFunctionInfo::DontAdaptArguments() {
set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
}
-int SharedFunctionInfo::StartPosition() const {
- Object* maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
- ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
- if (info->HasPositionInfo()) {
- return info->StartPosition();
- }
- } else if (HasUncompiledData()) {
- // Works with or without scope.
- return uncompiled_data()->start_position();
- } else if (IsApiFunction() || HasBuiltinId()) {
- DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
- return 0;
- }
- return kNoSourcePosition;
-}
-
-int SharedFunctionInfo::EndPosition() const {
- Object* maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
- ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
- if (info->HasPositionInfo()) {
- return info->EndPosition();
- }
- } else if (HasUncompiledData()) {
- // Works with or without scope.
- return uncompiled_data()->end_position();
- } else if (IsApiFunction() || HasBuiltinId()) {
- DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
- return 0;
- }
- return kNoSourcePosition;
-}
-
-void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
- Object* maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
- ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
- if (info->HasPositionInfo()) {
- info->SetPositionInfo(start_position, end_position);
- }
- } else if (HasUncompiledData()) {
- if (HasUncompiledDataWithPreParsedScope()) {
- // Clear out preparsed scope data, since the position setter invalidates
- // any scope data.
- ClearPreParsedScopeData();
- }
- uncompiled_data()->set_start_position(start_position);
- uncompiled_data()->set_end_position(end_position);
- } else {
- UNREACHABLE();
- }
-}
-
bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
ScopeInfo* SharedFunctionInfo::scope_info() const {
@@ -613,21 +559,6 @@ bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
return function_data()->IsWasmExportedFunctionData();
}
-int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
- // Fast path for the common case when the SFI is uncompiled and so the
- // function literal id is already in the uncompiled data.
- if (HasUncompiledData()) {
- int id = uncompiled_data()->function_literal_id();
- // Make sure the id is what we should have found with the slow path.
- DCHECK_EQ(id, FindIndexInScript(isolate));
- return id;
- }
-
- // Otherwise, search for the function in the SFI's script's function list,
- // and return its index in that list.
- return FindIndexInScript(isolate);
-}
-
Object* SharedFunctionInfo::script() const {
Object* maybe_script = script_or_debug_info();
if (maybe_script->IsDebugInfo()) {
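
StartPosition, EndPosition, SetPosition and FunctionLiteralId leave this inline header because their declarations gain V8_EXPORT_PRIVATE in the header diff below: an out-of-line definition gives the component (shared-library) build a single exported copy to link against. A rough sketch of what such a macro expands to, assuming a GCC/Clang-style toolchain; V8's real definition also handles MSVC's dllexport/dllimport pairing:

    // Hypothetical reconstruction of the visibility macro.
    #if defined(BUILDING_V8_SHARED)
    #define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
    #else
    #define V8_EXPORT_PRIVATE
    #endif

    class SharedFunctionInfoSketch {
     public:
      // Declared exported and defined in a .cc file rather than inline,
      // so tests living in another shared object can call it.
      V8_EXPORT_PRIVATE int StartPosition() const;
    };

    int main() {}
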
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index d5f65a91d1..f43fa61b2f 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -7,6 +7,7 @@
#include "src/bailout-reason.h"
#include "src/objects.h"
+#include "src/objects/builtin-function-id.h"
#include "src/objects/script.h"
// Has to be the last include (doesn't have include guards):
@@ -53,8 +54,6 @@ class PreParsedScopeData : public HeapObject {
POINTER_SIZE_ALIGN(kUnalignedChildDataStartOffset);
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
static constexpr int SizeFor(int length) {
return kChildDataStartOffset + length * kPointerSize;
@@ -114,8 +113,6 @@ class UncompiledDataWithoutPreParsedScope : public UncompiledData {
// No extra fields compared to UncompiledData.
typedef UncompiledData::BodyDescriptor BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(UncompiledDataWithoutPreParsedScope);
@@ -150,8 +147,6 @@ class UncompiledDataWithPreParsedScope : public UncompiledData {
FixedBodyDescriptor<kStartOfPointerFieldsOffset,
kEndOfPointerFieldsOffset, kSize>>
BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(UncompiledDataWithPreParsedScope);
@@ -179,9 +174,6 @@ class InterpreterData : public Struct {
// shared by multiple instances of the function.
class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
static constexpr Object* const kNoSharedNameSentinel = Smi::kZero;
// [name]: Returns shared name if it exists or an empty string otherwise.
@@ -230,14 +222,14 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
DECL_ACCESSORS(scope_info, ScopeInfo)
// End position of this function in the script source.
- inline int EndPosition() const;
+ V8_EXPORT_PRIVATE int EndPosition() const;
// Start position of this function in the script source.
- inline int StartPosition() const;
+ V8_EXPORT_PRIVATE int StartPosition() const;
// Set the start and end position of this function in the script source.
// Updates the scope info if available.
- inline void SetPosition(int start_position, int end_position);
+ V8_EXPORT_PRIVATE void SetPosition(int start_position, int end_position);
// [outer scope info | feedback metadata] Shared storage for outer scope info
// (on uncompiled functions) and feedback metadata (on compiled functions).
@@ -358,7 +350,7 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
inline String* inferred_name();
// Get the function literal id associated with this function, for parsing.
- inline int FunctionLiteralId(Isolate* isolate) const;
+ V8_EXPORT_PRIVATE int FunctionLiteralId(Isolate* isolate) const;
// Break infos are contained in DebugInfo, this is a convenience method
// to simplify access.
@@ -625,8 +617,6 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
typedef FixedBodyDescriptor<kStartOfPointerFieldsOffset,
kEndOfPointerFieldsOffset, kAlignedSize>
BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
// Bit positions in |flags|.
#define FLAGS_BIT_FIELDS(V, _) \
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
new file mode 100644
index 0000000000..8398c7cb5b
--- /dev/null
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_STACK_FRAME_INFO_INL_H_
+#define V8_OBJECTS_STACK_FRAME_INFO_INL_H_
+
+#include "src/objects/stack-frame-info.h"
+
+#include "src/heap/heap-write-barrier-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(StackFrameInfo)
+
+SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberIndex)
+SMI_ACCESSORS(StackFrameInfo, column_number, kColumnNumberIndex)
+SMI_ACCESSORS(StackFrameInfo, script_id, kScriptIdIndex)
+ACCESSORS(StackFrameInfo, script_name, Object, kScriptNameIndex)
+ACCESSORS(StackFrameInfo, script_name_or_source_url, Object,
+ kScriptNameOrSourceUrlIndex)
+ACCESSORS(StackFrameInfo, function_name, Object, kFunctionNameIndex)
+SMI_ACCESSORS(StackFrameInfo, flag, kFlagIndex)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, kIsEvalBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, kIsConstructorBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, kIsWasmBit)
+SMI_ACCESSORS(StackFrameInfo, id, kIdIndex)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_STACK_FRAME_INFO_INL_H_
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
new file mode 100644
index 0000000000..4adc37109e
--- /dev/null
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -0,0 +1,62 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_STACK_FRAME_INFO_H_
+#define V8_OBJECTS_STACK_FRAME_INFO_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class StackFrameInfo : public Struct, public NeverReadOnlySpaceObject {
+ public:
+ DECL_INT_ACCESSORS(line_number)
+ DECL_INT_ACCESSORS(column_number)
+ DECL_INT_ACCESSORS(script_id)
+ DECL_ACCESSORS(script_name, Object)
+ DECL_ACCESSORS(script_name_or_source_url, Object)
+ DECL_ACCESSORS(function_name, Object)
+ DECL_BOOLEAN_ACCESSORS(is_eval)
+ DECL_BOOLEAN_ACCESSORS(is_constructor)
+ DECL_BOOLEAN_ACCESSORS(is_wasm)
+ DECL_INT_ACCESSORS(flag)
+ DECL_INT_ACCESSORS(id)
+
+ DECL_CAST(StackFrameInfo)
+
+ // Dispatched behavior.
+ DECL_PRINTER(StackFrameInfo)
+ DECL_VERIFIER(StackFrameInfo)
+
+ static const int kLineNumberIndex = Struct::kHeaderSize;
+ static const int kColumnNumberIndex = kLineNumberIndex + kPointerSize;
+ static const int kScriptIdIndex = kColumnNumberIndex + kPointerSize;
+ static const int kScriptNameIndex = kScriptIdIndex + kPointerSize;
+ static const int kScriptNameOrSourceUrlIndex =
+ kScriptNameIndex + kPointerSize;
+ static const int kFunctionNameIndex =
+ kScriptNameOrSourceUrlIndex + kPointerSize;
+ static const int kFlagIndex = kFunctionNameIndex + kPointerSize;
+ static const int kIdIndex = kFlagIndex + kPointerSize;
+ static const int kSize = kIdIndex + kPointerSize;
+
+ private:
+ // Bit positions in the flag word, counted from the least significant bit.
+ static const int kIsEvalBit = 0;
+ static const int kIsConstructorBit = 1;
+ static const int kIsWasmBit = 2;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StackFrameInfo);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_STACK_FRAME_INFO_H_
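
The three boolean accessors above (is_eval, is_constructor, is_wasm) share the single Smi-valued flag slot, selected by the private bit constants. A minimal sketch of that BOOL_ACCESSORS pattern — FlagSketch and set_bit are hypothetical helpers, not V8 code:

    #include <cassert>

    class FlagSketch {
     public:
      bool is_eval() const { return (flag_ >> kIsEvalBit) & 1; }
      bool is_constructor() const { return (flag_ >> kIsConstructorBit) & 1; }
      bool is_wasm() const { return (flag_ >> kIsWasmBit) & 1; }
      void set_is_eval(bool v) { set_bit(kIsEvalBit, v); }
      void set_is_constructor(bool v) { set_bit(kIsConstructorBit, v); }
      void set_is_wasm(bool v) { set_bit(kIsWasmBit, v); }

     private:
      static const int kIsEvalBit = 0;  // same positions as above
      static const int kIsConstructorBit = 1;
      static const int kIsWasmBit = 2;
      void set_bit(int bit, bool v) {
        flag_ = v ? (flag_ | (1 << bit)) : (flag_ & ~(1 << bit));
      }
      int flag_ = 0;
    };

    int main() {
      FlagSketch f;
      f.set_is_wasm(true);
      assert(f.is_wasm() && !f.is_eval() && !f.is_constructor());
    }
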
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 39f642063e..349fa31f9d 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -19,8 +19,17 @@
namespace v8 {
namespace internal {
-SMI_ACCESSORS(String, length, kLengthOffset)
-SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
+INT32_ACCESSORS(String, length, kLengthOffset)
+
+int String::synchronized_length() const {
+ return base::AsAtomic32::Acquire_Load(
+ reinterpret_cast<const int32_t*>(FIELD_ADDR(this, kLengthOffset)));
+}
+
+void String::synchronized_set_length(int value) {
+ base::AsAtomic32::Release_Store(
+ reinterpret_cast<int32_t*>(FIELD_ADDR(this, kLengthOffset)), value);
+}
CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ExternalOneByteString)
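
String's length becomes a raw 32-bit field with explicit acquire/release accessors, replacing the old SYNCHRONIZED_SMI_ACCESSORS over a pointer-sized Smi slot. The synchronization contract, sketched with std::atomic (base::AsAtomic32 wraps the equivalent operations):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Release-store on the writer, acquire-load on a concurrent reader:
    // a thread that observes the new length also observes every write
    // made to the string before the length was published.
    class StringLengthSketch {
     public:
      int synchronized_length() const {
        return length_.load(std::memory_order_acquire);
      }
      void synchronized_set_length(int value) {
        length_.store(value, std::memory_order_release);
      }

     private:
      std::atomic<int32_t> length_{0};
    };

    int main() {
      StringLengthSketch s;
      s.synchronized_set_length(42);
      assert(s.synchronized_length() == 42);
    }
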
@@ -536,9 +545,9 @@ HeapObject* ThinString::unchecked_actual() const {
return reinterpret_cast<HeapObject*>(READ_FIELD(this, kActualOffset));
}
-bool ExternalString::is_short() const {
+bool ExternalString::is_uncached() const {
InstanceType type = map()->instance_type();
- return (type & kShortExternalStringMask) == kShortExternalStringTag;
+ return (type & kUncachedExternalStringMask) == kUncachedExternalStringTag;
}
Address ExternalString::resource_as_address() {
@@ -562,7 +571,7 @@ uint32_t ExternalString::resource_as_uint32() {
void ExternalString::set_uint32_as_resource(uint32_t value) {
*reinterpret_cast<uintptr_t*>(FIELD_ADDR(this, kResourceOffset)) = value;
- if (is_short()) return;
+ if (is_uncached()) return;
const char** data_field =
reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
*data_field = nullptr;
@@ -573,7 +582,7 @@ const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
}
void ExternalOneByteString::update_data_cache() {
- if (is_short()) return;
+ if (is_uncached()) return;
const char** data_field =
reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
*data_field = resource()->data();
@@ -609,7 +618,7 @@ const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
}
void ExternalTwoByteString::update_data_cache() {
- if (is_short()) return;
+ if (is_uncached()) return;
const uint16_t** data_field =
reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
*data_field = resource()->data();
@@ -733,8 +742,7 @@ class String::SubStringRange::iterator final {
typedef uc16* pointer;
typedef uc16& reference;
- iterator(const iterator& other)
- : content_(other.content_), offset_(other.offset_) {}
+ iterator(const iterator& other) = default;
uc16 operator*() { return content_.Get(offset_); }
bool operator==(const iterator& other) const {
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index 8003bf1aac..b26e86a381 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -42,7 +42,7 @@ class StringTableShape : public BaseShape<StringTableKey*> {
static inline Handle<Object> AsHandle(Isolate* isolate, Key key);
- static inline int GetMapRootIndex();
+ static inline RootIndex GetMapRootIndex();
static const int kPrefixSize = 0;
static const int kEntrySize = 1;
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 4058c7cec3..206bed641c 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -31,7 +31,7 @@ enum RobustnessFlag { ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL };
// shortcutting. Keeping these restrictions in mind has proven to be error-
// prone and so we no longer put StringShapes in variables unless there is a
// concrete performance benefit at that particular point in the code.
-class StringShape BASE_EMBEDDED {
+class StringShape {
public:
inline explicit StringShape(const String* s);
inline explicit StringShape(Map* s);
@@ -264,7 +264,7 @@ class String : public Name {
virtual MaybeHandle<String> GetNamedCapture(Handle<String> name,
CaptureState* state) = 0;
- virtual ~Match() {}
+ virtual ~Match() = default;
};
// ES#sec-getsubstitution
@@ -300,11 +300,11 @@ class String : public Name {
// do any heap allocations. This is useful when printing stack traces.
std::unique_ptr<char[]> ToCString(AllowNullsFlag allow_nulls,
RobustnessFlag robustness_flag, int offset,
- int length, int* length_output = 0);
+ int length, int* length_output = nullptr);
std::unique_ptr<char[]> ToCString(
AllowNullsFlag allow_nulls = DISALLOW_NULLS,
RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
- int* length_output = 0);
+ int* length_output = nullptr);
bool ComputeArrayIndex(uint32_t* index);
@@ -341,8 +341,8 @@ class String : public Name {
inline bool IsFlat();
// Layout description.
- static const int kLengthOffset = Name::kSize;
- static const int kSize = kLengthOffset + kPointerSize;
+ static const int kLengthOffset = Name::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kInt32Size;
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
@@ -360,7 +360,7 @@ class String : public Name {
// See include/v8.h for the definition.
static const int kMaxLength = v8::String::kMaxLength;
- static_assert(kMaxLength <= (Smi::kMaxValue / 2 - kSize),
+ static_assert(kMaxLength <= (Smi::kMaxValue / 2 - kHeaderSize),
"Unexpected max String length");
// Max length for computing hash. For strings longer than this limit the
@@ -370,9 +370,6 @@ class String : public Name {
// Limit for truncation in short printing.
static const int kMaxShortPrintLength = 1024;
- // Support for regular expressions.
- const uc16* GetTwoByteData(unsigned start);
-
// Helper function for flattening strings.
template <typename sinkchar>
static void WriteToFlat(String* source, sinkchar* sink, int from, int to);
@@ -474,9 +471,6 @@ class SeqString : public String {
public:
DECL_CAST(SeqString)
- // Layout description.
- static const int kHeaderSize = String::kSize;
-
// Truncate the string in-place if possible and return the result.
// In case of new_length == 0, the empty string is returned without
// truncating the original string.
@@ -533,8 +527,6 @@ class SeqOneByteString : public SeqString {
STATIC_ASSERT((kMaxSize - kHeaderSize) >= String::kMaxLength);
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
@@ -559,9 +551,6 @@ class SeqTwoByteString : public SeqString {
// is deterministic.
void clear_padding();
- // For regexp code.
- const uint16_t* SeqTwoByteStringGetData(unsigned start);
-
DECL_CAST(SeqTwoByteString)
// Garbage collection support. This method is called by the
@@ -581,8 +570,6 @@ class SeqTwoByteString : public SeqString {
String::kMaxLength);
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
@@ -620,7 +607,7 @@ class ConsString : public String {
DECL_CAST(ConsString)
// Layout description.
- static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
+ static const int kFirstOffset = String::kHeaderSize;
static const int kSecondOffset = kFirstOffset + kPointerSize;
static const int kSize = kSecondOffset + kPointerSize;
@@ -629,8 +616,6 @@ class ConsString : public String {
typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
DECL_VERIFIER(ConsString)
@@ -659,12 +644,10 @@ class ThinString : public String {
DECL_VERIFIER(ThinString)
// Layout description.
- static const int kActualOffset = String::kSize;
+ static const int kActualOffset = String::kHeaderSize;
static const int kSize = kActualOffset + kPointerSize;
typedef FixedBodyDescriptor<kActualOffset, kSize, kSize> BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_COPY_AND_ASSIGN(ThinString);
@@ -696,7 +679,7 @@ class SlicedString : public String {
DECL_CAST(SlicedString)
// Layout description.
- static const int kParentOffset = POINTER_SIZE_ALIGN(String::kSize);
+ static const int kParentOffset = String::kHeaderSize;
static const int kOffsetOffset = kParentOffset + kPointerSize;
static const int kSize = kOffsetOffset + kPointerSize;
@@ -706,8 +689,6 @@ class SlicedString : public String {
typedef FixedBodyDescriptor<kParentOffset, kOffsetOffset + kPointerSize,
kSize>
BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
DECL_VERIFIER(SlicedString)
@@ -729,13 +710,13 @@ class ExternalString : public String {
DECL_CAST(ExternalString)
// Layout description.
- static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
- static const int kShortSize = kResourceOffset + kPointerSize;
+ static const int kResourceOffset = String::kHeaderSize;
+ static const int kUncachedSize = kResourceOffset + kPointerSize;
static const int kResourceDataOffset = kResourceOffset + kPointerSize;
static const int kSize = kResourceDataOffset + kPointerSize;
- // Return whether external string is short (data pointer is not cached).
- inline bool is_short() const;
+ // Return whether the external string data pointer is not cached.
+ inline bool is_uncached() const;
// Size in bytes of the external payload.
int ExternalPayloadSize() const;
@@ -782,8 +763,6 @@ class ExternalOneByteString : public ExternalString {
DECL_CAST(ExternalOneByteString)
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalOneByteString);
@@ -823,8 +802,6 @@ class ExternalTwoByteString : public ExternalString {
DECL_CAST(ExternalTwoByteString)
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
@@ -837,7 +814,7 @@ class FlatStringReader : public Relocatable {
public:
FlatStringReader(Isolate* isolate, Handle<String> str);
FlatStringReader(Isolate* isolate, Vector<const char> input);
- void PostGarbageCollection();
+ void PostGarbageCollection() override;
inline uc32 Get(int index);
template <typename Char>
inline Char Get(int index);
@@ -855,7 +832,7 @@ class FlatStringReader : public Relocatable {
// traversal of the entire string
class ConsStringIterator {
public:
- inline ConsStringIterator() {}
+ inline ConsStringIterator() = default;
inline explicit ConsStringIterator(ConsString* cons_string, int offset = 0) {
Reset(cons_string, offset);
}
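
Here, as in ostreams.h and duplicate-finder.h above, the patch replaces empty user-provided special members with = default. The practical difference in two structs — a defaulted constructor can remain trivial, an empty body never is:

    #include <type_traits>

    struct Defaulted { Defaulted() = default; int x; };
    struct EmptyBody { EmptyBody() {} int x; };

    // Triviality matters for optimization (cheap value initialization and
    // related memcpy-ability traits) and is preserved only by the
    // defaulted form.
    static_assert(std::is_trivially_default_constructible<Defaulted>::value,
                  "= default keeps the constructor trivial");
    static_assert(!std::is_trivially_default_constructible<EmptyBody>::value,
                  "a user-provided body is never trivial");

    int main() {}
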
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index 6a229d847b..24cbd18bd2 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -15,9 +15,6 @@ namespace internal {
class TemplateInfo : public Struct, public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
DECL_ACCESSORS(tag, Object)
DECL_ACCESSORS(serial_number, Object)
DECL_INT_ACCESSORS(number_of_properties)
diff --git a/deps/v8/src/optimized-compilation-info.cc b/deps/v8/src/optimized-compilation-info.cc
index f14c3a6661..d6293c2228 100644
--- a/deps/v8/src/optimized-compilation-info.cc
+++ b/deps/v8/src/optimized-compilation-info.cc
@@ -17,22 +17,11 @@ namespace internal {
OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure)
- : OptimizedCompilationInfo({}, AbstractCode::OPTIMIZED_FUNCTION, zone) {
+ : OptimizedCompilationInfo(Code::OPTIMIZED_FUNCTION, zone) {
shared_info_ = shared;
closure_ = closure;
optimization_id_ = isolate->NextOptimizationId();
- SetFlag(kCalledWithCodeStartRegister);
- if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
- if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
- SetFlag(kSwitchJumpTableEnabled);
- if (FLAG_untrusted_code_mitigations) MarkAsPoisoningRegisterArguments();
-
- // TODO(yangguo): Disable this in case of debugging for crbug.com/826613
- if (FLAG_analyze_environment_liveness) {
- MarkAsAnalyzeEnvironmentLiveness();
- }
-
// Collect source positions for optimized code when profiling or if debugger
// is active, to be able to get more precise source positions at the price of
// more memory consumption.
@@ -45,39 +34,54 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
OptimizedCompilationInfo::OptimizedCompilationInfo(
Vector<const char> debug_name, Zone* zone, Code::Kind code_kind)
- : OptimizedCompilationInfo(
- debug_name, static_cast<AbstractCode::Kind>(code_kind), zone) {
- if (code_kind == Code::BYTECODE_HANDLER) {
- SetFlag(OptimizedCompilationInfo::kCalledWithCodeStartRegister);
- }
-#if ENABLE_GDB_JIT_INTERFACE
-#if DEBUG
- if (code_kind == Code::BUILTIN || code_kind == Code::STUB) {
- MarkAsSourcePositionsEnabled();
- }
-#endif
-#endif
+ : OptimizedCompilationInfo(code_kind, zone) {
+ debug_name_ = debug_name;
+
SetTracingFlags(
PassesFilter(debug_name, CStrVector(FLAG_trace_turbo_filter)));
- // Embedded builtins don't support embedded absolute code addresses, so we
- // cannot use jump tables.
- if (code_kind != Code::BUILTIN) {
- SetFlag(kSwitchJumpTableEnabled);
- }
}
-OptimizedCompilationInfo::OptimizedCompilationInfo(
- Vector<const char> debug_name, AbstractCode::Kind code_kind, Zone* zone)
- : flags_(FLAG_untrusted_code_mitigations ? kUntrustedCodeMitigations : 0),
- code_kind_(code_kind),
- stub_key_(0),
- builtin_index_(Builtins::kNoBuiltinId),
- osr_offset_(BailoutId::None()),
- zone_(zone),
- deferred_handles_(nullptr),
- bailout_reason_(BailoutReason::kNoReason),
- optimization_id_(-1),
- debug_name_(debug_name) {}
+OptimizedCompilationInfo::OptimizedCompilationInfo(Code::Kind code_kind,
+ Zone* zone)
+ : code_kind_(code_kind), zone_(zone) {
+ ConfigureFlags();
+}
+
+void OptimizedCompilationInfo::ConfigureFlags() {
+ if (FLAG_untrusted_code_mitigations) SetFlag(kUntrustedCodeMitigations);
+
+ switch (code_kind_) {
+ case Code::OPTIMIZED_FUNCTION:
+ SetFlag(kCalledWithCodeStartRegister);
+ SetFlag(kSwitchJumpTableEnabled);
+ if (FLAG_function_context_specialization) {
+ MarkAsFunctionContextSpecializing();
+ }
+ if (FLAG_turbo_splitting) {
+ MarkAsSplittingEnabled();
+ }
+ if (FLAG_untrusted_code_mitigations) {
+ MarkAsPoisoningRegisterArguments();
+ }
+ if (FLAG_analyze_environment_liveness) {
+ // TODO(yangguo): Disable this in case of debugging for crbug.com/826613
+ MarkAsAnalyzeEnvironmentLiveness();
+ }
+ break;
+ case Code::BYTECODE_HANDLER:
+ SetFlag(kCalledWithCodeStartRegister);
+ break;
+ case Code::BUILTIN:
+ case Code::STUB:
+#if ENABLE_GDB_JIT_INTERFACE && DEBUG
+ MarkAsSourcePositionsEnabled();
+#endif // ENABLE_GDB_JIT_INTERFACE && DEBUG
+ break;
+ default:
+ SetFlag(kSwitchJumpTableEnabled);
+ break;
+ }
+}
OptimizedCompilationInfo::~OptimizedCompilationInfo() {
if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
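
The hunks above centralize per-code-kind setup in ConfigureFlags() and replace the long constructor init list with in-class default initializers (flags_ = 0, builtin_index_ = -1, and so on, per the header diff below). A compact sketch of that shape; all names and bit values are placeholders mirroring the patch:

    enum class CodeKind {
      OPTIMIZED_FUNCTION, BYTECODE_HANDLER, BUILTIN, STUB, WASM_FUNCTION
    };

    class CompilationInfoSketch {
     public:
      explicit CompilationInfoSketch(CodeKind kind) : code_kind_(kind) {
        ConfigureFlags();  // one place decides every kind-dependent flag
      }

     private:
      static constexpr unsigned kCalledWithCodeStartRegister = 1u << 0;
      static constexpr unsigned kSwitchJumpTableEnabled = 1u << 1;

      void ConfigureFlags() {
        switch (code_kind_) {
          case CodeKind::OPTIMIZED_FUNCTION:
            flags_ |= kCalledWithCodeStartRegister | kSwitchJumpTableEnabled;
            break;
          case CodeKind::BYTECODE_HANDLER:
            flags_ |= kCalledWithCodeStartRegister;
            break;
          case CodeKind::BUILTIN:
          case CodeKind::STUB:
            break;  // no jump tables: no embedded absolute code addresses
          default:
            flags_ |= kSwitchJumpTableEnabled;
            break;
        }
      }

      // In-class defaults replace the removed member init list.
      CodeKind code_kind_;
      unsigned flags_ = 0;
      int optimization_id_ = -1;
    };

    int main() { CompilationInfoSketch info(CodeKind::BUILTIN); }
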
diff --git a/deps/v8/src/optimized-compilation-info.h b/deps/v8/src/optimized-compilation-info.h
index ecb883f49d..37232a2f06 100644
--- a/deps/v8/src/optimized-compilation-info.h
+++ b/deps/v8/src/optimized-compilation-info.h
@@ -27,6 +27,7 @@ class DeferredHandles;
class FunctionLiteral;
class Isolate;
class JavaScriptFrame;
+class JSGlobalObject;
class ParseInfo;
class SourceRangeMap;
class Zone;
@@ -79,11 +80,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
wasm::WasmCode* wasm_code() const {
return const_cast<wasm::WasmCode*>(code_.as_wasm_code());
}
- AbstractCode::Kind abstract_code_kind() const { return code_kind_; }
- Code::Kind code_kind() const {
- DCHECK(code_kind_ < static_cast<AbstractCode::Kind>(Code::NUMBER_OF_KINDS));
- return static_cast<Code::Kind>(code_kind_);
- }
+ Code::Kind code_kind() const { return code_kind_; }
uint32_t stub_key() const { return stub_key_; }
void set_stub_key(uint32_t stub_key) { stub_key_ = stub_key; }
int32_t builtin_index() const { return builtin_index_; }
@@ -200,15 +197,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
JSGlobalObject* global_object() const;
// Accessors for the different compilation modes.
- bool IsOptimizing() const {
- return abstract_code_kind() == AbstractCode::OPTIMIZED_FUNCTION;
- }
- bool IsWasm() const {
- return abstract_code_kind() == AbstractCode::WASM_FUNCTION;
- }
+ bool IsOptimizing() const { return code_kind() == Code::OPTIMIZED_FUNCTION; }
+ bool IsWasm() const { return code_kind() == Code::WASM_FUNCTION; }
bool IsStub() const {
- return abstract_code_kind() != AbstractCode::OPTIMIZED_FUNCTION &&
- abstract_code_kind() != AbstractCode::WASM_FUNCTION;
+ return code_kind() != Code::OPTIMIZED_FUNCTION &&
+ code_kind() != Code::WASM_FUNCTION;
}
void SetOptimizingForOsr(BailoutId osr_offset, JavaScriptFrame* osr_frame) {
DCHECK(IsOptimizing());
@@ -281,8 +274,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
}
private:
- OptimizedCompilationInfo(Vector<const char> debug_name,
- AbstractCode::Kind code_kind, Zone* zone);
+ OptimizedCompilationInfo(Code::Kind code_kind, Zone* zone);
+ void ConfigureFlags();
void SetFlag(Flag flag) { flags_ |= flag; }
bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
@@ -290,13 +283,13 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
void SetTracingFlags(bool passes_filter);
// Compilation flags.
- unsigned flags_;
+ unsigned flags_ = 0;
PoisoningMitigationLevel poisoning_level_ =
PoisoningMitigationLevel::kDontPoison;
- AbstractCode::Kind code_kind_;
- uint32_t stub_key_;
- int32_t builtin_index_;
+ Code::Kind code_kind_;
+ uint32_t stub_key_ = 0;
+ int32_t builtin_index_ = -1;
Handle<SharedFunctionInfo> shared_info_;
@@ -306,7 +299,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
CodeReference code_;
// Entry point when compiling for OSR, {BailoutId::None} otherwise.
- BailoutId osr_offset_;
+ BailoutId osr_offset_ = BailoutId::None();
// The zone from which the compilation pipeline working on this
// OptimizedCompilationInfo allocates.
@@ -314,11 +307,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
std::shared_ptr<DeferredHandles> deferred_handles_;
- BailoutReason bailout_reason_;
+ BailoutReason bailout_reason_ = BailoutReason::kNoReason;
InlinedFunctionList inlined_functions_;
- int optimization_id_;
+ int optimization_id_ = -1;
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* osr_frame_ = nullptr;
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index 04486354cb..2dcd7892d8 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -22,10 +22,6 @@ namespace internal {
OFStreamBase::OFStreamBase(FILE* f) : f_(f) {}
-
-OFStreamBase::~OFStreamBase() {}
-
-
int OFStreamBase::sync() {
std::fflush(f_);
return 0;
@@ -47,9 +43,6 @@ OFStream::OFStream(FILE* f) : std::ostream(nullptr), buf_(f) {
rdbuf(&buf_);
}
-
-OFStream::~OFStream() {}
-
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
AndroidLogStream::~AndroidLogStream() {
// If there is anything left in the line buffer, print it now, even though it
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/ostreams.h
index c6b64a1cd9..189f5384b9 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/ostreams.h
@@ -18,10 +18,10 @@
namespace v8 {
namespace internal {
-class OFStreamBase : public std::streambuf {
+class V8_EXPORT_PRIVATE OFStreamBase : public std::streambuf {
public:
explicit OFStreamBase(FILE* f);
- virtual ~OFStreamBase();
+ ~OFStreamBase() override = default;
protected:
FILE* const f_;
@@ -35,7 +35,7 @@ class OFStreamBase : public std::streambuf {
class V8_EXPORT_PRIVATE OFStream : public std::ostream {
public:
explicit OFStream(FILE* f);
- virtual ~OFStream();
+ ~OFStream() override = default;
private:
OFStreamBase buf_;
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index 24218df199..177f214415 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -2,6 +2,7 @@ set noparent
adamk@chromium.org
gsathya@chromium.org
+leszeks@chromium.org
littledan@chromium.org
marja@chromium.org
neis@chromium.org
diff --git a/deps/v8/src/parsing/duplicate-finder.h b/deps/v8/src/parsing/duplicate-finder.h
index a4981c1872..65bcc4e00d 100644
--- a/deps/v8/src/parsing/duplicate-finder.h
+++ b/deps/v8/src/parsing/duplicate-finder.h
@@ -22,7 +22,7 @@ class Scanner;
// Scanner::IsDuplicateSymbol.
class DuplicateFinder {
public:
- DuplicateFinder() {}
+ DuplicateFinder() = default;
private:
friend class Scanner;
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 7833dbc8d3..2eed75b939 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -5,9 +5,10 @@
#ifndef V8_PARSING_EXPRESSION_CLASSIFIER_H_
#define V8_PARSING_EXPRESSION_CLASSIFIER_H_
+#include <type_traits>
+
#include "src/messages.h"
#include "src/parsing/scanner.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -47,14 +48,38 @@ class DuplicateFinder;
// by calling the method Discard. Both actions result in removing the
// classifier from the parser's stack.
+// Expression classifier is split into four parts. The base implementing the
+// general expression classifier logic. Two parts that implement the error
+// tracking interface, where one is the actual implementation and the other is
+// an empty class providing only the interface without logic. The expression
+// classifier class then combines the other parts and provides the full
+// expression classifier interface by inheriting conditionally, controlled by
+// Types::ExpressionClassifierReportErrors, either from the ErrorTracker or the
+// EmptyErrorTracker.
+//
+// Base
+// / \
+// / \
+// / \
+// / \
+// ErrorTracker EmptyErrorTracker
+// \ /
+// \ /
+// \ /
+// \ /
+// ExpressionClassifier
+
template <typename Types>
-class ExpressionClassifier {
+class ExpressionClassifier;
+
+template <typename Types, typename ErrorTracker>
+class ExpressionClassifierBase {
public:
enum ErrorKind : unsigned {
#define DEFINE_ERROR_KIND(NAME, CODE) k##NAME = CODE,
ERROR_CODES(DEFINE_ERROR_KIND)
#undef DEFINE_ERROR_KIND
- kUnusedError = 15 // Larger than error codes; should fit in 4 bits
+ kUnusedError = 15 // Larger than error codes; should fit in 4 bits
};
struct Error {
@@ -86,23 +111,14 @@ class ExpressionClassifier {
};
// clang-format on
- explicit ExpressionClassifier(typename Types::Base* base,
- DuplicateFinder* duplicate_finder = nullptr)
+ explicit ExpressionClassifierBase(typename Types::Base* base,
+ DuplicateFinder* duplicate_finder = nullptr)
: base_(base),
- previous_(base->classifier_),
- zone_(base->impl()->zone()),
- reported_errors_(base->impl()->GetReportedErrorList()),
duplicate_finder_(duplicate_finder),
invalid_productions_(0),
- is_non_simple_parameter_list_(0) {
- base->classifier_ = this;
- reported_errors_begin_ = reported_errors_end_ = reported_errors_->size();
- }
+ is_non_simple_parameter_list_(0) {}
- V8_INLINE ~ExpressionClassifier() {
- Discard();
- if (base_->classifier_ == this) base_->classifier_ = previous_;
- }
+ virtual ~ExpressionClassifierBase() = default;
V8_INLINE bool is_valid(unsigned productions) const {
return (invalid_productions_ & productions) == 0;
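
The comment block above describes the new four-part split. A reduced model of exactly that composition — a CRTP base plus std::conditional selection, with hypothetical minimal interfaces — showing how the empty tracker's no-ops compile away with no virtual dispatch on the hot path:

    #include <iostream>
    #include <type_traits>

    // The base statically dispatches to its derived tracker (CRTP).
    template <typename ErrorTracker>
    class ClassifierBase {
     public:
      void Accumulate() {
        static_cast<ErrorTracker*>(this)->AccumulateErrorImpl();
      }
    };

    class Tracker : public ClassifierBase<Tracker> {
     public:
      void AccumulateErrorImpl() { std::cout << "recording error\n"; }
    };

    class EmptyTracker : public ClassifierBase<EmptyTracker> {
     public:
      void AccumulateErrorImpl() {}  // intentionally a no-op
    };

    // Mirrors ExpressionClassifier: a compile-time flag picks the tracker,
    // so a preparser-style configuration skips all error bookkeeping.
    template <bool kReportErrors>
    class Classifier
        : public std::conditional<kReportErrors, Tracker,
                                  EmptyTracker>::type {};

    int main() {
      Classifier<true>().Accumulate();   // prints "recording error"
      Classifier<false>().Accumulate();  // inlined away to nothing
    }
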
@@ -150,80 +166,338 @@ class ExpressionClassifier {
return is_valid(AsyncArrowFormalParametersProduction);
}
+ V8_INLINE bool is_simple_parameter_list() const {
+ return !is_non_simple_parameter_list_;
+ }
+
+ V8_INLINE void RecordNonSimpleParameter() {
+ is_non_simple_parameter_list_ = 1;
+ }
+
+ V8_INLINE void Accumulate(ExpressionClassifier<Types>* const inner,
+ unsigned productions) {
+#ifdef DEBUG
+ static_cast<ErrorTracker*>(this)->CheckErrorPositions(inner);
+#endif
+ // Propagate errors from inner, but don't overwrite already recorded
+ // errors.
+ unsigned non_arrow_inner_invalid_productions =
+ inner->invalid_productions_ & ~ArrowFormalParametersProduction;
+ if (non_arrow_inner_invalid_productions) {
+ unsigned errors = non_arrow_inner_invalid_productions & productions &
+ ~this->invalid_productions_;
+ // The result will continue to be a valid arrow formal parameters if the
+ // inner expression is a valid binding pattern.
+ bool copy_BP_to_AFP = false;
+ if (productions & ArrowFormalParametersProduction &&
+ this->is_valid_arrow_formal_parameters()) {
+ // Also whether we've seen any non-simple parameters
+ // if expecting an arrow function parameter.
+ this->is_non_simple_parameter_list_ |=
+ inner->is_non_simple_parameter_list_;
+ if (!inner->is_valid_binding_pattern()) {
+ copy_BP_to_AFP = true;
+ this->invalid_productions_ |= ArrowFormalParametersProduction;
+ }
+ }
+ if (errors != 0 || copy_BP_to_AFP) {
+ this->invalid_productions_ |= errors;
+ static_cast<ErrorTracker*>(this)->AccumulateErrorImpl(
+ inner, productions, errors, copy_BP_to_AFP);
+ }
+ }
+ static_cast<ErrorTracker*>(this)->RewindErrors(inner);
+ }
+
+ protected:
+ typename Types::Base* base_;
+ DuplicateFinder* duplicate_finder_;
+ unsigned invalid_productions_ : kUnusedError;
+ STATIC_ASSERT(kUnusedError <= 15);
+ unsigned is_non_simple_parameter_list_ : 1;
+};
+
+template <typename Types>
+class ExpressionClassifierErrorTracker
+ : public ExpressionClassifierBase<Types,
+ ExpressionClassifierErrorTracker<Types>> {
+ public:
+ using BaseClassType =
+ ExpressionClassifierBase<Types, ExpressionClassifierErrorTracker<Types>>;
+ using typename BaseClassType::Error;
+ using typename BaseClassType::ErrorKind;
+ using TP = typename BaseClassType::TargetProduction;
+
+ ExpressionClassifierErrorTracker(typename Types::Base* base,
+ DuplicateFinder* duplicate_finder)
+ : BaseClassType(base, duplicate_finder),
+ reported_errors_(base->impl()->GetReportedErrorList()) {
+ reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
+ }
+
+ ~ExpressionClassifierErrorTracker() override { Discard(); }
+
+ V8_INLINE void Discard() {
+ if (reported_errors_end_ == reported_errors_->length()) {
+ reported_errors_->Rewind(reported_errors_begin_);
+ reported_errors_end_ = reported_errors_begin_;
+ }
+ DCHECK_EQ(reported_errors_begin_, reported_errors_end_);
+ }
+
+ protected:
+ V8_INLINE const Error& reported_error(ErrorKind kind) const {
+ if (this->invalid_productions_ & (1 << kind)) {
+ for (int i = reported_errors_begin_; i < reported_errors_end_; i++) {
+ if (reported_errors_->at(i).kind == kind)
+ return reported_errors_->at(i);
+ }
+ UNREACHABLE();
+ }
+ // We should only be looking up an error when we know that one has
+ // been reported, so reaching this point means that precondition was
+ // violated; fail loudly here as well.
+ UNREACHABLE();
+
+ // Make MSVC happy by returning an error from this inaccessible path.
+ static Error none;
+ return none;
+ }
+
+ // Adds e to the end of the list of reported errors for this classifier.
+ // It is expected that this classifier is the last one in the stack.
+ V8_INLINE void Add(const Error& e) {
+ DCHECK_EQ(reported_errors_end_, reported_errors_->length());
+ reported_errors_->Add(e, this->base_->impl()->zone());
+ reported_errors_end_++;
+ }
+
+ // Copies the error at position i of the list of reported errors, so that
+ // it becomes the last error reported for this classifier. Position i
+ // could be either after the existing errors of this classifier (i.e.,
+ // in an inner classifier) or it could be an existing error (in case a
+ // copy is needed).
+ V8_INLINE void Copy(int i) {
+ DCHECK_LT(i, reported_errors_->length());
+ if (reported_errors_end_ != i)
+ reported_errors_->at(reported_errors_end_) = reported_errors_->at(i);
+ reported_errors_end_++;
+ }
+
+ private:
+#ifdef DEBUG
+ V8_INLINE void CheckErrorPositions(ExpressionClassifier<Types>* const inner) {
+ DCHECK_EQ(inner->reported_errors_, this->reported_errors_);
+ DCHECK_EQ(inner->reported_errors_begin_, this->reported_errors_end_);
+ DCHECK_EQ(inner->reported_errors_end_, this->reported_errors_->length());
+ }
+#endif
+
+ V8_INLINE void RewindErrors(ExpressionClassifier<Types>* const inner) {
+ this->reported_errors_->Rewind(this->reported_errors_end_);
+ inner->reported_errors_begin_ = inner->reported_errors_end_ =
+ this->reported_errors_end_;
+ }
+
+ void AccumulateErrorImpl(ExpressionClassifier<Types>* const inner,
+ unsigned productions, unsigned errors,
+ bool copy_BP_to_AFP) {
+ // Traverse the list of errors reported by the inner classifier
+ // to copy what's necessary.
+ int binding_pattern_index = inner->reported_errors_end_;
+ for (int i = inner->reported_errors_begin_; i < inner->reported_errors_end_;
+ i++) {
+ int k = this->reported_errors_->at(i).kind;
+ if (errors & (1 << k)) this->Copy(i);
+ // Check if it's a BP error that has to be copied to an AFP error.
+ if (k == ErrorKind::kBindingPatternProduction && copy_BP_to_AFP) {
+ if (this->reported_errors_end_ <= i) {
+ // If the BP error itself has not already been copied,
+ // copy it now and change it to an AFP error.
+ this->Copy(i);
+ this->reported_errors_->at(this->reported_errors_end_ - 1).kind =
+ ErrorKind::kArrowFormalParametersProduction;
+ } else {
+ // Otherwise, if the BP error was already copied, keep its
+ // position and wait until the end of the traversal.
+ DCHECK_EQ(this->reported_errors_end_, i + 1);
+ binding_pattern_index = i;
+ }
+ }
+ }
+ // Do we still have to copy the BP error to an AFP error?
+ if (binding_pattern_index < inner->reported_errors_end_) {
+ // If there's still unused space in the list of the inner
+ // classifier, copy it there, otherwise add it to the end
+ // of the list.
+ if (this->reported_errors_end_ < inner->reported_errors_end_)
+ this->Copy(binding_pattern_index);
+ else
+ Add(this->reported_errors_->at(binding_pattern_index));
+ this->reported_errors_->at(this->reported_errors_end_ - 1).kind =
+ ErrorKind::kArrowFormalParametersProduction;
+ }
+ }
+
+ private:
+ ZoneList<Error>* reported_errors_;
+ // The uint16_t for reported_errors_begin_ and reported_errors_end_ will
+ // not be enough in the case of a long series of expressions using nested
+ // classifiers, e.g., a long sequence of assignments or literals with
+ // spreads, as in:
+ // var N=65536; eval("var x;" + "x=".repeat(N) + "42");
+ // This should not be a problem, as such things currently fail with a
+ // stack overflow while parsing.
+ uint16_t reported_errors_begin_;
+ uint16_t reported_errors_end_;
+
+ friend BaseClassType;
+};
+
+template <typename Types>
+class ExpressionClassifierEmptyErrorTracker
+ : public ExpressionClassifierBase<
+ Types, ExpressionClassifierEmptyErrorTracker<Types>> {
+ public:
+ using BaseClassType =
+ ExpressionClassifierBase<Types,
+ ExpressionClassifierEmptyErrorTracker<Types>>;
+ using typename BaseClassType::Error;
+ using typename BaseClassType::ErrorKind;
+ using TP = typename BaseClassType::TargetProduction;
+
+ ExpressionClassifierEmptyErrorTracker(typename Types::Base* base,
+ DuplicateFinder* duplicate_finder)
+ : BaseClassType(base, duplicate_finder) {}
+
+ V8_INLINE void Discard() {}
+
+ protected:
+ V8_INLINE const Error& reported_error(ErrorKind kind) const {
+ static Error none;
+ return none;
+ }
+
+ V8_INLINE void Add(const Error& e) {}
+
+ private:
+#ifdef DEBUG
+ V8_INLINE void CheckErrorPositions(ExpressionClassifier<Types>* const inner) {
+ }
+#endif
+ V8_INLINE void RewindErrors(ExpressionClassifier<Types>* const inner) {}
+ V8_INLINE void AccumulateErrorImpl(ExpressionClassifier<Types>* const inner,
+ unsigned productions, unsigned errors,
+ bool copy_BP_to_AFP) {}
+
+ friend BaseClassType;
+};
+
+template <typename Types>
+class ExpressionClassifier
+ : public std::conditional<
+ Types::ExpressionClassifierReportErrors,
+ ExpressionClassifierErrorTracker<Types>,
+ ExpressionClassifierEmptyErrorTracker<Types>>::type {
+ static constexpr bool ReportErrors = Types::ExpressionClassifierReportErrors;
+
+ public:
+ using BaseClassType = typename std::conditional<
+ Types::ExpressionClassifierReportErrors,
+ typename ExpressionClassifierErrorTracker<Types>::BaseClassType,
+ typename ExpressionClassifierEmptyErrorTracker<Types>::BaseClassType>::
+ type;
+ using typename BaseClassType::Error;
+ using typename BaseClassType::ErrorKind;
+ using TP = typename BaseClassType::TargetProduction;
+
+ explicit ExpressionClassifier(typename Types::Base* base,
+ DuplicateFinder* duplicate_finder = nullptr)
+ : std::conditional<Types::ExpressionClassifierReportErrors,
+ ExpressionClassifierErrorTracker<Types>,
+ ExpressionClassifierEmptyErrorTracker<Types>>::
+ type(base, duplicate_finder),
+ previous_(base->classifier_) {
+ base->classifier_ = this;
+ }
+
+ V8_INLINE ~ExpressionClassifier() override {
+ if (this->base_->classifier_ == this) this->base_->classifier_ = previous_;
+ }
+
V8_INLINE const Error& expression_error() const {
- return reported_error(kExpressionProduction);
+ return this->reported_error(ErrorKind::kExpressionProduction);
}
V8_INLINE const Error& formal_parameter_initializer_error() const {
- return reported_error(kFormalParameterInitializerProduction);
+ return this->reported_error(
+ ErrorKind::kFormalParameterInitializerProduction);
}
V8_INLINE const Error& binding_pattern_error() const {
- return reported_error(kBindingPatternProduction);
+ return this->reported_error(ErrorKind::kBindingPatternProduction);
}
V8_INLINE const Error& assignment_pattern_error() const {
- return reported_error(kAssignmentPatternProduction);
+ return this->reported_error(ErrorKind::kAssignmentPatternProduction);
}
V8_INLINE const Error& arrow_formal_parameters_error() const {
- return reported_error(kArrowFormalParametersProduction);
+ return this->reported_error(ErrorKind::kArrowFormalParametersProduction);
}
V8_INLINE const Error& duplicate_formal_parameter_error() const {
- return reported_error(kDistinctFormalParametersProduction);
+ return this->reported_error(ErrorKind::kDistinctFormalParametersProduction);
}
V8_INLINE const Error& strict_mode_formal_parameter_error() const {
- return reported_error(kStrictModeFormalParametersProduction);
+ return this->reported_error(
+ ErrorKind::kStrictModeFormalParametersProduction);
}
V8_INLINE const Error& let_pattern_error() const {
- return reported_error(kLetPatternProduction);
+ return this->reported_error(ErrorKind::kLetPatternProduction);
}
V8_INLINE const Error& async_arrow_formal_parameters_error() const {
- return reported_error(kAsyncArrowFormalParametersProduction);
- }
-
- V8_INLINE bool is_simple_parameter_list() const {
- return !is_non_simple_parameter_list_;
+ return this->reported_error(
+ ErrorKind::kAsyncArrowFormalParametersProduction);
}
- V8_INLINE void RecordNonSimpleParameter() {
- is_non_simple_parameter_list_ = 1;
- }
+ V8_INLINE bool does_error_reporting() { return ReportErrors; }
void RecordExpressionError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
- if (!is_valid_expression()) return;
- invalid_productions_ |= ExpressionProduction;
- Add(Error(loc, message, kExpressionProduction, arg));
+ if (!this->is_valid_expression()) return;
+ this->invalid_productions_ |= TP::ExpressionProduction;
+ this->Add(Error(loc, message, ErrorKind::kExpressionProduction, arg));
}
void RecordFormalParameterInitializerError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
- if (!is_valid_formal_parameter_initializer()) return;
- invalid_productions_ |= FormalParameterInitializerProduction;
- Add(Error(loc, message, kFormalParameterInitializerProduction, arg));
+ if (!this->is_valid_formal_parameter_initializer()) return;
+ this->invalid_productions_ |= TP::FormalParameterInitializerProduction;
+ this->Add(Error(loc, message,
+ ErrorKind::kFormalParameterInitializerProduction, arg));
}
void RecordBindingPatternError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
- if (!is_valid_binding_pattern()) return;
- invalid_productions_ |= BindingPatternProduction;
- Add(Error(loc, message, kBindingPatternProduction, arg));
+ if (!this->is_valid_binding_pattern()) return;
+ this->invalid_productions_ |= TP::BindingPatternProduction;
+ this->Add(Error(loc, message, ErrorKind::kBindingPatternProduction, arg));
}
void RecordAssignmentPatternError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
- if (!is_valid_assignment_pattern()) return;
- invalid_productions_ |= AssignmentPatternProduction;
- Add(Error(loc, message, kAssignmentPatternProduction, arg));
+ if (!this->is_valid_assignment_pattern()) return;
+ this->invalid_productions_ |= TP::AssignmentPatternProduction;
+ this->Add(
+ Error(loc, message, ErrorKind::kAssignmentPatternProduction, arg));
}
void RecordPatternError(const Scanner::Location& loc,
@@ -236,24 +510,26 @@ class ExpressionClassifier {
void RecordArrowFormalParametersError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
- if (!is_valid_arrow_formal_parameters()) return;
- invalid_productions_ |= ArrowFormalParametersProduction;
- Add(Error(loc, message, kArrowFormalParametersProduction, arg));
+ if (!this->is_valid_arrow_formal_parameters()) return;
+ this->invalid_productions_ |= TP::ArrowFormalParametersProduction;
+ this->Add(
+ Error(loc, message, ErrorKind::kArrowFormalParametersProduction, arg));
}
void RecordAsyncArrowFormalParametersError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
- if (!is_valid_async_arrow_formal_parameters()) return;
- invalid_productions_ |= AsyncArrowFormalParametersProduction;
- Add(Error(loc, message, kAsyncArrowFormalParametersProduction, arg));
+ if (!this->is_valid_async_arrow_formal_parameters()) return;
+ this->invalid_productions_ |= TP::AsyncArrowFormalParametersProduction;
+ this->Add(Error(loc, message,
+ ErrorKind::kAsyncArrowFormalParametersProduction, arg));
}
void RecordDuplicateFormalParameterError(const Scanner::Location& loc) {
- if (!is_valid_formal_parameter_list_without_duplicates()) return;
- invalid_productions_ |= DistinctFormalParametersProduction;
- Add(Error(loc, MessageTemplate::kParamDupe,
- kDistinctFormalParametersProduction));
+ if (!this->is_valid_formal_parameter_list_without_duplicates()) return;
+ this->invalid_productions_ |= TP::DistinctFormalParametersProduction;
+ this->Add(Error(loc, MessageTemplate::kParamDupe,
+ ErrorKind::kDistinctFormalParametersProduction));
}
// Record a binding that would be invalid in strict mode. Confusingly this
@@ -262,160 +538,30 @@ class ExpressionClassifier {
void RecordStrictModeFormalParameterError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
- if (!is_valid_strict_mode_formal_parameters()) return;
- invalid_productions_ |= StrictModeFormalParametersProduction;
- Add(Error(loc, message, kStrictModeFormalParametersProduction, arg));
+ if (!this->is_valid_strict_mode_formal_parameters()) return;
+ this->invalid_productions_ |= TP::StrictModeFormalParametersProduction;
+ this->Add(Error(loc, message,
+ ErrorKind::kStrictModeFormalParametersProduction, arg));
}
void RecordLetPatternError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
- if (!is_valid_let_pattern()) return;
- invalid_productions_ |= LetPatternProduction;
- Add(Error(loc, message, kLetPatternProduction, arg));
- }
-
- void Accumulate(ExpressionClassifier* inner, unsigned productions) {
- DCHECK_EQ(inner->reported_errors_, reported_errors_);
- DCHECK_EQ(inner->reported_errors_begin_, reported_errors_end_);
- DCHECK_EQ(inner->reported_errors_end_, reported_errors_->size());
- // Propagate errors from inner, but don't overwrite already recorded
- // errors.
- unsigned non_arrow_inner_invalid_productions =
- inner->invalid_productions_ & ~ArrowFormalParametersProduction;
- if (non_arrow_inner_invalid_productions) {
- unsigned errors = non_arrow_inner_invalid_productions & productions &
- ~invalid_productions_;
- // The result will continue to be a valid arrow formal parameters if the
- // inner expression is a valid binding pattern.
- bool copy_BP_to_AFP = false;
- if (productions & ArrowFormalParametersProduction &&
- is_valid_arrow_formal_parameters()) {
- // Also track whether we've seen any non-simple parameters
- // if expecting an arrow function parameter.
- is_non_simple_parameter_list_ |= inner->is_non_simple_parameter_list_;
- if (!inner->is_valid_binding_pattern()) {
- copy_BP_to_AFP = true;
- invalid_productions_ |= ArrowFormalParametersProduction;
- }
- }
- // Traverse the list of errors reported by the inner classifier
- // to copy what's necessary.
- if (errors != 0 || copy_BP_to_AFP) {
- invalid_productions_ |= errors;
- int binding_pattern_index = inner->reported_errors_end_;
- for (int i = inner->reported_errors_begin_;
- i < inner->reported_errors_end_; i++) {
- int k = reported_errors_->at(i).kind;
- if (errors & (1 << k)) Copy(i);
- // Check if it's a BP error that has to be copied to an AFP error.
- if (k == kBindingPatternProduction && copy_BP_to_AFP) {
- if (reported_errors_end_ <= i) {
- // If the BP error itself has not already been copied,
- // copy it now and change it to an AFP error.
- Copy(i);
- reported_errors_->at(reported_errors_end_-1).kind =
- kArrowFormalParametersProduction;
- } else {
- // Otherwise, if the BP error was already copied, keep its
- // position and wait until the end of the traversal.
- DCHECK_EQ(reported_errors_end_, i+1);
- binding_pattern_index = i;
- }
- }
- }
- // Do we still have to copy the BP error to an AFP error?
- if (binding_pattern_index < inner->reported_errors_end_) {
- // If there's still unused space in the list of the inner
- // classifier, copy it there, otherwise add it to the end
- // of the list.
- if (reported_errors_end_ < inner->reported_errors_end_)
- Copy(binding_pattern_index);
- else
- Add(reported_errors_->at(binding_pattern_index));
- reported_errors_->at(reported_errors_end_-1).kind =
- kArrowFormalParametersProduction;
- }
- }
- }
- reported_errors_->resize(reported_errors_end_);
- inner->reported_errors_begin_ = inner->reported_errors_end_ =
- reported_errors_end_;
- }
-
- V8_INLINE void Discard() {
- if (reported_errors_end_ == reported_errors_->size()) {
- reported_errors_->resize(reported_errors_begin_);
- reported_errors_end_ = reported_errors_begin_;
- }
- DCHECK_EQ(reported_errors_begin_, reported_errors_end_);
+ if (!this->is_valid_let_pattern()) return;
+ this->invalid_productions_ |= TP::LetPatternProduction;
+ this->Add(Error(loc, message, ErrorKind::kLetPatternProduction, arg));
}
ExpressionClassifier* previous() const { return previous_; }
private:
- V8_INLINE const Error& reported_error(ErrorKind kind) const {
- if (invalid_productions_ & (1 << kind)) {
- for (int i = reported_errors_begin_; i < reported_errors_end_; i++) {
- if (reported_errors_->at(i).kind == kind)
- return reported_errors_->at(i);
- }
- UNREACHABLE();
- }
- // We should only be looking for an error when we know that one has
- // been reported, but we are not, so this second UNREACHABLE keeps
- // the behaviour the same in either case.
- UNREACHABLE();
-
- // Make MSVC happy by returning an error from this inaccessible path.
- static Error none;
- return none;
- }
-
- // Adds e to the end of the list of reported errors for this classifier.
- // It is expected that this classifier is the last one in the stack.
- V8_INLINE void Add(const Error& e) {
- DCHECK_EQ(reported_errors_end_, reported_errors_->size());
- reported_errors_->push_back(e);
- reported_errors_end_++;
- }
-
- // Copies the error at position i of the list of reported errors, so that
- // it becomes the last error reported for this classifier. Position i
- // could be either after the existing errors of this classifier (i.e.,
- // in an inner classifier) or it could be an existing error (in case a
- // copy is needed).
- V8_INLINE void Copy(int i) {
- DCHECK_LT(i, reported_errors_->size());
- if (reported_errors_end_ != i)
- reported_errors_->at(reported_errors_end_) = reported_errors_->at(i);
- reported_errors_end_++;
- }
-
- typename Types::Base* base_;
ExpressionClassifier* previous_;
- Zone* zone_;
- ZoneVector<Error>* reported_errors_;
- DuplicateFinder* duplicate_finder_;
- unsigned invalid_productions_ : 15;
- unsigned is_non_simple_parameter_list_ : 1;
- // The uint16_t for reported_errors_begin_ and reported_errors_end_ will
- // not be enough in the case of a long series of expressions using nested
- // classifiers, e.g., a long sequence of assignments or a long run of
- // array literals with spreads, as in:
- // var N=65536; eval("var x;" + "x=".repeat(N) + "42");
- // This should not be a problem, as such things currently fail with a
- // stack overflow while parsing.
- uint16_t reported_errors_begin_;
- uint16_t reported_errors_end_;
DISALLOW_COPY_AND_ASSIGN(ExpressionClassifier);
};
-
#undef ERROR_CODES
-
} // namespace internal
} // namespace v8
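Two C++ techniques recur in the expression-classifier hunks above. First, member accesses are now spelled this->member because the rewritten classifier inherits them from a base class that depends on a template parameter, and unqualified names from dependent bases are not visible at template definition time. Second, each grammar production owns one bit in invalid_productions_, so at most one error is ever recorded per production. Below is a minimal compilable sketch of both patterns; ClassifierBase, Classifier, NoTypes, and kBindingPattern are invented for illustration and are not V8's types.

    #include <cstdint>
    #include <iostream>

    template <typename Types>
    class ClassifierBase {
     protected:
      uint32_t invalid_productions_ = 0;
      int error_count_ = 0;
    };

    template <typename Types>
    class Classifier : public ClassifierBase<Types> {
     public:
      enum Production : uint32_t { kBindingPattern = 1 << 0 };

      bool is_valid(uint32_t production) const {
        // Without this->, lookup of invalid_productions_ fails: names from
        // a dependent base are invisible until instantiation.
        return (this->invalid_productions_ & production) == 0;
      }

      void RecordBindingPatternError() {
        // One bit per production: a second report for the same production
        // returns early, mirroring the Record*Error methods in the diff.
        if (!is_valid(kBindingPattern)) return;
        this->invalid_productions_ |= kBindingPattern;
        ++this->error_count_;  // stand-in for Add(Error(...))
      }

      int error_count() const { return this->error_count_; }
    };

    struct NoTypes {};

    int main() {
      Classifier<NoTypes> c;
      c.RecordBindingPatternError();
      c.RecordBindingPatternError();  // dropped: production already invalid
      std::cout << c.error_count() << "\n";  // prints 1
    }

Removing the this-> qualifier inside is_valid() makes the sketch fail to compile, which is why the diff adds the qualifier throughout.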
diff --git a/deps/v8/src/parsing/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index 8f0f428a05..d46d7f2c2b 100644
--- a/deps/v8/src/parsing/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -36,12 +36,8 @@ class FuncNameInferrer : public ZoneObject {
// on the stack.
class State {
public:
- explicit State(FuncNameInferrer* fni) : fni_(fni) {
- if (fni_ != nullptr) fni_->Enter();
- }
- ~State() {
- if (fni_ != nullptr) fni_->Leave();
- }
+ explicit State(FuncNameInferrer* fni) : fni_(fni) { fni_->Enter(); }
+ ~State() { fni_->Leave(); }
private:
FuncNameInferrer* fni_;
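The null checks can be dropped from State because, later in this same commit, ParserBase stops holding a FuncNameInferrer* and embeds the inferrer by value (see the parser-base.h hunk replacing FuncNameInferrer* fni_ with FuncNameInferrer fni_), so the pointer passed to State is never null. A self-contained sketch of the resulting RAII enter/leave guard, with Tracker invented as a stand-in for FuncNameInferrer:

    #include <cassert>
    #include <iostream>

    // Invented stand-in for FuncNameInferrer: Enter()/Leave() bracket a
    // region in which candidate function names are collected.
    class Tracker {
     public:
      void Enter() { ++depth_; }
      void Leave() { --depth_; }
      int depth() const { return depth_; }

      // RAII guard matching the simplified State: the pointer is assumed
      // non-null, so the constructor and destructor need no checks.
      class State {
       public:
        explicit State(Tracker* tracker) : tracker_(tracker) {
          tracker_->Enter();
        }
        ~State() { tracker_->Leave(); }

       private:
        Tracker* tracker_;
      };

     private:
      int depth_ = 0;
    };

    int main() {
      Tracker tracker;
      {
        Tracker::State state(&tracker);
        assert(tracker.depth() == 1);
      }
      std::cout << tracker.depth() << "\n";  // prints 0: Leave() ran at scope exit
    }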
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 0a58c4f0bd..129b00a2c2 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -16,7 +16,7 @@
namespace v8 {
namespace internal {
-ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
+ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
: zone_(base::make_unique<Zone>(zone_allocator, ZONE_NAME)),
flags_(0),
extension_(nullptr),
@@ -37,7 +37,10 @@ ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
function_name_(nullptr),
runtime_call_stats_(nullptr),
source_range_map_(nullptr),
- literal_(nullptr) {
+ literal_(nullptr) {}
+
+ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
+ : ParseInfo(zone_allocator) {
set_hash_seed(isolate->heap()->HashSeed());
set_stack_limit(isolate->stack_guard()->real_climit());
set_unicode_cache(isolate->unicode_cache());
@@ -54,6 +57,18 @@ ParseInfo::ParseInfo(Isolate* isolate)
LOG(isolate, ScriptEvent(Logger::ScriptEventType::kReserveId, script_id_));
}
+template <typename T>
+void ParseInfo::SetFunctionInfo(T function) {
+ set_is_named_expression(function->is_named_expression());
+ set_language_mode(function->language_mode());
+ set_function_kind(function->kind());
+ set_declaration(function->is_declaration());
+ set_requires_instance_fields_initializer(
+ function->requires_instance_fields_initializer());
+ set_toplevel(function->is_toplevel());
+ set_wrapped_as_function(function->is_wrapped());
+}
+
ParseInfo::ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared)
: ParseInfo(isolate, isolate->allocator()) {
// Do not support re-parsing top-level function of a wrapped script.
@@ -61,19 +76,13 @@ ParseInfo::ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared)
// wrapped script at all.
DCHECK_IMPLIES(is_toplevel(), !Script::cast(shared->script())->is_wrapped());
- set_toplevel(shared->is_toplevel());
- set_wrapped_as_function(shared->is_wrapped());
set_allow_lazy_parsing(FLAG_lazy_inner_functions);
- set_is_named_expression(shared->is_named_expression());
+ set_asm_wasm_broken(shared->is_asm_wasm_broken());
+
set_start_position(shared->StartPosition());
set_end_position(shared->EndPosition());
function_literal_id_ = shared->FunctionLiteralId(isolate);
- set_language_mode(shared->language_mode());
- set_function_kind(shared->kind());
- set_declaration(shared->is_declaration());
- set_requires_instance_fields_initializer(
- shared->requires_instance_fields_initializer());
- set_asm_wasm_broken(shared->is_asm_wasm_broken());
+ SetFunctionInfo(shared);
Handle<Script> script(Script::cast(shared->script()), isolate);
set_script(script);
@@ -99,37 +108,44 @@ ParseInfo::ParseInfo(Isolate* isolate, Handle<Script> script)
script->IsUserJavaScript());
}
-ParseInfo::~ParseInfo() {}
-
-DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
-
-void ParseInfo::EmitBackgroundParseStatisticsOnBackgroundThread() {
- // If runtime call stats was enabled by tracing, emit a trace event at the
- // end of background parsing on the background thread.
- if (runtime_call_stats_ &&
- (FLAG_runtime_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
- auto value = v8::tracing::TracedValue::Create();
- runtime_call_stats_->Dump(value.get());
- TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"),
- "V8.RuntimeStats", TRACE_EVENT_SCOPE_THREAD,
- "runtime-call-stats", std::move(value));
- }
+// static
+std::unique_ptr<ParseInfo> ParseInfo::FromParent(
+ const ParseInfo* outer_parse_info, AccountingAllocator* zone_allocator,
+ const FunctionLiteral* literal, const AstRawString* function_name) {
+ std::unique_ptr<ParseInfo> result =
+ base::make_unique<ParseInfo>(zone_allocator);
+
+ // Replicate shared state of the outer_parse_info.
+ result->flags_ = outer_parse_info->flags_;
+ result->script_id_ = outer_parse_info->script_id_;
+ result->set_logger(outer_parse_info->logger());
+ result->set_ast_string_constants(outer_parse_info->ast_string_constants());
+ result->set_hash_seed(outer_parse_info->hash_seed());
+
+ DCHECK_EQ(outer_parse_info->parameters_end_pos(), kNoSourcePosition);
+ DCHECK_NULL(outer_parse_info->extension());
+ DCHECK(outer_parse_info->maybe_outer_scope_info().is_null());
+
+ // Clone the function_name AstRawString into the ParseInfo's own
+ // AstValueFactory.
+ const AstRawString* cloned_function_name =
+ result->GetOrCreateAstValueFactory()->CloneFromOtherFactory(
+ function_name);
+
+ // Set up function-specific details.
+ DCHECK(!literal->is_toplevel());
+ result->set_function_name(cloned_function_name);
+ result->set_start_position(literal->start_position());
+ result->set_end_position(literal->end_position());
+ result->set_function_literal_id(literal->function_literal_id());
+ result->SetFunctionInfo(literal);
+
+ return result;
}
-void ParseInfo::UpdateBackgroundParseStatisticsOnMainThread(Isolate* isolate) {
- // Copy over the counters from the background thread to the main counters on
- // the isolate.
- RuntimeCallStats* main_call_stats = isolate->counters()->runtime_call_stats();
- if (FLAG_runtime_stats ==
- v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE) {
- DCHECK_NE(main_call_stats, runtime_call_stats());
- DCHECK_NOT_NULL(main_call_stats);
- DCHECK_NOT_NULL(runtime_call_stats());
- main_call_stats->Add(runtime_call_stats());
- }
- set_runtime_call_stats(main_call_stats);
-}
+ParseInfo::~ParseInfo() = default;
+
+DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
Handle<Script> ParseInfo::CreateScript(Isolate* isolate, Handle<String> source,
ScriptOriginOptions origin_options,
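SetFunctionInfo accepts both a Handle<SharedFunctionInfo> and a const FunctionLiteral* because function templates are duck-typed: any argument whose operator-> reaches accessors with the expected names will instantiate. A hedged sketch of the idea, with CompiledInfo and LiteralInfo invented as stand-ins for the two real metadata sources:

    #include <iostream>

    // Two hypothetical sources of function metadata exposing the same
    // accessor names, standing in for Handle<SharedFunctionInfo> and
    // const FunctionLiteral*.
    struct CompiledInfo {
      bool is_declaration() const { return true; }
      bool is_toplevel() const { return false; }
    };

    struct LiteralInfo {
      bool is_declaration() const { return false; }
      bool is_toplevel() const { return true; }
    };

    class Info {
     public:
      // Duck-typed: any T whose operator-> reaches the expected accessors
      // works; there is no common base class involved.
      template <typename T>
      void SetFunctionInfo(T function) {
        declaration_ = function->is_declaration();
        toplevel_ = function->is_toplevel();
      }

      bool declaration() const { return declaration_; }
      bool toplevel() const { return toplevel_; }

     private:
      bool declaration_ = false;
      bool toplevel_ = false;
    };

    int main() {
      CompiledInfo compiled;
      LiteralInfo literal;
      Info a, b;
      a.SetFunctionInfo(&compiled);  // a raw pointer works via ->
      b.SetFunctionInfo(&literal);
      std::cout << a.declaration() << " " << b.toplevel() << "\n";  // 1 1
    }

The same trick is what lets FromParent above reuse SetFunctionInfo for a FunctionLiteral that was parsed but never compiled.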
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 64a50806f5..ba3e3d2898 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -12,6 +12,7 @@
#include "include/v8.h"
#include "src/globals.h"
#include "src/handles.h"
+#include "src/objects/script.h"
#include "src/parsing/preparsed-scope-data.h"
#include "src/pending-compilation-error-handler.h"
@@ -37,11 +38,18 @@ class Zone;
// A container for the inputs, configuration options, and outputs of parsing.
class V8_EXPORT_PRIVATE ParseInfo {
public:
- ParseInfo(Isolate*);
+ explicit ParseInfo(AccountingAllocator* zone_allocator);
+ explicit ParseInfo(Isolate*);
ParseInfo(Isolate*, AccountingAllocator* zone_allocator);
ParseInfo(Isolate* isolate, Handle<Script> script);
ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared);
+ // Creates a new parse info based on parent top-level |outer_parse_info| for
+ // function |literal|.
+ static std::unique_ptr<ParseInfo> FromParent(
+ const ParseInfo* outer_parse_info, AccountingAllocator* zone_allocator,
+ const FunctionLiteral* literal, const AstRawString* function_name);
+
~ParseInfo();
Handle<Script> CreateScript(Isolate* isolate, Handle<String> source,
@@ -105,9 +113,12 @@ class V8_EXPORT_PRIVATE ParseInfo {
v8::Extension* extension() const { return extension_; }
void set_extension(v8::Extension* extension) { extension_ = extension; }
-
+ void set_consumed_preparsed_scope_data(
+ std::unique_ptr<ConsumedPreParsedScopeData> data) {
+ consumed_preparsed_scope_data_.swap(data);
+ }
ConsumedPreParsedScopeData* consumed_preparsed_scope_data() {
- return &consumed_preparsed_scope_data_;
+ return consumed_preparsed_scope_data_.get();
}
DeclarationScope* script_scope() const { return script_scope_; }
@@ -198,6 +209,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
// TODO(titzer): these should not be part of ParseInfo.
//--------------------------------------------------------------------------
Handle<Script> script() const { return script_; }
+ void set_script(Handle<Script> script);
+
MaybeHandle<ScopeInfo> maybe_outer_scope_info() const {
return maybe_outer_scope_info_;
}
@@ -216,12 +229,13 @@ class V8_EXPORT_PRIVATE ParseInfo {
set_strict_mode(is_strict(language_mode));
}
- void EmitBackgroundParseStatisticsOnBackgroundThread();
- void UpdateBackgroundParseStatisticsOnMainThread(Isolate* isolate);
-
private:
void SetScriptForToplevelCompile(Isolate* isolate, Handle<Script> script);
- void set_script(Handle<Script> script);
+
+ // Set function info flags based on those in either FunctionLiteral or
+ // SharedFunctionInfo |function|.
+ template <typename T>
+ void SetFunctionInfo(T function);
// Various configuration flags for parsing.
enum Flag {
@@ -268,7 +282,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
//----------- Inputs+Outputs of parsing and scope analysis -----------------
std::unique_ptr<Utf16CharacterStream> character_stream_;
- ConsumedPreParsedScopeData consumed_preparsed_scope_data_;
+ std::unique_ptr<ConsumedPreParsedScopeData> consumed_preparsed_scope_data_;
std::unique_ptr<AstValueFactory> ast_value_factory_;
const class AstStringConstants* ast_string_constants_;
const AstRawString* function_name_;
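Replacing the by-value ConsumedPreParsedScopeData member with a std::unique_ptr lets the new setter take ownership with a swap while the getter hands out a raw, non-owning pointer. A minimal sketch of that ownership pattern, with ScopeData and Holder invented for illustration:

    #include <iostream>
    #include <memory>
    #include <utility>

    // Invented payload standing in for ConsumedPreParsedScopeData.
    struct ScopeData {
      int id = 0;
    };

    class Holder {
     public:
      // Takes ownership by swapping, as set_consumed_preparsed_scope_data
      // does; whatever was held before ends up in the caller's (dying)
      // unique_ptr and is destroyed when the parameter goes out of scope.
      void set_data(std::unique_ptr<ScopeData> data) { data_.swap(data); }

      // Non-owning access, mirroring consumed_preparsed_scope_data().
      ScopeData* data() const { return data_.get(); }

     private:
      std::unique_ptr<ScopeData> data_;
    };

    int main() {
      auto payload = std::make_unique<ScopeData>();
      payload->id = 42;

      Holder holder;
      holder.set_data(std::move(payload));
      std::cout << holder.data()->id << "\n";  // prints 42; holder owns it now
    }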
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 9d13724f06..d1ad0e9d13 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -5,12 +5,14 @@
#ifndef V8_PARSING_PARSER_BASE_H_
#define V8_PARSING_PARSER_BASE_H_
+#include <stdint.h>
#include <vector>
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
+#include "src/base/flags.h"
#include "src/base/hashmap.h"
#include "src/base/v8-fallthrough.h"
#include "src/counters.h"
@@ -37,31 +39,13 @@ enum AllowLabelledFunctionStatement {
kDisallowLabelledFunctionStatement,
};
-enum class ParseFunctionFlags {
+enum class ParseFunctionFlag : uint8_t {
kIsNormal = 0,
- kIsGenerator = 1,
- kIsAsync = 2,
- kIsDefault = 4
+ kIsGenerator = 1 << 0,
+ kIsAsync = 1 << 1
};
-static inline ParseFunctionFlags operator|(ParseFunctionFlags lhs,
- ParseFunctionFlags rhs) {
- typedef unsigned char T;
- return static_cast<ParseFunctionFlags>(static_cast<T>(lhs) |
- static_cast<T>(rhs));
-}
-
-static inline ParseFunctionFlags& operator|=(ParseFunctionFlags& lhs,
- const ParseFunctionFlags& rhs) {
- lhs = lhs | rhs;
- return lhs;
-}
-
-static inline bool operator&(ParseFunctionFlags bitfield,
- ParseFunctionFlags mask) {
- typedef unsigned char T;
- return static_cast<T>(bitfield) & static_cast<T>(mask);
-}
+typedef base::Flags<ParseFunctionFlag> ParseFunctionFlags;
struct FormalParametersBase {
explicit FormalParametersBase(DeclarationScope* scope) : scope(scope) {}
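The hand-rolled operator|, operator|=, and operator& above are replaced by the reusable base::Flags<ParseFunctionFlag> template, a type-safe bitset over an enum class. The sketch below shows the shape of such a wrapper; it is not V8's base::Flags, and ParseFlag, Flags, and ParseFlags are invented names:

    #include <cstdint>
    #include <iostream>

    enum class ParseFlag : uint8_t {
      kIsNormal = 0,
      kIsGenerator = 1 << 0,
      kIsAsync = 1 << 1,
    };

    template <typename Enum>
    class Flags {
     public:
      constexpr Flags() : bits_(0) {}
      // Implicit on purpose, so a bare enumerator converts to a flag set.
      constexpr Flags(Enum e) : bits_(static_cast<uint8_t>(e)) {}

      Flags& operator|=(Flags other) {
        bits_ |= other.bits_;
        return *this;
      }
      constexpr Flags operator|(Flags other) const {
        return Flags(static_cast<Enum>(bits_ | other.bits_));
      }
      // Returns bool for brevity; V8's real class is slightly richer.
      constexpr bool operator&(Flags other) const {
        return (bits_ & other.bits_) != 0;
      }

     private:
      uint8_t bits_;
    };

    using ParseFlags = Flags<ParseFlag>;

    int main() {
      ParseFlags flags = ParseFlag::kIsGenerator;
      flags |= ParseFlag::kIsAsync;
      std::cout << (flags & ParseFlag::kIsAsync) << "\n";   // prints 1
      std::cout << (flags & ParseFlag::kIsNormal) << "\n";  // prints 0
    }

Keeping the converting constructor implicit is the design point: a bare enumerator such as ParseFlag::kIsAsync can be used wherever a flag set is expected, which is what lets later hunks write assignments like *flags = ParseFunctionFlag::kIsAsync.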
@@ -228,6 +212,17 @@ class SourceRangeScope final {
template <typename Impl>
struct ParserTypes;
+enum class ParsePropertyKind : uint8_t {
+ kAccessorGetter,
+ kAccessorSetter,
+ kValue,
+ kShorthand,
+ kMethod,
+ kClassField,
+ kSpread,
+ kNotSet
+};
+
template <typename Impl>
class ParserBase {
public:
@@ -248,6 +243,10 @@ class ParserBase {
typedef typename Types::ForStatement ForStatementT;
typedef typename v8::internal::ExpressionClassifier<Types>
ExpressionClassifier;
+ typedef typename Types::FuncNameInferrer FuncNameInferrer;
+ typedef typename Types::FuncNameInferrer::State FuncNameInferrerState;
+ typedef typename Types::SourceRange SourceRange;
+ typedef typename Types::SourceRangeScope SourceRangeScope;
// All implementation-specific methods must be called through this.
Impl* impl() { return static_cast<Impl*>(this); }
@@ -262,7 +261,7 @@ class ParserBase {
original_scope_(nullptr),
function_state_(nullptr),
extension_(extension),
- fni_(nullptr),
+ fni_(ast_value_factory, zone),
ast_value_factory_(ast_value_factory),
ast_node_factory_(ast_value_factory, zone),
runtime_call_stats_(runtime_call_stats),
@@ -300,12 +299,6 @@ class ParserBase {
#undef ALLOW_ACCESSORS
- bool allow_harmony_bigint() const {
- return scanner()->allow_harmony_bigint();
- }
- void set_allow_harmony_bigint(bool allow) {
- scanner()->set_allow_harmony_bigint(allow);
- }
bool allow_harmony_numeric_separator() const {
return scanner()->allow_harmony_numeric_separator();
}
@@ -370,7 +363,7 @@ class ParserBase {
// The parser's current scope is in scope_. BlockState and FunctionState
// constructors push on the scope stack and the destructors pop. They are also
// used to hold the parser's per-function state.
- class BlockState BASE_EMBEDDED {
+ class BlockState {
public:
BlockState(Scope** scope_stack, Scope* scope)
: scope_stack_(scope_stack), outer_scope_(*scope_stack) {
@@ -433,7 +426,7 @@ class ParserBase {
return destructuring_assignments_to_rewrite_;
}
- ZoneVector<typename ExpressionClassifier::Error>* GetReportedErrorList() {
+ ZoneList<typename ExpressionClassifier::Error>* GetReportedErrorList() {
return &reported_errors_;
}
@@ -490,8 +483,7 @@ class ParserBase {
ZoneChunkList<RewritableExpressionT> destructuring_assignments_to_rewrite_;
- // We use a ZoneVector here because we need to do a lot of random access.
- ZoneVector<typename ExpressionClassifier::Error> reported_errors_;
+ ZoneList<typename ExpressionClassifier::Error> reported_errors_;
// A reason, if any, why this function should not be optimized.
BailoutReason dont_optimize_reason_;
@@ -652,11 +644,11 @@ class ParserBase {
// scope itself is either allocated in zone() or in target_zone if one is
// passed in.
DeclarationScope* NewFunctionScope(FunctionKind kind,
- Zone* target_zone = nullptr) const {
+ Zone* parse_zone = nullptr) const {
DCHECK(ast_value_factory());
- if (target_zone == nullptr) target_zone = zone();
- DeclarationScope* result = new (target_zone)
- DeclarationScope(zone(), scope(), FUNCTION_SCOPE, kind);
+ if (parse_zone == nullptr) parse_zone = zone();
+ DeclarationScope* result = new (zone())
+ DeclarationScope(parse_zone, scope(), FUNCTION_SCOPE, kind);
// Record presence of an inner function scope
function_state_->RecordFunctionOrEvalCall();
@@ -679,6 +671,8 @@ class ParserBase {
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
int position() const { return scanner_->location().beg_pos; }
int peek_position() const { return scanner_->peek_location().beg_pos; }
+ int end_position() const { return scanner_->location().end_pos; }
+ int peek_end_position() const { return scanner_->peek_location().end_pos; }
bool stack_overflow() const {
return pending_error_handler()->stack_overflow();
}
@@ -694,8 +688,7 @@ class ParserBase {
// Returns the position past the following semicolon (if it exists), and the
// position past the end of the current token otherwise.
int PositionAfterSemicolon() {
- return (peek() == Token::SEMICOLON) ? scanner_->peek_location().end_pos
- : scanner_->location().end_pos;
+ return (peek() == Token::SEMICOLON) ? peek_end_position() : end_position();
}
V8_INLINE Token::Value PeekAhead() {
@@ -720,7 +713,7 @@ class ParserBase {
Token::Value next = Next();
USE(next);
USE(token);
- DCHECK(next == token);
+ DCHECK_EQ(next, token);
}
bool Check(Token::Value token) {
@@ -753,22 +746,14 @@ class ParserBase {
return;
}
- Token::Value current = scanner()->current_token();
- Scanner::Location current_location = scanner()->location();
- Token::Value next = Next();
-
- if (next == Token::SEMICOLON) {
- return;
- }
-
*ok = false;
- if (current == Token::AWAIT && !is_async_function()) {
- ReportMessageAt(current_location,
+ if (scanner()->current_token() == Token::AWAIT && !is_async_function()) {
+ ReportMessageAt(scanner()->location(),
MessageTemplate::kAwaitNotInAsyncFunction, kSyntaxError);
return;
}
- ReportUnexpectedToken(next);
+ ReportUnexpectedToken(Next());
}
// Dummy functions, just useful as arguments to CHECK_OK_CUSTOM.
@@ -778,14 +763,7 @@ class ParserBase {
return result;
}
- bool is_any_identifier(Token::Value token) {
- return token == Token::IDENTIFIER || token == Token::ENUM ||
- token == Token::AWAIT || token == Token::ASYNC ||
- token == Token::ESCAPED_STRICT_RESERVED_WORD ||
- token == Token::FUTURE_STRICT_RESERVED_WORD || token == Token::LET ||
- token == Token::STATIC || token == Token::YIELD;
- }
- bool peek_any_identifier() { return is_any_identifier(peek()); }
+ bool peek_any_identifier() { return Token::IsAnyIdentifier(peek()); }
bool CheckContextualKeyword(Token::Value token) {
if (PeekContextualKeyword(token)) {
@@ -961,7 +939,11 @@ class ParserBase {
void ReportClassifierError(
const typename ExpressionClassifier::Error& error) {
- impl()->ReportMessageAt(error.location, error.message, error.arg);
+ if (classifier()->does_error_reporting()) {
+ impl()->ReportMessageAt(error.location, error.message, error.arg);
+ } else {
+ impl()->ReportUnidentifiableError();
+ }
}
void ValidateExpression(bool* ok) {
@@ -1006,7 +988,7 @@ class ParserBase {
}
bool IsValidArrowFormalParametersStart(Token::Value token) {
- return is_any_identifier(token) || token == Token::LPAREN;
+ return Token::IsAnyIdentifier(token) || token == Token::LPAREN;
}
void ValidateArrowFormalParameters(ExpressionT expr,
@@ -1096,15 +1078,12 @@ class ParserBase {
ExpressionT ParseRegExpLiteral(bool* ok);
+ ExpressionT ParseBindingPattern(bool* ok);
ExpressionT ParsePrimaryExpression(bool* is_async, bool* ok);
- ExpressionT ParsePrimaryExpression(bool* ok) {
- bool is_async;
- return ParsePrimaryExpression(&is_async, ok);
- }
// Use when parsing an expression that is known to not be a pattern or part
// of a pattern.
- V8_INLINE ExpressionT ParseExpression(bool accept_IN, bool* ok);
+ V8_INLINE ExpressionT ParseExpression(bool* ok);
// This method does not wrap the parsing of the expression inside a
// new expression classifier; it uses the top-level classifier instead.
@@ -1117,28 +1096,21 @@ class ParserBase {
ExpressionT ParseArrayLiteral(bool* ok);
- enum class PropertyKind {
- kAccessorProperty,
- kValueProperty,
- kShorthandProperty,
- kMethodProperty,
- kClassField,
- kSpreadProperty,
- kNotSet
- };
+ inline static bool IsAccessor(ParsePropertyKind kind) {
+ return IsInRange(kind, ParsePropertyKind::kAccessorGetter,
+ ParsePropertyKind::kAccessorSetter);
+ }
- bool SetPropertyKindFromToken(Token::Value token, PropertyKind* kind);
- ExpressionT ParsePropertyName(IdentifierT* name, PropertyKind* kind,
- bool* is_generator, bool* is_get, bool* is_set,
- bool* is_async, bool* is_computed_name,
- bool* ok);
+ ExpressionT ParsePropertyName(IdentifierT* name, ParsePropertyKind* kind,
+ ParseFunctionFlags* flags,
+ bool* is_computed_name, bool* ok);
ExpressionT ParseObjectLiteral(bool* ok);
ClassLiteralPropertyT ParseClassPropertyDefinition(
ClassLiteralChecker* checker, ClassInfo* class_info,
IdentifierT* property_name, bool has_extends, bool* is_computed_name,
ClassLiteralProperty::Kind* property_kind, bool* is_static, bool* ok);
- ExpressionT ParseClassFieldInitializer(ClassInfo* class_info, bool is_static,
- bool* ok);
+ ExpressionT ParseClassFieldInitializer(ClassInfo* class_info, int beg_pos,
+ bool is_static, bool* ok);
ObjectLiteralPropertyT ParseObjectPropertyDefinition(
ObjectLiteralChecker* checker, bool* is_computed_name,
bool* is_rest_property, bool* ok);
@@ -1147,17 +1119,26 @@ class ParserBase {
bool* is_simple_parameter_list, bool* ok);
ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
bool* ok) {
- return ParseArguments(first_spread_pos, false, nullptr, ok);
+ bool is_simple = true;
+ return ParseArguments(first_spread_pos, false, &is_simple, ok);
}
ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
ExpressionT ParseYieldExpression(bool accept_IN, bool* ok);
V8_INLINE ExpressionT ParseConditionalExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseConditionalContinuation(ExpressionT expression,
+ bool accept_IN, int pos, bool* ok);
ExpressionT ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- ExpressionT ParseUnaryExpression(bool* ok);
+ ExpressionT ParseUnaryOpExpression(bool* ok);
+ ExpressionT ParseAwaitExpression(bool* ok);
+ ExpressionT ParsePrefixExpression(bool* ok);
+ V8_INLINE ExpressionT ParseUnaryExpression(bool* ok);
V8_INLINE ExpressionT ParsePostfixExpression(bool* ok);
V8_INLINE ExpressionT ParseLeftHandSideExpression(bool* ok);
- ExpressionT ParseMemberWithNewPrefixesExpression(bool* is_async, bool* ok);
+ ExpressionT ParseMemberWithPresentNewPrefixesExpression(bool* is_async,
+ bool* ok);
+ V8_INLINE ExpressionT ParseMemberWithNewPrefixesExpression(bool* is_async,
+ bool* ok);
V8_INLINE ExpressionT ParseMemberExpression(bool* is_async, bool* ok);
V8_INLINE ExpressionT ParseMemberExpressionContinuation(
ExpressionT expression, bool* is_async, bool* ok);
@@ -1169,8 +1150,6 @@ class ParserBase {
ExpressionT ParseArrowFunctionLiteral(bool accept_IN,
const FormalParametersT& parameters,
int rewritable_length, bool* ok);
- void ParseSingleExpressionFunctionBody(StatementListT body, bool is_async,
- bool accept_IN, bool* ok);
void ParseAsyncFunctionBody(Scope* scope, StatementListT body, bool* ok);
ExpressionT ParseAsyncFunctionLiteral(bool* ok);
ExpressionT ParseClassLiteral(IdentifierT name,
@@ -1205,11 +1184,14 @@ class ParserBase {
bool default_export, bool* ok);
StatementT ParseNativeDeclaration(bool* ok);
+ // Whether we're parsing a single-expression arrow function or something else.
+ enum class FunctionBodyType { kExpression, kBlock };
// Consumes the ending }.
void ParseFunctionBody(StatementListT result, IdentifierT function_name,
int pos, const FormalParametersT& parameters,
FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok);
+ FunctionLiteral::FunctionType function_type,
+ FunctionBodyType body_type, bool accept_IN, bool* ok);
// Under some circumstances, we allow preparsing to abort if the preparsed
// function is "long and trivial", and fully parse instead. Our current
@@ -1363,8 +1345,7 @@ class ParserBase {
Scope* scope, Variable* var,
typename DeclarationDescriptor::Kind declaration_kind);
- FunctionKind FunctionKindForImpl(bool is_method, bool is_generator,
- bool is_async) {
+ FunctionKind FunctionKindForImpl(bool is_method, ParseFunctionFlags flags) {
static const FunctionKind kFunctionKinds[][2][2] = {
{
// is_method=false
@@ -1382,17 +1363,19 @@ class ParserBase {
FunctionKind::kConciseGeneratorMethod,
FunctionKind::kAsyncConciseGeneratorMethod},
}};
- return kFunctionKinds[is_method][is_generator][is_async];
+ return kFunctionKinds[is_method]
+ [(flags & ParseFunctionFlag::kIsGenerator) != 0]
+ [(flags & ParseFunctionFlag::kIsAsync) != 0];
}
- inline FunctionKind FunctionKindFor(bool is_generator, bool is_async) {
+ inline FunctionKind FunctionKindFor(ParseFunctionFlags flags) {
const bool kIsMethod = false;
- return FunctionKindForImpl(kIsMethod, is_generator, is_async);
+ return FunctionKindForImpl(kIsMethod, flags);
}
- inline FunctionKind MethodKindFor(bool is_generator, bool is_async) {
+ inline FunctionKind MethodKindFor(ParseFunctionFlags flags) {
const bool kIsMethod = true;
- return FunctionKindForImpl(kIsMethod, is_generator, is_async);
+ return FunctionKindForImpl(kIsMethod, flags);
}
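With the booleans folded into a flag set, FunctionKindForImpl recovers its table indices by masking, keeping the 2x2x2 constant lookup table intact. A compilable sketch of that indexing scheme, with the Kind names invented:

    #include <iostream>

    // Hedged sketch of the kFunctionKinds-style lookup, indexed as
    // [is_method][is_generator][is_async] with bits taken from a mask.
    enum class Kind {
      kNormal, kGenerator, kAsync, kAsyncGenerator,
      kMethod, kGeneratorMethod, kAsyncMethod, kAsyncGeneratorMethod,
    };

    constexpr unsigned kIsGenerator = 1 << 0;
    constexpr unsigned kIsAsync = 1 << 1;

    Kind KindFor(bool is_method, unsigned flags) {
      static const Kind kKinds[2][2][2] = {
          {{Kind::kNormal, Kind::kAsync},
           {Kind::kGenerator, Kind::kAsyncGenerator}},
          {{Kind::kMethod, Kind::kAsyncMethod},
           {Kind::kGeneratorMethod, Kind::kAsyncGeneratorMethod}},
      };
      // Comparisons against zero turn the flag bits back into 0/1 indices.
      return kKinds[is_method][(flags & kIsGenerator) != 0]
                   [(flags & kIsAsync) != 0];
    }

    int main() {
      Kind k = KindFor(false, kIsGenerator | kIsAsync);
      std::cout << (k == Kind::kAsyncGenerator) << "\n";  // prints 1
    }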
// Keep track of eval() calls since they disable all local variable
@@ -1466,8 +1449,8 @@ class ParserBase {
explicit ClassLiteralChecker(ParserBase* parser)
: parser_(parser), has_seen_constructor_(false) {}
- void CheckClassMethodName(Token::Value property, PropertyKind type,
- bool is_generator, bool is_async, bool is_static,
+ void CheckClassMethodName(Token::Value property, ParsePropertyKind type,
+ ParseFunctionFlags flags, bool is_static,
bool* ok);
void CheckClassFieldName(bool is_static, bool* ok);
@@ -1546,7 +1529,7 @@ class ParserBase {
Scope* original_scope_; // The top scope for the current parsing item.
FunctionState* function_state_; // Function state stack.
v8::Extension* extension_;
- FuncNameInferrer* fni_;
+ FuncNameInferrer fni_;
AstValueFactory* ast_value_factory_; // Not owned.
typename Types::Factory ast_node_factory_;
RuntimeCallStats* runtime_call_stats_;
@@ -1577,8 +1560,6 @@ class ParserBase {
bool allow_harmony_import_meta_;
bool allow_harmony_private_fields_;
bool allow_eval_cache_;
-
- friend class DiscardableZoneScope;
};
template <typename Impl>
@@ -1592,13 +1573,12 @@ ParserBase<Impl>::FunctionState::FunctionState(
outer_function_state_(*function_state_stack),
scope_(scope),
destructuring_assignments_to_rewrite_(scope->zone()),
- reported_errors_(scope_->zone()),
+ reported_errors_(16, scope->zone()),
dont_optimize_reason_(BailoutReason::kNoReason),
next_function_is_likely_called_(false),
previous_function_was_likely_called_(false),
contains_function_or_eval_(false) {
*function_state_stack = this;
- reported_errors_.reserve(16);
if (outer_function_state_) {
outer_function_state_->previous_function_was_likely_called_ =
outer_function_state_->next_function_is_likely_called_;
@@ -1704,16 +1684,10 @@ template <typename Impl>
typename ParserBase<Impl>::IdentifierT
ParserBase<Impl>::ParseAndClassifyIdentifier(bool* ok) {
Token::Value next = Next();
- if (next == Token::IDENTIFIER || next == Token::ASYNC ||
- (next == Token::AWAIT && !parsing_module_ && !is_async_function())) {
+ STATIC_ASSERT(Token::IDENTIFIER + 1 == Token::ASYNC);
+ if (IsInRange(next, Token::IDENTIFIER, Token::ASYNC)) {
IdentifierT name = impl()->GetSymbol();
- if (impl()->IsArguments(name) && scope()->ShouldBanArguments()) {
- ReportMessage(MessageTemplate::kArgumentsDisallowedInInitializer);
- *ok = false;
- return impl()->NullIdentifier();
- }
-
// When this function is used to read a formal parameter, we don't always
// know whether the function is going to be strict or sloppy. Indeed for
// arrow functions we don't always know that the identifier we are reading
@@ -1721,15 +1695,18 @@ ParserBase<Impl>::ParseAndClassifyIdentifier(bool* ok) {
// must detect because we know we're in strict mode, we also record any
// error that we might make in the future once we know the language mode.
if (impl()->IsEvalOrArguments(name)) {
+ if (impl()->IsArguments(name) && scope()->ShouldBanArguments()) {
+ ReportMessage(MessageTemplate::kArgumentsDisallowedInInitializer);
+ *ok = false;
+ return impl()->NullIdentifier();
+ }
+
classifier()->RecordStrictModeFormalParameterError(
scanner()->location(), MessageTemplate::kStrictEvalArguments);
if (is_strict(language_mode())) {
classifier()->RecordBindingPatternError(
scanner()->location(), MessageTemplate::kStrictEvalArguments);
}
- } else if (next == Token::AWAIT) {
- classifier()->RecordAsyncArrowFormalParametersError(
- scanner()->location(), MessageTemplate::kAwaitBindingIdentifier);
}
if (classifier()->duplicate_finder() != nullptr &&
@@ -1737,20 +1714,17 @@ ParserBase<Impl>::ParseAndClassifyIdentifier(bool* ok) {
ast_value_factory())) {
classifier()->RecordDuplicateFormalParameterError(scanner()->location());
}
+
return name;
+ } else if (next == Token::AWAIT && !parsing_module_ && !is_async_function()) {
+ classifier()->RecordAsyncArrowFormalParametersError(
+ scanner()->location(), MessageTemplate::kAwaitBindingIdentifier);
+ return impl()->GetSymbol();
} else if (is_sloppy(language_mode()) &&
- (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- next == Token::ESCAPED_STRICT_RESERVED_WORD ||
- next == Token::LET || next == Token::STATIC ||
+ (Token::IsStrictReservedWord(next) ||
(next == Token::YIELD && !is_generator()))) {
classifier()->RecordStrictModeFormalParameterError(
scanner()->location(), MessageTemplate::kUnexpectedStrictReserved);
- if (next == Token::ESCAPED_STRICT_RESERVED_WORD &&
- is_strict(language_mode())) {
- ReportUnexpectedToken(next);
- *ok = false;
- return impl()->NullIdentifier();
- }
if (scanner()->IsLet()) {
classifier()->RecordLetPatternError(
scanner()->location(), MessageTemplate::kLetInLexicalBinding);
@@ -1774,9 +1748,7 @@ ParserBase<Impl>::ParseIdentifierOrStrictReservedWord(
next == Token::ASYNC) {
*is_strict_reserved = false;
*is_await = next == Token::AWAIT;
- } else if (next == Token::ESCAPED_STRICT_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
- next == Token::STATIC ||
+ } else if (Token::IsStrictReservedWord(next) ||
(next == Token::YIELD && !IsGeneratorFunction(function_kind))) {
*is_strict_reserved = true;
} else {
@@ -1792,12 +1764,8 @@ template <typename Impl>
typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifierName(
bool* ok) {
Token::Value next = Next();
- if (next != Token::IDENTIFIER && next != Token::ASYNC &&
- next != Token::ENUM && next != Token::AWAIT && next != Token::LET &&
- next != Token::STATIC && next != Token::YIELD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD &&
- next != Token::ESCAPED_KEYWORD &&
- next != Token::ESCAPED_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
+ if (!Token::IsAnyIdentifier(next) && next != Token::ESCAPED_KEYWORD &&
+ !Token::IsKeyword(next)) {
ReportUnexpectedToken(next);
*ok = false;
return impl()->NullIdentifier();
@@ -1852,6 +1820,39 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
}
template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBindingPattern(
+ bool* ok) {
+ // Pattern ::
+ // Identifier
+ // ArrayLiteral
+ // ObjectLiteral
+
+ int beg_pos = peek_position();
+ Token::Value token = peek();
+ ExpressionT result;
+
+ if (Token::IsAnyIdentifier(token)) {
+ IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
+ result = impl()->ExpressionFromIdentifier(name, beg_pos);
+ } else {
+ classifier()->RecordNonSimpleParameter();
+
+ if (token == Token::LBRACK) {
+ result = ParseArrayLiteral(CHECK_OK);
+ } else if (token == Token::LBRACE) {
+ result = ParseObjectLiteral(CHECK_OK);
+ } else {
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return impl()->NullExpression();
+ }
+ }
+
+ ValidateBindingPattern(CHECK_OK);
+ return result;
+}
+
+template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
bool* is_async, bool* ok) {
// PrimaryExpression ::
@@ -1872,7 +1873,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
// AsyncFunctionLiteral
int beg_pos = peek_position();
- switch (peek()) {
+ Token::Value token = peek();
+ switch (token) {
case Token::THIS: {
BindingPatternUnexpectedToken();
Consume(Token::THIS);
@@ -1884,16 +1886,25 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
case Token::FALSE_LITERAL:
case Token::SMI:
case Token::NUMBER:
- case Token::BIGINT:
+ case Token::BIGINT: {
+ // Ensure continuous enum range.
+ DCHECK(Token::IsLiteral(token));
BindingPatternUnexpectedToken();
return impl()->ExpressionFromLiteral(Next(), beg_pos);
+ }
+ case Token::STRING: {
+ DCHECK(Token::IsLiteral(token));
+ BindingPatternUnexpectedToken();
+ Consume(Token::STRING);
+ return impl()->ExpressionFromString(beg_pos);
+ }
case Token::ASYNC:
if (!scanner()->HasLineTerminatorAfterNext() &&
PeekAhead() == Token::FUNCTION) {
BindingPatternUnexpectedToken();
Consume(Token::ASYNC);
- return ParseAsyncFunctionLiteral(CHECK_OK);
+ return ParseAsyncFunctionLiteral(ok);
}
// CoverCallExpressionAndAsyncArrowHead
*is_async = true;
@@ -1903,19 +1914,16 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
case Token::STATIC:
case Token::YIELD:
case Token::AWAIT:
- case Token::ESCAPED_STRICT_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD: {
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::ESCAPED_STRICT_RESERVED_WORD: {
+ // Ensure continuous enum range.
+ DCHECK(IsInRange(token, Token::IDENTIFIER,
+ Token::ESCAPED_STRICT_RESERVED_WORD));
// Using eval or arguments in this context is OK even in strict mode.
IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
return impl()->ExpressionFromIdentifier(name, beg_pos);
}
- case Token::STRING: {
- BindingPatternUnexpectedToken();
- Consume(Token::STRING);
- return impl()->ExpressionFromString(beg_pos);
- }
-
case Token::ASSIGN_DIV:
case Token::DIV:
classifier()->RecordBindingPatternError(
@@ -1931,8 +1939,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
case Token::LPAREN: {
// Arrow function formal parameters are either a single identifier or a
// list of BindingPattern productions enclosed in parentheses.
- // Parentheses are not valid on the LHS of a BindingPattern, so we use the
- // is_valid_binding_pattern() check to detect multiple levels of
+ // Parentheses are not valid on the LHS of a BindingPattern, so we use
+ // the is_valid_binding_pattern() check to detect multiple levels of
// parenthesization.
bool pattern_error = !classifier()->is_valid_binding_pattern();
classifier()->RecordPatternError(scanner()->peek_location(),
@@ -1955,7 +1963,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
function_state_->set_next_function_is_likely_called();
}
ExpressionT expr = ParseExpressionCoverGrammar(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, ok);
return expr;
}
@@ -2010,10 +2018,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
- bool accept_IN, bool* ok) {
+ bool* ok) {
ExpressionClassifier classifier(this);
- ExpressionT result = ParseExpressionCoverGrammar(accept_IN, CHECK_OK);
- ValidateExpression(CHECK_OK);
+ ExpressionT result = ParseExpressionCoverGrammar(true, CHECK_OK);
+ ValidateExpression(ok);
return result;
}
@@ -2038,13 +2046,12 @@ ParserBase<Impl>::ParseExpressionCoverGrammar(bool accept_IN, bool* ok) {
Token::String(Token::ELLIPSIS));
int ellipsis_pos = position();
int pattern_pos = peek_position();
- ExpressionT pattern = ParsePrimaryExpression(CHECK_OK);
+ ExpressionT pattern = ParseBindingPattern(CHECK_OK);
if (peek() == Token::ASSIGN) {
ReportMessage(MessageTemplate::kRestDefaultInitializer);
*ok = false;
return result;
}
- ValidateBindingPattern(CHECK_OK);
right = factory()->NewSpread(pattern, ellipsis_pos, pattern_pos);
} else {
right = ParseAssignmentExpression(accept_IN, CHECK_OK);
@@ -2097,14 +2104,13 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
int pos = peek_position();
ExpressionListT values = impl()->NewExpressionList(4);
int first_spread_index = -1;
- Expect(Token::LBRACK, CHECK_OK);
- while (peek() != Token::RBRACK) {
+ Consume(Token::LBRACK);
+ while (!Check(Token::RBRACK)) {
ExpressionT elem;
if (peek() == Token::COMMA) {
elem = factory()->NewTheHoleLiteral();
- } else if (peek() == Token::ELLIPSIS) {
- int start_pos = peek_position();
- Consume(Token::ELLIPSIS);
+ } else if (Check(Token::ELLIPSIS)) {
+ int start_pos = position();
int expr_pos = peek_position();
ExpressionT argument = ParseAssignmentExpression(true, CHECK_OK);
elem = factory()->NewSpread(argument, start_pos, expr_pos);
@@ -2115,57 +2121,54 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
if (argument->IsAssignment()) {
classifier()->RecordPatternError(
- Scanner::Location(start_pos, scanner()->location().end_pos),
+ Scanner::Location(start_pos, end_position()),
MessageTemplate::kInvalidDestructuringTarget);
} else {
- CheckDestructuringElement(argument, start_pos,
- scanner()->location().end_pos);
+ CheckDestructuringElement(argument, start_pos, end_position());
}
if (peek() == Token::COMMA) {
classifier()->RecordPatternError(
- Scanner::Location(start_pos, scanner()->location().end_pos),
+ Scanner::Location(start_pos, end_position()),
MessageTemplate::kElementAfterRest);
}
} else {
int beg_pos = peek_position();
elem = ParseAssignmentExpression(true, CHECK_OK);
- CheckDestructuringElement(elem, beg_pos, scanner()->location().end_pos);
+ CheckDestructuringElement(elem, beg_pos, end_position());
}
values->Add(elem, zone_);
if (peek() != Token::RBRACK) {
Expect(Token::COMMA, CHECK_OK);
}
}
- Expect(Token::RBRACK, CHECK_OK);
return factory()->NewArrayLiteral(values, first_spread_index, pos);
}
-template <class Impl>
-bool ParserBase<Impl>::SetPropertyKindFromToken(Token::Value token,
- PropertyKind* kind) {
+inline bool ParsePropertyKindFromToken(Token::Value token,
+ ParsePropertyKind* kind) {
// This returns true, setting the property kind, iff the given token is one
// which must occur after a property name, indicating that the previous token
// was in fact a name and not a modifier (like the "get" in "get x").
switch (token) {
case Token::COLON:
- *kind = PropertyKind::kValueProperty;
+ *kind = ParsePropertyKind::kValue;
return true;
case Token::COMMA:
case Token::RBRACE:
case Token::ASSIGN:
- *kind = PropertyKind::kShorthandProperty;
+ *kind = ParsePropertyKind::kShorthand;
return true;
case Token::LPAREN:
- *kind = PropertyKind::kMethodProperty;
+ *kind = ParsePropertyKind::kMethod;
return true;
case Token::MUL:
case Token::SEMICOLON:
- *kind = PropertyKind::kClassField;
+ *kind = ParsePropertyKind::kClassField;
return true;
case Token::PRIVATE_NAME:
- *kind = PropertyKind::kClassField;
+ *kind = ParsePropertyKind::kClassField;
return true;
default:
break;
@@ -2173,57 +2176,55 @@ bool ParserBase<Impl>::SetPropertyKindFromToken(Token::Value token,
return false;
}
-template <class Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
- IdentifierT* name, PropertyKind* kind, bool* is_generator, bool* is_get,
- bool* is_set, bool* is_async, bool* is_computed_name, bool* ok) {
- DCHECK_EQ(*kind, PropertyKind::kNotSet);
- DCHECK(!*is_generator);
- DCHECK(!*is_get);
- DCHECK(!*is_set);
- DCHECK(!*is_async);
- DCHECK(!*is_computed_name);
+inline bool ParseAsAccessor(Token::Value token, Token::Value contextual_token,
+ ParsePropertyKind* kind) {
+ if (ParsePropertyKindFromToken(token, kind)) return false;
- *is_generator = Check(Token::MUL);
- if (*is_generator) {
- *kind = PropertyKind::kMethodProperty;
+ if (contextual_token == Token::GET) {
+ *kind = ParsePropertyKind::kAccessorGetter;
+ } else if (contextual_token == Token::SET) {
+ *kind = ParsePropertyKind::kAccessorSetter;
+ } else {
+ return false;
}
- Token::Value token = peek();
- int pos = peek_position();
+ return true;
+}
- if (!*is_generator && token == Token::ASYNC &&
- !scanner()->HasLineTerminatorAfterNext()) {
- Consume(Token::ASYNC);
- token = peek();
- if (token == Token::MUL && !scanner()->HasLineTerminatorBeforeNext()) {
- Consume(Token::MUL);
- token = peek();
- *is_generator = true;
- } else if (SetPropertyKindFromToken(token, kind)) {
- *name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'async'
- impl()->PushLiteralName(*name);
- return factory()->NewStringLiteral(*name, pos);
- }
- *kind = PropertyKind::kMethodProperty;
- *is_async = true;
- pos = peek_position();
- }
+template <class Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
+ IdentifierT* name, ParsePropertyKind* kind, ParseFunctionFlags* flags,
+ bool* is_computed_name, bool* ok) {
+ DCHECK_EQ(ParsePropertyKind::kNotSet, *kind);
+ DCHECK_EQ(*flags, ParseFunctionFlag::kIsNormal);
+ DCHECK(!*is_computed_name);
- if (token == Token::IDENTIFIER && !*is_generator && !*is_async) {
- // This is checking for 'get' and 'set' in particular.
- Consume(Token::IDENTIFIER);
- token = peek();
- if (SetPropertyKindFromToken(token, kind) ||
- !scanner()->IsGetOrSet(is_get, is_set)) {
+ if (Check(Token::ASYNC)) {
+ Token::Value token = peek();
+ if ((token != Token::MUL && ParsePropertyKindFromToken(token, kind)) ||
+ scanner()->HasLineTerminatorBeforeNext()) {
*name = impl()->GetSymbol();
impl()->PushLiteralName(*name);
- return factory()->NewStringLiteral(*name, pos);
+ return factory()->NewStringLiteral(*name, position());
}
- *kind = PropertyKind::kAccessorProperty;
- pos = peek_position();
+ *flags = ParseFunctionFlag::kIsAsync;
+ *kind = ParsePropertyKind::kMethod;
+ }
+
+ if (Check(Token::MUL)) {
+ *flags |= ParseFunctionFlag::kIsGenerator;
+ *kind = ParsePropertyKind::kMethod;
}
+ if (*kind == ParsePropertyKind::kNotSet && Check(Token::IDENTIFIER) &&
+ !ParseAsAccessor(peek(), scanner()->current_contextual_token(), kind)) {
+ *name = impl()->GetSymbol();
+ impl()->PushLiteralName(*name);
+ return factory()->NewStringLiteral(*name, position());
+ }
+
+ int pos = peek_position();
+
// For non computed property names we normalize the name a bit:
//
// "12" -> 12
@@ -2233,41 +2234,50 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
//
// This is important because we use the property name as a key in a hash
// table when we compute constant properties.
- ExpressionT expression = impl()->NullExpression();
- switch (token) {
+ bool is_array_index;
+ uint32_t index;
+ switch (peek()) {
case Token::STRING:
Consume(Token::STRING);
*name = impl()->GetSymbol();
+ is_array_index = impl()->IsArrayIndex(*name, &index);
break;
case Token::SMI:
Consume(Token::SMI);
- *name = impl()->GetNumberAsSymbol();
+ index = scanner()->smi_value();
+ is_array_index = true;
+ // Token::SMI tokens are scanned from their canonical representation.
+ *name = impl()->GetSymbol();
break;
- case Token::NUMBER:
+ case Token::NUMBER: {
Consume(Token::NUMBER);
*name = impl()->GetNumberAsSymbol();
+ is_array_index = impl()->IsArrayIndex(*name, &index);
break;
-
+ }
case Token::LBRACK: {
*name = impl()->NullIdentifier();
*is_computed_name = true;
Consume(Token::LBRACK);
ExpressionClassifier computed_name_classifier(this);
- expression = ParseAssignmentExpression(true, CHECK_OK);
+ ExpressionT expression = ParseAssignmentExpression(true, CHECK_OK);
ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
Expect(Token::RBRACK, CHECK_OK);
- break;
+ if (*kind == ParsePropertyKind::kNotSet) {
+ ParsePropertyKindFromToken(peek(), kind);
+ }
+ return expression;
}
case Token::ELLIPSIS:
- if (!*is_generator && !*is_async && !*is_get && !*is_set) {
+ if (*kind == ParsePropertyKind::kNotSet) {
*name = impl()->NullIdentifier();
Consume(Token::ELLIPSIS);
- expression = ParseAssignmentExpression(true, CHECK_OK);
- *kind = PropertyKind::kSpreadProperty;
+ ExpressionT expression = ParseAssignmentExpression(true, CHECK_OK);
+ *kind = ParsePropertyKind::kSpread;
if (!impl()->IsIdentifier(expression)) {
classifier()->RecordBindingPatternError(
@@ -2291,23 +2301,16 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
default:
*name = ParseIdentifierName(CHECK_OK);
+ is_array_index = false;
break;
}
- if (*kind == PropertyKind::kNotSet) {
- SetPropertyKindFromToken(peek(), kind);
+ if (*kind == ParsePropertyKind::kNotSet) {
+ ParsePropertyKindFromToken(peek(), kind);
}
-
- if (*is_computed_name) {
- return expression;
- }
-
impl()->PushLiteralName(*name);
-
- uint32_t index;
- return impl()->IsArrayIndex(*name, &index)
- ? factory()->NewNumberLiteral(index, pos)
- : factory()->NewStringLiteral(*name, pos);
+ return is_array_index ? factory()->NewNumberLiteral(index, pos)
+ : factory()->NewStringLiteral(*name, pos);
}
template <typename Impl>
@@ -2317,26 +2320,24 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
bool has_extends, bool* is_computed_name,
ClassLiteralProperty::Kind* property_kind, bool* is_static, bool* ok) {
DCHECK_NOT_NULL(class_info);
- bool is_get = false;
- bool is_set = false;
- bool is_generator = false;
- bool is_async = false;
+ ParseFunctionFlags function_flags = ParseFunctionFlag::kIsNormal;
*is_static = false;
*property_kind = ClassLiteralProperty::METHOD;
- PropertyKind kind = PropertyKind::kNotSet;
+ ParsePropertyKind kind = ParsePropertyKind::kNotSet;
Token::Value name_token = peek();
DCHECK_IMPLIES(name_token == Token::PRIVATE_NAME,
allow_harmony_private_fields());
- int name_token_position = scanner()->peek_location().beg_pos;
+ int property_beg_pos = scanner()->peek_location().beg_pos;
+ int name_token_position = property_beg_pos;
*name = impl()->NullIdentifier();
ExpressionT name_expression;
if (name_token == Token::STATIC) {
Consume(Token::STATIC);
name_token_position = scanner()->peek_location().beg_pos;
if (peek() == Token::LPAREN) {
- kind = PropertyKind::kMethodProperty;
+ kind = ParsePropertyKind::kMethod;
*name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
name_expression = factory()->NewStringLiteral(*name, position());
} else if (peek() == Token::ASSIGN || peek() == Token::SEMICOLON ||
@@ -2351,18 +2352,18 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
return impl()->NullLiteralProperty();
} else {
*is_static = true;
- name_expression = ParsePropertyName(name, &kind, &is_generator, &is_get,
- &is_set, &is_async, is_computed_name,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ name_expression =
+ ParsePropertyName(name, &kind, &function_flags, is_computed_name,
+ CHECK_OK_CUSTOM(NullLiteralProperty));
}
} else if (name_token == Token::PRIVATE_NAME) {
Consume(Token::PRIVATE_NAME);
*name = impl()->GetSymbol();
name_expression = factory()->NewStringLiteral(*name, position());
} else {
- name_expression = ParsePropertyName(name, &kind, &is_generator, &is_get,
- &is_set, &is_async, is_computed_name,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ name_expression =
+ ParsePropertyName(name, &kind, &function_flags, is_computed_name,
+ CHECK_OK_CUSTOM(NullLiteralProperty));
}
if (!class_info->has_name_static_property && *is_static &&
@@ -2371,16 +2372,17 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
}
switch (kind) {
- case PropertyKind::kClassField:
- case PropertyKind::kNotSet: // This case is a name followed by a name or
- // other property. Here we have to assume
- // that's an uninitialized field followed by a
- // linebreak followed by a property, with ASI
- // adding the semicolon. If not, there will be
- // a syntax error after parsing the first name
- // as an uninitialized field.
- case PropertyKind::kShorthandProperty:
- case PropertyKind::kValueProperty:
+ case ParsePropertyKind::kClassField:
+ case ParsePropertyKind::kNotSet: // This case is a name followed by a name
+ // or other property. Here we have to
+ // assume that's an uninitialized field
+ // followed by a linebreak followed by a
+ // property, with ASI adding the
+ // semicolon. If not, there will be a
+ // syntax error after parsing the first
+ // name as an uninitialized field.
+ case ParsePropertyKind::kShorthand:
+ case ParsePropertyKind::kValue:
if (allow_harmony_public_fields() || allow_harmony_private_fields()) {
*property_kind = name_token == Token::PRIVATE_NAME
? ClassLiteralProperty::PRIVATE_FIELD
@@ -2394,8 +2396,9 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
checker->CheckClassFieldName(*is_static,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
- ExpressionT initializer = ParseClassFieldInitializer(
- class_info, *is_static, CHECK_OK_CUSTOM(NullLiteralProperty));
+ ExpressionT initializer =
+ ParseClassFieldInitializer(class_info, property_beg_pos, *is_static,
+ CHECK_OK_CUSTOM(NullLiteralProperty));
ExpectSemicolon(CHECK_OK_CUSTOM(NullLiteralProperty));
ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
name_expression, initializer, *property_kind, *is_static,
@@ -2409,9 +2412,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
return impl()->NullLiteralProperty();
}
- case PropertyKind::kMethodProperty: {
- DCHECK(!is_get && !is_set);
-
+ case ParsePropertyKind::kMethod: {
// MethodDefinition
// PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
// '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
@@ -2421,12 +2422,12 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
// '{' FunctionBody '}'
if (!*is_computed_name) {
- checker->CheckClassMethodName(name_token, PropertyKind::kMethodProperty,
- is_generator, is_async, *is_static,
+ checker->CheckClassMethodName(name_token, ParsePropertyKind::kMethod,
+ function_flags, *is_static,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
- FunctionKind kind = MethodKindFor(is_generator, is_async);
+ FunctionKind kind = MethodKindFor(function_flags);
if (!*is_static && impl()->IsConstructor(*name)) {
class_info->has_seen_constructor = true;
@@ -2436,10 +2437,8 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
ExpressionT value = impl()->ParseFunctionLiteral(
*name, scanner()->location(), kSkipFunctionNameCheck, kind,
- FLAG_harmony_function_tostring ? name_token_position
- : kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ name_token_position, FunctionLiteral::kAccessorOrMethod,
+ language_mode(), nullptr, CHECK_OK_CUSTOM(NullLiteralProperty));
*property_kind = ClassLiteralProperty::METHOD;
ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
@@ -2449,13 +2448,15 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
return result;
}
- case PropertyKind::kAccessorProperty: {
- DCHECK((is_get || is_set) && !is_generator && !is_async);
+ case ParsePropertyKind::kAccessorGetter:
+ case ParsePropertyKind::kAccessorSetter: {
+ DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
+ bool is_get = kind == ParsePropertyKind::kAccessorGetter;
if (!*is_computed_name) {
- checker->CheckClassMethodName(
- name_token, PropertyKind::kAccessorProperty, false, false,
- *is_static, CHECK_OK_CUSTOM(NullLiteralProperty));
+ checker->CheckClassMethodName(name_token, kind,
+ ParseFunctionFlag::kIsNormal, *is_static,
+ CHECK_OK_CUSTOM(NullLiteralProperty));
// Make sure the name expression is a string since we need a Name for
// Runtime_DefineAccessorPropertyUnchecked and since we can determine
// this statically we can skip the extra runtime check.
@@ -2468,10 +2469,8 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
FunctionLiteralT value = impl()->ParseFunctionLiteral(
*name, scanner()->location(), kSkipFunctionNameCheck, kind,
- FLAG_harmony_function_tostring ? name_token_position
- : kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ name_token_position, FunctionLiteral::kAccessorOrMethod,
+ language_mode(), nullptr, CHECK_OK_CUSTOM(NullLiteralProperty));
*property_kind =
is_get ? ClassLiteralProperty::GETTER : ClassLiteralProperty::SETTER;
@@ -2484,7 +2483,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
impl()->SetFunctionNameFromPropertyName(result, *name, prefix);
return result;
}
- case PropertyKind::kSpreadProperty:
+ case ParsePropertyKind::kSpread:
ReportUnexpectedTokenAt(
Scanner::Location(name_token_position, name_expression->position()),
name_token);
@@ -2496,7 +2495,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info,
+ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info, int beg_pos,
bool is_static, bool* ok) {
DeclarationScope* initializer_scope = is_static
? class_info->static_fields_scope
@@ -2506,7 +2505,7 @@ ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info,
initializer_scope =
NewFunctionScope(FunctionKind::kClassFieldsInitializerFunction);
// TODO(gsathya): Make scopes be non-contiguous.
- initializer_scope->set_start_position(scanner()->location().end_pos);
+ initializer_scope->set_start_position(beg_pos);
initializer_scope->SetLanguageMode(LanguageMode::kStrict);
}
@@ -2523,7 +2522,7 @@ ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info,
initializer = factory()->NewUndefinedLiteral(kNoSourcePosition);
}
- initializer_scope->set_end_position(scanner()->location().end_pos);
+ initializer_scope->set_end_position(end_position());
if (is_static) {
class_info->static_fields_scope = initializer_scope;
class_info->has_static_class_fields = true;
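With beg_pos threaded into ParseClassFieldInitializer, the synthesized initializer scope now starts at the field's own begin position rather than at whatever the scanner last consumed. Roughly, for a field like the one sketched below (illustrative positions, not from the patch):

    // class C { x = f(); }
    //           ^       ^
    //           |       end_position() -> initializer_scope end
    //           property_beg_pos -> initializer_scope start (new behavior)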
@@ -2541,26 +2540,23 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
bool* is_computed_name,
bool* is_rest_property,
bool* ok) {
- bool is_get = false;
- bool is_set = false;
- bool is_generator = false;
- bool is_async = false;
- PropertyKind kind = PropertyKind::kNotSet;
+ ParseFunctionFlags function_flags = ParseFunctionFlag::kIsNormal;
+ ParsePropertyKind kind = ParsePropertyKind::kNotSet;
IdentifierT name = impl()->NullIdentifier();
Token::Value name_token = peek();
- int next_beg_pos = scanner()->peek_location().beg_pos;
- int next_end_pos = scanner()->peek_location().end_pos;
+ int next_beg_pos = peek_position();
+ int next_end_pos = peek_end_position();
- ExpressionT name_expression = ParsePropertyName(
- &name, &kind, &is_generator, &is_get, &is_set, &is_async,
- is_computed_name, CHECK_OK_CUSTOM(NullLiteralProperty));
+ ExpressionT name_expression =
+ ParsePropertyName(&name, &kind, &function_flags, is_computed_name,
+ CHECK_OK_CUSTOM(NullLiteralProperty));
switch (kind) {
- case PropertyKind::kSpreadProperty:
- DCHECK(!is_get && !is_set && !is_generator && !is_async &&
- !*is_computed_name);
- DCHECK(name_token == Token::ELLIPSIS);
+ case ParsePropertyKind::kSpread:
+ DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
+ DCHECK(!*is_computed_name);
+ DCHECK_EQ(Token::ELLIPSIS, name_token);
*is_computed_name = true;
*is_rest_property = true;
@@ -2569,8 +2565,8 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
factory()->NewTheHoleLiteral(), name_expression,
ObjectLiteralProperty::SPREAD, true);
- case PropertyKind::kValueProperty: {
- DCHECK(!is_get && !is_set && !is_generator && !is_async);
+ case ParsePropertyKind::kValue: {
+ DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
if (!*is_computed_name) {
checker->CheckDuplicateProto(name_token);
@@ -2579,7 +2575,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
int beg_pos = peek_position();
ExpressionT value =
ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullLiteralProperty));
- CheckDestructuringElement(value, beg_pos, scanner()->location().end_pos);
+ CheckDestructuringElement(value, beg_pos, end_position());
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
name_expression, value, *is_computed_name);
@@ -2587,14 +2583,14 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
return result;
}
- case PropertyKind::kShorthandProperty: {
+ case ParsePropertyKind::kShorthand: {
// PropertyDefinition
// IdentifierReference
// CoverInitializedName
//
// CoverInitializedName
// IdentifierReference Initializer?
- DCHECK(!is_get && !is_set && !is_generator && !is_async);
+ DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
if (!Token::IsIdentifier(name_token, language_mode(),
this->is_generator(),
@@ -2642,7 +2638,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
kNoSourcePosition);
classifier()->RecordExpressionError(
- Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+ Scanner::Location(next_beg_pos, end_position()),
MessageTemplate::kInvalidCoverInitializedName);
impl()->SetFunctionNameFromIdentifierRef(rhs, lhs);
@@ -2656,24 +2652,21 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
return result;
}
- case PropertyKind::kMethodProperty: {
- DCHECK(!is_get && !is_set);
-
+ case ParsePropertyKind::kMethod: {
// MethodDefinition
// PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
// '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
classifier()->RecordPatternError(
- Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+ Scanner::Location(next_beg_pos, end_position()),
MessageTemplate::kInvalidDestructuringTarget);
- FunctionKind kind = MethodKindFor(is_generator, is_async);
+ FunctionKind kind = MethodKindFor(function_flags);
ExpressionT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
- FLAG_harmony_function_tostring ? next_beg_pos : kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ next_beg_pos, FunctionLiteral::kAccessorOrMethod, language_mode(),
+ nullptr, CHECK_OK_CUSTOM(NullLiteralProperty));
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
name_expression, value, ObjectLiteralProperty::COMPUTED,
@@ -2682,12 +2675,13 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
return result;
}
- case PropertyKind::kAccessorProperty: {
- DCHECK((is_get || is_set) && !(is_set && is_get) && !is_generator &&
- !is_async);
+ case ParsePropertyKind::kAccessorGetter:
+ case ParsePropertyKind::kAccessorSetter: {
+ DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
+ bool is_get = kind == ParsePropertyKind::kAccessorGetter;
classifier()->RecordPatternError(
- Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+ Scanner::Location(next_beg_pos, end_position()),
MessageTemplate::kInvalidDestructuringTarget);
if (!*is_computed_name) {
@@ -2703,9 +2697,8 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
FunctionLiteralT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
- FLAG_harmony_function_tostring ? next_beg_pos : kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ next_beg_pos, FunctionLiteral::kAccessorOrMethod, language_mode(),
+ nullptr, CHECK_OK_CUSTOM(NullLiteralProperty));
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
name_expression, value,
@@ -2719,8 +2712,8 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
return result;
}
- case PropertyKind::kClassField:
- case PropertyKind::kNotSet:
+ case ParsePropertyKind::kClassField:
+ case ParsePropertyKind::kNotSet:
ReportUnexpectedToken(Next());
*ok = false;
return impl()->NullLiteralProperty();
@@ -2743,10 +2736,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
bool has_rest_property = false;
ObjectLiteralChecker checker(this);
- Expect(Token::LBRACE, CHECK_OK);
+ Consume(Token::LBRACE);
- while (peek() != Token::RBRACE) {
- FuncNameInferrer::State fni_state(fni_);
+ while (!Check(Token::RBRACE)) {
+ FuncNameInferrerState fni_state(&fni_);
bool is_computed_name = false;
bool is_rest_property = false;
@@ -2774,9 +2767,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
Expect(Token::COMMA, CHECK_OK);
}
- if (fni_ != nullptr) fni_->Infer();
+ fni_.Infer();
}
- Expect(Token::RBRACE, CHECK_OK);
// In pattern rewriter, we rewrite rest property to call out to a
// runtime function passing all the other properties as arguments to
@@ -2802,25 +2794,20 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
Scanner::Location spread_arg = Scanner::Location::invalid();
ExpressionListT result = impl()->NewExpressionList(4);
Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullExpressionList));
- bool done = (peek() == Token::RPAREN);
- while (!done) {
+ while (peek() != Token::RPAREN) {
int start_pos = peek_position();
bool is_spread = Check(Token::ELLIPSIS);
int expr_pos = peek_position();
ExpressionT argument =
ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullExpressionList));
- if (!impl()->IsIdentifier(argument) &&
- is_simple_parameter_list != nullptr) {
- *is_simple_parameter_list = false;
- }
+ if (!impl()->IsIdentifier(argument)) *is_simple_parameter_list = false;
+
if (!maybe_arrow) {
ValidateExpression(CHECK_OK_CUSTOM(NullExpressionList));
}
if (is_spread) {
- if (is_simple_parameter_list != nullptr) {
- *is_simple_parameter_list = false;
- }
+ *is_simple_parameter_list = false;
if (!spread_arg.IsValid()) {
spread_arg.beg_pos = start_pos;
spread_arg.end_pos = peek_position();
@@ -2833,24 +2820,22 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
}
result->Add(argument, zone_);
- if (result->length() > Code::kMaxArguments) {
- ReportMessage(MessageTemplate::kTooManyArguments);
- *ok = false;
- return impl()->NullExpressionList();
- }
- done = (peek() != Token::COMMA);
- if (!done) {
- Next();
- if (argument->IsSpread()) {
- classifier()->RecordAsyncArrowFormalParametersError(
- scanner()->location(), MessageTemplate::kParamAfterRest);
- }
- if (peek() == Token::RPAREN) {
- // allow trailing comma
- done = true;
- }
+ if (peek() != Token::COMMA) break;
+
+ Next();
+
+ if (argument->IsSpread()) {
+ classifier()->RecordAsyncArrowFormalParametersError(
+ scanner()->location(), MessageTemplate::kParamAfterRest);
}
}
+
+ if (result->length() > Code::kMaxArguments) {
+ ReportMessage(MessageTemplate::kTooManyArguments);
+ *ok = false;
+ return impl()->NullExpressionList();
+ }
+
Scanner::Location location = scanner_->location();
if (Token::RPAREN != Next()) {
impl()->ReportMessageAt(location, MessageTemplate::kUnterminatedArgList);
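The rewritten argument loop drops the old done flag in favor of direct break / Next() control flow, and the Code::kMaxArguments check is hoisted out of the loop since the length only needs validating once. A simplified sketch of the new shape, with ParseArgument and ReportTooManyArguments as hypothetical stand-ins for the inline code above:

    while (peek() != Token::RPAREN) {
      result->Add(ParseArgument(), zone_);
      if (peek() != Token::COMMA) break;
      Next();  // consume ','; a trailing comma re-checks ')' at the top
    }
    if (result->length() > Code::kMaxArguments) ReportTooManyArguments();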
@@ -2883,7 +2868,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
return ParseYieldExpression(accept_IN, ok);
}
- FuncNameInferrer::State fni_state(fni_);
+ FuncNameInferrerState fni_state(&fni_);
ExpressionClassifier arrow_formals_classifier(
this, classifier()->duplicate_finder());
@@ -2915,10 +2900,8 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
expression =
impl()->ExpressionFromIdentifier(name, position(), InferName::kNo);
- if (fni_) {
- // Remove `async` keyword from inferred name stack.
- fni_->RemoveAsyncKeywordFromEnd();
- }
+ // Remove `async` keyword from inferred name stack.
+ fni_.RemoveAsyncKeywordFromEnd();
}
if (peek() == Token::ARROW) {
@@ -2931,9 +2914,9 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// in an arrow parameter list, this is correct.
// TODO(adamk): Rename "FormalParameterInitializerError" to refer to
// "YieldExpression", which is its only use.
- ValidateFormalParameterInitializer(ok);
+ ValidateFormalParameterInitializer(CHECK_OK);
- Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
+ Scanner::Location loc(lhs_beg_pos, end_position());
DeclarationScope* scope =
NewFunctionScope(is_async ? FunctionKind::kAsyncArrowFunction
: FunctionKind::kArrowFunction);
@@ -2962,7 +2945,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
MessageTemplate::kUnexpectedToken,
Token::String(Token::ARROW));
- if (fni_ != nullptr) fni_->Infer();
+ fni_.Infer();
return expression;
}
@@ -2997,7 +2980,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
ValidateAssignmentPattern(CHECK_OK);
} else {
expression = CheckAndRewriteReferenceExpression(
- expression, lhs_beg_pos, scanner()->location().end_pos,
+ expression, lhs_beg_pos, end_position(),
MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
}
@@ -3027,15 +3010,13 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
impl()->CheckAssigningFunctionLiteralToProperty(expression, right);
- if (fni_ != nullptr) {
- // Check if the right hand side is a call to avoid inferring a
- // name if we're dealing with "a = function(){...}();"-like
- // expression.
- if (op == Token::ASSIGN && !right->IsCall() && !right->IsCallNew()) {
- fni_->Infer();
- } else {
- fni_->RemoveLastFunction();
- }
+ // Check if the right-hand side is a call to avoid inferring a
+ // name if we're dealing with "a = function(){...}();"-like
+ // expressions.
+ if (op == Token::ASSIGN && !right->IsCall() && !right->IsCallNew()) {
+ fni_.Infer();
+ } else {
+ fni_.RemoveLastFunction();
}
if (op == Token::ASSIGN) {
@@ -3124,11 +3105,20 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
// LogicalOrExpression
// LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
- SourceRange then_range, else_range;
int pos = peek_position();
// We start using the binary expression parser for prec >= 4 only!
ExpressionT expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != Token::CONDITIONAL) return expression;
+ return peek() == Token::CONDITIONAL
+ ? ParseConditionalContinuation(expression, accept_IN, pos, ok)
+ : expression;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseConditionalContinuation(ExpressionT expression,
+ bool accept_IN, int pos,
+ bool* ok) {
+ SourceRange then_range, else_range;
ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
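ParseConditionalExpression is split so that the common case with no '?' stays on a short path; ParseConditionalContinuation is entered only once Token::CONDITIONAL has been peeked. Illustrative mapping (not from the patch):

    // a + b      -> result of ParseBinaryExpression returned directly
    // c ? x : y  -> ParseConditionalContinuation(c, accept_IN, pos, ok)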
@@ -3184,10 +3174,6 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
right_range_scope.Finalize();
ValidateExpression(CHECK_OK);
- if (impl()->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos)) {
- continue;
- }
-
// For now we distinguish between comparisons and other binary
// operations. (We could combine the two and get rid of this
// code and AST node eventually.)
@@ -3204,9 +3190,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
// The comparison was negated - add a NOT.
x = factory()->NewUnaryOperation(Token::NOT, x, pos);
}
- } else if (impl()->CollapseNaryExpression(&x, y, op, pos, right_range)) {
- continue;
- } else {
+ } else if (!impl()->ShortcutNumericLiteralBinaryExpression(&x, y, op,
+ pos) &&
+ !impl()->CollapseNaryExpression(&x, y, op, pos, right_range)) {
// We have a "normal" binary operation.
x = factory()->NewBinaryOperation(op, x, y, pos);
if (op == Token::OR || op == Token::AND) {
@@ -3219,97 +3205,109 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryOpExpression(
bool* ok) {
- // UnaryExpression ::
- // PostfixExpression
- // 'delete' UnaryExpression
- // 'void' UnaryExpression
- // 'typeof' UnaryExpression
- // '++' UnaryExpression
- // '--' UnaryExpression
- // '+' UnaryExpression
- // '-' UnaryExpression
- // '~' UnaryExpression
- // '!' UnaryExpression
- // [+Await] AwaitExpression[?Yield]
-
- Token::Value op = peek();
- if (Token::IsUnaryOp(op)) {
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
-
- op = Next();
- int pos = position();
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
- // Assume "! function ..." indicates the function is likely to be called.
- if (op == Token::NOT && peek() == Token::FUNCTION) {
- function_state_->set_next_function_is_likely_called();
- }
+ Token::Value op = Next();
+ int pos = position();
- ExpressionT expression = ParseUnaryExpression(CHECK_OK);
- ValidateExpression(CHECK_OK);
+ // Assume "! function ..." indicates the function is likely to be called.
+ if (op == Token::NOT && peek() == Token::FUNCTION) {
+ function_state_->set_next_function_is_likely_called();
+ }
- if (op == Token::DELETE) {
- if (impl()->IsIdentifier(expression) && is_strict(language_mode())) {
- // "delete identifier" is a syntax error in strict mode.
- ReportMessage(MessageTemplate::kStrictDelete);
- *ok = false;
- return impl()->NullExpression();
- }
+ ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+ ValidateExpression(CHECK_OK);
- if (impl()->IsPropertyWithPrivateFieldKey(expression)) {
- ReportMessage(MessageTemplate::kDeletePrivateField);
- *ok = false;
- return impl()->NullExpression();
- }
+ if (op == Token::DELETE) {
+ if (impl()->IsIdentifier(expression) && is_strict(language_mode())) {
+ // "delete identifier" is a syntax error in strict mode.
+ ReportMessage(MessageTemplate::kStrictDelete);
+ *ok = false;
+ return impl()->NullExpression();
}
- if (peek() == Token::EXP) {
- ReportUnexpectedToken(Next());
+ if (impl()->IsPropertyWithPrivateFieldKey(expression)) {
+ ReportMessage(MessageTemplate::kDeletePrivateField);
*ok = false;
return impl()->NullExpression();
}
+ }
- // Allow the parser's implementation to rewrite the expression.
- return impl()->BuildUnaryExpression(expression, op, pos);
- } else if (Token::IsCountOp(op)) {
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
- op = Next();
- int beg_pos = peek_position();
- ExpressionT expression = ParseUnaryExpression(CHECK_OK);
- expression = CheckAndRewriteReferenceExpression(
- expression, beg_pos, scanner()->location().end_pos,
- MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
- impl()->MarkExpressionAsAssigned(expression);
- ValidateExpression(CHECK_OK);
+ if (peek() == Token::EXP) {
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return impl()->NullExpression();
+ }
- return factory()->NewCountOperation(op,
- true /* prefix */,
- expression,
- position());
+ // Allow the parser's implementation to rewrite the expression.
+ return impl()->BuildUnaryExpression(expression, op, pos);
+}
- } else if (is_async_function() && peek() == Token::AWAIT) {
- classifier()->RecordFormalParameterInitializerError(
- scanner()->peek_location(),
- MessageTemplate::kAwaitExpressionFormalParameter);
- int await_pos = peek_position();
- Consume(Token::AWAIT);
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrefixExpression(
+ bool* ok) {
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
+ Token::Value op = Next();
+ int beg_pos = peek_position();
+ ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+ expression = CheckAndRewriteReferenceExpression(
+ expression, beg_pos, end_position(),
+ MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
+ impl()->MarkExpressionAsAssigned(expression);
+ ValidateExpression(CHECK_OK);
- ExpressionT value = ParseUnaryExpression(CHECK_OK);
+ return factory()->NewCountOperation(op, true /* prefix */, expression,
+ position());
+}
- classifier()->RecordBindingPatternError(
- Scanner::Location(await_pos, scanner()->location().end_pos),
- MessageTemplate::kInvalidDestructuringTarget);
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseAwaitExpression(
+ bool* ok) {
+ classifier()->RecordFormalParameterInitializerError(
+ scanner()->peek_location(),
+ MessageTemplate::kAwaitExpressionFormalParameter);
+ int await_pos = peek_position();
+ Consume(Token::AWAIT);
- ExpressionT expr = factory()->NewAwait(value, await_pos);
- function_state_->AddSuspend();
- impl()->RecordSuspendSourceRange(expr, PositionAfterSemicolon());
- return expr;
- } else {
- return ParsePostfixExpression(ok);
+ ExpressionT value = ParseUnaryExpression(CHECK_OK);
+
+ classifier()->RecordBindingPatternError(
+ Scanner::Location(await_pos, end_position()),
+ MessageTemplate::kInvalidDestructuringTarget);
+
+ ExpressionT expr = factory()->NewAwait(value, await_pos);
+ function_state_->AddSuspend();
+ impl()->RecordSuspendSourceRange(expr, PositionAfterSemicolon());
+ return expr;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
+ bool* ok) {
+ // UnaryExpression ::
+ // PostfixExpression
+ // 'delete' UnaryExpression
+ // 'void' UnaryExpression
+ // 'typeof' UnaryExpression
+ // '++' UnaryExpression
+ // '--' UnaryExpression
+ // '+' UnaryExpression
+ // '-' UnaryExpression
+ // '~' UnaryExpression
+ // '!' UnaryExpression
+ // [+Await] AwaitExpression[?Yield]
+
+ Token::Value op = peek();
+ if (Token::IsUnaryOp(op)) return ParseUnaryOpExpression(ok);
+ if (Token::IsCountOp(op)) return ParsePrefixExpression(ok);
+ if (is_async_function() && op == Token::AWAIT) {
+ return ParseAwaitExpression(ok);
}
+ return ParsePostfixExpression(ok);
}
template <typename Impl>
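After this split, ParseUnaryExpression is a pure dispatcher and each operator family gets its own helper. Which helper handles which source form, as implied by the dispatch above (JavaScript examples in comments, illustrative only):

    // delete x.y, void e, typeof e, +e, -e, ~e, !e -> ParseUnaryOpExpression
    // ++i, --i                                     -> ParsePrefixExpression
    // await p (only inside async functions)        -> ParseAwaitExpression
    // everything else, e.g. i++                    -> ParsePostfixExpression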
@@ -3325,7 +3323,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
ArrowFormalParametersUnexpectedToken();
expression = CheckAndRewriteReferenceExpression(
- expression, lhs_beg_pos, scanner()->location().end_pos,
+ expression, lhs_beg_pos, end_position(),
MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
impl()->MarkExpressionAsAssigned(expression);
ValidateExpression(CHECK_OK);
@@ -3398,9 +3396,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
args = ParseArguments(&spread_pos, true, &is_simple_parameter_list,
CHECK_OK);
if (peek() == Token::ARROW) {
- if (fni_) {
- fni_->RemoveAsyncKeywordFromEnd();
- }
+ fni_.RemoveAsyncKeywordFromEnd();
ValidateBindingPattern(CHECK_OK);
ValidateFormalParameterInitializer(CHECK_OK);
if (!classifier()->is_valid_async_arrow_formal_parameters()) {
@@ -3443,7 +3439,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
result = factory()->NewCall(result, args, pos, is_possibly_eval);
}
- if (fni_ != nullptr) fni_->RemoveLastFunction();
+ fni_.RemoveLastFunction();
break;
}
@@ -3475,8 +3471,8 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
- bool* ok) {
+ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression(bool* is_async,
+ bool* ok) {
// NewExpression ::
// ('new')+ MemberExpression
//
@@ -3496,49 +3492,52 @@ ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
// new new foo means new (new foo)
// new new foo() means new (new foo())
// new new foo().bar().baz means (new (new foo()).bar()).baz
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
+ Consume(Token::NEW);
+ int new_pos = position();
+ ExpressionT result;
+ if (peek() == Token::SUPER) {
+ const bool is_new = true;
+ result = ParseSuperExpression(is_new, CHECK_OK);
+ } else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT &&
+ (!allow_harmony_import_meta() || PeekAhead() == Token::LPAREN)) {
+ impl()->ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kImportCallNotNewExpression);
+ *ok = false;
+ return impl()->NullExpression();
+ } else if (peek() == Token::PERIOD) {
+ *is_async = false;
+ result = ParseNewTargetExpression(CHECK_OK);
+ return ParseMemberExpressionContinuation(result, is_async, ok);
+ } else {
+ result = ParseMemberWithNewPrefixesExpression(is_async, CHECK_OK);
+ }
+ ValidateExpression(CHECK_OK);
+ if (peek() == Token::LPAREN) {
+ // NewExpression with arguments.
+ Scanner::Location spread_pos;
+ ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
- if (peek() == Token::NEW) {
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
- Consume(Token::NEW);
- int new_pos = position();
- ExpressionT result;
- if (peek() == Token::SUPER) {
- const bool is_new = true;
- result = ParseSuperExpression(is_new, CHECK_OK);
- } else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT &&
- (!allow_harmony_import_meta() || PeekAhead() == Token::LPAREN)) {
- impl()->ReportMessageAt(scanner()->peek_location(),
- MessageTemplate::kImportCallNotNewExpression);
- *ok = false;
- return impl()->NullExpression();
- } else if (peek() == Token::PERIOD) {
- *is_async = false;
- result = ParseNewTargetExpression(CHECK_OK);
- return ParseMemberExpressionContinuation(result, is_async, CHECK_OK);
+ if (spread_pos.IsValid()) {
+ result = impl()->SpreadCallNew(result, args, new_pos);
} else {
- result = ParseMemberWithNewPrefixesExpression(is_async, CHECK_OK);
+ result = factory()->NewCallNew(result, args, new_pos);
}
- ValidateExpression(CHECK_OK);
- if (peek() == Token::LPAREN) {
- // NewExpression with arguments.
- Scanner::Location spread_pos;
- ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
-
- if (spread_pos.IsValid()) {
- result = impl()->SpreadCallNew(result, args, new_pos);
- } else {
- result = factory()->NewCallNew(result, args, new_pos);
- }
- // The expression can still continue with . or [ after the arguments.
- result = ParseMemberExpressionContinuation(result, is_async, CHECK_OK);
- return result;
- }
- // NewExpression without arguments.
- return factory()->NewCallNew(result, impl()->NewExpressionList(0), new_pos);
+ // The expression can still continue with . or [ after the arguments.
+ return ParseMemberExpressionContinuation(result, is_async, ok);
}
- // No 'new' or 'super' keyword.
- return ParseMemberExpression(is_async, ok);
+ // NewExpression without arguments.
+ return factory()->NewCallNew(result, impl()->NewExpressionList(0), new_pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
+ bool* ok) {
+ return peek() == Token::NEW
+ ? ParseMemberWithPresentNewPrefixesExpression(is_async, ok)
+ : ParseMemberExpression(is_async, ok);
}
template <typename Impl>
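ParseMemberWithNewPrefixesExpression is likewise reduced to a dispatcher, so the heavy body only runs when a 'new' token is actually present. The precedence it implements is unchanged, per the comment inside the hunk; illustrative cases:

    // new foo().bar  -> (new foo()).bar   (arguments bind before members)
    // new new foo()  -> new (new foo())
    // new.target     -> ParseNewTargetExpression branch
    // new import(x)  -> kImportCallNotNewExpression error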
@@ -3604,8 +3603,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
result = ParsePrimaryExpression(is_async, CHECK_OK);
}
- result = ParseMemberExpressionContinuation(result, is_async, CHECK_OK);
- return result;
+ return ParseMemberExpressionContinuation(result, is_async, ok);
}
template <typename Impl>
@@ -3678,9 +3676,9 @@ void ParserBase<Impl>::ExpectMetaProperty(Token::Value property_name,
Consume(Token::PERIOD);
ExpectContextualKeyword(property_name, CHECK_OK_CUSTOM(Void));
if (scanner()->literal_contains_escapes()) {
- impl()->ReportMessageAt(
- Scanner::Location(pos, scanner()->location().end_pos),
- MessageTemplate::kInvalidEscapedMetaProperty, full_name);
+ impl()->ReportMessageAt(Scanner::Location(pos, end_position()),
+ MessageTemplate::kInvalidEscapedMetaProperty,
+ full_name);
*ok = false;
}
}
@@ -3692,7 +3690,7 @@ ParserBase<Impl>::ParseNewTargetExpression(bool* ok) {
ExpectMetaProperty(Token::TARGET, "new.target", pos, CHECK_OK);
classifier()->RecordAssignmentPatternError(
- Scanner::Location(pos, scanner()->location().end_pos),
+ Scanner::Location(pos, end_position()),
MessageTemplate::kInvalidDestructuringTarget);
if (!GetReceiverScope()->is_function_scope()) {
@@ -3780,14 +3778,11 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
// BindingElement[?Yield, ?GeneratorParameter]
bool is_rest = parameters->has_rest;
- FuncNameInferrer::State fni_state(fni_);
- ExpressionT pattern = ParsePrimaryExpression(CHECK_OK_CUSTOM(Void));
- ValidateBindingPattern(CHECK_OK_CUSTOM(Void));
-
+ FuncNameInferrerState fni_state(&fni_);
+ ExpressionT pattern = ParseBindingPattern(CHECK_OK_CUSTOM(Void));
if (!impl()->IsIdentifier(pattern)) {
parameters->is_simple = false;
ValidateFormalParameterInitializer(CHECK_OK_CUSTOM(Void));
- classifier()->RecordNonSimpleParameter();
}
ExpressionT initializer = impl()->NullExpression();
@@ -3808,8 +3803,8 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
impl()->SetFunctionNameFromIdentifierRef(initializer, pattern);
}
- impl()->AddFormalParameter(parameters, pattern, initializer,
- scanner()->location().end_pos, is_rest);
+ impl()->AddFormalParameter(parameters, pattern, initializer, end_position(),
+ is_rest);
}
template <typename Impl>
@@ -3908,23 +3903,21 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
int bindings_start = peek_position();
do {
// Parse binding pattern.
- FuncNameInferrer::State fni_state(fni_);
+ FuncNameInferrerState fni_state(&fni_);
ExpressionT pattern = impl()->NullExpression();
int decl_pos = peek_position();
{
ExpressionClassifier pattern_classifier(this);
- pattern = ParsePrimaryExpression(CHECK_OK_CUSTOM(NullStatement));
+ pattern = ParseBindingPattern(CHECK_OK_CUSTOM(NullStatement));
- ValidateBindingPattern(CHECK_OK_CUSTOM(NullStatement));
if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
ValidateLetPattern(CHECK_OK_CUSTOM(NullStatement));
}
}
-
Scanner::Location variable_loc = scanner()->location();
- bool single_name = impl()->IsIdentifier(pattern);
+ bool single_name = impl()->IsIdentifier(pattern);
if (single_name) {
impl()->PushVariableName(impl()->AsIdentifier(pattern));
}
@@ -3939,32 +3932,32 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
value = ParseAssignmentExpression(var_context != kForStatement,
CHECK_OK_CUSTOM(NullStatement));
ValidateExpression(CHECK_OK_CUSTOM(NullStatement));
- variable_loc.end_pos = scanner()->location().end_pos;
+ variable_loc.end_pos = end_position();
if (!parsing_result->first_initializer_loc.IsValid()) {
parsing_result->first_initializer_loc = variable_loc;
}
// Don't infer if it is an "a = function(){...}();"-like expression.
- if (single_name && fni_ != nullptr) {
+ if (single_name) {
if (!value->IsCall() && !value->IsCallNew()) {
- fni_->Infer();
+ fni_.Infer();
} else {
- fni_->RemoveLastFunction();
+ fni_.RemoveLastFunction();
}
}
impl()->SetFunctionNameFromIdentifierRef(value, pattern);
// End position of the initializer is after the assignment expression.
- initializer_position = scanner()->location().end_pos;
+ initializer_position = end_position();
} else {
if (var_context != kForStatement || !PeekInOrOf()) {
// ES6 'const' and binding patterns require initializers.
if (parsing_result->descriptor.mode == VariableMode::kConst ||
!impl()->IsIdentifier(pattern)) {
impl()->ReportMessageAt(
- Scanner::Location(decl_pos, scanner()->location().end_pos),
+ Scanner::Location(decl_pos, end_position()),
MessageTemplate::kDeclarationMissingInitializer,
!impl()->IsIdentifier(pattern) ? "destructuring" : "const");
*ok = false;
@@ -3998,7 +3991,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
} while (Check(Token::COMMA));
parsing_result->bindings_loc =
- Scanner::Location(bindings_start, scanner()->location().end_pos);
+ Scanner::Location(bindings_start, end_position());
DCHECK(*ok);
return init_block;
@@ -4009,7 +4002,7 @@ typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseFunctionDeclaration(bool* ok) {
Consume(Token::FUNCTION);
int pos = position();
- ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+ ParseFunctionFlags flags = ParseFunctionFlag::kIsNormal;
if (Check(Token::MUL)) {
impl()->ReportMessageAt(
scanner()->location(),
@@ -4026,9 +4019,9 @@ ParserBase<Impl>::ParseHoistableDeclaration(
ZonePtrList<const AstRawString>* names, bool default_export, bool* ok) {
Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
int pos = position();
- ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+ ParseFunctionFlags flags = ParseFunctionFlag::kIsNormal;
if (Check(Token::MUL)) {
- flags |= ParseFunctionFlags::kIsGenerator;
+ flags |= ParseFunctionFlag::kIsGenerator;
}
return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
}
@@ -4049,13 +4042,12 @@ ParserBase<Impl>::ParseHoistableDeclaration(
//
// 'function' and '*' (if present) have been consumed by the caller.
- bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
- const bool is_async = flags & ParseFunctionFlags::kIsAsync;
- DCHECK(!is_generator || !is_async);
+ DCHECK_IMPLIES((flags & ParseFunctionFlag::kIsAsync) != 0,
+ (flags & ParseFunctionFlag::kIsGenerator) == 0);
- if (is_async && Check(Token::MUL)) {
+ if ((flags & ParseFunctionFlag::kIsAsync) != 0 && Check(Token::MUL)) {
// Async generator
- is_generator = true;
+ flags |= ParseFunctionFlag::kIsGenerator;
}
IdentifierT name;
@@ -4074,10 +4066,10 @@ ParserBase<Impl>::ParseHoistableDeclaration(
variable_name = name;
}
- FuncNameInferrer::State fni_state(fni_);
+ FuncNameInferrerState fni_state(&fni_);
impl()->PushEnclosingName(name);
- FunctionKind kind = FunctionKindFor(is_generator, is_async);
+ FunctionKind kind = FunctionKindFor(flags);
FunctionLiteralT function = impl()->ParseFunctionLiteral(
name, scanner()->location(), name_validity, kind, pos,
@@ -4097,7 +4089,7 @@ ParserBase<Impl>::ParseHoistableDeclaration(
// a flag and UseCounting violations to assess web compatibility.
bool is_sloppy_block_function = is_sloppy(language_mode()) &&
!scope()->is_declaration_scope() &&
- !is_async && !is_generator;
+ flags == ParseFunctionFlag::kIsNormal;
return impl()->DeclareFunction(variable_name, function, mode, pos,
is_sloppy_block_function, names, ok);
@@ -4187,7 +4179,7 @@ ParserBase<Impl>::ParseAsyncFunctionDeclaration(
return impl()->NullStatement();
}
Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
- ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
+ ParseFunctionFlags flags = ParseFunctionFlag::kIsAsync;
return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
}
@@ -4195,7 +4187,8 @@ template <typename Impl>
void ParserBase<Impl>::ParseFunctionBody(
typename ParserBase<Impl>::StatementListT result, IdentifierT function_name,
int pos, const FormalParametersT& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok) {
+ FunctionLiteral::FunctionType function_type, FunctionBodyType body_type,
+ bool accept_IN, bool* ok) {
DeclarationScope* function_scope = scope()->AsDeclarationScope();
DeclarationScope* inner_scope = function_scope;
BlockT inner_block = impl()->NullStatement();
@@ -4209,35 +4202,56 @@ void ParserBase<Impl>::ParseFunctionBody(
body = inner_block->statements();
}
- // If we are parsing the source as if it is wrapped in a function, the source
- // ends without a closing brace.
- Token::Value closing_token =
- function_type == FunctionLiteral::kWrapped ? Token::EOS : Token::RBRACE;
-
{
BlockState block_state(&scope_, inner_scope);
if (IsResumableFunction(kind)) impl()->PrepareGeneratorVariables();
- if (IsAsyncGeneratorFunction(kind)) {
- impl()->ParseAndRewriteAsyncGeneratorFunctionBody(pos, kind, body, ok);
- } else if (IsGeneratorFunction(kind)) {
- impl()->ParseAndRewriteGeneratorFunctionBody(pos, kind, body, ok);
- } else if (IsAsyncFunction(kind)) {
- ParseAsyncFunctionBody(inner_scope, body, CHECK_OK_VOID);
+ if (body_type == FunctionBodyType::kExpression) {
+ ExpressionClassifier classifier(this);
+ ExpressionT expression =
+ ParseAssignmentExpression(accept_IN, CHECK_OK_VOID);
+ ValidateExpression(CHECK_OK_VOID);
+
+ if (IsAsyncFunction(kind)) {
+ BlockT block = factory()->NewBlock(1, true);
+ impl()->RewriteAsyncFunctionBody(body, block, expression,
+ CHECK_OK_VOID);
+ } else {
+ body->Add(BuildReturnStatement(expression, expression->position()),
+ zone());
+ }
} else {
- ParseStatementList(body, closing_token, CHECK_OK_VOID);
- }
+ DCHECK(accept_IN);
+ DCHECK_EQ(FunctionBodyType::kBlock, body_type);
+ // If we are parsing the source as if it is wrapped in a function, the
+ // source ends without a closing brace.
+ Token::Value closing_token = function_type == FunctionLiteral::kWrapped
+ ? Token::EOS
+ : Token::RBRACE;
+
+ if (IsAsyncGeneratorFunction(kind)) {
+ impl()->ParseAndRewriteAsyncGeneratorFunctionBody(pos, kind, body,
+ CHECK_OK_VOID);
+ } else if (IsGeneratorFunction(kind)) {
+ impl()->ParseAndRewriteGeneratorFunctionBody(pos, kind, body,
+ CHECK_OK_VOID);
+ } else if (IsAsyncFunction(kind)) {
+ ParseAsyncFunctionBody(inner_scope, body, CHECK_OK_VOID);
+ } else {
+ ParseStatementList(body, closing_token, CHECK_OK_VOID);
+ }
- if (IsDerivedConstructor(kind)) {
- body->Add(factory()->NewReturnStatement(impl()->ThisExpression(),
- kNoSourcePosition),
- zone());
+ if (IsDerivedConstructor(kind)) {
+ body->Add(factory()->NewReturnStatement(impl()->ThisExpression(),
+ kNoSourcePosition),
+ zone());
+ }
+ Expect(closing_token, CHECK_OK_VOID);
}
}
- Expect(closing_token, CHECK_OK_VOID);
- scope()->set_end_position(scanner()->location().end_pos);
+ scope()->set_end_position(end_position());
if (!parameters.is_simple) {
DCHECK_NOT_NULL(inner_scope);
@@ -4256,7 +4270,7 @@ void ParserBase<Impl>::ParseFunctionBody(
init_block = impl()->BuildRejectPromiseOnException(init_block);
}
- inner_scope->set_end_position(scanner()->location().end_pos);
+ inner_scope->set_end_position(end_position());
if (inner_scope->FinalizeBlockScope() != nullptr) {
impl()->CheckConflictingVarDeclarations(inner_scope, CHECK_OK_VOID);
impl()->InsertShadowingVarBindingInitializers(inner_block);
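ParseFunctionBody now also covers the single-expression arrow case that previously lived in ParseSingleExpressionFunctionBody (deleted further down). The body_type parameter this relies on is assumed to be a two-value enum introduced elsewhere in the patch, roughly:

    enum class FunctionBodyType { kExpression, kBlock };  // assumption

    // x => x + 1         -> kExpression; accept_IN forwarded from caller
    // x => { return x; } -> kBlock; accept_IN is required to be true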
@@ -4315,7 +4329,7 @@ void ParserBase<Impl>::CheckArityRestrictions(int param_count,
template <typename Impl>
bool ParserBase<Impl>::IsNextLetKeyword() {
- DCHECK(peek() == Token::LET);
+ DCHECK_EQ(Token::LET, peek());
Token::Value next_next = PeekAhead();
switch (next_next) {
case Token::LBRACE:
@@ -4340,17 +4354,13 @@ bool ParserBase<Impl>::IsNextLetKeyword() {
template <typename Impl>
bool ParserBase<Impl>::IsTrivialExpression() {
- Token::Value peek_token = peek();
- if (peek_token == Token::SMI || peek_token == Token::NUMBER ||
- peek_token == Token::BIGINT || peek_token == Token::NULL_LITERAL ||
- peek_token == Token::TRUE_LITERAL || peek_token == Token::FALSE_LITERAL ||
- peek_token == Token::STRING || peek_token == Token::IDENTIFIER ||
- peek_token == Token::THIS) {
- // PeekAhead() is expensive & may not always be called, so we only call it
- // after checking peek().
+ if (Token::IsTrivialExpressionToken(peek())) {
+ // PeekAhead() may not always be called, so we only call it after checking
+ // peek().
Token::Value peek_ahead = PeekAhead();
if (peek_ahead == Token::COMMA || peek_ahead == Token::RPAREN ||
- peek_ahead == Token::SEMICOLON || peek_ahead == Token::RBRACK) {
+ peek_ahead == Token::SEMICOLON || peek_ahead == Token::RBRACK ||
+ Token::IsAssignmentOp(peek_ahead)) {
return true;
}
}
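The trivial-expression fast path now delegates the first-token test to Token::IsTrivialExpressionToken and additionally accepts an assignment operator as the look-ahead token. Illustrative effect, assumed from the token lists above:

    // previously: the token after a literal/identifier had to be one of
    //   ','  ')'  ';'  ']'
    // now an assignment operator also qualifies, so e.g. "x = ..." takes
    // the fast path as well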
@@ -4421,39 +4431,52 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
// parameters.
int dummy_num_parameters = -1;
DCHECK_NE(kind & FunctionKind::kArrowFunction, 0);
- LazyParsingResult result = impl()->SkipFunction(
+ FunctionLiteral::EagerCompileHint hint;
+ bool did_preparse_successfully = impl()->SkipFunction(
nullptr, kind, FunctionLiteral::kAnonymousExpression,
formal_parameters.scope, &dummy_num_parameters,
- &produced_preparsed_scope_data, false, false, CHECK_OK);
- DCHECK_NE(result, kLazyParsingAborted);
+ &produced_preparsed_scope_data, false, false, &hint, CHECK_OK);
+
DCHECK_NULL(produced_preparsed_scope_data);
- USE(result);
- formal_parameters.scope->ResetAfterPreparsing(ast_value_factory_,
- false);
- // Discard any queued destructuring assignments which appeared
- // in this function's parameter list, and which were adopted
- // into this function state, above.
- function_state.RewindDestructuringAssignments(0);
+
+ if (did_preparse_successfully) {
+ // Discard any queued destructuring assignments which appeared
+ // in this function's parameter list, and which were adopted
+ // into this function state, above.
+ function_state.RewindDestructuringAssignments(0);
+ } else {
+ // In case we did not successfully preparse the function because of an
+ // unidentified error, we do a full reparse to report the error.
+ Consume(Token::LBRACE);
+ body = impl()->NewStatementList(8);
+ ParseFunctionBody(body, impl()->NullIdentifier(), kNoSourcePosition,
+ formal_parameters, kind,
+ FunctionLiteral::kAnonymousExpression,
+ FunctionBodyType::kBlock, true, ok);
+ CHECK(!*ok);
+ return impl()->NullExpression();
+ }
} else {
Consume(Token::LBRACE);
body = impl()->NewStatementList(8);
ParseFunctionBody(body, impl()->NullIdentifier(), kNoSourcePosition,
formal_parameters, kind,
- FunctionLiteral::kAnonymousExpression, CHECK_OK);
+ FunctionLiteral::kAnonymousExpression,
+ FunctionBodyType::kBlock, true, CHECK_OK);
expected_property_count = function_state.expected_property_count();
}
} else {
// Single-expression body
has_braces = false;
- const bool is_async = IsAsyncFunction(kind);
body = impl()->NewStatementList(1);
- impl()->AddParameterInitializationBlock(formal_parameters, body, is_async,
- CHECK_OK);
- ParseSingleExpressionFunctionBody(body, is_async, accept_IN, CHECK_OK);
+ ParseFunctionBody(body, impl()->NullIdentifier(), kNoSourcePosition,
+ formal_parameters, kind,
+ FunctionLiteral::kAnonymousExpression,
+ FunctionBodyType::kExpression, accept_IN, CHECK_OK);
expected_property_count = function_state.expected_property_count();
}
- formal_parameters.scope->set_end_position(scanner()->location().end_pos);
+ formal_parameters.scope->set_end_position(end_position());
// Arrow function formal parameters are parsed as StrictFormalParameterList,
// which is not the same as "parameters of a strict function"; it only means
@@ -4466,7 +4489,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
// Validate strict mode.
if (is_strict(language_mode())) {
CheckStrictOctalLiteral(formal_parameters.scope->start_position(),
- scanner()->location().end_pos, CHECK_OK);
+ end_position(), CHECK_OK);
}
impl()->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
@@ -4532,9 +4555,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
class_info.is_anonymous = is_anonymous;
impl()->DeclareClassVariable(name, &class_info, class_token_pos, CHECK_OK);
- scope()->set_start_position(scanner()->location().end_pos);
+ scope()->set_start_position(end_position());
if (Check(Token::EXTENDS)) {
- FuncNameInferrer::State fni_state(fni_);
+ FuncNameInferrerState fni_state(&fni_);
ExpressionClassifier extends_classifier(this);
class_info.extends = ParseLeftHandSideExpression(CHECK_OK);
ValidateExpression(CHECK_OK);
@@ -4548,7 +4571,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
const bool has_extends = !impl()->IsNull(class_info.extends);
while (peek() != Token::RBRACE) {
if (Check(Token::SEMICOLON)) continue;
- FuncNameInferrer::State fni_state(fni_);
+ FuncNameInferrerState fni_state(&fni_);
bool is_computed_name = false; // Classes do not care about computed
// property names here.
bool is_static;
@@ -4580,32 +4603,13 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
}
Expect(Token::RBRACE, CHECK_OK);
- int end_pos = scanner()->location().end_pos;
+ int end_pos = end_position();
block_scope->set_end_position(end_pos);
return impl()->RewriteClassLiteral(block_scope, name, &class_info,
class_token_pos, end_pos, ok);
}
template <typename Impl>
-void ParserBase<Impl>::ParseSingleExpressionFunctionBody(StatementListT body,
- bool is_async,
- bool accept_IN,
- bool* ok) {
- if (is_async) impl()->PrepareGeneratorVariables();
-
- ExpressionClassifier classifier(this);
- ExpressionT expression = ParseAssignmentExpression(accept_IN, CHECK_OK_VOID);
- ValidateExpression(CHECK_OK_VOID);
-
- if (is_async) {
- BlockT block = factory()->NewBlock(1, true);
- impl()->RewriteAsyncFunctionBody(body, block, expression, CHECK_OK_VOID);
- } else {
- body->Add(BuildReturnStatement(expression, expression->position()), zone());
- }
-}
-
-template <typename Impl>
void ParserBase<Impl>::ParseAsyncFunctionBody(Scope* scope, StatementListT body,
bool* ok) {
BlockT block = factory()->NewBlock(8, true);
@@ -4614,7 +4618,7 @@ void ParserBase<Impl>::ParseAsyncFunctionBody(Scope* scope, StatementListT body,
impl()->RewriteAsyncFunctionBody(
body, block, factory()->NewUndefinedLiteral(kNoSourcePosition),
CHECK_OK_VOID);
- scope->set_end_position(scanner()->location().end_pos);
+ scope->set_end_position(end_position());
}
template <typename Impl>
@@ -4633,9 +4637,9 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
IdentifierT name = impl()->NullIdentifier();
FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
- bool is_generator = Check(Token::MUL);
- const bool kIsAsync = true;
- const FunctionKind kind = FunctionKindFor(is_generator, kIsAsync);
+ ParseFunctionFlags flags = ParseFunctionFlag::kIsAsync;
+ if (Check(Token::MUL)) flags |= ParseFunctionFlag::kIsGenerator;
+ const FunctionKind kind = FunctionKindFor(flags);
if (impl()->ParsingDynamicFunctionDeclaration()) {
// We don't want dynamic functions to actually declare their name
@@ -4659,7 +4663,7 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
name, scanner()->location(),
is_strict_reserved ? kFunctionNameIsStrictReserved
: kFunctionNameValidityUnknown,
- kind, pos, type, language_mode(), nullptr, CHECK_OK);
+ kind, pos, type, language_mode(), nullptr, ok);
}
template <typename Impl>
@@ -5112,6 +5116,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
Expect(Token::LBRACE, CHECK_OK_CUSTOM(NullStatement));
{
BlockState block_state(zone(), &scope_);
+ // Scope starts before opening brace.
scope()->set_start_position(scanner()->location().beg_pos);
typename Types::Target target(this, body);
@@ -5123,9 +5128,10 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
}
Expect(Token::RBRACE, CHECK_OK_CUSTOM(NullStatement));
- int end_pos = scanner()->location().end_pos;
- scope()->set_end_position(end_pos);
- impl()->RecordBlockSourceRange(body, end_pos);
+ // Scope ends after closing brace.
+ scope()->set_end_position(scanner()->location().end_pos);
+ // Coverage range uses position before closing brace.
+ impl()->RecordBlockSourceRange(body, scanner()->location().beg_pos);
body->set_scope(scope()->FinalizeBlockScope());
}
return body;
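The block hunk separates two positions that the old code conflated: the scope's end (after the closing brace) and the coverage range's end (before it). Roughly:

    // { stmt; }
    // ^       ^
    // |       '}' beg_pos -> RecordBlockSourceRange(...)
    // |       '}' end_pos -> scope()->set_end_position(...)
    // '{' beg_pos -> scope()->set_start_position(...)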
@@ -5144,7 +5150,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
BlockT block = factory()->NewBlock(1, false);
StatementT body = ParseFunctionDeclaration(CHECK_OK);
block->statements()->Add(body, zone());
- scope()->set_end_position(scanner()->location().end_pos);
+ scope()->set_end_position(end_position());
block->set_scope(scope()->FinalizeBlockScope());
return block;
}
@@ -5172,7 +5178,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseVariableStatement(
DeclarationParsingResult parsing_result;
StatementT result =
ParseVariableDeclarations(var_context, &parsing_result, names, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
+ ExpectSemicolon(ok);
return result;
}
@@ -5234,7 +5240,7 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
}
bool starts_with_identifier = peek_any_identifier();
- ExpressionT expr = ParseExpression(true, CHECK_OK);
+ ExpressionT expr = ParseExpression(CHECK_OK);
if (peek() == Token::COLON && starts_with_identifier &&
impl()->IsIdentifier(expr)) {
// The whole expression was a single identifier, and not, e.g.,
@@ -5273,7 +5279,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
int pos = peek_position();
Expect(Token::IF, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- ExpressionT condition = ParseExpression(true, CHECK_OK);
+ ExpressionT condition = ParseExpression(CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
SourceRange then_range, else_range;
@@ -5285,9 +5291,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
StatementT else_statement = impl()->NullStatement();
if (Check(Token::ELSE)) {
- else_range = SourceRange::ContinuationOf(then_range);
else_statement = ParseScopedStatement(labels, CHECK_OK);
- else_range.end = scanner_->location().end_pos;
+ else_range = SourceRange::ContinuationOf(then_range, end_position());
} else {
else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
}
@@ -5330,7 +5335,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseContinueStatement(
}
ExpectSemicolon(CHECK_OK);
StatementT stmt = factory()->NewContinueStatement(target, pos);
- impl()->RecordJumpStatementSourceRange(stmt, scanner_->location().end_pos);
+ impl()->RecordJumpStatementSourceRange(stmt, end_position());
return stmt;
}
@@ -5369,7 +5374,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseBreakStatement(
}
ExpectSemicolon(CHECK_OK);
StatementT stmt = factory()->NewBreakStatement(target, pos);
- impl()->RecordJumpStatementSourceRange(stmt, scanner_->location().end_pos);
+ impl()->RecordJumpStatementSourceRange(stmt, end_position());
return stmt;
}
@@ -5404,14 +5409,14 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
return_value = impl()->ThisExpression(loc.beg_pos);
}
} else {
- return_value = ParseExpression(true, CHECK_OK);
+ return_value = ParseExpression(CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
return_value = impl()->RewriteReturn(return_value, loc.beg_pos);
- int continuation_pos = scanner_->location().end_pos;
+ int continuation_pos = end_position();
StatementT stmt =
BuildReturnStatement(return_value, loc.beg_pos, continuation_pos);
- impl()->RecordJumpStatementSourceRange(stmt, scanner_->location().end_pos);
+ impl()->RecordJumpStatementSourceRange(stmt, end_position());
return stmt;
}
@@ -5431,7 +5436,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
}
Expect(Token::LPAREN, CHECK_OK);
- ExpressionT expr = ParseExpression(true, CHECK_OK);
+ ExpressionT expr = ParseExpression(CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
Scope* with_scope = NewScope(WITH_SCOPE);
@@ -5440,7 +5445,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
BlockState block_state(&scope_, with_scope);
with_scope->set_start_position(scanner()->peek_location().beg_pos);
body = ParseStatement(labels, nullptr, CHECK_OK);
- with_scope->set_end_position(scanner()->location().end_pos);
+ with_scope->set_end_position(end_position());
}
return factory()->NewWithStatement(with_scope, expr, body, pos);
}
@@ -5467,7 +5472,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- ExpressionT cond = ParseExpression(true, CHECK_OK);
+ ExpressionT cond = ParseExpression(CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
// Allow do-statements to be terminated with and without
@@ -5497,7 +5502,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- ExpressionT cond = ParseExpression(true, CHECK_OK);
+ ExpressionT cond = ParseExpression(CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
{
SourceRangeScope range_scope(scanner(), &body_range);
@@ -5523,11 +5528,11 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseThrowStatement(
*ok = false;
return impl()->NullStatement();
}
- ExpressionT exception = ParseExpression(true, CHECK_OK);
+ ExpressionT exception = ParseExpression(CHECK_OK);
ExpectSemicolon(CHECK_OK);
StatementT stmt = impl()->NewThrowStatement(exception, pos);
- impl()->RecordThrowSourceRange(stmt, scanner_->location().end_pos);
+ impl()->RecordThrowSourceRange(stmt, end_position());
return stmt;
}
@@ -5545,7 +5550,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
Expect(Token::SWITCH, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- ExpressionT tag = ParseExpression(true, CHECK_OK);
+ ExpressionT tag = ParseExpression(CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
auto switch_statement =
@@ -5565,7 +5570,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
SourceRange clause_range;
SourceRangeScope range_scope(scanner(), &clause_range);
if (Check(Token::CASE)) {
- label = ParseExpression(true, CHECK_OK);
+ label = ParseExpression(CHECK_OK);
} else {
Expect(Token::DEFAULT, CHECK_OK);
if (default_seen) {
@@ -5588,9 +5593,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
}
Expect(Token::RBRACE, CHECK_OK);
- int end_position = scanner()->location().end_pos;
- scope()->set_end_position(end_position);
- impl()->RecordSwitchStatementSourceRange(switch_statement, end_position);
+ int end_pos = end_position();
+ scope()->set_end_position(end_pos);
+ impl()->RecordSwitchStatementSourceRange(switch_statement, end_pos);
Scope* switch_scope = scope()->FinalizeBlockScope();
if (switch_scope != nullptr) {
return impl()->RewriteSwitchStatement(switch_statement, switch_scope);
@@ -5659,8 +5664,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
} else {
ExpressionClassifier pattern_classifier(this);
- catch_info.pattern = ParsePrimaryExpression(CHECK_OK);
- ValidateBindingPattern(CHECK_OK);
+ catch_info.pattern = ParseBindingPattern(CHECK_OK);
}
Expect(Token::RPAREN, CHECK_OK);
@@ -5672,12 +5676,12 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
catch_block->statements()->Add(catch_info.inner_block, zone());
impl()->ValidateCatchBlock(catch_info, CHECK_OK);
- scope()->set_end_position(scanner()->location().end_pos);
+ scope()->set_end_position(end_position());
catch_block->set_scope(scope()->FinalizeBlockScope());
}
}
- catch_info.scope->set_end_position(scanner()->location().end_pos);
+ catch_info.scope->set_end_position(end_position());
} else {
catch_block = ParseBlock(nullptr, CHECK_OK);
}
@@ -5777,7 +5781,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
int lhs_beg_pos = peek_position();
ExpressionClassifier classifier(this);
ExpressionT expression = ParseExpressionCoverGrammar(false, CHECK_OK);
- int lhs_end_pos = scanner()->location().end_pos;
+ int lhs_end_pos = end_position();
bool is_for_each = CheckInOrOf(&for_info.mode);
bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
@@ -5853,7 +5857,7 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
enumerable = ParseAssignmentExpression(true, CHECK_OK);
ValidateExpression(CHECK_OK);
} else {
- enumerable = ParseExpression(true, CHECK_OK);
+ enumerable = ParseExpression(CHECK_OK);
}
Expect(Token::RPAREN, CHECK_OK);
@@ -5861,7 +5865,7 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
Scope* for_scope = nullptr;
if (inner_block_scope != nullptr) {
for_scope = inner_block_scope->outer_scope();
- DCHECK(for_scope == scope());
+ DCHECK_EQ(for_scope, scope());
inner_block_scope->set_start_position(scanner()->location().beg_pos);
}
@@ -5882,7 +5886,7 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
body_block->statements()->Add(body, zone());
if (inner_block_scope != nullptr) {
- inner_block_scope->set_end_position(scanner()->location().end_pos);
+ inner_block_scope->set_end_position(end_position());
body_block->set_scope(inner_block_scope->FinalizeBlockScope());
}
}
@@ -5890,10 +5894,11 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
StatementT final_loop = impl()->InitializeForEachStatement(
loop, each_variable, enumerable, body_block);
- init_block = impl()->CreateForEachStatementTDZ(init_block, *for_info, ok);
+ init_block =
+ impl()->CreateForEachStatementTDZ(init_block, *for_info, CHECK_OK);
if (for_scope != nullptr) {
- for_scope->set_end_position(scanner()->location().end_pos);
+ for_scope->set_end_position(end_position());
for_scope = for_scope->FinalizeBlockScope();
}
@@ -5931,7 +5936,7 @@ ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
enumerable = ParseAssignmentExpression(true, CHECK_OK);
ValidateExpression(CHECK_OK);
} else {
- enumerable = ParseExpression(true, CHECK_OK);
+ enumerable = ParseExpression(CHECK_OK);
}
Expect(Token::RPAREN, CHECK_OK);
@@ -5965,10 +5970,10 @@ ParserBase<Impl>::ParseStandardForLoopWithLexicalDeclarations(
scope()->set_start_position(scanner()->location().beg_pos);
loop = ParseStandardForLoop(stmt_pos, labels, own_labels, &cond, &next,
&body, CHECK_OK);
- scope()->set_end_position(scanner()->location().end_pos);
+ scope()->set_end_position(end_position());
}
- scope()->set_end_position(scanner()->location().end_pos);
+ scope()->set_end_position(end_position());
if (for_info->bound_names.length() > 0 &&
function_state_->contains_function_or_eval()) {
scope()->set_is_hidden();
@@ -6014,12 +6019,12 @@ typename ParserBase<Impl>::ForStatementT ParserBase<Impl>::ParseStandardForLoop(
typename Types::Target target(this, loop);
if (peek() != Token::SEMICOLON) {
- *cond = ParseExpression(true, CHECK_OK);
+ *cond = ParseExpression(CHECK_OK);
}
Expect(Token::SEMICOLON, CHECK_OK);
if (peek() != Token::RPAREN) {
- ExpressionT exp = ParseExpression(true, CHECK_OK);
+ ExpressionT exp = ParseExpression(CHECK_OK);
*next = factory()->NewExpressionStatement(exp, exp->position());
}
Expect(Token::RPAREN, CHECK_OK);
@@ -6114,7 +6119,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
BlockState inner_state(&scope_, inner_block_scope);
ExpressionClassifier classifier(this);
ExpressionT lhs = each_variable = ParseLeftHandSideExpression(CHECK_OK);
- int lhs_end_pos = scanner()->location().end_pos;
+ int lhs_end_pos = end_position();
if (lhs->IsArrayLiteral() || lhs->IsObjectLiteral()) {
ValidateAssignmentPattern(CHECK_OK);
@@ -6149,7 +6154,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
SourceRangeScope range_scope(scanner(), &body_range);
body = ParseStatement(nullptr, nullptr, CHECK_OK);
- scope()->set_end_position(scanner()->location().end_pos);
+ scope()->set_end_position(end_position());
impl()->RecordIterationStatementSourceRange(loop, range_scope.Finalize());
if (has_declarations) {
@@ -6177,10 +6182,10 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
return final_loop;
}
- BlockT init_block =
- impl()->CreateForEachStatementTDZ(impl()->NullStatement(), for_info, ok);
+ BlockT init_block = impl()->CreateForEachStatementTDZ(impl()->NullStatement(),
+ for_info, CHECK_OK);
- scope()->set_end_position(scanner()->location().end_pos);
+ scope()->set_end_position(end_position());
Scope* for_scope = scope()->FinalizeBlockScope();
// Parsed for-in loop w/ variable declarations.
if (!impl()->IsNull(init_block)) {
@@ -6209,10 +6214,9 @@ void ParserBase<Impl>::ObjectLiteralChecker::CheckDuplicateProto(
template <typename Impl>
void ParserBase<Impl>::ClassLiteralChecker::CheckClassMethodName(
- Token::Value property, PropertyKind type, bool is_generator, bool is_async,
+ Token::Value property, ParsePropertyKind type, ParseFunctionFlags flags,
bool is_static, bool* ok) {
- DCHECK(type == PropertyKind::kMethodProperty ||
- type == PropertyKind::kAccessorProperty);
+ DCHECK(type == ParsePropertyKind::kMethod || IsAccessor(type));
if (property == Token::SMI || property == Token::NUMBER) return;
@@ -6223,11 +6227,13 @@ void ParserBase<Impl>::ClassLiteralChecker::CheckClassMethodName(
return;
}
} else if (IsConstructor()) {
- if (is_generator || is_async || type == PropertyKind::kAccessorProperty) {
+ if (flags != ParseFunctionFlag::kIsNormal || IsAccessor(type)) {
MessageTemplate::Template msg =
- is_generator ? MessageTemplate::kConstructorIsGenerator
- : is_async ? MessageTemplate::kConstructorIsAsync
- : MessageTemplate::kConstructorIsAccessor;
+ (flags & ParseFunctionFlag::kIsGenerator) != 0
+ ? MessageTemplate::kConstructorIsGenerator
+ : (flags & ParseFunctionFlag::kIsAsync) != 0
+ ? MessageTemplate::kConstructorIsAsync
+ : MessageTemplate::kConstructorIsAccessor;
this->parser()->ReportMessage(msg);
*ok = false;
return;
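
The rewritten constructor check folds the old is_generator/is_async booleans into one ParseFunctionFlags value. A hedged reconstruction of the encoding the bit tests above rely on (the values are an assumption for illustration; the real definitions live in parser-base.h):

    #include <cstdint>

    // Hypothetical flag encoding: the empty set means a normal function,
    // and generator/async occupy distinct bits so both can be set at once.
    enum ParseFunctionFlag : uint8_t {
      kIsNormal = 0,
      kIsGenerator = 1 << 0,
      kIsAsync = 1 << 1,
    };
    using ParseFunctionFlags = uint8_t;  // bitwise OR of ParseFunctionFlag

Under this encoding, "flags != ParseFunctionFlag::kIsNormal" is exactly "generator, async, or both", which is why the single comparison can replace the two boolean tests.
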
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 41ff551091..6d0d9fff21 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -29,56 +29,6 @@
namespace v8 {
namespace internal {
-
-
-// Helper for putting parts of the parse results into a temporary zone when
-// parsing inner function bodies.
-class DiscardableZoneScope {
- public:
- DiscardableZoneScope(Parser* parser, Zone* temp_zone, bool use_temp_zone)
- : fni_(parser->ast_value_factory_, temp_zone),
- parser_(parser),
- prev_fni_(parser->fni_),
- prev_zone_(parser->zone_),
- prev_allow_lazy_(parser->allow_lazy_),
- prev_temp_zoned_(parser->temp_zoned_) {
- if (use_temp_zone) {
- DCHECK(!parser_->temp_zoned_);
- parser_->allow_lazy_ = false;
- parser_->temp_zoned_ = true;
- parser_->fni_ = &fni_;
- parser_->zone_ = temp_zone;
- parser_->factory()->set_zone(temp_zone);
- if (parser_->reusable_preparser_ != nullptr) {
- parser_->reusable_preparser_->zone_ = temp_zone;
- parser_->reusable_preparser_->factory()->set_zone(temp_zone);
- }
- }
- }
- void Reset() {
- parser_->fni_ = prev_fni_;
- parser_->zone_ = prev_zone_;
- parser_->factory()->set_zone(prev_zone_);
- parser_->allow_lazy_ = prev_allow_lazy_;
- parser_->temp_zoned_ = prev_temp_zoned_;
- if (parser_->reusable_preparser_ != nullptr) {
- parser_->reusable_preparser_->zone_ = prev_zone_;
- parser_->reusable_preparser_->factory()->set_zone(prev_zone_);
- }
- }
- ~DiscardableZoneScope() { Reset(); }
-
- private:
- FuncNameInferrer fni_;
- Parser* parser_;
- FuncNameInferrer* prev_fni_;
- Zone* prev_zone_;
- bool prev_allow_lazy_;
- bool prev_temp_zoned_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscardableZoneScope);
-};
-
FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
bool call_super, int pos,
int end_pos) {
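
The deleted DiscardableZoneScope swapped the parser's (and preparser's) zone pointer to a temporary zone for the duration of one function body, then swapped it back. The replacement, introduced below as preparser_zone_, instead keeps one dedicated arena for throwaway preparse allocations. A self-contained sketch of the idea, using a stand-in arena type rather than V8's Zone:

    #include <cstddef>
    #include <memory>
    #include <vector>

    // Stand-in for v8::internal::Zone: a bump-style arena whose storage
    // can be dropped wholesale (Zone::DeleteAll in V8).
    class Arena {
     public:
      char* Allocate(std::size_t n) {
        blocks_.emplace_back(new char[n]);
        return blocks_.back().get();
      }
      void DeleteAll() { blocks_.clear(); }  // discard everything at once

     private:
      std::vector<std::unique_ptr<char[]>> blocks_;
    };

    // The parser now owns two arenas. Preparse scratch data goes to the
    // second, so no RAII pointer-swapping is needed and the scratch arena
    // can be reset between functions.
    struct ParserArenas {
      Arena main_zone;       // AST and scopes that must outlive parsing
      Arena preparser_zone;  // throwaway preparse allocations
      Arena* parse_zone(bool should_preparse) {
        return should_preparse ? &preparser_zone : &main_zone;
      }
    };
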
@@ -414,12 +364,12 @@ Parser::Parser(ParseInfo* info)
info->is_module(), true),
scanner_(info->unicode_cache(), info->character_stream(),
info->is_module()),
+ preparser_zone_(info->zone()->allocator(), ZONE_NAME),
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
source_range_map_(info->source_range_map()),
target_stack_(nullptr),
total_preparse_skipped_(0),
- temp_zoned_(false),
consumed_preparsed_scope_data_(info->consumed_preparsed_scope_data()),
parameters_end_pos_(info->parameters_end_pos()) {
// Even though we were passed ParseInfo, we should not store it in
@@ -449,7 +399,6 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_static_fields(FLAG_harmony_static_fields);
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
set_allow_harmony_import_meta(FLAG_harmony_import_meta);
- set_allow_harmony_bigint(FLAG_harmony_bigint);
set_allow_harmony_numeric_separator(FLAG_harmony_numeric_separator);
set_allow_harmony_private_fields(FLAG_harmony_private_fields);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
@@ -458,22 +407,27 @@ Parser::Parser(ParseInfo* info)
}
}
-void Parser::DeserializeScopeChain(
- Isolate* isolate, ParseInfo* info,
- MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
+void Parser::InitializeEmptyScopeChain(ParseInfo* info) {
+ DCHECK_NULL(original_scope_);
+ DCHECK_NULL(info->script_scope());
// TODO(wingo): Add an outer SCRIPT_SCOPE corresponding to the native
// context, which will have the "this" binding for script scopes.
DeclarationScope* script_scope = NewScriptScope();
info->set_script_scope(script_scope);
- Scope* scope = script_scope;
+ original_scope_ = script_scope;
+}
+
+void Parser::DeserializeScopeChain(
+ Isolate* isolate, ParseInfo* info,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
+ InitializeEmptyScopeChain(info);
Handle<ScopeInfo> outer_scope_info;
if (maybe_outer_scope_info.ToHandle(&outer_scope_info)) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
- scope = Scope::DeserializeScopeChain(
- isolate, zone(), *outer_scope_info, script_scope, ast_value_factory(),
- Scope::DeserializationMode::kScopesOnly);
+ original_scope_ = Scope::DeserializeScopeChain(
+ isolate, zone(), *outer_scope_info, info->script_scope(),
+ ast_value_factory(), Scope::DeserializationMode::kScopesOnly);
}
- original_scope_ = scope;
}
namespace {
@@ -503,7 +457,6 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseProgram");
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
- fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
// Initialize parser state.
DeserializeScopeChain(isolate, info, info->maybe_outer_scope_info());
@@ -749,8 +702,7 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
DCHECK_NULL(target_stack_);
DCHECK(ast_value_factory());
- fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
- fni_->PushEnclosingName(raw_name);
+ fni_.PushEnclosingName(raw_name);
ResetFunctionLiteralId();
DCHECK_LT(0, info->function_literal_id());
@@ -2584,116 +2536,71 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
int function_literal_id = GetNextFunctionLiteralId();
ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr;
- Zone* outer_zone = zone();
- DeclarationScope* scope;
-
- {
- // Temporary zones can nest. When we migrate free variables (see below), we
- // need to recreate them in the previous Zone.
- AstNodeFactory previous_zone_ast_node_factory(ast_value_factory(), zone());
-
- // Open a new zone scope, which sets our AstNodeFactory to allocate in the
- // new temporary zone if the preconditions are satisfied, and ensures that
- // the previous zone is always restored after parsing the body. To be able
- // to do scope analysis correctly after full parsing, we migrate needed
- // information when the function is parsed.
- Zone temp_zone(zone()->allocator(), ZONE_NAME);
- DiscardableZoneScope zone_scope(this, &temp_zone, should_preparse);
-
- // This Scope lives in the main zone. We'll migrate data into that zone
- // later.
- scope = NewFunctionScope(kind, outer_zone);
- SetLanguageMode(scope, language_mode);
+ // This Scope lives in the main zone. We'll migrate data into that zone later.
+ Zone* parse_zone = should_preparse ? &preparser_zone_ : zone();
+ DeclarationScope* scope = NewFunctionScope(kind, parse_zone);
+ SetLanguageMode(scope, language_mode);
#ifdef DEBUG
- scope->SetScopeName(function_name);
- if (should_preparse) scope->set_needs_migration();
+ scope->SetScopeName(function_name);
#endif
- if (!is_wrapped) Expect(Token::LPAREN, CHECK_OK);
- scope->set_start_position(scanner()->location().beg_pos);
-
- // Eager or lazy parse? If is_lazy_top_level_function, we'll parse
- // lazily. We'll call SkipFunction, which may decide to
- // abort lazy parsing if it suspects that wasn't a good idea. If so (in
- // which case the parser is expected to have backtracked), or if we didn't
- // try to lazy parse in the first place, we'll have to parse eagerly.
- if (should_preparse) {
- DCHECK(parse_lazily());
- DCHECK(is_lazy_top_level_function || is_lazy_inner_function);
- DCHECK(!is_wrapped);
- Scanner::BookmarkScope bookmark(scanner());
- bookmark.Set();
- LazyParsingResult result = SkipFunction(
- function_name, kind, function_type, scope, &num_parameters,
- &produced_preparsed_scope_data, is_lazy_inner_function,
- is_lazy_top_level_function, CHECK_OK);
-
- if (result == kLazyParsingAborted) {
- DCHECK(is_lazy_top_level_function);
- bookmark.Apply();
- // This is probably an initialization function. Inform the compiler it
- // should also eager-compile this function.
- eager_compile_hint = FunctionLiteral::kShouldEagerCompile;
- scope->ResetAfterPreparsing(ast_value_factory(), true);
- zone_scope.Reset();
- // Trigger eager (re-)parsing, just below this block.
- should_preparse = false;
- }
- }
-
- if (should_preparse) {
- scope->AnalyzePartially(&previous_zone_ast_node_factory);
- } else {
- body = ParseFunction(
- function_name, pos, kind, function_type, scope, &num_parameters,
- &function_length, &has_duplicate_parameters, &expected_property_count,
- &suspend_count, arguments_for_wrapped_function, CHECK_OK);
- }
-
- DCHECK_EQ(should_preparse, temp_zoned_);
- if (V8_UNLIKELY(FLAG_log_function_events)) {
- double ms = timer.Elapsed().InMillisecondsF();
- const char* event_name = should_preparse
- ? (is_top_level ? "preparse-no-resolution"
- : "preparse-resolution")
- : "full-parse";
- logger_->FunctionEvent(
- event_name, script_id(), ms, scope->start_position(),
- scope->end_position(),
- reinterpret_cast<const char*>(function_name->raw_data()),
- function_name->byte_length());
- }
- if (V8_UNLIKELY(FLAG_runtime_stats)) {
- if (should_preparse) {
- RuntimeCallCounterId counter_id =
- parsing_on_main_thread_
- ? RuntimeCallCounterId::kPreParseWithVariableResolution
- : RuntimeCallCounterId::
- kPreParseBackgroundWithVariableResolution;
- if (is_top_level) {
- counter_id = parsing_on_main_thread_
- ? RuntimeCallCounterId::kPreParseNoVariableResolution
- : RuntimeCallCounterId::
- kPreParseBackgroundNoVariableResolution;
- }
- if (runtime_call_stats_) {
- runtime_call_stats_->CorrectCurrentCounterId(counter_id);
- }
- }
+ if (!is_wrapped) Expect(Token::LPAREN, CHECK_OK);
+ scope->set_start_position(position());
+
+ // Eager or lazy parse? If is_lazy_top_level_function, we'll parse
+ // lazily. We'll call SkipFunction, which may decide to
+ // abort lazy parsing if it suspects that wasn't a good idea. If so (in
+ // which case the parser is expected to have backtracked), or if we didn't
+ // try to lazy parse in the first place, we'll have to parse eagerly.
+ bool did_preparse_successfully =
+ should_preparse &&
+ SkipFunction(function_name, kind, function_type, scope, &num_parameters,
+ &produced_preparsed_scope_data, is_lazy_inner_function,
+ is_lazy_top_level_function, &eager_compile_hint, CHECK_OK);
+ if (!did_preparse_successfully) {
+ body = ParseFunction(
+ function_name, pos, kind, function_type, scope, &num_parameters,
+ &function_length, &has_duplicate_parameters, &expected_property_count,
+ &suspend_count, arguments_for_wrapped_function, CHECK_OK);
+ }
+
+ if (V8_UNLIKELY(FLAG_log_function_events)) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ const char* event_name =
+ should_preparse
+ ? (is_top_level ? "preparse-no-resolution" : "preparse-resolution")
+ : "full-parse";
+ logger_->FunctionEvent(
+ event_name, script_id(), ms, scope->start_position(),
+ scope->end_position(),
+ reinterpret_cast<const char*>(function_name->raw_data()),
+ function_name->byte_length());
+ }
+ if (V8_UNLIKELY(FLAG_runtime_stats) && did_preparse_successfully) {
+ const RuntimeCallCounterId counters[2][2] = {
+ {RuntimeCallCounterId::kPreParseBackgroundNoVariableResolution,
+ RuntimeCallCounterId::kPreParseNoVariableResolution},
+ {RuntimeCallCounterId::kPreParseBackgroundWithVariableResolution,
+ RuntimeCallCounterId::kPreParseWithVariableResolution}};
+ if (runtime_call_stats_) {
+ bool tracked_variables =
+ PreParser::ShouldTrackUnresolvedVariables(is_lazy_top_level_function);
+ runtime_call_stats_->CorrectCurrentCounterId(
+ counters[tracked_variables][parsing_on_main_thread_]);
}
+ }
- // Validate function name. We can do this only after parsing the function,
- // since the function can declare itself strict.
- language_mode = scope->language_mode();
- CheckFunctionName(language_mode, function_name, function_name_validity,
- function_name_location, CHECK_OK);
+ // Validate function name. We can do this only after parsing the function,
+ // since the function can declare itself strict.
+ language_mode = scope->language_mode();
+ CheckFunctionName(language_mode, function_name, function_name_validity,
+ function_name_location, CHECK_OK);
- if (is_strict(language_mode)) {
- CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
- CHECK_OK);
- }
- CheckConflictingVarDeclarations(scope, CHECK_OK);
- } // DiscardableZoneScope goes out of scope.
+ if (is_strict(language_mode)) {
+ CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
+ CHECK_OK);
+ }
+ CheckConflictingVarDeclarations(scope, CHECK_OK);
FunctionLiteral::ParameterFlag duplicate_parameters =
has_duplicate_parameters ? FunctionLiteral::kHasDuplicateParameters
@@ -2708,19 +2615,20 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_literal->set_suspend_count(suspend_count);
if (should_infer_name) {
- DCHECK_NOT_NULL(fni_);
- fni_->AddFunction(function_literal);
+ fni_.AddFunction(function_literal);
}
return function_literal;
}
-Parser::LazyParsingResult Parser::SkipFunction(
+bool Parser::SkipFunction(
const AstRawString* function_name, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters,
ProducedPreParsedScopeData** produced_preparsed_scope_data,
- bool is_inner_function, bool may_abort, bool* ok) {
+ bool is_inner_function, bool may_abort,
+ FunctionLiteral::EagerCompileHint* hint, bool* ok) {
FunctionState function_state(&function_state_, &scope_, function_scope);
+ function_scope->set_zone(&preparser_zone_);
DCHECK_NE(kNoSourcePosition, function_scope->start_position());
DCHECK_EQ(kNoSourcePosition, parameters_end_pos_);
@@ -2729,8 +2637,7 @@ Parser::LazyParsingResult Parser::SkipFunction(
scanner()->current_token() == Token::ARROW);
// FIXME(marja): There are 2 ways to skip functions now. Unify them.
- DCHECK_NOT_NULL(consumed_preparsed_scope_data_);
- if (consumed_preparsed_scope_data_->HasData()) {
+ if (consumed_preparsed_scope_data_) {
DCHECK(FLAG_preparser_scope_analysis);
int end_position;
LanguageMode language_mode;
@@ -2752,9 +2659,13 @@ Parser::LazyParsingResult Parser::SkipFunction(
function_scope->RecordSuperPropertyUsage();
}
SkipFunctionLiterals(num_inner_functions);
- return kLazyParsingComplete;
+ function_scope->ResetAfterPreparsing(ast_value_factory_, false);
+ return true;
}
+ Scanner::BookmarkScope bookmark(scanner());
+ bookmark.Set();
+
// With no cached data, we partially parse the function, without building an
// AST. This gathers the data needed to build a lazy function.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.PreParse");
@@ -2768,28 +2679,41 @@ Parser::LazyParsingResult Parser::SkipFunction(
may_abort, use_counts_, produced_preparsed_scope_data, this->script_id());
// Return immediately if pre-parser decided to abort parsing.
- if (result == PreParser::kPreParseAbort) return kLazyParsingAborted;
+ if (result == PreParser::kPreParseAbort) {
+ bookmark.Apply();
+ function_scope->ResetAfterPreparsing(ast_value_factory(), true);
+ *hint = FunctionLiteral::kShouldEagerCompile;
+ return false;
+ }
+
if (result == PreParser::kPreParseStackOverflow) {
// Propagate stack overflow.
set_stack_overflow();
*ok = false;
- return kLazyParsingComplete;
- }
- if (pending_error_handler()->has_pending_error()) {
+ } else if (pending_error_handler()->ErrorUnidentifiableByPreParser()) {
+ // If we encounter an error that the preparser cannot identify, we reset
+ // to the state before preparsing. The caller may then fully parse the
+ // function to identify the actual error.
+ bookmark.Apply();
+ function_scope->ResetAfterPreparsing(ast_value_factory(), true);
+ pending_error_handler()->ResetUnidentifiableError();
+ return false;
+ } else if (pending_error_handler()->has_pending_error()) {
*ok = false;
- return kLazyParsingComplete;
- }
+ } else {
+ set_allow_eval_cache(reusable_preparser()->allow_eval_cache());
- set_allow_eval_cache(reusable_preparser()->allow_eval_cache());
+ PreParserLogger* logger = reusable_preparser()->logger();
+ function_scope->set_end_position(logger->end());
+ Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
+ total_preparse_skipped_ +=
+ function_scope->end_position() - function_scope->start_position();
+ *num_parameters = logger->num_parameters();
+ SkipFunctionLiterals(logger->num_inner_functions());
+ function_scope->AnalyzePartially(factory());
+ }
- PreParserLogger* logger = reusable_preparser()->logger();
- function_scope->set_end_position(logger->end());
- Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
- total_preparse_skipped_ +=
- function_scope->end_position() - function_scope->start_position();
- *num_parameters = logger->num_parameters();
- SkipFunctionLiterals(logger->num_inner_functions());
- return kLazyParsingComplete;
+ return true;
}
Statement* Parser::BuildAssertIsCoercible(Variable* var,
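
Note the runtime-stats change above: a nested conditional becomes a two-by-two table indexed directly by a pair of booleans. A self-contained sketch of that idiom with placeholder counter names (the real RuntimeCallCounterId values appear in the diff):

    enum CounterId {
      kBackgroundNoResolution,
      kMainThreadNoResolution,
      kBackgroundWithResolution,
      kMainThreadWithResolution,
    };

    CounterId SelectCounter(bool tracked_variables, bool on_main_thread) {
      // Row: variable resolution off/on; column: background vs. main
      // thread. bool converts to 0/1, replacing two nested ?: chains.
      static const CounterId counters[2][2] = {
          {kBackgroundNoResolution, kMainThreadNoResolution},
          {kBackgroundWithResolution, kMainThreadWithResolution}};
      return counters[tracked_variables][on_main_thread];
    }
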
@@ -2863,7 +2787,7 @@ Block* Parser::BuildParameterInitializationBlock(
DCHECK(!parameters.is_simple);
DCHECK(scope()->is_function_scope());
DCHECK_EQ(scope(), parameters.scope);
- Block* init_block = factory()->NewBlock(1, true);
+ Block* init_block = factory()->NewBlock(parameters.num_parameters(), true);
int index = 0;
for (auto parameter : parameters.params) {
DeclarationDescriptor descriptor;
@@ -3002,19 +2926,6 @@ Block* Parser::BuildRejectPromiseOnException(Block* inner_block) {
return result;
}
-Expression* Parser::BuildResolvePromise(Expression* value, int pos) {
- // %ResolvePromise(.promise, value), .promise
- ZonePtrList<Expression>* args =
- new (zone()) ZonePtrList<Expression>(2, zone());
- args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
- args->Add(value, zone());
- Expression* call_runtime =
- factory()->NewCallRuntime(Runtime::kInlineResolvePromise, args, pos);
- return factory()->NewBinaryOperation(
- Token::COMMA, call_runtime,
- factory()->NewVariableProxy(PromiseVariable()), pos);
-}
-
Expression* Parser::BuildRejectPromise(Expression* value, int pos) {
// %promise_internal_reject(.promise, value, false), .promise
// Disables the additional debug event for the rejection since a debug event
@@ -3038,7 +2949,7 @@ Variable* Parser::PromiseVariable() {
Variable* promise = function_state_->scope()->promise_var();
if (promise == nullptr) {
promise = function_state_->scope()->DeclarePromiseVar(
- ast_value_factory()->empty_string());
+ ast_value_factory()->dot_promise_string());
}
return promise;
}
@@ -3128,7 +3039,8 @@ ZonePtrList<Statement>* Parser::ParseFunction(
*function_length = formals.function_length;
ZonePtrList<Statement>* body = new (zone()) ZonePtrList<Statement>(8, zone());
- ParseFunctionBody(body, function_name, pos, formals, kind, function_type, ok);
+ ParseFunctionBody(body, function_name, pos, formals, kind, function_type,
+ FunctionBodyType::kBlock, true, ok);
// Validate parameter names. We can do this only after parsing the function,
// since the function can declare itself strict.
@@ -3238,7 +3150,8 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
}
FunctionLiteral* Parser::CreateInitializerFunction(
- DeclarationScope* scope, ZonePtrList<ClassLiteral::Property>* fields) {
+ const char* name, DeclarationScope* scope,
+ ZonePtrList<ClassLiteral::Property>* fields) {
DCHECK_EQ(scope->function_kind(),
FunctionKind::kClassFieldsInitializerFunction);
// function() { .. class fields initializer .. }
@@ -3247,10 +3160,10 @@ FunctionLiteral* Parser::CreateInitializerFunction(
factory()->NewInitializeClassFieldsStatement(fields, kNoSourcePosition);
statements->Add(static_fields, zone());
return factory()->NewFunctionLiteral(
- ast_value_factory()->empty_string(), scope, statements, 0, 0, 0,
+ ast_value_factory()->GetOneByteString(name), scope, statements, 0, 0, 0,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
- FunctionLiteral::kShouldEagerCompile, scope->start_position(), true,
+ FunctionLiteral::kShouldEagerCompile, scope->start_position(), false,
GetNextFunctionLiteralId());
}
@@ -3285,13 +3198,15 @@ Expression* Parser::RewriteClassLiteral(Scope* block_scope,
FunctionLiteral* static_fields_initializer = nullptr;
if (class_info->has_static_class_fields) {
static_fields_initializer = CreateInitializerFunction(
- class_info->static_fields_scope, class_info->static_fields);
+ "<static_fields_initializer>", class_info->static_fields_scope,
+ class_info->static_fields);
}
FunctionLiteral* instance_fields_initializer_function = nullptr;
if (class_info->has_instance_class_fields) {
instance_fields_initializer_function = CreateInitializerFunction(
- class_info->instance_fields_scope, class_info->instance_fields);
+ "<instance_fields_initializer>", class_info->instance_fields_scope,
+ class_info->instance_fields);
class_info->constructor->set_requires_instance_fields_initializer(true);
}
@@ -3460,7 +3375,6 @@ void Parser::ParseOnBackground(ParseInfo* info) {
// position set at the end of the script (the top scope and possible eval
// scopes) and set their end position after we know the script length.
if (info->is_toplevel()) {
- fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
result = DoParseProgram(/* isolate = */ nullptr, info);
} else {
result =
@@ -3647,10 +3561,9 @@ void Parser::RewriteAsyncFunctionBody(ZonePtrList<Statement>* body,
// })
// }
- return_value = BuildResolvePromise(return_value, return_value->position());
- block->statements()->Add(
- factory()->NewReturnStatement(return_value, return_value->position()),
- zone());
+ block->statements()->Add(factory()->NewAsyncReturnStatement(
+ return_value, return_value->position()),
+ zone());
block = BuildRejectPromiseOnException(block);
body->Add(block, zone());
}
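
Async function bodies previously desugared their return value through BuildResolvePromise, i.e. an inline %ResolvePromise call followed by a reference to .promise; that helper is deleted earlier in this file's diff. The new AsyncReturnStatement node defers that plumbing to a later stage of the pipeline (presumably bytecode generation):

    // Before: return (%ResolvePromise(.promise, value), .promise);
    // After:  AsyncReturnStatement(value)  // resolution emitted downstream
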
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 00e73f37a2..35de0656d3 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -11,11 +11,11 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/compiler-specific.h"
+#include "src/base/threaded-list.h"
#include "src/globals.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/parsing.h"
#include "src/parsing/preparser.h"
-#include "src/utils.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -31,7 +31,7 @@ class ParserTargetScope;
class PendingCompilationErrorHandler;
class PreParsedScopeData;
-class FunctionEntry BASE_EMBEDDED {
+class FunctionEntry {
public:
enum {
kStartPositionIndex,
@@ -109,7 +109,7 @@ struct ParserFormalParameters : FormalParametersBase {
explicit ParserFormalParameters(DeclarationScope* scope)
: FormalParametersBase(scope) {}
- ThreadedList<Parameter> params;
+ base::ThreadedList<Parameter> params;
};
template <>
@@ -135,12 +135,17 @@ struct ParserTypes<Parser> {
typedef v8::internal::BreakableStatement* BreakableStatement;
typedef v8::internal::ForStatement* ForStatement;
typedef v8::internal::IterationStatement* IterationStatement;
+ typedef v8::internal::FuncNameInferrer FuncNameInferrer;
+ typedef v8::internal::SourceRange SourceRange;
+ typedef v8::internal::SourceRangeScope SourceRangeScope;
// For constructing objects returned by the traversing functions.
typedef AstNodeFactory Factory;
typedef ParserTarget Target;
typedef ParserTargetScope TargetScope;
+
+ static constexpr bool ExpressionClassifierReportErrors = true;
};
class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
@@ -155,6 +160,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void ParseOnBackground(ParseInfo* info);
+ // Initializes an empty scope chain for top-level scripts, or scopes which
+ // consist of only the native context.
+ void InitializeEmptyScopeChain(ParseInfo* info);
+
// Deserialize the scope chain prior to parsing in which the script is going
// to be executed. If the script is a top-level script, or the scope chain
// consists of only a native context, maybe_outer_scope_info should be an
@@ -172,7 +181,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
private:
friend class ParserBase<Parser>;
- friend class v8::internal::ExpressionClassifier<ParserTypes<Parser>>;
+ friend class v8::internal::ExpressionClassifierErrorTracker<
+ ParserTypes<Parser>>;
friend bool v8::internal::parsing::ParseProgram(ParseInfo*, Isolate*);
friend bool v8::internal::parsing::ParseFunction(
ParseInfo*, Handle<SharedFunctionInfo> shared_info, Isolate*);
@@ -185,7 +195,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
bool parse_lazily() const { return mode_ == PARSE_LAZILY; }
enum Mode { PARSE_LAZILY, PARSE_EAGERLY };
- class ParsingModeScope BASE_EMBEDDED {
+ class ParsingModeScope {
public:
ParsingModeScope(Parser* parser, Mode mode)
: parser_(parser), old_mode_(parser->mode_) {
@@ -233,14 +243,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ParseInfo* info,
Zone* zone);
- void StitchAst(ParseInfo* top_level_parse_info, Isolate* isolate);
-
PreParser* reusable_preparser() {
if (reusable_preparser_ == nullptr) {
- reusable_preparser_ =
- new PreParser(zone(), &scanner_, stack_limit_, ast_value_factory(),
- pending_error_handler(), runtime_call_stats_, logger_,
- -1, parsing_module_, parsing_on_main_thread_);
+ reusable_preparser_ = new PreParser(
+ &preparser_zone_, &scanner_, stack_limit_, ast_value_factory(),
+ pending_error_handler(), runtime_call_stats_, logger_, -1,
+ parsing_module_, parsing_on_main_thread_);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
SET_ALLOW(harmony_do_expressions);
@@ -248,7 +256,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
SET_ALLOW(harmony_static_fields);
SET_ALLOW(harmony_dynamic_import);
SET_ALLOW(harmony_import_meta);
- SET_ALLOW(harmony_bigint);
SET_ALLOW(harmony_private_fields);
SET_ALLOW(eval_cache);
#undef SET_ALLOW
@@ -315,7 +322,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name,
bool* ok);
FunctionLiteral* CreateInitializerFunction(
- DeclarationScope* scope, ZonePtrList<ClassLiteral::Property>* fields);
+ const char* name, DeclarationScope* scope,
+ ZonePtrList<ClassLiteral::Property>* fields);
V8_INLINE Statement* DeclareClass(const AstRawString* variable_name,
Expression* value,
ZonePtrList<const AstRawString>* names,
@@ -442,12 +450,19 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// by parsing the function with PreParser. Consumes the ending }.
// If may_abort == true, the (pre-)parser may decide to abort skipping
// in order to force the function to be eagerly parsed, after all.
- LazyParsingResult SkipFunction(
- const AstRawString* function_name, FunctionKind kind,
- FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope, int* num_parameters,
- ProducedPreParsedScopeData** produced_preparsed_scope_data,
- bool is_inner_function, bool may_abort, bool* ok);
+ // If the preparser detects an error it cannot identify, it resets the
+ // scanner and preparser state to what they were before preparsing the
+ // function.
+ // SkipFunction returns true if it correctly parsed the function, including
+ // cases where an error was detected. It returns false if parsing had to be
+ // stopped or an error could not be identified correctly, meaning the caller
+ // needs to fully reparse; in that case the scanner and preparser state are
+ // reset.
+ bool SkipFunction(const AstRawString* function_name, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, int* num_parameters,
+ ProducedPreParsedScopeData** produced_preparsed_scope_data,
+ bool is_inner_function, bool may_abort,
+ FunctionLiteral::EagerCompileHint* hint, bool* ok);
Block* BuildParameterInitializationBlock(
const ParserFormalParameters& parameters, bool* ok);
@@ -532,7 +547,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* BuildInitialYield(int pos, FunctionKind kind);
Assignment* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
- Expression* BuildResolvePromise(Expression* value, int pos);
Expression* BuildRejectPromise(Expression* value, int pos);
Variable* PromiseVariable();
Variable* AsyncGeneratorAwaitVariable();
@@ -662,38 +676,30 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Functions for encapsulating the differences between parsing and preparsing;
// operations interleaved with the recursive descent.
V8_INLINE void PushLiteralName(const AstRawString* id) {
- DCHECK_NOT_NULL(fni_);
- fni_->PushLiteralName(id);
+ fni_.PushLiteralName(id);
}
V8_INLINE void PushVariableName(const AstRawString* id) {
- DCHECK_NOT_NULL(fni_);
- fni_->PushVariableName(id);
+ fni_.PushVariableName(id);
}
V8_INLINE void PushPropertyName(Expression* expression) {
- DCHECK_NOT_NULL(fni_);
if (expression->IsPropertyName()) {
- fni_->PushLiteralName(expression->AsLiteral()->AsRawPropertyName());
+ fni_.PushLiteralName(expression->AsLiteral()->AsRawPropertyName());
} else {
- fni_->PushLiteralName(ast_value_factory()->anonymous_function_string());
+ fni_.PushLiteralName(ast_value_factory()->anonymous_function_string());
}
}
V8_INLINE void PushEnclosingName(const AstRawString* name) {
- DCHECK_NOT_NULL(fni_);
- fni_->PushEnclosingName(name);
+ fni_.PushEnclosingName(name);
}
V8_INLINE void AddFunctionForNameInference(FunctionLiteral* func_to_infer) {
- DCHECK_NOT_NULL(fni_);
- fni_->AddFunction(func_to_infer);
+ fni_.AddFunction(func_to_infer);
}
- V8_INLINE void InferFunctionName() {
- DCHECK_NOT_NULL(fni_);
- fni_->Infer();
- }
+ V8_INLINE void InferFunctionName() { fni_.Infer(); }
// If we assign a function literal to a property we pretenure the
// literal so it can be added as a constant function property.
@@ -784,6 +790,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
arg, error_type);
}
+ // Dummy implementation. The parser should never have an unidentifiable
+ // error.
+ V8_INLINE void ReportUnidentifiableError() { UNREACHABLE(); }
+
void ReportMessageAt(Scanner::Location source_location,
MessageTemplate::Template message,
const AstRawString* arg,
@@ -856,14 +866,14 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
const AstRawString* name, int start_position,
InferName infer = InferName::kYes) {
if (infer == InferName::kYes) {
- fni_->PushVariableName(name);
+ fni_.PushVariableName(name);
}
return NewUnresolved(name, start_position);
}
V8_INLINE Expression* ExpressionFromString(int pos) {
const AstRawString* symbol = GetSymbol();
- fni_->PushLiteralName(symbol);
+ fni_.PushLiteralName(symbol);
return factory()->NewStringLiteral(symbol, pos);
}
@@ -891,18 +901,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
factory()->NewThrow(exception, pos), pos);
}
- V8_INLINE void AddParameterInitializationBlock(
- const ParserFormalParameters& parameters, ZonePtrList<Statement>* body,
- bool is_async, bool* ok) {
- if (parameters.is_simple) return;
- auto* init_block = BuildParameterInitializationBlock(parameters, ok);
- if (!*ok) return;
- if (is_async) {
- init_block = BuildRejectPromiseOnException(init_block);
- }
- body->Add(init_block, zone());
- }
-
V8_INLINE void AddFormalParameter(ParserFormalParameters* parameters,
Expression* pattern,
Expression* initializer,
@@ -923,7 +921,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE void DeclareFormalParameters(
DeclarationScope* scope,
- const ThreadedList<ParserFormalParameters::Parameter>& parameters,
+ const base::ThreadedList<ParserFormalParameters::Parameter>& parameters,
bool is_simple, bool* has_duplicate = nullptr) {
if (!is_simple) scope->SetHasNonSimpleParameters();
for (auto parameter : parameters) {
@@ -958,7 +956,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void SetFunctionNameFromIdentifierRef(Expression* value,
Expression* identifier);
- V8_INLINE ZoneVector<typename ExpressionClassifier::Error>*
+ V8_INLINE ZoneList<typename ExpressionClassifier::Error>*
GetReportedErrorList() const {
return function_state_->GetReportedErrorList();
}
@@ -1094,11 +1092,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
// Parser's private field members.
- friend class DiscardableZoneScope; // Uses reusable_preparser_.
- // FIXME(marja): Make reusable_preparser_ always use its own temp Zone (call
- // DeleteAll after each function), so this won't be needed.
+ friend class PreParserZoneScope; // Uses reusable_preparser().
Scanner scanner_;
+ Zone preparser_zone_;
PreParser* reusable_preparser_;
Mode mode_;
@@ -1131,7 +1128,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// 'continue' statement targets). Upon construction, a new target is
// added; it is removed upon destruction.
-class ParserTarget BASE_EMBEDDED {
+class ParserTarget {
public:
ParserTarget(ParserBase<Parser>* parser, BreakableStatement* statement)
: variable_(&parser->impl()->target_stack_),
@@ -1151,7 +1148,7 @@ class ParserTarget BASE_EMBEDDED {
ParserTarget* previous_;
};
-class ParserTargetScope BASE_EMBEDDED {
+class ParserTargetScope {
public:
explicit ParserTargetScope(ParserBase<Parser>* parser)
: variable_(&parser->impl()->target_stack_),
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index ed3231c151..4465670a8f 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -31,38 +31,32 @@ class PatternRewriter final : public AstVisitor<PatternRewriter> {
const Parser::DeclarationParsingResult::Declaration* declaration,
ZonePtrList<const AstRawString>* names, bool* ok);
- static void RewriteDestructuringAssignment(Parser* parser,
- RewritableExpression* to_rewrite,
- Scope* scope);
+ static Expression* RewriteDestructuringAssignment(Parser* parser,
+ Assignment* to_rewrite,
+ Scope* scope);
private:
- enum PatternContext { BINDING, ASSIGNMENT, ASSIGNMENT_ELEMENT };
-
- class AssignmentElementScope {
- public:
- explicit AssignmentElementScope(PatternRewriter* rewriter)
- : rewriter_(rewriter), context_(rewriter->context()) {
- if (context_ == ASSIGNMENT) rewriter->context_ = ASSIGNMENT_ELEMENT;
- }
- ~AssignmentElementScope() { rewriter_->context_ = context_; }
-
- private:
- PatternRewriter* const rewriter_;
- const PatternContext context_;
- };
-
- PatternRewriter(Scope* scope, Parser* parser, PatternContext context)
+ enum PatternContext : uint8_t { BINDING, ASSIGNMENT };
+
+ PatternRewriter(Scope* scope, Parser* parser, PatternContext context,
+ const DeclarationDescriptor* descriptor = nullptr,
+ ZonePtrList<const AstRawString>* names = nullptr,
+ int initializer_position = kNoSourcePosition,
+ int value_beg_position = kNoSourcePosition,
+ bool declares_parameter_containing_sloppy_eval = false)
: scope_(scope),
parser_(parser),
- context_(context),
- initializer_position_(kNoSourcePosition),
- value_beg_position_(kNoSourcePosition),
block_(nullptr),
- descriptor_(nullptr),
- names_(nullptr),
+ descriptor_(descriptor),
+ names_(names),
current_value_(nullptr),
- recursion_level_(0),
- ok_(nullptr) {}
+ ok_(nullptr),
+ initializer_position_(initializer_position),
+ value_beg_position_(value_beg_position),
+ context_(context),
+ declares_parameter_containing_sloppy_eval_(
+ declares_parameter_containing_sloppy_eval),
+ recursion_level_(0) {}
#define DECLARE_VISIT(type) void Visit##type(v8::internal::type* node);
// Visiting functions for AST nodes make this an AstVisitor.
@@ -80,16 +74,34 @@ class PatternRewriter final : public AstVisitor<PatternRewriter> {
current_value_ = old_value;
}
+ Expression* Rewrite(Assignment* assign) {
+ DCHECK_EQ(Token::ASSIGN, assign->op());
+
+ int pos = assign->position();
+ DCHECK_NULL(block_);
+ block_ = factory()->NewBlock(8, true);
+ Variable* temp = nullptr;
+ Expression* pattern = assign->target();
+ Expression* old_value = current_value_;
+ current_value_ = assign->value();
+ if (pattern->IsObjectLiteral()) {
+ VisitObjectLiteral(pattern->AsObjectLiteral(), &temp);
+ } else {
+ DCHECK(pattern->IsArrayLiteral());
+ VisitArrayLiteral(pattern->AsArrayLiteral(), &temp);
+ }
+ DCHECK_NOT_NULL(temp);
+ current_value_ = old_value;
+ return factory()->NewDoExpression(block_, temp, pos);
+ }
+
void VisitObjectLiteral(ObjectLiteral* node, Variable** temp_var);
void VisitArrayLiteral(ArrayLiteral* node, Variable** temp_var);
bool IsBindingContext() const { return context_ == BINDING; }
- bool IsAssignmentContext() const {
- return context_ == ASSIGNMENT || context_ == ASSIGNMENT_ELEMENT;
- }
+ bool IsAssignmentContext() const { return context_ == ASSIGNMENT; }
bool IsSubPattern() const { return recursion_level_ > 1; }
- bool DeclaresParameterContainingSloppyEval() const;
void RewriteParameterScopes(Expression* expr);
Variable* CreateTempVar(Expression* value = nullptr);
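
The new Rewrite entry point above collects the generated assignments into block_ and returns a DoExpression whose completion value is the temporary. Conceptually (a simplification; array patterns actually iterate rather than index):

    // Source (JavaScript):  [a, b] = expr;
    // Conceptual rewrite:   do { .tmp = expr; <assign a and b from .tmp>; }
    //                       with .tmp as the DoExpression's result.
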
@@ -103,15 +115,16 @@ class PatternRewriter final : public AstVisitor<PatternRewriter> {
Scope* const scope_;
Parser* const parser_;
- PatternContext context_;
- int initializer_position_;
- int value_beg_position_;
Block* block_;
const DeclarationDescriptor* descriptor_;
ZonePtrList<const AstRawString>* names_;
Expression* current_value_;
- int recursion_level_;
bool* ok_;
+ const int initializer_position_;
+ const int value_beg_position_;
+ PatternContext context_;
+ const bool declares_parameter_containing_sloppy_eval_ : 1;
+ int recursion_level_;
DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
};
@@ -125,15 +138,18 @@ void Parser::DeclareAndInitializeVariables(
}
void Parser::RewriteDestructuringAssignment(RewritableExpression* to_rewrite) {
- PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite, scope());
+ DCHECK(!to_rewrite->is_rewritten());
+ Assignment* assignment = to_rewrite->expression()->AsAssignment();
+ Expression* result = PatternRewriter::RewriteDestructuringAssignment(
+ this, assignment, scope());
+ to_rewrite->Rewrite(result);
}
Expression* Parser::RewriteDestructuringAssignment(Assignment* assignment) {
DCHECK_NOT_NULL(assignment);
DCHECK_EQ(Token::ASSIGN, assignment->op());
- auto to_rewrite = factory()->NewRewritableExpression(assignment, scope());
- RewriteDestructuringAssignment(to_rewrite);
- return to_rewrite->expression();
+ return PatternRewriter::RewriteDestructuringAssignment(this, assignment,
+ scope());
}
void PatternRewriter::DeclareAndInitializeVariables(
@@ -143,25 +159,26 @@ void PatternRewriter::DeclareAndInitializeVariables(
ZonePtrList<const AstRawString>* names, bool* ok) {
DCHECK(block->ignore_completion_value());
- PatternRewriter rewriter(declaration_descriptor->scope, parser, BINDING);
- rewriter.initializer_position_ = declaration->initializer_position;
- rewriter.value_beg_position_ = declaration->value_beg_position;
+ Scope* scope = declaration_descriptor->scope;
+ PatternRewriter rewriter(scope, parser, BINDING, declaration_descriptor,
+ names, declaration->initializer_position,
+ declaration->value_beg_position,
+ declaration_descriptor->declaration_kind ==
+ DeclarationDescriptor::PARAMETER &&
+ scope->is_block_scope());
rewriter.block_ = block;
- rewriter.descriptor_ = declaration_descriptor;
- rewriter.names_ = names;
rewriter.ok_ = ok;
rewriter.RecurseIntoSubpattern(declaration->pattern,
declaration->initializer);
}
-void PatternRewriter::RewriteDestructuringAssignment(
- Parser* parser, RewritableExpression* to_rewrite, Scope* scope) {
+Expression* PatternRewriter::RewriteDestructuringAssignment(
+ Parser* parser, Assignment* to_rewrite, Scope* scope) {
DCHECK(!scope->HasBeenRemoved());
- DCHECK(!to_rewrite->is_rewritten());
PatternRewriter rewriter(scope, parser, ASSIGNMENT);
- rewriter.RecurseIntoSubpattern(to_rewrite, nullptr);
+ return rewriter.Rewrite(to_rewrite);
}
void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
@@ -181,7 +198,16 @@ void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
DCHECK_NOT_NULL(descriptor_);
DCHECK_NOT_NULL(ok_);
- descriptor_->scope->RemoveUnresolved(pattern);
+ Scope* outer_function_scope = nullptr;
+ bool success;
+ if (declares_parameter_containing_sloppy_eval_) {
+ outer_function_scope = scope()->outer_scope();
+ success = outer_function_scope->RemoveUnresolved(pattern);
+ } else {
+ success = scope()->RemoveUnresolved(pattern);
+ }
+ USE(success);
+ DCHECK(success);
// Declare variable.
// Note that we *always* must treat the initial value via a separate init
@@ -192,15 +218,13 @@ void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// an initial value in the declaration (because they are initialized upon
// entering the function).
const AstRawString* name = pattern->raw_name();
- VariableProxy* proxy =
- factory()->NewVariableProxy(name, NORMAL_VARIABLE, pattern->position());
+ VariableProxy* proxy = pattern;
Declaration* declaration;
if (descriptor_->mode == VariableMode::kVar &&
- !descriptor_->scope->is_declaration_scope()) {
- DCHECK(descriptor_->scope->is_block_scope() ||
- descriptor_->scope->is_with_scope());
+ !scope()->is_declaration_scope()) {
+ DCHECK(scope()->is_block_scope() || scope()->is_with_scope());
declaration = factory()->NewNestedVariableDeclaration(
- proxy, descriptor_->scope, descriptor_->declaration_pos);
+ proxy, scope(), descriptor_->declaration_pos);
} else {
declaration =
factory()->NewVariableDeclaration(proxy, descriptor_->declaration_pos);
@@ -210,10 +234,6 @@ void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// a sloppy eval in a default parameter or function body, the parameter
// needs to be declared in the function's scope, not in the varblock
// scope which will be used for the initializer expression.
- Scope* outer_function_scope = nullptr;
- if (DeclaresParameterContainingSloppyEval()) {
- outer_function_scope = descriptor_->scope->outer_scope();
- }
Variable* var = parser_->Declare(
declaration, descriptor_->declaration_kind, descriptor_->mode,
Variable::DefaultInitializationFlag(descriptor_->mode), ok_,
@@ -224,12 +244,11 @@ void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
DCHECK_NE(initializer_position_, kNoSourcePosition);
var->set_initializer_position(initializer_position_);
- Scope* declaration_scope =
- outer_function_scope != nullptr
- ? outer_function_scope
- : (IsLexicalVariableMode(descriptor_->mode)
- ? descriptor_->scope
- : descriptor_->scope->GetDeclarationScope());
+ Scope* declaration_scope = outer_function_scope != nullptr
+ ? outer_function_scope
+ : (IsLexicalVariableMode(descriptor_->mode)
+ ? scope()
+ : scope()->GetDeclarationScope());
if (declaration_scope->num_var() > kMaxNumFunctionLocals) {
parser_->ReportMessage(MessageTemplate::kTooManyVariables);
*ok_ = false;
@@ -242,7 +261,7 @@ void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// If there's no initializer, we're done.
if (value == nullptr) return;
- Scope* var_init_scope = descriptor_->scope;
+ Scope* var_init_scope = scope();
Parser::MarkLoopVariableAsAssigned(var_init_scope, proxy->var(),
descriptor_->declaration_kind);
@@ -254,15 +273,15 @@ void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
//
// var v; v = x;
//
- // In particular, we need to re-lookup 'v' as it may be a different
- // 'v' than the 'v' in the declaration (e.g., if we are inside a
- // 'with' statement or 'catch' block). Global var declarations
- // also need special treatment.
-
- // For 'let' and 'const' declared variables the initialization always
- // assigns to the declared variable.
- // But for var declarations we need to do a new lookup.
- if (descriptor_->mode == VariableMode::kVar) {
+ // In particular, we need to re-lookup 'v' if it may be a different 'v' than
+ // the 'v' in the declaration (e.g., if we are inside a 'with' statement or
+ // 'catch' block).
+
+ // For 'let' and 'const' declared variables the initialization always assigns
+ // to the declared variable. But for var declarations that target a different
+ // scope we need to do a new lookup.
+ if (descriptor_->mode == VariableMode::kVar &&
+ var_init_scope != declaration_scope) {
proxy = var_init_scope->NewUnresolved(factory(), name);
} else {
DCHECK_NOT_NULL(proxy);
@@ -294,64 +313,13 @@ Variable* PatternRewriter::CreateTempVar(Expression* value) {
}
void PatternRewriter::VisitRewritableExpression(RewritableExpression* node) {
- if (!node->expression()->IsAssignment()) {
- // RewritableExpressions are also used for desugaring Spread, which is
- // orthogonal to PatternRewriter; just visit the underlying expression.
- DCHECK_EQ(AstNode::kArrayLiteral, node->expression()->node_type());
- return Visit(node->expression());
- } else if (context() != ASSIGNMENT) {
- // This is not a destructuring assignment. Mark the node as rewritten to
- // prevent redundant rewriting and visit the underlying expression.
- DCHECK(!node->is_rewritten());
- node->set_rewritten();
- return Visit(node->expression());
- }
-
+ DCHECK(node->expression()->IsAssignment());
+ // This is not a top-level destructuring assignment. Mark the node as
+ // rewritten to prevent redundant rewriting and visit the underlying
+ // expression.
DCHECK(!node->is_rewritten());
- DCHECK_EQ(ASSIGNMENT, context());
- Assignment* assign = node->expression()->AsAssignment();
- DCHECK_NOT_NULL(assign);
- DCHECK_EQ(Token::ASSIGN, assign->op());
-
- int pos = assign->position();
- Block* old_block = block_;
- block_ = factory()->NewBlock(8, true);
- Variable* temp = nullptr;
- Expression* pattern = assign->target();
- Expression* old_value = current_value_;
- current_value_ = assign->value();
- if (pattern->IsObjectLiteral()) {
- VisitObjectLiteral(pattern->AsObjectLiteral(), &temp);
- } else {
- DCHECK(pattern->IsArrayLiteral());
- VisitArrayLiteral(pattern->AsArrayLiteral(), &temp);
- }
- DCHECK_NOT_NULL(temp);
- current_value_ = old_value;
- Expression* expr = factory()->NewDoExpression(block_, temp, pos);
- node->Rewrite(expr);
- block_ = old_block;
- if (block_) {
- block_->statements()->Add(factory()->NewExpressionStatement(expr, pos),
- zone());
- }
-}
-
-bool PatternRewriter::DeclaresParameterContainingSloppyEval() const {
- // Need to check for a binding context to make sure we have a descriptor.
- if (IsBindingContext() &&
- // Only relevant for parameters.
- descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
- // And only when scope is a block scope;
- // without eval, it is a function scope.
- scope()->is_block_scope()) {
- DCHECK(scope()->is_declaration_scope());
- DCHECK(scope()->AsDeclarationScope()->calls_sloppy_eval());
- DCHECK(scope()->outer_scope()->is_function_scope());
- return true;
- }
-
- return false;
+ node->set_rewritten();
+ return Visit(node->expression());
}
// When an extra declaration scope needs to be inserted to account for
@@ -359,7 +327,7 @@ bool PatternRewriter::DeclaresParameterContainingSloppyEval() const {
// needs to be in that new inner scope which was added after initial
// parsing.
void PatternRewriter::RewriteParameterScopes(Expression* expr) {
- if (DeclaresParameterContainingSloppyEval()) {
+ if (declares_parameter_containing_sloppy_eval_) {
ReparentExpressionScope(parser_->stack_limit(), expr, scope());
}
}
@@ -428,7 +396,6 @@ void PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
kNoSourcePosition);
}
- AssignmentElementScope element_scope(this);
RecurseIntoSubpattern(property->value(), value);
}
}
@@ -557,10 +524,7 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
factory()->NewExpressionStatement(assignment, nopos), zone());
}
- {
- AssignmentElementScope element_scope(this);
- RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
- }
+ RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
{
// completion = kNormalCompletion;
@@ -709,10 +673,6 @@ void PatternRewriter::VisitAssignment(Assignment* node) {
// <pattern> = temp === undefined ? <init> : temp;
DCHECK_EQ(Token::ASSIGN, node->op());
- // Rewriting of Assignment nodes for destructuring assignment
- // is handled in VisitRewritableExpression().
- DCHECK_NE(ASSIGNMENT, context());
-
auto initializer = node->value();
auto value = initializer;
auto temp = CreateTempVar(current_value_);
diff --git a/deps/v8/src/parsing/preparsed-scope-data-impl.h b/deps/v8/src/parsing/preparsed-scope-data-impl.h
new file mode 100644
index 0000000000..e2d31c07d5
--- /dev/null
+++ b/deps/v8/src/parsing/preparsed-scope-data-impl.h
@@ -0,0 +1,259 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PREPARSED_SCOPE_DATA_IMPL_H_
+#define V8_PARSING_PREPARSED_SCOPE_DATA_IMPL_H_
+
+#include "src/parsing/preparsed-scope-data.h"
+
+#include "src/assert-scope.h"
+
+namespace v8 {
+namespace internal {
+
+// Classes which are internal to preparsed-scope-data.cc, but are exposed in
+// a header for tests.
+
+struct PreParsedScopeByteDataConstants {
+#ifdef DEBUG
+ static constexpr int kMagicValue = 0xC0DE0DE;
+
+ static constexpr size_t kUint32Size = 5;
+ static constexpr size_t kUint8Size = 2;
+ static constexpr size_t kQuarterMarker = 0;
+ static constexpr size_t kPlaceholderSize = kUint32Size;
+#else
+ static constexpr size_t kUint32Size = 4;
+ static constexpr size_t kUint8Size = 1;
+ static constexpr size_t kPlaceholderSize = 0;
+#endif
+
+ static const size_t kSkippableFunctionDataSize =
+ 4 * kUint32Size + 1 * kUint8Size;
+};
+
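
The DEBUG sizes above (5 bytes per uint32, 2 per uint8) imply that debug builds prefix every encoded value with a one-byte tag that the readers further down DCHECK against. A hedged writer-side sketch of that framing (byte order and helper name are illustrative, not V8's actual writer):

    #include <cstdint>
    #include <vector>

    // Illustrative writer for the debug framing: one tag byte (the size
    // class) followed by the payload, matching the DCHECK in ReadUint32.
    void WriteUint32Tagged(std::vector<uint8_t>* out, uint32_t value) {
    #ifdef DEBUG
      out->push_back(5);  // kUint32Size: tag byte + 4 payload bytes
    #endif
      for (int shift = 0; shift < 32; shift += 8) {
        out->push_back(static_cast<uint8_t>(value >> shift));
      }
    }
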
+class PreParsedScopeDataBuilder::ByteData
+ : public ZoneObject,
+ public PreParsedScopeByteDataConstants {
+ public:
+ explicit ByteData(Zone* zone)
+ : backing_store_(zone), free_quarters_in_last_byte_(0) {}
+
+ void WriteUint32(uint32_t data);
+ void WriteUint8(uint8_t data);
+ void WriteQuarter(uint8_t data);
+
+#ifdef DEBUG
+ // For overwriting previously written data at position 0.
+ void OverwriteFirstUint32(uint32_t data);
+#endif
+
+ Handle<PodArray<uint8_t>> Serialize(Isolate* isolate);
+
+ size_t size() const { return backing_store_.size(); }
+
+ ZoneChunkList<uint8_t>::iterator begin() { return backing_store_.begin(); }
+
+ ZoneChunkList<uint8_t>::iterator end() { return backing_store_.end(); }
+
+ private:
+ ZoneChunkList<uint8_t> backing_store_;
+ uint8_t free_quarters_in_last_byte_;
+};
+
+template <class Data>
+class BaseConsumedPreParsedScopeData : public ConsumedPreParsedScopeData {
+ public:
+ class ByteData : public PreParsedScopeByteDataConstants {
+ public:
+ ByteData()
+ : data_(nullptr), index_(0), stored_quarters_(0), stored_byte_(0) {}
+
+ // Reading from the ByteData is only allowed when a ReadingScope is on the
+ // stack. This ensures that we have a DisallowHeapAllocation in place
+ // whenever ByteData holds a raw pointer into the heap.
+ class ReadingScope {
+ public:
+ ReadingScope(ByteData* consumed_data, Data* data)
+ : consumed_data_(consumed_data) {
+ consumed_data->data_ = data;
+ }
+ explicit ReadingScope(BaseConsumedPreParsedScopeData<Data>* parent)
+ : ReadingScope(parent->scope_data_.get(), parent->GetScopeData()) {}
+ ~ReadingScope() { consumed_data_->data_ = nullptr; }
+
+ private:
+ ByteData* consumed_data_;
+ DisallowHeapAllocation no_gc;
+ };
+
+ void SetPosition(int position) { index_ = position; }
+
+ size_t RemainingBytes() const {
+ DCHECK_NOT_NULL(data_);
+ return data_->length() - index_;
+ }
+
+ int32_t ReadUint32() {
+ DCHECK_NOT_NULL(data_);
+ DCHECK_GE(RemainingBytes(), kUint32Size);
+#ifdef DEBUG
+ // Check that there indeed is an integer following.
+ DCHECK_EQ(data_->get(index_++), kUint32Size);
+#endif
+ int32_t result = 0;
+ byte* p = reinterpret_cast<byte*>(&result);
+ for (int i = 0; i < 4; ++i) {
+ *p++ = data_->get(index_++);
+ }
+ stored_quarters_ = 0;
+ return result;
+ }
+
+ uint8_t ReadUint8() {
+ DCHECK_NOT_NULL(data_);
+ DCHECK_GE(RemainingBytes(), kUint8Size);
+#ifdef DEBUG
+ // Check that there indeed is a byte following.
+ DCHECK_EQ(data_->get(index_++), kUint8Size);
+#endif
+ stored_quarters_ = 0;
+ return data_->get(index_++);
+ }
+
+ uint8_t ReadQuarter() {
+ DCHECK_NOT_NULL(data_);
+ if (stored_quarters_ == 0) {
+ DCHECK_GE(RemainingBytes(), kUint8Size);
+#ifdef DEBUG
+ // Check that there indeed are quarters following.
+ DCHECK_EQ(data_->get(index_++), kQuarterMarker);
+#endif
+ stored_byte_ = data_->get(index_++);
+ stored_quarters_ = 4;
+ }
+ // Read the first 2 bits from stored_byte_.
+ uint8_t result = (stored_byte_ >> 6) & 3;
+ DCHECK_LE(result, 3);
+ --stored_quarters_;
+ stored_byte_ <<= 2;
+ return result;
+ }
+
+ private:
+ Data* data_;
+ int index_;
+ uint8_t stored_quarters_;
+ uint8_t stored_byte_;
+ };
+
+ BaseConsumedPreParsedScopeData()
+ : scope_data_(new ByteData()), child_index_(0) {}
+
+ virtual Data* GetScopeData() = 0;
+
+ virtual ProducedPreParsedScopeData* GetChildData(Zone* zone,
+ int child_index) = 0;
+
+ ProducedPreParsedScopeData* GetDataForSkippableFunction(
+ Zone* zone, int start_position, int* end_position, int* num_parameters,
+ int* num_inner_functions, bool* uses_super_property,
+ LanguageMode* language_mode) final;
+
+ void RestoreScopeAllocationData(DeclarationScope* scope) final;
+
+#ifdef DEBUG
+ void VerifyDataStart();
+#endif
+
+ private:
+ void RestoreData(Scope* scope);
+ void RestoreDataForVariable(Variable* var);
+ void RestoreDataForInnerScopes(Scope* scope);
+
+ std::unique_ptr<ByteData> scope_data_;
+ // When consuming the data, this index points to the data we're going to
+ // consume next.
+ int child_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(BaseConsumedPreParsedScopeData);
+};
+
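
ReadQuarter above consumes one 2-bit value at a time from a shared byte, most significant pair first. A self-contained sketch of the packing it assumes:

    #include <cstdint>

    // Pack four values in [0, 3] into one byte, most significant pair
    // first, mirroring ReadQuarter's "(stored_byte_ >> 6) & 3" and "<<= 2".
    inline uint8_t PackQuarters(uint8_t q0, uint8_t q1, uint8_t q2,
                                uint8_t q3) {
      return static_cast<uint8_t>((q0 << 6) | (q1 << 4) | (q2 << 2) | q3);
    }

    // Unpack in the same order the reader uses.
    inline uint8_t TakeQuarter(uint8_t* stored_byte) {
      uint8_t result = (*stored_byte >> 6) & 3;
      *stored_byte <<= 2;
      return result;
    }
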
+// Implementation of ConsumedPreParsedScopeData for on-heap data.
+class OnHeapConsumedPreParsedScopeData final
+ : public BaseConsumedPreParsedScopeData<PodArray<uint8_t>> {
+ public:
+ OnHeapConsumedPreParsedScopeData(Isolate* isolate,
+ Handle<PreParsedScopeData> data);
+
+ PodArray<uint8_t>* GetScopeData() final;
+ ProducedPreParsedScopeData* GetChildData(Zone* zone, int child_index) final;
+
+ private:
+ Isolate* isolate_;
+ Handle<PreParsedScopeData> data_;
+};
+
+// Wraps a ZoneVector<uint8_t> to have functions named the same as
+// PodArray<uint8_t>.
+class ZoneVectorWrapper {
+ public:
+ explicit ZoneVectorWrapper(ZoneVector<uint8_t>* data) : data_(data) {}
+
+ int length() const { return static_cast<int>(data_->size()); }
+
+ uint8_t get(int index) const { return data_->at(index); }
+
+ private:
+ ZoneVector<uint8_t>* data_;
+
+ DISALLOW_COPY_AND_ASSIGN(ZoneVectorWrapper);
+};
+
+// A serialized PreParsedScopeData in zone memory (as opposed to being on-heap).
+class ZonePreParsedScopeData : public ZoneObject {
+ public:
+ ZonePreParsedScopeData(Zone* zone,
+ ZoneChunkList<uint8_t>::iterator byte_data_begin,
+ ZoneChunkList<uint8_t>::iterator byte_data_end,
+ int child_length);
+
+ Handle<PreParsedScopeData> Serialize(Isolate* isolate);
+
+ int child_length() const { return static_cast<int>(children_.size()); }
+
+ ZonePreParsedScopeData* get_child(int index) { return children_[index]; }
+
+ void set_child(int index, ZonePreParsedScopeData* child) {
+ children_[index] = child;
+ }
+
+ ZoneVector<uint8_t>* byte_data() { return &byte_data_; }
+
+ private:
+ ZoneVector<uint8_t> byte_data_;
+ ZoneVector<ZonePreParsedScopeData*> children_;
+
+ DISALLOW_COPY_AND_ASSIGN(ZonePreParsedScopeData);
+};
+
+// Implementation of ConsumedPreParsedScopeData for PreParsedScopeData
+// serialized into zone memory.
+class ZoneConsumedPreParsedScopeData final
+ : public BaseConsumedPreParsedScopeData<ZoneVectorWrapper> {
+ public:
+ ZoneConsumedPreParsedScopeData(Zone* zone, ZonePreParsedScopeData* data);
+
+ ZoneVectorWrapper* GetScopeData() final;
+ ProducedPreParsedScopeData* GetChildData(Zone* zone, int child_index) final;
+
+ private:
+ ZonePreParsedScopeData* data_;
+ ZoneVectorWrapper scope_data_wrapper_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_PREPARSED_SCOPE_DATA_IMPL_H_
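The ReadUint32/ReadUint8/ReadQuarter methods above decode the preparse byte stream: 32-bit values occupy four little-endian bytes, and 2-bit "quarters" are packed four to a byte, consumed from the most significant bits downwards. A minimal self-contained sketch of the release-mode decoding (no debug size tags; QuarterReader is an illustrative name, not a V8 class):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Release-mode layout: a uint32 is 4 raw little-endian bytes; quarters
    // are 2-bit values packed four per byte, highest bits consumed first.
    class QuarterReader {
     public:
      explicit QuarterReader(std::vector<uint8_t> bytes)
          : bytes_(std::move(bytes)) {}

      uint32_t ReadUint32() {
        assert(index_ + 4 <= bytes_.size());
        uint32_t result;
        std::memcpy(&result, bytes_.data() + index_, 4);
        index_ += 4;
        stored_quarters_ = 0;  // Abandon any partially consumed quarter byte.
        return result;
      }

      uint8_t ReadQuarter() {
        if (stored_quarters_ == 0) {
          assert(index_ < bytes_.size());
          stored_byte_ = bytes_[index_++];
          stored_quarters_ = 4;
        }
        uint8_t result = (stored_byte_ >> 6) & 3;  // Top two bits first.
        stored_byte_ <<= 2;
        --stored_quarters_;
        return result;
      }

     private:
      std::vector<uint8_t> bytes_;
      std::size_t index_ = 0;
      uint8_t stored_quarters_ = 0;
      uint8_t stored_byte_ = 0;
    };

For example, a stored byte 0xC2 (binary 11000010) yields the quarters 3, 0, 0, 2 in that order, mirroring the shift-left-by-two loop in ReadQuarter above.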
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index 90e8819e32..9d61740753 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -4,11 +4,14 @@
#include "src/parsing/preparsed-scope-data.h"
+#include <vector>
+
#include "src/ast/scopes.h"
#include "src/ast/variables.h"
#include "src/handles.h"
#include "src/objects-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/parsing/preparsed-scope-data-impl.h"
#include "src/parsing/preparser.h"
namespace v8 {
@@ -24,22 +27,6 @@ class VariableMaybeAssignedField : public BitField8<bool, 0, 1> {};
class VariableContextAllocatedField
: public BitField8<bool, VariableMaybeAssignedField::kNext, 1> {};
-
-#ifdef DEBUG
-const int kMagicValue = 0xC0DE0DE;
-
-const size_t kUint32Size = 5;
-const size_t kUint8Size = 2;
-const size_t kQuarterMarker = 0;
-const size_t kPlaceholderSize = kUint32Size;
-#else
-const size_t kUint32Size = 4;
-const size_t kUint8Size = 1;
-const size_t kPlaceholderSize = 0;
-#endif
-
-const size_t kSkippableFunctionDataSize = 4 * kUint32Size + 1 * kUint8Size;
-
class LanguageField : public BitField8<LanguageMode, 0, 1> {};
class UsesSuperField : public BitField8<bool, LanguageField::kNext, 1> {};
STATIC_ASSERT(LanguageModeSize <= LanguageField::kNumValues);
@@ -48,7 +35,7 @@ STATIC_ASSERT(LanguageModeSize <= LanguageField::kNumValues);
/*
- Internal data format for the backing store of ProducedPreparsedScopeData and
+ Internal data format for the backing store of PreParsedScopeDataBuilder and
PreParsedScopeData::scope_data (on the heap):
(Skippable function data:)
@@ -91,7 +78,7 @@ STATIC_ASSERT(LanguageModeSize <= LanguageField::kNumValues);
*/
-void ProducedPreParsedScopeData::ByteData::WriteUint32(uint32_t data) {
+void PreParsedScopeDataBuilder::ByteData::WriteUint32(uint32_t data) {
#ifdef DEBUG
// Save expected item size in debug mode.
backing_store_.push_back(kUint32Size);
@@ -104,7 +91,7 @@ void ProducedPreParsedScopeData::ByteData::WriteUint32(uint32_t data) {
}
#ifdef DEBUG
-void ProducedPreParsedScopeData::ByteData::OverwriteFirstUint32(uint32_t data) {
+void PreParsedScopeDataBuilder::ByteData::OverwriteFirstUint32(uint32_t data) {
auto it = backing_store_.begin();
// Check that that position already holds an item of the expected size.
DCHECK_GE(backing_store_.size(), kUint32Size);
@@ -117,7 +104,7 @@ void ProducedPreParsedScopeData::ByteData::OverwriteFirstUint32(uint32_t data) {
}
#endif
-void ProducedPreParsedScopeData::ByteData::WriteUint8(uint8_t data) {
+void PreParsedScopeDataBuilder::ByteData::WriteUint8(uint8_t data) {
#ifdef DEBUG
// Save expected item size in debug mode.
backing_store_.push_back(kUint8Size);
@@ -126,7 +113,7 @@ void ProducedPreParsedScopeData::ByteData::WriteUint8(uint8_t data) {
free_quarters_in_last_byte_ = 0;
}
-void ProducedPreParsedScopeData::ByteData::WriteQuarter(uint8_t data) {
+void PreParsedScopeDataBuilder::ByteData::WriteQuarter(uint8_t data) {
DCHECK_LE(data, 3);
if (free_quarters_in_last_byte_ == 0) {
#ifdef DEBUG
@@ -144,7 +131,7 @@ void ProducedPreParsedScopeData::ByteData::WriteQuarter(uint8_t data) {
backing_store_.back() |= (data << shift_amount);
}
-Handle<PodArray<uint8_t>> ProducedPreParsedScopeData::ByteData::Serialize(
+Handle<PodArray<uint8_t>> PreParsedScopeDataBuilder::ByteData::Serialize(
Isolate* isolate) {
Handle<PodArray<uint8_t>> array = PodArray<uint8_t>::New(
isolate, static_cast<int>(backing_store_.size()), TENURED);
@@ -159,12 +146,13 @@ Handle<PodArray<uint8_t>> ProducedPreParsedScopeData::ByteData::Serialize(
return array;
}
-ProducedPreParsedScopeData::ProducedPreParsedScopeData(
- Zone* zone, ProducedPreParsedScopeData* parent)
+PreParsedScopeDataBuilder::PreParsedScopeDataBuilder(
+ Zone* zone, PreParsedScopeDataBuilder* parent)
: parent_(parent),
byte_data_(new (zone) ByteData(zone)),
data_for_inner_functions_(zone),
bailed_out_(false) {
+ DCHECK(FLAG_preparser_scope_analysis);
if (parent != nullptr) {
parent->data_for_inner_functions_.push_back(this);
}
@@ -174,59 +162,43 @@ ProducedPreParsedScopeData::ProducedPreParsedScopeData(
#endif
}
-// Create a ProducedPreParsedScopeData which is just a proxy for a previous
-// produced PreParsedScopeData.
-ProducedPreParsedScopeData::ProducedPreParsedScopeData(
- Handle<PreParsedScopeData> data, Zone* zone)
- : parent_(nullptr),
- byte_data_(nullptr),
- data_for_inner_functions_(zone),
- bailed_out_(false),
- previously_produced_preparsed_scope_data_(data) {}
-
-ProducedPreParsedScopeData::DataGatheringScope::DataGatheringScope(
+PreParsedScopeDataBuilder::DataGatheringScope::DataGatheringScope(
DeclarationScope* function_scope, PreParser* preparser)
: function_scope_(function_scope),
preparser_(preparser),
- produced_preparsed_scope_data_(nullptr) {
+ builder_(nullptr) {
if (FLAG_preparser_scope_analysis) {
- ProducedPreParsedScopeData* parent =
- preparser->produced_preparsed_scope_data();
+ PreParsedScopeDataBuilder* parent =
+ preparser->preparsed_scope_data_builder();
Zone* main_zone = preparser->main_zone();
- produced_preparsed_scope_data_ =
- new (main_zone) ProducedPreParsedScopeData(main_zone, parent);
- preparser->set_produced_preparsed_scope_data(
- produced_preparsed_scope_data_);
- function_scope->set_produced_preparsed_scope_data(
- produced_preparsed_scope_data_);
+ builder_ = new (main_zone) PreParsedScopeDataBuilder(main_zone, parent);
+ preparser->set_preparsed_scope_data_builder(builder_);
+ function_scope->set_preparsed_scope_data_builder(builder_);
}
}
-ProducedPreParsedScopeData::DataGatheringScope::~DataGatheringScope() {
- if (FLAG_preparser_scope_analysis) {
- preparser_->set_produced_preparsed_scope_data(
- produced_preparsed_scope_data_->parent_);
+PreParsedScopeDataBuilder::DataGatheringScope::~DataGatheringScope() {
+ if (builder_) {
+ preparser_->set_preparsed_scope_data_builder(builder_->parent_);
}
}
-void ProducedPreParsedScopeData::DataGatheringScope::MarkFunctionAsSkippable(
+void PreParsedScopeDataBuilder::DataGatheringScope::MarkFunctionAsSkippable(
int end_position, int num_inner_functions) {
- DCHECK(FLAG_preparser_scope_analysis);
- DCHECK_NOT_NULL(produced_preparsed_scope_data_);
- DCHECK_NOT_NULL(produced_preparsed_scope_data_->parent_);
- produced_preparsed_scope_data_->parent_->AddSkippableFunction(
+ DCHECK_NOT_NULL(builder_);
+ DCHECK_NOT_NULL(builder_->parent_);
+ builder_->parent_->AddSkippableFunction(
function_scope_->start_position(), end_position,
function_scope_->num_parameters(), num_inner_functions,
function_scope_->language_mode(), function_scope_->NeedsHomeObject());
}
-void ProducedPreParsedScopeData::AddSkippableFunction(
- int start_position, int end_position, int num_parameters,
- int num_inner_functions, LanguageMode language_mode,
- bool uses_super_property) {
- DCHECK(FLAG_preparser_scope_analysis);
- DCHECK(previously_produced_preparsed_scope_data_.is_null());
-
+void PreParsedScopeDataBuilder::AddSkippableFunction(int start_position,
+ int end_position,
+ int num_parameters,
+ int num_inner_functions,
+ LanguageMode language_mode,
+ bool uses_super_property) {
if (bailed_out_) {
return;
}
@@ -245,15 +217,14 @@ void ProducedPreParsedScopeData::AddSkippableFunction(
byte_data_->WriteQuarter(language_and_super);
}
-void ProducedPreParsedScopeData::SaveScopeAllocationData(
+void PreParsedScopeDataBuilder::SaveScopeAllocationData(
DeclarationScope* scope) {
- DCHECK(FLAG_preparser_scope_analysis);
- DCHECK(previously_produced_preparsed_scope_data_.is_null());
// The data contains a uint32 (reserved space for scope_data_start) and
// function data items, kSkippableFunctionDataSize each.
- DCHECK_GE(byte_data_->size(), kPlaceholderSize);
+ DCHECK_GE(byte_data_->size(), ByteData::kPlaceholderSize);
DCHECK_LE(byte_data_->size(), std::numeric_limits<uint32_t>::max());
- DCHECK_EQ(byte_data_->size() % kSkippableFunctionDataSize, kPlaceholderSize);
+ DCHECK_EQ(byte_data_->size() % ByteData::kSkippableFunctionDataSize,
+ ByteData::kPlaceholderSize);
if (bailed_out_) {
return;
@@ -262,7 +233,7 @@ void ProducedPreParsedScopeData::SaveScopeAllocationData(
uint32_t scope_data_start = static_cast<uint32_t>(byte_data_->size());
// If there are no skippable inner functions, we don't need to save anything.
- if (scope_data_start == kPlaceholderSize) {
+ if (scope_data_start == ByteData::kPlaceholderSize) {
return;
}
@@ -271,7 +242,7 @@ void ProducedPreParsedScopeData::SaveScopeAllocationData(
// For a data integrity check, write a value between data about skipped inner
// funcs and data about variables.
- byte_data_->WriteUint32(kMagicValue);
+ byte_data_->WriteUint32(ByteData::kMagicValue);
byte_data_->WriteUint32(scope->start_position());
byte_data_->WriteUint32(scope->end_position());
#endif
@@ -279,24 +250,19 @@ void ProducedPreParsedScopeData::SaveScopeAllocationData(
SaveDataForScope(scope);
}
-bool ProducedPreParsedScopeData::ContainsInnerFunctions() const {
- return byte_data_->size() > kPlaceholderSize;
+bool PreParsedScopeDataBuilder::ContainsInnerFunctions() const {
+ return byte_data_->size() > ByteData::kPlaceholderSize;
}
-MaybeHandle<PreParsedScopeData> ProducedPreParsedScopeData::Serialize(
+MaybeHandle<PreParsedScopeData> PreParsedScopeDataBuilder::Serialize(
Isolate* isolate) {
- if (!previously_produced_preparsed_scope_data_.is_null()) {
- DCHECK(!bailed_out_);
- DCHECK_EQ(data_for_inner_functions_.size(), 0);
- return previously_produced_preparsed_scope_data_;
- }
if (bailed_out_) {
return MaybeHandle<PreParsedScopeData>();
}
DCHECK(!ThisOrParentBailedOut());
- if (byte_data_->size() <= kPlaceholderSize) {
+ if (byte_data_->size() <= ByteData::kPlaceholderSize) {
// The data contains only the placeholder.
return MaybeHandle<PreParsedScopeData>();
}
@@ -322,7 +288,33 @@ MaybeHandle<PreParsedScopeData> ProducedPreParsedScopeData::Serialize(
return data;
}
-bool ProducedPreParsedScopeData::ScopeNeedsData(Scope* scope) {
+ZonePreParsedScopeData* PreParsedScopeDataBuilder::Serialize(Zone* zone) {
+ if (bailed_out_) {
+ return nullptr;
+ }
+
+ DCHECK(!ThisOrParentBailedOut());
+
+ if (byte_data_->size() <= ByteData::kPlaceholderSize) {
+ // The data contains only the placeholder.
+ return nullptr;
+ }
+
+ int child_length = static_cast<int>(data_for_inner_functions_.size());
+ ZonePreParsedScopeData* result = new (zone) ZonePreParsedScopeData(
+ zone, byte_data_->begin(), byte_data_->end(), child_length);
+
+ int i = 0;
+ for (const auto& item : data_for_inner_functions_) {
+ ZonePreParsedScopeData* child = item->Serialize(zone);
+ result->set_child(i, child);
+ i++;
+ }
+
+ return result;
+}
+
+bool PreParsedScopeDataBuilder::ScopeNeedsData(Scope* scope) {
if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
// Default constructors don't need data (they cannot contain inner functions
// defined by the user). Other functions do.
@@ -344,9 +336,9 @@ bool ProducedPreParsedScopeData::ScopeNeedsData(Scope* scope) {
return false;
}
-bool ProducedPreParsedScopeData::ScopeIsSkippableFunctionScope(Scope* scope) {
+bool PreParsedScopeDataBuilder::ScopeIsSkippableFunctionScope(Scope* scope) {
// Lazy non-arrow function scopes are skippable. Lazy functions are exactly
- // those Scopes which have their own ProducedPreParsedScopeData object. This
+ // those Scopes which have their own PreParsedScopeDataBuilder object. This
// logic ensures that the scope allocation data is consistent with the
// skippable function data (both agree on where the lazy function boundaries
// are).
@@ -355,10 +347,10 @@ bool ProducedPreParsedScopeData::ScopeIsSkippableFunctionScope(Scope* scope) {
}
DeclarationScope* declaration_scope = scope->AsDeclarationScope();
return !declaration_scope->is_arrow_scope() &&
- declaration_scope->produced_preparsed_scope_data() != nullptr;
+ declaration_scope->preparsed_scope_data_builder() != nullptr;
}
-void ProducedPreParsedScopeData::SaveDataForScope(Scope* scope) {
+void PreParsedScopeDataBuilder::SaveDataForScope(Scope* scope) {
DCHECK_NE(scope->end_position(), kNoSourcePosition);
if (!ScopeNeedsData(scope)) {
@@ -392,7 +384,7 @@ void ProducedPreParsedScopeData::SaveDataForScope(Scope* scope) {
SaveDataForInnerScopes(scope);
}
-void ProducedPreParsedScopeData::SaveDataForVariable(Variable* var) {
+void PreParsedScopeDataBuilder::SaveDataForVariable(Variable* var) {
#ifdef DEBUG
// Store the variable name in debug mode; this way we can check that we
// restore data to the correct variable.
@@ -410,7 +402,7 @@ void ProducedPreParsedScopeData::SaveDataForVariable(Variable* var) {
byte_data_->WriteQuarter(variable_data);
}
-void ProducedPreParsedScopeData::SaveDataForInnerScopes(Scope* scope) {
+void PreParsedScopeDataBuilder::SaveDataForInnerScopes(Scope* scope) {
// Inner scopes are stored in the reverse order, but we'd like to write the
// data in the logical order. There might be many inner scopes, so we don't
// want to recurse here.
@@ -419,9 +411,9 @@ void ProducedPreParsedScopeData::SaveDataForInnerScopes(Scope* scope) {
inner = inner->sibling()) {
if (ScopeIsSkippableFunctionScope(inner)) {
// Don't save data about function scopes, since they'll have their own
- // ProducedPreParsedScopeData where their data is saved.
+ // PreParsedScopeDataBuilder where their data is saved.
DCHECK_NOT_NULL(
- inner->AsDeclarationScope()->produced_preparsed_scope_data());
+ inner->AsDeclarationScope()->preparsed_scope_data_builder());
continue;
}
scopes.push_back(inner);
@@ -431,91 +423,83 @@ void ProducedPreParsedScopeData::SaveDataForInnerScopes(Scope* scope) {
}
}
-ConsumedPreParsedScopeData::ByteData::ReadingScope::ReadingScope(
- ConsumedPreParsedScopeData* parent)
- : ReadingScope(parent->scope_data_.get(), parent->data_->scope_data()) {}
+class BuilderProducedPreParsedScopeData final
+ : public ProducedPreParsedScopeData {
+ public:
+ explicit BuilderProducedPreParsedScopeData(PreParsedScopeDataBuilder* builder)
+ : builder_(builder) {}
-int32_t ConsumedPreParsedScopeData::ByteData::ReadUint32() {
- DCHECK_NOT_NULL(data_);
- DCHECK_GE(RemainingBytes(), kUint32Size);
-#ifdef DEBUG
- // Check that there indeed is an integer following.
- DCHECK_EQ(data_->get(index_++), kUint32Size);
-#endif
- int32_t result = 0;
- byte* p = reinterpret_cast<byte*>(&result);
- for (int i = 0; i < 4; ++i) {
- *p++ = data_->get(index_++);
+ MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate) final {
+ return builder_->Serialize(isolate);
}
- stored_quarters_ = 0;
- return result;
-}
-uint8_t ConsumedPreParsedScopeData::ByteData::ReadUint8() {
- DCHECK_NOT_NULL(data_);
- DCHECK_GE(RemainingBytes(), kUint8Size);
-#ifdef DEBUG
- // Check that there indeed is a byte following.
- DCHECK_EQ(data_->get(index_++), kUint8Size);
-#endif
- stored_quarters_ = 0;
- return data_->get(index_++);
-}
+ ZonePreParsedScopeData* Serialize(Zone* zone) final {
+ return builder_->Serialize(zone);
+ }
-uint8_t ConsumedPreParsedScopeData::ByteData::ReadQuarter() {
- DCHECK_NOT_NULL(data_);
- if (stored_quarters_ == 0) {
- DCHECK_GE(RemainingBytes(), kUint8Size);
-#ifdef DEBUG
- // Check that there indeed are quarters following.
- DCHECK_EQ(data_->get(index_++), kQuarterMarker);
-#endif
- stored_byte_ = data_->get(index_++);
- stored_quarters_ = 4;
- }
- // Read the first 2 bits from stored_byte_.
- uint8_t result = (stored_byte_ >> 6) & 3;
- DCHECK_LE(result, 3);
- --stored_quarters_;
- stored_byte_ <<= 2;
- return result;
-}
+ private:
+ PreParsedScopeDataBuilder* builder_;
+};
-size_t ConsumedPreParsedScopeData::ByteData::RemainingBytes() const {
- DCHECK_NOT_NULL(data_);
- return data_->length() - index_;
-}
+class OnHeapProducedPreParsedScopeData final
+ : public ProducedPreParsedScopeData {
+ public:
+ explicit OnHeapProducedPreParsedScopeData(Handle<PreParsedScopeData> data)
+ : data_(data) {}
-ConsumedPreParsedScopeData::ConsumedPreParsedScopeData()
- : isolate_(nullptr), scope_data_(new ByteData()), child_index_(0) {}
+ MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate) final {
+ return data_;
+ }
-ConsumedPreParsedScopeData::~ConsumedPreParsedScopeData() {}
+ ZonePreParsedScopeData* Serialize(Zone* zone) final {
+ // Not required.
+ UNREACHABLE();
+ }
-void ConsumedPreParsedScopeData::SetData(Isolate* isolate,
- Handle<PreParsedScopeData> data) {
- DCHECK_NOT_NULL(isolate);
- DCHECK(data->IsPreParsedScopeData());
- isolate_ = isolate;
- data_ = data;
-#ifdef DEBUG
- ByteData::ReadingScope reading_scope(this);
- int scope_data_start = scope_data_->ReadUint32();
- scope_data_->SetPosition(scope_data_start);
- DCHECK_EQ(scope_data_->ReadUint32(), kMagicValue);
- // The first data item is scope_data_start. Skip over it.
- scope_data_->SetPosition(kPlaceholderSize);
-#endif
+ private:
+ Handle<PreParsedScopeData> data_;
+};
+
+class ZoneProducedPreParsedScopeData final : public ProducedPreParsedScopeData {
+ public:
+ explicit ZoneProducedPreParsedScopeData(ZonePreParsedScopeData* data)
+ : data_(data) {}
+
+ MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate) final {
+ return data_->Serialize(isolate);
+ }
+
+ ZonePreParsedScopeData* Serialize(Zone* zone) final { return data_; }
+
+ private:
+ ZonePreParsedScopeData* data_;
+};
+
+ProducedPreParsedScopeData* ProducedPreParsedScopeData::For(
+ PreParsedScopeDataBuilder* builder, Zone* zone) {
+ return new (zone) BuilderProducedPreParsedScopeData(builder);
+}
+
+ProducedPreParsedScopeData* ProducedPreParsedScopeData::For(
+ Handle<PreParsedScopeData> data, Zone* zone) {
+ return new (zone) OnHeapProducedPreParsedScopeData(data);
+}
+
+ProducedPreParsedScopeData* ProducedPreParsedScopeData::For(
+ ZonePreParsedScopeData* data, Zone* zone) {
+ return new (zone) ZoneProducedPreParsedScopeData(data);
}
+template <class Data>
ProducedPreParsedScopeData*
-ConsumedPreParsedScopeData::GetDataForSkippableFunction(
+BaseConsumedPreParsedScopeData<Data>::GetDataForSkippableFunction(
Zone* zone, int start_position, int* end_position, int* num_parameters,
int* num_inner_functions, bool* uses_super_property,
LanguageMode* language_mode) {
// The skippable function *must* be the next function in the data. Use the
// start position as a sanity check.
- ByteData::ReadingScope reading_scope(this);
- CHECK_GE(scope_data_->RemainingBytes(), kSkippableFunctionDataSize);
+ typename ByteData::ReadingScope reading_scope(this);
+ CHECK_GE(scope_data_->RemainingBytes(), ByteData::kSkippableFunctionDataSize);
int start_position_from_data = scope_data_->ReadUint32();
CHECK_EQ(start_position, start_position_from_data);
@@ -531,28 +515,19 @@ ConsumedPreParsedScopeData::GetDataForSkippableFunction(
// Retrieve the corresponding PreParsedScopeData and associate it to the
// skipped function. If the skipped functions contains inner functions, those
// can be skipped when the skipped function is eagerly parsed.
- CHECK_GT(data_->length(), child_index_);
- Object* child_data = data_->child_data(child_index_++);
- if (!child_data->IsPreParsedScopeData()) {
- return nullptr;
- }
- Handle<PreParsedScopeData> child_data_handle(
- PreParsedScopeData::cast(child_data), isolate_);
- return new (zone) ProducedPreParsedScopeData(child_data_handle, zone);
+ return GetChildData(zone, child_index_++);
}
-void ConsumedPreParsedScopeData::RestoreScopeAllocationData(
+template <class Data>
+void BaseConsumedPreParsedScopeData<Data>::RestoreScopeAllocationData(
DeclarationScope* scope) {
- DCHECK(FLAG_preparser_scope_analysis);
DCHECK_EQ(scope->scope_type(), ScopeType::FUNCTION_SCOPE);
- DCHECK(!data_.is_null());
-
- ByteData::ReadingScope reading_scope(this);
+ typename ByteData::ReadingScope reading_scope(this);
#ifdef DEBUG
int magic_value_from_data = scope_data_->ReadUint32();
// Check that we've consumed all inner function data.
- DCHECK_EQ(magic_value_from_data, kMagicValue);
+ DCHECK_EQ(magic_value_from_data, ByteData::kMagicValue);
int start_position_from_data = scope_data_->ReadUint32();
int end_position_from_data = scope_data_->ReadUint32();
@@ -566,7 +541,8 @@ void ConsumedPreParsedScopeData::RestoreScopeAllocationData(
DCHECK_EQ(scope_data_->RemainingBytes(), 0);
}
-void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
+template <typename Data>
+void BaseConsumedPreParsedScopeData<Data>::RestoreData(Scope* scope) {
if (scope->is_declaration_scope() &&
scope->AsDeclarationScope()->is_skipped_function()) {
return;
@@ -575,18 +551,12 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
// It's possible that scope is not present in the data at all (since PreParser
// doesn't create the corresponding scope). In this case, the Scope won't
// contain any variables for which we need the data.
- if (!ProducedPreParsedScopeData::ScopeNeedsData(scope)) {
+ if (!PreParsedScopeDataBuilder::ScopeNeedsData(scope)) {
return;
}
- if (scope_data_->RemainingBytes() < kUint8Size) {
- // Temporary debugging code for detecting inconsistent data. Write debug
- // information on the stack, then crash.
- isolate_->PushStackTraceAndDie();
- }
-
// scope_type is stored only in debug mode.
- CHECK_GE(scope_data_->RemainingBytes(), kUint8Size);
+ CHECK_GE(scope_data_->RemainingBytes(), ByteData::kUint8Size);
DCHECK_EQ(scope_data_->ReadUint8(), scope->scope_type());
uint32_t eval = scope_data_->ReadUint8();
@@ -613,7 +583,9 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
RestoreDataForInnerScopes(scope);
}
-void ConsumedPreParsedScopeData::RestoreDataForVariable(Variable* var) {
+template <typename Data>
+void BaseConsumedPreParsedScopeData<Data>::RestoreDataForVariable(
+ Variable* var) {
#ifdef DEBUG
const AstRawString* name = var->raw_name();
bool data_one_byte = scope_data_->ReadUint8();
@@ -647,7 +619,9 @@ void ConsumedPreParsedScopeData::RestoreDataForVariable(Variable* var) {
}
}
-void ConsumedPreParsedScopeData::RestoreDataForInnerScopes(Scope* scope) {
+template <typename Data>
+void BaseConsumedPreParsedScopeData<Data>::RestoreDataForInnerScopes(
+ Scope* scope) {
std::vector<Scope*> scopes;
for (Scope* inner = scope->inner_scope(); inner != nullptr;
inner = inner->sibling()) {
@@ -658,5 +632,106 @@ void ConsumedPreParsedScopeData::RestoreDataForInnerScopes(Scope* scope) {
}
}
+#ifdef DEBUG
+template <class Data>
+void BaseConsumedPreParsedScopeData<Data>::VerifyDataStart() {
+ typename ByteData::ReadingScope reading_scope(this);
+ int scope_data_start = scope_data_->ReadUint32();
+ scope_data_->SetPosition(scope_data_start);
+ DCHECK_EQ(scope_data_->ReadUint32(), ByteData::kMagicValue);
+ // The first data item is scope_data_start. Skip over it.
+ scope_data_->SetPosition(ByteData::kPlaceholderSize);
+}
+#endif
+
+PodArray<uint8_t>* OnHeapConsumedPreParsedScopeData::GetScopeData() {
+ return data_->scope_data();
+}
+
+ProducedPreParsedScopeData* OnHeapConsumedPreParsedScopeData::GetChildData(
+ Zone* zone, int child_index) {
+ CHECK_GT(data_->length(), child_index);
+ Object* child_data = data_->child_data(child_index);
+ if (!child_data->IsPreParsedScopeData()) {
+ return nullptr;
+ }
+ Handle<PreParsedScopeData> child_data_handle(
+ PreParsedScopeData::cast(child_data), isolate_);
+ return ProducedPreParsedScopeData::For(child_data_handle, zone);
+}
+
+OnHeapConsumedPreParsedScopeData::OnHeapConsumedPreParsedScopeData(
+ Isolate* isolate, Handle<PreParsedScopeData> data)
+ : BaseConsumedPreParsedScopeData<PodArray<uint8_t>>(),
+ isolate_(isolate),
+ data_(data) {
+ DCHECK_NOT_NULL(isolate);
+ DCHECK(data->IsPreParsedScopeData());
+#ifdef DEBUG
+ VerifyDataStart();
+#endif
+}
+
+ZonePreParsedScopeData::ZonePreParsedScopeData(
+ Zone* zone, ZoneChunkList<uint8_t>::iterator byte_data_begin,
+ ZoneChunkList<uint8_t>::iterator byte_data_end, int child_length)
+ : byte_data_(byte_data_begin, byte_data_end, zone),
+ children_(child_length, zone) {}
+
+Handle<PreParsedScopeData> ZonePreParsedScopeData::Serialize(Isolate* isolate) {
+ int child_data_length = child_length();
+ Handle<PreParsedScopeData> result =
+ isolate->factory()->NewPreParsedScopeData(child_data_length);
+
+ Handle<PodArray<uint8_t>> scope_data_array = PodArray<uint8_t>::New(
+ isolate, static_cast<int>(byte_data()->size()), TENURED);
+ scope_data_array->copy_in(0, byte_data()->data(),
+ static_cast<int>(byte_data()->size()));
+ result->set_scope_data(*scope_data_array);
+
+ for (int i = 0; i < child_data_length; i++) {
+ ZonePreParsedScopeData* child = get_child(i);
+ if (child) {
+ Handle<PreParsedScopeData> child_data = child->Serialize(isolate);
+ result->set_child_data(i, *child_data);
+ }
+ }
+ return result;
+}
+
+ZoneConsumedPreParsedScopeData::ZoneConsumedPreParsedScopeData(
+ Zone* zone, ZonePreParsedScopeData* data)
+ : data_(data), scope_data_wrapper_(data_->byte_data()) {
+#ifdef DEBUG
+ VerifyDataStart();
+#endif
+}
+
+ZoneVectorWrapper* ZoneConsumedPreParsedScopeData::GetScopeData() {
+ return &scope_data_wrapper_;
+}
+
+ProducedPreParsedScopeData* ZoneConsumedPreParsedScopeData::GetChildData(
+ Zone* zone, int child_index) {
+ CHECK_GT(data_->child_length(), child_index);
+ ZonePreParsedScopeData* child_data = data_->get_child(child_index);
+ if (child_data == nullptr) {
+ return nullptr;
+ }
+ return ProducedPreParsedScopeData::For(child_data, zone);
+}
+
+std::unique_ptr<ConsumedPreParsedScopeData> ConsumedPreParsedScopeData::For(
+ Isolate* isolate, Handle<PreParsedScopeData> data) {
+ DCHECK(!data.is_null());
+ return base::make_unique<OnHeapConsumedPreParsedScopeData>(isolate, data);
+}
+
+std::unique_ptr<ConsumedPreParsedScopeData> ConsumedPreParsedScopeData::For(
+ Zone* zone, ZonePreParsedScopeData* data) {
+ if (data == nullptr) return {};
+ return base::make_unique<ZoneConsumedPreParsedScopeData>(zone, data);
+}
+
} // namespace internal
} // namespace v8
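For context on the ByteData::k* constants referenced above: in debug builds every serialized item is preceded by a one-byte tag identifying its kind (hence kUint32Size == 5, kUint8Size == 2, and a kQuarterMarker tag before each freshly started quarter byte), which is what the DCHECK_EQ(data_->get(index_++), ...) reads on the consuming side verify. A hedged sketch of the matching debug-mode write path (illustrative only; TaggedWriter is not the V8 ByteData class):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Debug-mode framing: every item carries a leading tag byte so a reader
    // that drifts out of sync fails fast on the very next read.
    class TaggedWriter {
     public:
      void WriteUint32(uint32_t data) {
        bytes_.push_back(5);  // kUint32Size tag: tag byte + 4 payload bytes.
        uint8_t raw[4];
        std::memcpy(raw, &data, 4);
        bytes_.insert(bytes_.end(), raw, raw + 4);
        free_quarters_ = 0;
      }

      void WriteUint8(uint8_t data) {
        bytes_.push_back(2);  // kUint8Size tag: tag byte + 1 payload byte.
        bytes_.push_back(data);
        free_quarters_ = 0;
      }

      void WriteQuarter(uint8_t data) {  // data must fit in 2 bits.
        if (free_quarters_ == 0) {
          bytes_.push_back(0);  // kQuarterMarker tag for a new quarter byte.
          bytes_.push_back(0);  // The byte that will hold up to 4 quarters.
          free_quarters_ = 4;
        }
        --free_quarters_;
        bytes_.back() |= data << (2 * free_quarters_);  // High bits first.
      }

      const std::vector<uint8_t>& bytes() const { return bytes_; }

     private:
      std::vector<uint8_t> bytes_;
      uint8_t free_quarters_ = 0;
    };

In release builds the tags disappear and the sizes collapse to 4, 1, and 0, which is exactly the #else branch of the constants removed above.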
diff --git a/deps/v8/src/parsing/preparsed-scope-data.h b/deps/v8/src/parsing/preparsed-scope-data.h
index 61d67291a4..25298c4331 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.h
+++ b/deps/v8/src/parsing/preparsed-scope-data.h
@@ -5,23 +5,21 @@
#ifndef V8_PARSING_PREPARSED_SCOPE_DATA_H_
#define V8_PARSING_PREPARSED_SCOPE_DATA_H_
-#include <set>
-#include <unordered_map>
-#include <vector>
-
#include "src/globals.h"
#include "src/handles.h"
-#include "src/objects/shared-function-info.h"
+#include "src/maybe-handles.h"
#include "src/zone/zone-chunk-list.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
template <typename T>
-class Handle;
+class PodArray;
class PreParser;
class PreParsedScopeData;
+class ZonePreParsedScopeData;
/*
@@ -64,40 +62,15 @@ class PreParsedScopeData;
*/
-class ProducedPreParsedScopeData : public ZoneObject {
+class PreParsedScopeDataBuilder : public ZoneObject {
public:
- class ByteData : public ZoneObject {
- public:
- explicit ByteData(Zone* zone)
- : backing_store_(zone), free_quarters_in_last_byte_(0) {}
-
- void WriteUint32(uint32_t data);
- void WriteUint8(uint8_t data);
- void WriteQuarter(uint8_t data);
-
-#ifdef DEBUG
- // For overwriting previously written data at position 0.
- void OverwriteFirstUint32(uint32_t data);
-#endif
-
- Handle<PodArray<uint8_t>> Serialize(Isolate* isolate);
-
- size_t size() const { return backing_store_.size(); }
+ class ByteData;
- private:
- ZoneChunkList<uint8_t> backing_store_;
- uint8_t free_quarters_in_last_byte_;
- };
-
- // Create a ProducedPreParsedScopeData object which will collect data as we
+ // Create a PreParsedScopeDataBuilder object which will collect data as we
// parse.
- ProducedPreParsedScopeData(Zone* zone, ProducedPreParsedScopeData* parent);
-
- // Create a ProducedPreParsedScopeData which is just a proxy for a previous
- // produced PreParsedScopeData.
- ProducedPreParsedScopeData(Handle<PreParsedScopeData> data, Zone* zone);
+ PreParsedScopeDataBuilder(Zone* zone, PreParsedScopeDataBuilder* parent);
- ProducedPreParsedScopeData* parent() const { return parent_; }
+ PreParsedScopeDataBuilder* parent() const { return parent_; }
// For gathering the inner function data and splitting it up according to the
// laziness boundaries. Each lazy function gets its own
@@ -112,7 +85,7 @@ class ProducedPreParsedScopeData : public ZoneObject {
private:
DeclarationScope* function_scope_;
PreParser* preparser_;
- ProducedPreParsedScopeData* produced_preparsed_scope_data_;
+ PreParsedScopeDataBuilder* builder_;
DISALLOW_COPY_AND_ASSIGN(DataGatheringScope);
};
@@ -148,15 +121,15 @@ class ProducedPreParsedScopeData : public ZoneObject {
bool ContainsInnerFunctions() const;
- // If there is data (if the Scope contains skippable inner functions), move
- // the data into the heap and return a Handle to it; otherwise return a null
- // MaybeHandle.
- MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate);
-
static bool ScopeNeedsData(Scope* scope);
static bool ScopeIsSkippableFunctionScope(Scope* scope);
private:
+ friend class BuilderProducedPreParsedScopeData;
+
+ virtual MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate);
+ virtual ZonePreParsedScopeData* Serialize(Zone* zone);
+
void AddSkippableFunction(int start_position, int end_position,
int num_parameters, int num_inner_functions,
LanguageMode language_mode,
@@ -166,88 +139,72 @@ class ProducedPreParsedScopeData : public ZoneObject {
void SaveDataForVariable(Variable* var);
void SaveDataForInnerScopes(Scope* scope);
- ProducedPreParsedScopeData* parent_;
+ PreParsedScopeDataBuilder* parent_;
ByteData* byte_data_;
- ZoneChunkList<ProducedPreParsedScopeData*> data_for_inner_functions_;
+ ZoneChunkList<PreParsedScopeDataBuilder*> data_for_inner_functions_;
// Whether we've given up producing the data for this function.
bool bailed_out_;
- // ProducedPreParsedScopeData can also mask a Handle<PreParsedScopeData>
- // which was produced already earlier. This happens for deeper lazy functions.
- Handle<PreParsedScopeData> previously_produced_preparsed_scope_data_;
+ DISALLOW_COPY_AND_ASSIGN(PreParsedScopeDataBuilder);
+};
- DISALLOW_COPY_AND_ASSIGN(ProducedPreParsedScopeData);
+class ProducedPreParsedScopeData : public ZoneObject {
+ public:
+ // If there is data (if the Scope contains skippable inner functions), move
+ // the data into the heap and return a Handle to it; otherwise return a null
+ // MaybeHandle.
+ virtual MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate) = 0;
+
+ // If there is data (if the Scope contains skippable inner functions), return
+ // an off-heap ZonePreParsedScopeData representing the data; otherwise
+ // return nullptr.
+ virtual ZonePreParsedScopeData* Serialize(Zone* zone) = 0;
+
+ // Create a ProducedPreParsedScopeData which is a proxy for a previous
+ // produced PreParsedScopeData in zone.
+ static ProducedPreParsedScopeData* For(PreParsedScopeDataBuilder* builder,
+ Zone* zone);
+
+ // Create a ProducedPreParsedScopeData which is a proxy for a previous
+ // produced PreParsedScopeData on the heap.
+ static ProducedPreParsedScopeData* For(Handle<PreParsedScopeData> data,
+ Zone* zone);
+
+ // Create a ProducedPreParsedScopeData which is a proxy for a previous
+ // produced PreParsedScopeData in zone.
+ static ProducedPreParsedScopeData* For(ZonePreParsedScopeData* data,
+ Zone* zone);
};
class ConsumedPreParsedScopeData {
public:
- class ByteData {
- public:
- ByteData()
- : data_(nullptr), index_(0), stored_quarters_(0), stored_byte_(0) {}
-
- // Reading from the ByteData is only allowed when a ReadingScope is on the
- // stack. This ensures that we have a DisallowHeapAllocation in place
- // whenever ByteData holds a raw pointer into the heap.
- class ReadingScope {
- public:
- ReadingScope(ByteData* consumed_data, PodArray<uint8_t>* data)
- : consumed_data_(consumed_data) {
- consumed_data->data_ = data;
- }
- explicit ReadingScope(ConsumedPreParsedScopeData* parent);
- ~ReadingScope() { consumed_data_->data_ = nullptr; }
-
- private:
- ByteData* consumed_data_;
- DisallowHeapAllocation no_gc;
- };
-
- void SetPosition(int position) { index_ = position; }
-
- int32_t ReadUint32();
- uint8_t ReadUint8();
- uint8_t ReadQuarter();
-
- size_t RemainingBytes() const;
-
- // private:
- PodArray<uint8_t>* data_;
- int index_;
- uint8_t stored_quarters_;
- uint8_t stored_byte_;
- };
-
- ConsumedPreParsedScopeData();
- ~ConsumedPreParsedScopeData();
+ // Creates a ConsumedPreParsedScopeData representing the data of an on-heap
+ // PreParsedScopeData |data|.
+ static std::unique_ptr<ConsumedPreParsedScopeData> For(
+ Isolate* isolate, Handle<PreParsedScopeData> data);
- void SetData(Isolate* isolate, Handle<PreParsedScopeData> data);
+ // Creates a ConsumedPreParsedScopeData representing the data of an off-heap
+ // ZonePreParsedScopeData |data|.
+ static std::unique_ptr<ConsumedPreParsedScopeData> For(
+ Zone* zone, ZonePreParsedScopeData* data);
- bool HasData() const { return !data_.is_null(); }
+ virtual ~ConsumedPreParsedScopeData() = default;
- ProducedPreParsedScopeData* GetDataForSkippableFunction(
+ virtual ProducedPreParsedScopeData* GetDataForSkippableFunction(
Zone* zone, int start_position, int* end_position, int* num_parameters,
int* num_inner_functions, bool* uses_super_property,
- LanguageMode* language_mode);
+ LanguageMode* language_mode) = 0;
// Restores the information needed for allocating the Scope's (and its
// subscopes') variables.
- void RestoreScopeAllocationData(DeclarationScope* scope);
+ virtual void RestoreScopeAllocationData(DeclarationScope* scope) = 0;
- private:
- void RestoreData(Scope* scope);
- void RestoreDataForVariable(Variable* var);
- void RestoreDataForInnerScopes(Scope* scope);
-
- Isolate* isolate_;
- Handle<PreParsedScopeData> data_;
- std::unique_ptr<ByteData> scope_data_;
- // When consuming the data, these indexes point to the data we're going to
- // consume next.
- int child_index_;
+ protected:
+ ConsumedPreParsedScopeData() = default;
+ private:
DISALLOW_COPY_AND_ASSIGN(ConsumedPreParsedScopeData);
};
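The net effect of this header change is that ConsumedPreParsedScopeData is now a pure interface: its concrete backends (on-heap vs. zone) are selected by the overloaded static For() factories and never appear in the public header, which is also what allows the template machinery to move into preparsed-scope-data-impl.h. A minimal self-contained sketch of that shape, with std::vector standing in for both backing stores (all names illustrative, not V8 types):

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <utility>
    #include <vector>

    // Public interface: pure virtuals plus factory overloads; the concrete
    // classes stay out of the header entirely.
    class Consumer {
     public:
      virtual ~Consumer() = default;
      virtual uint8_t ReadByte() = 0;
      static std::unique_ptr<Consumer> For(std::vector<uint8_t> owned_copy);
      static std::unique_ptr<Consumer> For(const std::vector<uint8_t>* borrowed);
    };

    namespace {
    class OwningConsumer final : public Consumer {
     public:
      explicit OwningConsumer(std::vector<uint8_t> data)
          : data_(std::move(data)) {}
      uint8_t ReadByte() override { return data_[index_++]; }
     private:
      std::vector<uint8_t> data_;
      std::size_t index_ = 0;
    };

    class BorrowingConsumer final : public Consumer {
     public:
      explicit BorrowingConsumer(const std::vector<uint8_t>* data)
          : data_(data) {}
      uint8_t ReadByte() override { return (*data_)[index_++]; }
     private:
      const std::vector<uint8_t>* data_;
      std::size_t index_ = 0;
    };
    }  // namespace

    std::unique_ptr<Consumer> Consumer::For(std::vector<uint8_t> owned_copy) {
      return std::make_unique<OwningConsumer>(std::move(owned_copy));
    }

    std::unique_ptr<Consumer> Consumer::For(const std::vector<uint8_t>* borrowed) {
      if (borrowed == nullptr) return {};  // Mirrors the nullptr check above.
      return std::make_unique<BorrowingConsumer>(borrowed);
    }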
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index d449c8d76b..0e74014542 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -123,22 +123,23 @@ PreParser::PreParseResult PreParser::PreParseFunction(
int script_id) {
DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
use_counts_ = use_counts;
- DCHECK(!track_unresolved_variables_);
- track_unresolved_variables_ = is_inner_function;
set_script_id(script_id);
#ifdef DEBUG
function_scope->set_is_being_lazily_parsed(true);
#endif
+ track_unresolved_variables_ =
+ ShouldTrackUnresolvedVariables(is_inner_function);
+
// Start collecting data for a new function which might contain skippable
// functions.
- std::unique_ptr<ProducedPreParsedScopeData::DataGatheringScope>
- produced_preparsed_scope_data_scope;
+ std::unique_ptr<PreParsedScopeDataBuilder::DataGatheringScope>
+ preparsed_scope_data_builder_scope;
if (FLAG_preparser_scope_analysis && !IsArrowFunction(kind)) {
- track_unresolved_variables_ = true;
- produced_preparsed_scope_data_scope.reset(
- new ProducedPreParsedScopeData::DataGatheringScope(function_scope,
- this));
+ DCHECK(track_unresolved_variables_);
+ preparsed_scope_data_builder_scope.reset(
+ new PreParsedScopeDataBuilder::DataGatheringScope(function_scope,
+ this));
}
// In the preparser, we use the function literal ids to count how many
@@ -166,7 +167,11 @@ PreParser::PreParseResult PreParser::PreParseFunction(
formals_classifier.reset(new ExpressionClassifier(this, &duplicate_finder));
// We return kPreParseSuccess in failure cases too - errors are retrieved
// separately by Parser::SkipLazyFunctionBody.
- ParseFormalParameterList(&formals, CHECK_OK_VALUE(kPreParseSuccess));
+ ParseFormalParameterList(
+ &formals,
+ CHECK_OK_VALUE(pending_error_handler()->ErrorUnidentifiableByPreParser()
+ ? kPreParseNotIdentifiableError
+ : kPreParseSuccess));
Expect(Token::RPAREN, CHECK_OK_VALUE(kPreParseSuccess));
int formals_end_position = scanner()->location().end_pos;
@@ -205,27 +210,22 @@ PreParser::PreParseResult PreParser::PreParseFunction(
}
}
- if (!IsArrowFunction(kind) && track_unresolved_variables_ &&
- result == kLazyParsingComplete) {
- // Declare arguments after parsing the function since lexical 'arguments'
- // masks the arguments object. Declare arguments before declaring the
- // function var since the arguments object masks 'function arguments'.
- function_scope->DeclareArguments(ast_value_factory());
-
- DeclareFunctionNameVar(function_name, function_type, function_scope);
- }
-
use_counts_ = nullptr;
- track_unresolved_variables_ = false;
if (result == kLazyParsingAborted) {
+ DCHECK(!pending_error_handler()->ErrorUnidentifiableByPreParser());
return kPreParseAbort;
} else if (stack_overflow()) {
+ DCHECK(!pending_error_handler()->ErrorUnidentifiableByPreParser());
return kPreParseStackOverflow;
+ } else if (pending_error_handler()->ErrorUnidentifiableByPreParser()) {
+ DCHECK(!*ok);
+ return kPreParseNotIdentifiableError;
} else if (!*ok) {
DCHECK(pending_error_handler()->has_pending_error());
} else {
DCHECK_EQ(Token::RBRACE, scanner()->peek());
+ DCHECK(result == kLazyParsingComplete);
if (!IsArrowFunction(kind)) {
// Validate parameter names. We can do this only after parsing the
@@ -234,17 +234,37 @@ PreParser::PreParseResult PreParser::PreParseFunction(
is_sloppy(function_scope->language_mode()) && formals.is_simple &&
!IsConciseMethod(kind);
ValidateFormalParameters(function_scope->language_mode(),
- allow_duplicate_parameters,
- CHECK_OK_VALUE(kPreParseSuccess));
+ allow_duplicate_parameters, ok);
+ if (!*ok) {
+ if (pending_error_handler()->ErrorUnidentifiableByPreParser()) {
+ return kPreParseNotIdentifiableError;
+ } else {
+ return kPreParseSuccess;
+ }
+ }
+
+ if (track_unresolved_variables_) {
+ // Declare arguments after parsing the function since lexical
+ // 'arguments' masks the arguments object. Declare arguments before
+ // declaring the function var since the arguments object masks 'function
+ // arguments'.
+ function_scope->DeclareArguments(ast_value_factory());
- *produced_preparsed_scope_data = produced_preparsed_scope_data_;
+ DeclareFunctionNameVar(function_name, function_type, function_scope);
+ }
+
+ *produced_preparsed_scope_data = ProducedPreParsedScopeData::For(
+ preparsed_scope_data_builder_, main_zone());
}
+ DCHECK(!pending_error_handler()->ErrorUnidentifiableByPreParser());
if (is_strict(function_scope->language_mode())) {
int end_pos = scanner()->location().end_pos;
CheckStrictOctalLiteral(function_scope->start_position(), end_pos, ok);
}
}
+
+ DCHECK(!pending_error_handler()->ErrorUnidentifiableByPreParser());
return kPreParseSuccess;
}
@@ -290,15 +310,15 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// Start collecting data for a new function which might contain skippable
// functions.
- std::unique_ptr<ProducedPreParsedScopeData::DataGatheringScope>
- produced_preparsed_scope_data_scope;
+ std::unique_ptr<PreParsedScopeDataBuilder::DataGatheringScope>
+ preparsed_scope_data_builder_scope;
if (!function_state_->next_function_is_likely_called() &&
- produced_preparsed_scope_data_ != nullptr) {
+ preparsed_scope_data_builder_ != nullptr) {
DCHECK(FLAG_preparser_scope_analysis);
DCHECK(track_unresolved_variables_);
- produced_preparsed_scope_data_scope.reset(
- new ProducedPreParsedScopeData::DataGatheringScope(function_scope,
- this));
+ preparsed_scope_data_builder_scope.reset(
+ new PreParsedScopeDataBuilder::DataGatheringScope(function_scope,
+ this));
}
FunctionState function_state(&function_state_, &scope_, function_scope);
@@ -324,7 +344,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
int pos = function_token_pos == kNoSourcePosition ? peek_position()
: function_token_pos;
ParseFunctionBody(body, function_name, pos, formals, kind, function_type,
- CHECK_OK);
+ FunctionBodyType::kBlock, true, CHECK_OK);
// Parsing the body may change the language mode in our scope.
language_mode = function_scope->language_mode();
@@ -346,8 +366,8 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
}
- if (produced_preparsed_scope_data_scope) {
- produced_preparsed_scope_data_scope->MarkFunctionAsSkippable(
+ if (preparsed_scope_data_builder_scope) {
+ preparsed_scope_data_builder_scope->MarkFunctionAsSkippable(
end_position, GetLastFunctionLiteralId() - func_id);
}
if (V8_UNLIKELY(FLAG_log_function_events)) {
@@ -394,19 +414,19 @@ PreParserStatement PreParser::BuildParameterInitializationBlock(
DCHECK(scope()->is_function_scope());
if (FLAG_preparser_scope_analysis &&
scope()->AsDeclarationScope()->calls_sloppy_eval() &&
- produced_preparsed_scope_data_ != nullptr) {
+ preparsed_scope_data_builder_ != nullptr) {
// We cannot replicate the Scope structure constructed by the Parser,
// because we've lost information whether each individual parameter was
// simple or not. Give up trying to produce data to skip inner functions.
- if (produced_preparsed_scope_data_->parent() != nullptr) {
+ if (preparsed_scope_data_builder_->parent() != nullptr) {
// Lazy parsing started before the current function; the function which
// cannot contain skippable functions is the parent function. (Its inner
// functions cannot either; they are implicitly bailed out.)
- produced_preparsed_scope_data_->parent()->Bailout();
+ preparsed_scope_data_builder_->parent()->Bailout();
} else {
// Lazy parsing started at the current function; it cannot contain
// skippable functions.
- produced_preparsed_scope_data_->Bailout();
+ preparsed_scope_data_builder_->Bailout();
}
}
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 10c42fa940..65509a2029 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -10,7 +10,6 @@
#include "src/parsing/parser-base.h"
#include "src/parsing/preparser-logger.h"
#include "src/pending-compilation-error-handler.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -23,7 +22,7 @@ namespace internal {
// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
// used.
-class ProducedPreParsedScopeData;
+class PreParsedScopeDataBuilder;
class PreParserIdentifier {
public:
@@ -87,16 +86,18 @@ class PreParserIdentifier {
friend class PreParserFactory;
};
-
class PreParserExpression {
public:
+ using VariableZoneThreadedListType =
+ ZoneThreadedList<VariableProxy, VariableProxy::PreParserNext>;
+
PreParserExpression()
: code_(TypeField::encode(kNull)), variables_(nullptr) {}
static PreParserExpression Null() { return PreParserExpression(); }
static PreParserExpression Default(
- ZonePtrList<VariableProxy>* variables = nullptr) {
+ VariableZoneThreadedListType* variables = nullptr) {
return PreParserExpression(TypeField::encode(kExpression), variables);
}
@@ -125,9 +126,7 @@ class PreParserExpression {
right.variables_);
}
if (right.variables_ != nullptr) {
- for (auto variable : *right.variables_) {
- left.variables_->Add(variable, zone);
- }
+ left.variables_->Append(std::move(*right.variables_));
}
return PreParserExpression(TypeField::encode(kExpression),
left.variables_);
@@ -135,7 +134,8 @@ class PreParserExpression {
return PreParserExpression(TypeField::encode(kExpression));
}
- static PreParserExpression Assignment(ZonePtrList<VariableProxy>* variables) {
+ static PreParserExpression Assignment(
+ VariableZoneThreadedListType* variables) {
return PreParserExpression(TypeField::encode(kExpression) |
ExpressionTypeField::encode(kAssignment),
variables);
@@ -146,13 +146,13 @@ class PreParserExpression {
}
static PreParserExpression ObjectLiteral(
- ZonePtrList<VariableProxy>* variables) {
+ VariableZoneThreadedListType* variables) {
return PreParserExpression(TypeField::encode(kObjectLiteralExpression),
variables);
}
static PreParserExpression ArrayLiteral(
- ZonePtrList<VariableProxy>* variables) {
+ VariableZoneThreadedListType* variables) {
return PreParserExpression(TypeField::encode(kArrayLiteralExpression),
variables);
}
@@ -171,10 +171,9 @@ class PreParserExpression {
IsUseAsmField::encode(true));
}
- static PreParserExpression This(ZonePtrList<VariableProxy>* variables) {
+ static PreParserExpression This() {
return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kThisExpression),
- variables);
+ ExpressionTypeField::encode(kThisExpression));
}
static PreParserExpression ThisPropertyWithPrivateFieldKey() {
@@ -336,7 +335,7 @@ class PreParserExpression {
if (variables_ != nullptr) {
DCHECK(IsIdentifier());
DCHECK(AsIdentifier().IsPrivateName());
- DCHECK_EQ(1, variables_->length());
+ DCHECK_EQ(1, variables_->LengthForTest());
variables_->first()->set_is_private_field();
}
}
@@ -374,8 +373,9 @@ class PreParserExpression {
kAssignment
};
- explicit PreParserExpression(uint32_t expression_code,
- ZonePtrList<VariableProxy>* variables = nullptr)
+ explicit PreParserExpression(
+ uint32_t expression_code,
+ VariableZoneThreadedListType* variables = nullptr)
: code_(expression_code), variables_(variables) {}
void AddVariable(VariableProxy* variable, Zone* zone) {
@@ -383,9 +383,9 @@ class PreParserExpression {
return;
}
if (variables_ == nullptr) {
- variables_ = new (zone) ZonePtrList<VariableProxy>(1, zone);
+ variables_ = new (zone) VariableZoneThreadedListType();
}
- variables_->Add(variable, zone);
+ variables_->Add(variable);
}
// The first three bits are for the Type.
@@ -410,64 +410,65 @@ class PreParserExpression {
uint32_t code_;
// If the PreParser is used in the variable tracking mode, PreParserExpression
// accumulates variables in that expression.
- ZonePtrList<VariableProxy>* variables_;
+ VariableZoneThreadedListType* variables_;
friend class PreParser;
friend class PreParserFactory;
- template <typename T>
- friend class PreParserList;
+ friend class PreParserExpressionList;
};
// The pre-parser doesn't need to build lists of expressions, identifiers, or
// the like. If the PreParser is used in variable tracking mode, it needs to
// build lists of variables though.
-template <typename T>
-class PreParserList {
+class PreParserExpressionList {
+ using VariableZoneThreadedListType =
+ ZoneThreadedList<VariableProxy, VariableProxy::PreParserNext>;
+
public:
// These functions make list->Add(some_expression) work (and do nothing).
- PreParserList() : length_(0), variables_(nullptr) {}
- PreParserList* operator->() { return this; }
- void Add(const T& element, Zone* zone);
+ PreParserExpressionList() : PreParserExpressionList(0) {}
+ PreParserExpressionList* operator->() { return this; }
+ void Add(const PreParserExpression& expression, Zone* zone) {
+ if (expression.variables_ != nullptr) {
+ DCHECK(FLAG_lazy_inner_functions);
+ DCHECK_NOT_NULL(zone);
+ if (variables_ == nullptr) {
+ variables_ = new (zone) VariableZoneThreadedListType();
+ }
+ variables_->Append(std::move(*expression.variables_));
+ }
+ ++length_;
+ }
int length() const { return length_; }
- static PreParserList Null() { return PreParserList(-1); }
+ static PreParserExpressionList Null() { return PreParserExpressionList(-1); }
bool IsNull() const { return length_ == -1; }
- void Set(int index, const T& element) {}
+ void Set(int index, const PreParserExpression& element) {}
private:
- explicit PreParserList(int n) : length_(n), variables_(nullptr) {}
+ explicit PreParserExpressionList(int n) : length_(n), variables_(nullptr) {}
int length_;
- ZonePtrList<VariableProxy>* variables_;
+
+ VariableZoneThreadedListType* variables_;
friend class PreParser;
friend class PreParserFactory;
};
-template <>
-inline void PreParserList<PreParserExpression>::Add(
- const PreParserExpression& expression, Zone* zone) {
- if (expression.variables_ != nullptr) {
- DCHECK(FLAG_lazy_inner_functions);
- DCHECK_NOT_NULL(zone);
- if (variables_ == nullptr) {
- variables_ = new (zone) ZonePtrList<VariableProxy>(1, zone);
- }
- for (auto identifier : (*expression.variables_)) {
- variables_->Add(identifier, zone);
- }
- }
- ++length_;
-}
-
-template <typename T>
-void PreParserList<T>::Add(const T& element, Zone* zone) {
- ++length_;
-}
+class PreParserStatement;
-typedef PreParserList<PreParserExpression> PreParserExpressionList;
+class PreParserStatementList {
+ public:
+ PreParserStatementList() : PreParserStatementList(false) {}
+ PreParserStatementList* operator->() { return this; }
+ void Add(const PreParserStatement& element, Zone* zone) {}
+ static PreParserStatementList Null() { return PreParserStatementList(true); }
+ bool IsNull() const { return is_null_; }
-class PreParserStatement;
-typedef PreParserList<PreParserStatement> PreParserStatementList;
+ private:
+ explicit PreParserStatementList(bool is_null) : is_null_(is_null) {}
+ bool is_null_;
+};
class PreParserStatement {
public:
@@ -530,8 +531,6 @@ class PreParserStatement {
// and PreParser.
PreParserStatement* operator->() { return this; }
- // TODO(adamk): These should return something even lighter-weight than
- // PreParserStatementList.
PreParserStatementList statements() { return PreParserStatementList(); }
PreParserStatementList cases() { return PreParserStatementList(); }
@@ -563,11 +562,6 @@ class PreParserFactory {
explicit PreParserFactory(AstValueFactory* ast_value_factory, Zone* zone)
: ast_node_factory_(ast_value_factory, zone), zone_(zone) {}
- void set_zone(Zone* zone) {
- ast_node_factory_.set_zone(zone);
- zone_ = zone;
- }
-
AstNodeFactory* ast_node_factory() { return &ast_node_factory_; }
PreParserExpression NewStringLiteral(const PreParserIdentifier& identifier,
@@ -852,19 +846,22 @@ class PreParserFactory {
struct PreParserFormalParameters : FormalParametersBase {
struct Parameter : public ZoneObject {
- Parameter(ZonePtrList<VariableProxy>* variables, bool is_rest)
+ using VariableZoneThreadedListType =
+ ZoneThreadedList<VariableProxy, VariableProxy::PreParserNext>;
+
+ Parameter(VariableZoneThreadedListType* variables, bool is_rest)
: variables_(variables), is_rest(is_rest) {}
Parameter** next() { return &next_parameter; }
Parameter* const* next() const { return &next_parameter; }
- ZonePtrList<VariableProxy>* variables_;
+ VariableZoneThreadedListType* variables_;
Parameter* next_parameter = nullptr;
bool is_rest : 1;
};
explicit PreParserFormalParameters(DeclarationScope* scope)
: FormalParametersBase(scope) {}
- ThreadedList<Parameter> params;
+ base::ThreadedList<Parameter> params;
};
@@ -881,6 +878,48 @@ class PreParserTargetScope {
explicit PreParserTargetScope(ParserBase<PreParser>* preparser) {}
};
+class PreParserFuncNameInferrer {
+ public:
+ PreParserFuncNameInferrer(AstValueFactory* avf, Zone* zone) {}
+ void RemoveAsyncKeywordFromEnd() const {}
+ void Infer() const {}
+ void RemoveLastFunction() const {}
+
+ class State {
+ public:
+ explicit State(PreParserFuncNameInferrer* fni) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(State);
+ };
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PreParserFuncNameInferrer);
+};
+
+class PreParserSourceRange {
+ public:
+ PreParserSourceRange() {}
+ PreParserSourceRange(int start, int end) {}
+ static PreParserSourceRange Empty() { return PreParserSourceRange(); }
+ static PreParserSourceRange OpenEnded(int32_t start) { return Empty(); }
+ static const PreParserSourceRange& ContinuationOf(
+ const PreParserSourceRange& that, int end) {
+ return that;
+ }
+};
+
+class PreParserSourceRangeScope {
+ public:
+ PreParserSourceRangeScope(Scanner* scanner, PreParserSourceRange* range) {}
+ const PreParserSourceRange& Finalize() const { return range_; }
+
+ private:
+ PreParserSourceRange range_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PreParserSourceRangeScope);
+};
+
template <>
struct ParserTypes<PreParser> {
typedef ParserBase<PreParser> Base;
@@ -910,6 +949,10 @@ struct ParserTypes<PreParser> {
typedef PreParserTarget Target;
typedef PreParserTargetScope TargetScope;
+ typedef PreParserFuncNameInferrer FuncNameInferrer;
+ typedef PreParserSourceRange SourceRange;
+ typedef PreParserSourceRangeScope SourceRangeScope;
+ static constexpr bool ExpressionClassifierReportErrors = false;
};
@@ -937,6 +980,7 @@ class PreParser : public ParserBase<PreParser> {
enum PreParseResult {
kPreParseStackOverflow,
kPreParseAbort,
+ kPreParseNotIdentifiableError,
kPreParseSuccess
};
@@ -952,7 +996,7 @@ class PreParser : public ParserBase<PreParser> {
parsing_module, parsing_on_main_thread),
use_counts_(nullptr),
track_unresolved_variables_(false),
- produced_preparsed_scope_data_(nullptr) {}
+ preparsed_scope_data_builder_(nullptr) {}
static bool IsPreParser() { return true; }
@@ -980,13 +1024,17 @@ class PreParser : public ParserBase<PreParser> {
ProducedPreParsedScopeData** produced_preparser_scope_data,
int script_id);
- ProducedPreParsedScopeData* produced_preparsed_scope_data() const {
- return produced_preparsed_scope_data_;
+ V8_INLINE static bool ShouldTrackUnresolvedVariables(bool is_inner_function) {
+ return FLAG_preparser_scope_analysis || is_inner_function;
+ }
+
+ PreParsedScopeDataBuilder* preparsed_scope_data_builder() const {
+ return preparsed_scope_data_builder_;
}
- void set_produced_preparsed_scope_data(
- ProducedPreParsedScopeData* produced_preparsed_scope_data) {
- produced_preparsed_scope_data_ = produced_preparsed_scope_data;
+ void set_preparsed_scope_data_builder(
+ PreParsedScopeDataBuilder* preparsed_scope_data_builder) {
+ preparsed_scope_data_builder_ = preparsed_scope_data_builder;
}
private:
@@ -1009,12 +1057,13 @@ class PreParser : public ParserBase<PreParser> {
return pending_error_handler_;
}
- V8_INLINE LazyParsingResult
- SkipFunction(const AstRawString* name, FunctionKind kind,
- FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope, int* num_parameters,
- ProducedPreParsedScopeData** produced_preparsed_scope_data,
- bool is_inner_function, bool may_abort, bool* ok) {
+ V8_INLINE bool SkipFunction(
+ const AstRawString* name, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, int* num_parameters,
+ ProducedPreParsedScopeData** produced_preparsed_scope_data,
+ bool is_inner_function, bool may_abort,
+ FunctionLiteral::EagerCompileHint* hint, bool* ok) {
UNREACHABLE();
}
@@ -1509,6 +1558,10 @@ class PreParser : public ParserBase<PreParser> {
arg, error_type);
}
+ V8_INLINE void ReportUnidentifiableError() {
+ pending_error_handler()->SetUnidentifiableError();
+ }
+
V8_INLINE void ReportMessageAt(Scanner::Location source_location,
MessageTemplate::Template message,
const PreParserIdentifier& arg,
@@ -1558,16 +1611,12 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
- ZonePtrList<VariableProxy>* variables = nullptr;
if (track_unresolved_variables_) {
- VariableProxy* proxy = scope()->NewUnresolved(
- factory()->ast_node_factory(), ast_value_factory()->this_string(),
- pos, THIS_VARIABLE);
-
- variables = new (zone()) ZonePtrList<VariableProxy>(1, zone());
- variables->Add(proxy, zone());
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->this_string(), pos,
+ THIS_VARIABLE);
}
- return PreParserExpression::This(variables);
+ return PreParserExpression::This();
}
V8_INLINE PreParserExpression NewSuperPropertyReference(int pos) {
@@ -1648,14 +1697,6 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Jump();
}
- V8_INLINE void AddParameterInitializationBlock(
- const PreParserFormalParameters& parameters, PreParserStatementList body,
- bool is_async, bool* ok) {
- if (!parameters.is_simple) {
- BuildParameterInitializationBlock(parameters, ok);
- }
- }
-
V8_INLINE void AddFormalParameter(PreParserFormalParameters* parameters,
const PreParserExpression& pattern,
const PreParserExpression& initializer,
@@ -1671,14 +1712,15 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void DeclareFormalParameters(
DeclarationScope* scope,
- const ThreadedList<PreParserFormalParameters::Parameter>& parameters,
+ const base::ThreadedList<PreParserFormalParameters::Parameter>&
+ parameters,
bool is_simple) {
if (!is_simple) scope->SetHasNonSimpleParameters();
if (track_unresolved_variables_) {
DCHECK(FLAG_lazy_inner_functions);
for (auto parameter : parameters) {
DCHECK_IMPLIES(is_simple, parameter->variables_ != nullptr);
- DCHECK_IMPLIES(is_simple, parameter->variables_->length() == 1);
+ DCHECK_IMPLIES(is_simple, parameter->variables_->LengthForTest() == 1);
// Make sure each parameter is added only once even if it's a
// destructuring parameter which contains multiple names.
bool add_parameter = true;
@@ -1733,7 +1775,7 @@ class PreParser : public ParserBase<PreParser> {
const PreParserExpression& value, const PreParserExpression& identifier) {
}
- V8_INLINE ZoneVector<typename ExpressionClassifier::Error>*
+ V8_INLINE ZoneList<typename ExpressionClassifier::Error>*
GetReportedErrorList() const {
return function_state_->GetReportedErrorList();
}
@@ -1758,7 +1800,7 @@ class PreParser : public ParserBase<PreParser> {
bool track_unresolved_variables_;
PreParserLogger log_;
- ProducedPreParsedScopeData* produced_preparsed_scope_data_;
+ PreParsedScopeDataBuilder* preparsed_scope_data_builder_;
};
PreParserExpression PreParser::SpreadCall(const PreParserExpression& function,
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index d38fdd7c42..8472e9f4fc 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -4,6 +4,9 @@
#include "src/parsing/scanner-character-streams.h"
+#include <memory>
+#include <vector>
+
#include "include/v8.h"
#include "src/counters.h"
#include "src/globals.h"
@@ -15,21 +18,50 @@
namespace v8 {
namespace internal {
+class ScopedExternalStringLock {
+ public:
+ explicit ScopedExternalStringLock(ExternalString* string) {
+ DCHECK(string);
+ if (string->IsExternalOneByteString()) {
+ resource_ = ExternalOneByteString::cast(string)->resource();
+ } else {
+ DCHECK(string->IsExternalTwoByteString());
+ resource_ = ExternalTwoByteString::cast(string)->resource();
+ }
+ DCHECK(resource_);
+ resource_->Lock();
+ }
+
+ // Copying a lock increases the locking depth.
+ ScopedExternalStringLock(const ScopedExternalStringLock& other)
+ : resource_(other.resource_) {
+ resource_->Lock();
+ }
+
+ ~ScopedExternalStringLock() { resource_->Unlock(); }
+
+ private:
+ // Not nullptr.
+ const v8::String::ExternalStringResourceBase* resource_;
+};
+
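As an aside on the class above: this is plain RAII with copy-as-reacquire. Every copy of the scope takes its own Lock() on the shared resource, so the resource stays pinned until the last copy is destroyed. A minimal self-contained sketch, with Resource standing in for v8::String::ExternalStringResourceBase:

    #include <cassert>

    class Resource {
     public:
      void Lock() { ++depth_; }
      void Unlock() { --depth_; }
      int depth() const { return depth_; }

     private:
      int depth_ = 0;
    };

    class ScopedLock {
     public:
      explicit ScopedLock(Resource* r) : resource_(r) { resource_->Lock(); }
      // Copying re-locks: each copy holds its own reference on the resource.
      ScopedLock(const ScopedLock& other) : resource_(other.resource_) {
        resource_->Lock();
      }
      ~ScopedLock() { resource_->Unlock(); }

     private:
      Resource* const resource_;
    };

    int main() {
      Resource r;
      {
        ScopedLock a(&r);
        ScopedLock b(a);  // locking depth is now 2
        assert(r.depth() == 2);
      }
      assert(r.depth() == 0);  // both holders have released
      return 0;
    }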
namespace {
const unibrow::uchar kUtf8Bom = 0xFEFF;
} // namespace
template <typename Char>
-struct HeapStringType;
+struct CharTraits;
template <>
-struct HeapStringType<uint8_t> {
+struct CharTraits<uint8_t> {
typedef SeqOneByteString String;
+ typedef ExternalOneByteString ExternalString;
};
template <>
-struct HeapStringType<uint16_t> {
+struct CharTraits<uint16_t> {
typedef SeqTwoByteString String;
+ typedef ExternalTwoByteString ExternalString;
};
template <typename Char>
@@ -47,16 +79,21 @@ struct Range {
template <typename Char>
class OnHeapStream {
public:
- typedef typename HeapStringType<Char>::String String;
+ typedef typename CharTraits<Char>::String String;
OnHeapStream(Handle<String> string, size_t start_offset, size_t end)
: string_(string), start_offset_(start_offset), length_(end) {}
- Range<Char> GetDataAt(size_t pos) {
+ OnHeapStream(const OnHeapStream& other) : start_offset_(0), length_(0) {
+ UNREACHABLE();
+ }
+
+ Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats) {
return {&string_->GetChars()[start_offset_ + Min(length_, pos)],
&string_->GetChars()[start_offset_ + length_]};
}
+ static const bool kCanBeCloned = false;
static const bool kCanAccessHeap = true;
private:
@@ -69,14 +106,42 @@ class OnHeapStream {
// ExternalTwoByteString.
template <typename Char>
class ExternalStringStream {
+ typedef typename CharTraits<Char>::ExternalString ExternalString;
+
public:
- ExternalStringStream(const Char* data, size_t end)
- : data_(data), length_(end) {}
+ ExternalStringStream(ExternalString* string, size_t start_offset,
+ size_t length)
+ : lock_(string),
+ data_(string->GetChars() + start_offset),
+ length_(length) {}
+
+ ExternalStringStream(const ExternalStringStream& other)
+ : lock_(other.lock_), data_(other.data_), length_(other.length_) {}
- Range<Char> GetDataAt(size_t pos) {
+ Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats) {
return {&data_[Min(length_, pos)], &data_[length_]};
}
+ static const bool kCanBeCloned = true;
+ static const bool kCanAccessHeap = false;
+
+ private:
+ ScopedExternalStringLock lock_;
+ const Char* const data_;
+ const size_t length_;
+};
+
+// A Char stream backed by a C array. Testing only.
+template <typename Char>
+class TestingStream {
+ public:
+ TestingStream(const Char* data, size_t length)
+ : data_(data), length_(length) {}
+ Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats) {
+ return {&data_[Min(length_, pos)], &data_[length_]};
+ }
+
+ static const bool kCanBeCloned = true;
static const bool kCanAccessHeap = false;
private:
@@ -88,12 +153,16 @@ class ExternalStringStream {
template <typename Char>
class ChunkedStream {
public:
- ChunkedStream(ScriptCompiler::ExternalSourceStream* source,
- RuntimeCallStats* stats)
- : source_(source), stats_(stats) {}
+ explicit ChunkedStream(ScriptCompiler::ExternalSourceStream* source)
+ : source_(source) {}
+
+ ChunkedStream(const ChunkedStream& other) {
+ // TODO(rmcilroy): Implement cloning for chunked streams.
+ UNREACHABLE();
+ }
- Range<Char> GetDataAt(size_t pos) {
- Chunk chunk = FindChunk(pos);
+ Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats) {
+ Chunk chunk = FindChunk(pos, stats);
size_t buffer_end = chunk.length;
size_t buffer_pos = Min(buffer_end, pos - chunk.position);
return {&chunk.data[buffer_pos], &chunk.data[buffer_end]};
@@ -103,6 +172,7 @@ class ChunkedStream {
for (Chunk& chunk : chunks_) delete[] chunk.data;
}
+ static const bool kCanBeCloned = false;
static const bool kCanAccessHeap = false;
private:
@@ -116,13 +186,13 @@ class ChunkedStream {
size_t end_position() const { return position + length; }
};
- Chunk FindChunk(size_t position) {
- while (V8_UNLIKELY(chunks_.empty())) FetchChunk(size_t{0});
+ Chunk FindChunk(size_t position, RuntimeCallStats* stats) {
+ while (V8_UNLIKELY(chunks_.empty())) FetchChunk(size_t{0}, stats);
// Walk forwards while the position is in front of the current chunk.
while (position >= chunks_.back().end_position() &&
chunks_.back().length > 0) {
- FetchChunk(chunks_.back().end_position());
+ FetchChunk(chunks_.back().end_position(), stats);
}
// Walk backwards.
@@ -142,11 +212,11 @@ class ChunkedStream {
length / sizeof(Char));
}
- void FetchChunk(size_t position) {
+ void FetchChunk(size_t position, RuntimeCallStats* stats) {
const uint8_t* data = nullptr;
size_t length;
{
- RuntimeCallTimerScope scope(stats_,
+ RuntimeCallTimerScope scope(stats,
RuntimeCallCounterId::kGetMoreDataCallback);
length = source_->GetMoreData(&data);
}
@@ -154,102 +224,11 @@ class ChunkedStream {
}
ScriptCompiler::ExternalSourceStream* source_;
- RuntimeCallStats* stats_;
protected:
std::vector<struct Chunk> chunks_;
};
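Note how the RuntimeCallStats pointer now rides along GetDataAt, FindChunk and FetchChunk instead of living on the stream; dropping that per-stream state is part of what makes streams safe to hand to another consumer. The chunk bookkeeping itself is unchanged: fetch lazily, append, treat a zero-length chunk as end-of-stream, and resolve positions by walking forward (the common sequential case) and only rarely backward. A compileable model of that lookup, with ChunkedSource standing in for the stream over ScriptCompiler::ExternalSourceStream:

    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    class ChunkedSource {
     public:
      // |parts| models the pieces GetMoreData() would hand back.
      explicit ChunkedSource(std::vector<std::string> parts)
          : parts_(std::move(parts)) {}

      char At(size_t pos) {
        if (chunks_.empty()) Fetch(0);
        // Walk forwards while pos lies past the last fetched chunk.
        while (pos >= chunks_.back().end() && !chunks_.back().data.empty())
          Fetch(chunks_.back().end());
        // Walk backwards for the rare re-read of an earlier position.
        size_t i = chunks_.size() - 1;
        while (chunks_[i].position > pos) --i;
        const Chunk& c = chunks_[i];
        return pos < c.end() ? c.data[pos - c.position] : '\0';  // '\0': EOS
      }

     private:
      struct Chunk {
        std::string data;
        size_t position;  // absolute offset of data[0]
        size_t end() const { return position + data.size(); }
      };

      void Fetch(size_t position) {
        std::string next =
            next_part_ < parts_.size() ? parts_[next_part_++] : std::string();
        chunks_.push_back(Chunk{std::move(next), position});
      }

      std::vector<std::string> parts_;
      size_t next_part_ = 0;
      std::vector<Chunk> chunks_;
    };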
-template <typename Char>
-class Utf8ChunkedStream : public ChunkedStream<uint16_t> {
- public:
- Utf8ChunkedStream(ScriptCompiler::ExternalSourceStream* source,
- RuntimeCallStats* stats)
- : ChunkedStream<uint16_t>(source, stats) {}
-
- STATIC_ASSERT(sizeof(Char) == sizeof(uint16_t));
- void ProcessChunk(const uint8_t* data, size_t position, size_t length) final {
- if (length == 0) {
- unibrow::uchar t = unibrow::Utf8::ValueOfIncrementalFinish(&state_);
- if (t != unibrow::Utf8::kBufferEmpty) {
- DCHECK_EQ(t, unibrow::Utf8::kBadChar);
- incomplete_char_ = 0;
- uint16_t* result = new uint16_t[1];
- result[0] = unibrow::Utf8::kBadChar;
- chunks_.emplace_back(result, position, 1);
- position++;
- }
- chunks_.emplace_back(nullptr, position, 0);
- delete[] data;
- return;
- }
-
- // First count the number of complete characters that can be produced.
-
- unibrow::Utf8::State state = state_;
- uint32_t incomplete_char = incomplete_char_;
- bool seen_bom = seen_bom_;
-
- size_t i = 0;
- size_t chars = 0;
- while (i < length) {
- unibrow::uchar t = unibrow::Utf8::ValueOfIncremental(data[i], &i, &state,
- &incomplete_char);
- if (!seen_bom && t == kUtf8Bom && position + chars == 0) {
- seen_bom = true;
- // BOM detected at beginning of the stream. Don't copy it.
- } else if (t != unibrow::Utf8::kIncomplete) {
- chars++;
- if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) chars++;
- }
- }
-
- // Process the data.
-
- // If there aren't any complete characters, update the state without
- // producing a chunk.
- if (chars == 0) {
- state_ = state;
- incomplete_char_ = incomplete_char;
- seen_bom_ = seen_bom;
- delete[] data;
- return;
- }
-
- // Update the state and produce a chunk with complete characters.
- uint16_t* result = new uint16_t[chars];
- uint16_t* cursor = result;
- i = 0;
-
- while (i < length) {
- unibrow::uchar t = unibrow::Utf8::ValueOfIncremental(data[i], &i, &state_,
- &incomplete_char_);
- if (V8_LIKELY(t < kUtf8Bom)) {
- *(cursor++) = static_cast<uc16>(t); // By far the most frequent case.
- } else if (t == unibrow::Utf8::kIncomplete) {
- continue;
- } else if (!seen_bom_ && t == kUtf8Bom && position == 0 &&
- cursor == result) {
- // BOM detected at beginning of the stream. Don't copy it.
- seen_bom_ = true;
- } else if (t <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
- *(cursor++) = static_cast<uc16>(t);
- } else {
- *(cursor++) = unibrow::Utf16::LeadSurrogate(t);
- *(cursor++) = unibrow::Utf16::TrailSurrogate(t);
- }
- }
-
- chunks_.emplace_back(result, position, chars);
- delete[] data;
- }
-
- private:
- uint32_t incomplete_char_ = 0;
- unibrow::Utf8::State state_ = unibrow::Utf8::State::kAccept;
- bool seen_bom_ = false;
-};
-
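With this class deleted, streamed UTF-8 always takes the Utf8ExternalStreamingStream path further down; the decoding rules themselves do not change. For reference, the one non-obvious step the removed loop performed, widening a decoded code point to UTF-16 and emitting a surrogate pair above U+FFFF, looks like this in isolation:

    #include <cstdint>
    #include <vector>

    void AppendUtf16(uint32_t cp, std::vector<uint16_t>* out) {
      if (cp <= 0xFFFF) {  // BMP: a single code unit
        out->push_back(static_cast<uint16_t>(cp));
      } else {             // astral plane: lead + trail surrogate
        cp -= 0x10000;
        out->push_back(static_cast<uint16_t>(0xD800 + (cp >> 10)));
        out->push_back(static_cast<uint16_t>(0xDC00 + (cp & 0x3FF)));
      }
    }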
// Provides a buffered utf-16 view on the bytes from the underlying ByteStream.
// Chars are buffered if either the underlying stream isn't utf-16 or the
// underlying utf-16 stream might move (is on-heap).
@@ -261,6 +240,16 @@ class BufferedCharacterStream : public Utf16CharacterStream {
buffer_pos_ = pos;
}
+ bool can_be_cloned() const final {
+ return ByteStream<uint16_t>::kCanBeCloned;
+ }
+
+ std::unique_ptr<Utf16CharacterStream> Clone() const override {
+ CHECK(can_be_cloned());
+ return std::unique_ptr<Utf16CharacterStream>(
+ new BufferedCharacterStream<ByteStream>(*this));
+ }
+
protected:
bool ReadBlock() final {
size_t position = pos();
@@ -268,7 +257,8 @@ class BufferedCharacterStream : public Utf16CharacterStream {
buffer_start_ = &buffer_[0];
buffer_cursor_ = buffer_start_;
- Range<uint8_t> range = byte_stream_.GetDataAt(position);
+ Range<uint8_t> range =
+ byte_stream_.GetDataAt(position, runtime_call_stats());
if (range.length() == 0) {
buffer_end_ = buffer_start_;
return false;
@@ -280,9 +270,14 @@ class BufferedCharacterStream : public Utf16CharacterStream {
return true;
}
- bool can_access_heap() final { return ByteStream<uint8_t>::kCanAccessHeap; }
+ bool can_access_heap() const final {
+ return ByteStream<uint8_t>::kCanAccessHeap;
+ }
private:
+ BufferedCharacterStream(const BufferedCharacterStream<ByteStream>& other)
+ : byte_stream_(other.byte_stream_) {}
+
static const size_t kBufferSize = 512;
uc16 buffer_[kBufferSize];
ByteStream<uint8_t> byte_stream_;
@@ -298,11 +293,25 @@ class UnbufferedCharacterStream : public Utf16CharacterStream {
buffer_pos_ = pos;
}
+ bool can_access_heap() const final {
+ return ByteStream<uint16_t>::kCanAccessHeap;
+ }
+
+ bool can_be_cloned() const final {
+ return ByteStream<uint16_t>::kCanBeCloned;
+ }
+
+ std::unique_ptr<Utf16CharacterStream> Clone() const override {
+ return std::unique_ptr<Utf16CharacterStream>(
+ new UnbufferedCharacterStream<ByteStream>(*this));
+ }
+
protected:
bool ReadBlock() final {
size_t position = pos();
buffer_pos_ = position;
- Range<uint16_t> range = byte_stream_.GetDataAt(position);
+ Range<uint16_t> range =
+ byte_stream_.GetDataAt(position, runtime_call_stats());
buffer_start_ = range.start;
buffer_end_ = range.end;
buffer_cursor_ = buffer_start_;
@@ -313,7 +322,8 @@ class UnbufferedCharacterStream : public Utf16CharacterStream {
return true;
}
- bool can_access_heap() final { return ByteStream<uint16_t>::kCanAccessHeap; }
+ UnbufferedCharacterStream(const UnbufferedCharacterStream<ByteStream>& other)
+ : byte_stream_(other.byte_stream_) {}
ByteStream<uint16_t> byte_stream_;
};
@@ -346,7 +356,7 @@ class RelocatingCharacterStream
}
void UpdateBufferPointers() {
- Range<uint16_t> range = byte_stream_.GetDataAt(0);
+ Range<uint16_t> range = byte_stream_.GetDataAt(0, runtime_call_stats());
if (range.start != buffer_start_) {
buffer_cursor_ = (buffer_cursor_ - buffer_start_) + range.start;
buffer_start_ = range.start;
@@ -412,16 +422,20 @@ bool BufferedUtf16CharacterStream::ReadBlock() {
class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
public:
Utf8ExternalStreamingStream(
- ScriptCompiler::ExternalSourceStream* source_stream,
- RuntimeCallStats* stats)
+ ScriptCompiler::ExternalSourceStream* source_stream)
: current_({0, {0, 0, 0, unibrow::Utf8::State::kAccept}}),
- source_stream_(source_stream),
- stats_(stats) {}
+ source_stream_(source_stream) {}
~Utf8ExternalStreamingStream() final {
for (size_t i = 0; i < chunks_.size(); i++) delete[] chunks_[i].data;
}
- bool can_access_heap() final { return false; }
+ bool can_access_heap() const final { return false; }
+
+ bool can_be_cloned() const final { return false; }
+
+ std::unique_ptr<Utf16CharacterStream> Clone() const override {
+ UNREACHABLE();
+ }
protected:
size_t FillBuffer(size_t position) final;
@@ -468,7 +482,6 @@ class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
std::vector<Chunk> chunks_;
Position current_;
ScriptCompiler::ExternalSourceStream* source_stream_;
- RuntimeCallStats* stats_;
};
bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
@@ -562,7 +575,7 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
}
bool Utf8ExternalStreamingStream::FetchChunk() {
- RuntimeCallTimerScope scope(stats_,
+ RuntimeCallTimerScope scope(runtime_call_stats(),
RuntimeCallCounterId::kGetMoreDataCallback);
DCHECK_EQ(current_.chunk_no, chunks_.size());
DCHECK(chunks_.empty() || chunks_.back().length != 0);
@@ -704,14 +717,12 @@ Utf16CharacterStream* ScannerStream::For(Isolate* isolate, Handle<String> data,
}
if (data->IsExternalOneByteString()) {
return new BufferedCharacterStream<ExternalStringStream>(
- static_cast<size_t>(start_pos),
- ExternalOneByteString::cast(*data)->GetChars() + start_offset,
- static_cast<size_t>(end_pos));
+ static_cast<size_t>(start_pos), ExternalOneByteString::cast(*data),
+ start_offset, static_cast<size_t>(end_pos));
} else if (data->IsExternalTwoByteString()) {
return new UnbufferedCharacterStream<ExternalStringStream>(
- static_cast<size_t>(start_pos),
- ExternalTwoByteString::cast(*data)->GetChars() + start_offset,
- static_cast<size_t>(end_pos));
+ static_cast<size_t>(start_pos), ExternalTwoByteString::cast(*data),
+ start_offset, static_cast<size_t>(end_pos));
} else if (data->IsSeqOneByteString()) {
return new BufferedCharacterStream<OnHeapStream>(
static_cast<size_t>(start_pos), Handle<SeqOneByteString>::cast(data),
@@ -734,24 +745,23 @@ std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
const char* data, size_t length) {
return std::unique_ptr<Utf16CharacterStream>(
- new BufferedCharacterStream<ExternalStringStream>(
+ new BufferedCharacterStream<TestingStream>(
static_cast<size_t>(0), reinterpret_cast<const uint8_t*>(data),
static_cast<size_t>(length)));
}
Utf16CharacterStream* ScannerStream::For(
ScriptCompiler::ExternalSourceStream* source_stream,
- v8::ScriptCompiler::StreamedSource::Encoding encoding,
- RuntimeCallStats* stats) {
+ v8::ScriptCompiler::StreamedSource::Encoding encoding) {
switch (encoding) {
case v8::ScriptCompiler::StreamedSource::TWO_BYTE:
return new UnbufferedCharacterStream<ChunkedStream>(
- static_cast<size_t>(0), source_stream, stats);
+ static_cast<size_t>(0), source_stream);
case v8::ScriptCompiler::StreamedSource::ONE_BYTE:
return new BufferedCharacterStream<ChunkedStream>(static_cast<size_t>(0),
- source_stream, stats);
+ source_stream);
case v8::ScriptCompiler::StreamedSource::UTF8:
- return new Utf8ExternalStreamingStream(source_stream, stats);
+ return new Utf8ExternalStreamingStream(source_stream);
}
UNREACHABLE();
}
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index 091ef5b8ea..4c85f5383f 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -24,8 +24,7 @@ class V8_EXPORT_PRIVATE ScannerStream {
int start_pos, int end_pos);
static Utf16CharacterStream* For(
ScriptCompiler::ExternalSourceStream* source_stream,
- ScriptCompiler::StreamedSource::Encoding encoding,
- RuntimeCallStats* stats);
+ ScriptCompiler::StreamedSource::Encoding encoding);
static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data);
static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data,
diff --git a/deps/v8/src/parsing/scanner-inl.h b/deps/v8/src/parsing/scanner-inl.h
index 809ef655a7..9647957062 100644
--- a/deps/v8/src/parsing/scanner-inl.h
+++ b/deps/v8/src/parsing/scanner-inl.h
@@ -5,25 +5,354 @@
#ifndef V8_PARSING_SCANNER_INL_H_
#define V8_PARSING_SCANNER_INL_H_
+#include "src/char-predicates-inl.h"
#include "src/parsing/scanner.h"
#include "src/unicode-cache-inl.h"
namespace v8 {
namespace internal {
+// Make sure tokens are stored as a single byte.
+STATIC_ASSERT(sizeof(Token::Value) == 1);
+
+// Table of one-character tokens, by character (0x00..0x7F only).
+// clang-format off
+static const Token::Value one_char_tokens[] = {
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::LPAREN, // 0x28
+ Token::RPAREN, // 0x29
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::COMMA, // 0x2C
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::COLON, // 0x3A
+ Token::SEMICOLON, // 0x3B
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::CONDITIONAL, // 0x3F
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::LBRACK, // 0x5B
+ Token::ILLEGAL,
+ Token::RBRACK, // 0x5D
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::ILLEGAL,
+ Token::LBRACE, // 0x7B
+ Token::ILLEGAL,
+ Token::RBRACE, // 0x7D
+ Token::BIT_NOT, // 0x7E
+ Token::ILLEGAL
+};
+// clang-format on
+
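Moving this table into the header, typed as Token::Value directly (which the STATIC_ASSERT above pins to one byte), lets the one-character fast path inline into ScanSingleToken below with no cast. The dispatch shape, reduced to a checkable miniature:

    #include <cstdint>

    enum class Tok : uint8_t { kIllegal, kLParen, kRParen, kComma, kSemicolon };
    static_assert(sizeof(Tok) == 1, "a token fits one table entry");

    constexpr Tok OneCharToken(char c) {
      return c == '(' ? Tok::kLParen
             : c == ')' ? Tok::kRParen
             : c == ',' ? Tok::kComma
             : c == ';' ? Tok::kSemicolon
                        : Tok::kIllegal;
    }

    static_assert(OneCharToken('(') == Tok::kLParen, "table hit: done");
    static_assert(OneCharToken('x') == Tok::kIllegal,
                  "miss: fall through to the full switch");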
+// ----------------------------------------------------------------------------
+// Keyword Matcher
+
+#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
+ KEYWORD_GROUP('a') \
+ KEYWORD("arguments", Token::ARGUMENTS) \
+ KEYWORD("as", Token::AS) \
+ KEYWORD("async", Token::ASYNC) \
+ KEYWORD("await", Token::AWAIT) \
+ KEYWORD("anonymous", Token::ANONYMOUS) \
+ KEYWORD_GROUP('b') \
+ KEYWORD("break", Token::BREAK) \
+ KEYWORD_GROUP('c') \
+ KEYWORD("case", Token::CASE) \
+ KEYWORD("catch", Token::CATCH) \
+ KEYWORD("class", Token::CLASS) \
+ KEYWORD("const", Token::CONST) \
+ KEYWORD("constructor", Token::CONSTRUCTOR) \
+ KEYWORD("continue", Token::CONTINUE) \
+ KEYWORD_GROUP('d') \
+ KEYWORD("debugger", Token::DEBUGGER) \
+ KEYWORD("default", Token::DEFAULT) \
+ KEYWORD("delete", Token::DELETE) \
+ KEYWORD("do", Token::DO) \
+ KEYWORD_GROUP('e') \
+ KEYWORD("else", Token::ELSE) \
+ KEYWORD("enum", Token::ENUM) \
+ KEYWORD("eval", Token::EVAL) \
+ KEYWORD("export", Token::EXPORT) \
+ KEYWORD("extends", Token::EXTENDS) \
+ KEYWORD_GROUP('f') \
+ KEYWORD("false", Token::FALSE_LITERAL) \
+ KEYWORD("finally", Token::FINALLY) \
+ KEYWORD("for", Token::FOR) \
+ KEYWORD("from", Token::FROM) \
+ KEYWORD("function", Token::FUNCTION) \
+ KEYWORD_GROUP('g') \
+ KEYWORD("get", Token::GET) \
+ KEYWORD_GROUP('i') \
+ KEYWORD("if", Token::IF) \
+ KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("import", Token::IMPORT) \
+ KEYWORD("in", Token::IN) \
+ KEYWORD("instanceof", Token::INSTANCEOF) \
+ KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('l') \
+ KEYWORD("let", Token::LET) \
+ KEYWORD_GROUP('m') \
+ KEYWORD("meta", Token::META) \
+ KEYWORD_GROUP('n') \
+ KEYWORD("name", Token::NAME) \
+ KEYWORD("new", Token::NEW) \
+ KEYWORD("null", Token::NULL_LITERAL) \
+ KEYWORD_GROUP('o') \
+ KEYWORD("of", Token::OF) \
+ KEYWORD_GROUP('p') \
+ KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("prototype", Token::PROTOTYPE) \
+ KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('r') \
+ KEYWORD("return", Token::RETURN) \
+ KEYWORD_GROUP('s') \
+ KEYWORD("set", Token::SET) \
+ KEYWORD("static", Token::STATIC) \
+ KEYWORD("super", Token::SUPER) \
+ KEYWORD("switch", Token::SWITCH) \
+ KEYWORD_GROUP('t') \
+ KEYWORD("target", Token::TARGET) \
+ KEYWORD("this", Token::THIS) \
+ KEYWORD("throw", Token::THROW) \
+ KEYWORD("true", Token::TRUE_LITERAL) \
+ KEYWORD("try", Token::TRY) \
+ KEYWORD("typeof", Token::TYPEOF) \
+ KEYWORD_GROUP('u') \
+ KEYWORD("undefined", Token::UNDEFINED) \
+ KEYWORD_GROUP('v') \
+ KEYWORD("var", Token::VAR) \
+ KEYWORD("void", Token::VOID) \
+ KEYWORD_GROUP('w') \
+ KEYWORD("while", Token::WHILE) \
+ KEYWORD("with", Token::WITH) \
+ KEYWORD_GROUP('y') \
+ KEYWORD("yield", Token::YIELD) \
+ KEYWORD_GROUP('_') \
+ KEYWORD("__proto__", Token::PROTO_UNDERSCORED) \
+ KEYWORD_GROUP('#') \
+ KEYWORD("#constructor", Token::PRIVATE_CONSTRUCTOR)
+
+V8_INLINE Token::Value KeywordOrIdentifierToken(const uint8_t* input,
+ int input_length) {
+ DCHECK_GE(input_length, 1);
+ const int kMinLength = 2;
+ const int kMaxLength = 12;
+ if (input_length < kMinLength || input_length > kMaxLength) {
+ return Token::IDENTIFIER;
+ }
+ switch (input[0]) {
+ default:
+#define KEYWORD_GROUP_CASE(ch) \
+ break; \
+ case ch:
+#define KEYWORD(keyword, token) \
+ { \
+ /* 'keyword' is a char array, so sizeof(keyword) is */ \
+ /* strlen(keyword) plus 1 for the NUL char. */ \
+ const int keyword_length = sizeof(keyword) - 1; \
+ STATIC_ASSERT(keyword_length >= kMinLength); \
+ STATIC_ASSERT(keyword_length <= kMaxLength); \
+ DCHECK_EQ(input[0], keyword[0]); \
+ DCHECK(token == Token::FUTURE_STRICT_RESERVED_WORD || \
+ 0 == strncmp(keyword, Token::String(token), sizeof(keyword))); \
+ if (input_length == keyword_length && input[1] == keyword[1] && \
+ (keyword_length <= 2 || input[2] == keyword[2]) && \
+ (keyword_length <= 3 || input[3] == keyword[3]) && \
+ (keyword_length <= 4 || input[4] == keyword[4]) && \
+ (keyword_length <= 5 || input[5] == keyword[5]) && \
+ (keyword_length <= 6 || input[6] == keyword[6]) && \
+ (keyword_length <= 7 || input[7] == keyword[7]) && \
+ (keyword_length <= 8 || input[8] == keyword[8]) && \
+ (keyword_length <= 9 || input[9] == keyword[9]) && \
+ (keyword_length <= 10 || input[10] == keyword[10])) { \
+ return token; \
+ } \
+ }
+ KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
+ }
+ return Token::IDENTIFIER;
+#undef KEYWORDS
+#undef KEYWORD
+#undef KEYWORD_GROUP_CASE
+}
+
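The macro expansion above reduces to a simple shape: switch on the first byte, gate on the exact length, then compare the remaining bytes with constant indices so the whole test unrolls at compile time. Hand-written for two keyword groups, over a byte buffer plus explicit length:

    #include <cstring>

    enum class Tok { kIdentifier, kBreak, kCase, kCatch };

    inline Tok Classify(const char* s, int n) {
      switch (s[0]) {
        case 'b':
          if (n == 5 && std::memcmp(s, "break", 5) == 0) return Tok::kBreak;
          break;
        case 'c':
          if (n == 4 && std::memcmp(s, "case", 4) == 0) return Tok::kCase;
          if (n == 5 && std::memcmp(s, "catch", 5) == 0) return Tok::kCatch;
          break;
      }
      return Tok::kIdentifier;  // no group or no match: plain identifier
    }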
+V8_INLINE Token::Value Scanner::ScanIdentifierOrKeyword() {
+ LiteralScope literal(this);
+ return ScanIdentifierOrKeywordInner(&literal);
+}
+
+V8_INLINE Token::Value Scanner::ScanIdentifierOrKeywordInner(
+ LiteralScope* literal) {
+ DCHECK(unicode_cache_->IsIdentifierStart(c0_));
+ bool escaped = false;
+ if (IsInRange(c0_, 'a', 'z') || c0_ == '_') {
+ do {
+ AddLiteralChar(static_cast<char>(c0_));
+ Advance();
+ } while (IsInRange(c0_, 'a', 'z') || c0_ == '_');
+
+ if (IsDecimalDigit(c0_) || IsInRange(c0_, 'A', 'Z') || c0_ == '$') {
+ // Identifier starting with lowercase or _.
+ do {
+ AddLiteralChar(static_cast<char>(c0_));
+ Advance();
+ } while (IsAsciiIdentifier(c0_));
+
+ if (c0_ <= kMaxAscii && c0_ != '\\') {
+ literal->Complete();
+ return Token::IDENTIFIER;
+ }
+ } else if (c0_ <= kMaxAscii && c0_ != '\\') {
+ // Only a-z+ or _: could be a keyword or identifier.
+ Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
+ Token::Value token =
+ KeywordOrIdentifierToken(chars.start(), chars.length());
+ if (token == Token::IDENTIFIER ||
+ token == Token::FUTURE_STRICT_RESERVED_WORD ||
+ Token::IsContextualKeyword(token))
+ literal->Complete();
+ return token;
+ }
+ } else if (IsInRange(c0_, 'A', 'Z') || c0_ == '$') {
+ do {
+ AddLiteralChar(static_cast<char>(c0_));
+ Advance();
+ } while (IsAsciiIdentifier(c0_));
+
+ if (c0_ <= kMaxAscii && c0_ != '\\') {
+ literal->Complete();
+ return Token::IDENTIFIER;
+ }
+ } else if (c0_ == '\\') {
+ escaped = true;
+ uc32 c = ScanIdentifierUnicodeEscape();
+ DCHECK(!unicode_cache_->IsIdentifierStart(-1));
+ if (c == '\\' || !unicode_cache_->IsIdentifierStart(c)) {
+ return Token::ILLEGAL;
+ }
+ AddLiteralChar(c);
+ }
+
+ return ScanIdentifierOrKeywordInnerSlow(literal, escaped);
+}
+
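The split keeps the common ASCII cases in this always-inlined header function while escapes and non-ASCII continuations jump to the out-of-line ScanIdentifierOrKeywordInnerSlow in scanner.cc. A model of the branch structure over plain char buffers (assuming, as the real scanner guarantees, that *p is a valid identifier start):

    #include <string>

    inline bool IsLowerOrUnderscore(char c) {
      return (c >= 'a' && c <= 'z') || c == '_';
    }
    inline bool IsAsciiIdentChar(char c) {
      return IsLowerOrUnderscore(c) || (c >= 'A' && c <= 'Z') ||
             (c >= '0' && c <= '9') || c == '$';
    }

    enum class Kind { kMaybeKeyword, kIdentifier, kSlowPath };

    inline Kind ScanIdentFast(const char*& p, std::string* lit) {
      // Keywords are all [a-z_], so consume that run first.
      while (IsLowerOrUnderscore(*p)) lit->push_back(*p++);
      if (IsAsciiIdentChar(*p)) {  // digit, upper case or '$': not a keyword
        while (IsAsciiIdentChar(*p)) lit->push_back(*p++);
        if (*p != '\\' && static_cast<unsigned char>(*p) < 0x80)
          return Kind::kIdentifier;
      } else if (*p != '\\' && static_cast<unsigned char>(*p) < 0x80) {
        return Kind::kMaybeKeyword;  // pure [a-z_]: consult the keyword table
      }
      return Kind::kSlowPath;  // escape or non-ASCII: out-of-line continuation
    }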
V8_INLINE Token::Value Scanner::SkipWhiteSpace() {
int start_position = source_pos();
- while (true) {
- // We won't skip behind the end of input.
- DCHECK(!unicode_cache_->IsWhiteSpace(kEndOfInput));
+ // We won't skip behind the end of input.
+ DCHECK(!unicode_cache_->IsWhiteSpaceOrLineTerminator(kEndOfInput));
- // Advance as long as character is a WhiteSpace or LineTerminator.
- // Remember if the latter is the case.
- if (unibrow::IsLineTerminator(c0_)) {
+ // Advance as long as character is a WhiteSpace or LineTerminator.
+ while (unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_)) {
+ if (!next().after_line_terminator && unibrow::IsLineTerminator(c0_)) {
next().after_line_terminator = true;
- } else if (!unicode_cache_->IsWhiteSpace(c0_)) {
- break;
}
Advance();
}
@@ -37,6 +366,191 @@ V8_INLINE Token::Value Scanner::SkipWhiteSpace() {
return Token::WHITESPACE;
}
+V8_INLINE Token::Value Scanner::ScanSingleToken() {
+ Token::Value token;
+ do {
+ next().location.beg_pos = source_pos();
+
+ if (static_cast<unsigned>(c0_) <= 0x7F) {
+ Token::Value token = one_char_tokens[c0_];
+ if (token != Token::ILLEGAL) {
+ Advance();
+ return token;
+ }
+ }
+
+ switch (c0_) {
+ case '"':
+ case '\'':
+ return ScanString();
+
+ case '<':
+ // < <= << <<= <!--
+ Advance();
+ if (c0_ == '=') return Select(Token::LTE);
+ if (c0_ == '<') return Select('=', Token::ASSIGN_SHL, Token::SHL);
+ if (c0_ == '!') {
+ token = ScanHtmlComment();
+ continue;
+ }
+ return Token::LT;
+
+ case '>':
+ // > >= >> >>= >>> >>>=
+ Advance();
+ if (c0_ == '=') return Select(Token::GTE);
+ if (c0_ == '>') {
+ // >> >>= >>> >>>=
+ Advance();
+ if (c0_ == '=') return Select(Token::ASSIGN_SAR);
+ if (c0_ == '>') return Select('=', Token::ASSIGN_SHR, Token::SHR);
+ return Token::SAR;
+ }
+ return Token::GT;
+
+ case '=':
+ // = == === =>
+ Advance();
+ if (c0_ == '=') return Select('=', Token::EQ_STRICT, Token::EQ);
+ if (c0_ == '>') return Select(Token::ARROW);
+ return Token::ASSIGN;
+
+ case '!':
+ // ! != !==
+ Advance();
+ if (c0_ == '=') return Select('=', Token::NE_STRICT, Token::NE);
+ return Token::NOT;
+
+ case '+':
+ // + ++ +=
+ Advance();
+ if (c0_ == '+') return Select(Token::INC);
+ if (c0_ == '=') return Select(Token::ASSIGN_ADD);
+ return Token::ADD;
+
+ case '-':
+ // - -- --> -=
+ Advance();
+ if (c0_ == '-') {
+ Advance();
+ if (c0_ == '>' && next().after_line_terminator) {
+ // For compatibility with SpiderMonkey, we skip lines that
+ // start with an HTML comment end '-->'.
+ token = SkipSingleHTMLComment();
+ continue;
+ }
+ return Token::DEC;
+ }
+ if (c0_ == '=') return Select(Token::ASSIGN_SUB);
+ return Token::SUB;
+
+ case '*':
+ // * *=
+ Advance();
+ if (c0_ == '*') return Select('=', Token::ASSIGN_EXP, Token::EXP);
+ if (c0_ == '=') return Select(Token::ASSIGN_MUL);
+ return Token::MUL;
+
+ case '%':
+ // % %=
+ return Select('=', Token::ASSIGN_MOD, Token::MOD);
+
+ case '/':
+ // / // /* /=
+ Advance();
+ if (c0_ == '/') {
+ uc32 c = Peek();
+ if (c == '#' || c == '@') {
+ Advance();
+ Advance();
+ token = SkipSourceURLComment();
+ continue;
+ }
+ token = SkipSingleLineComment();
+ continue;
+ }
+ if (c0_ == '*') {
+ token = SkipMultiLineComment();
+ continue;
+ }
+ if (c0_ == '=') return Select(Token::ASSIGN_DIV);
+ return Token::DIV;
+
+ case '&':
+ // & && &=
+ Advance();
+ if (c0_ == '&') return Select(Token::AND);
+ if (c0_ == '=') return Select(Token::ASSIGN_BIT_AND);
+ return Token::BIT_AND;
+
+ case '|':
+ // | || |=
+ Advance();
+ if (c0_ == '|') return Select(Token::OR);
+ if (c0_ == '=') return Select(Token::ASSIGN_BIT_OR);
+ return Token::BIT_OR;
+
+ case '^':
+ // ^ ^=
+ return Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
+
+ case '.':
+ // . Number
+ Advance();
+ if (IsDecimalDigit(c0_)) return ScanNumber(true);
+ if (c0_ == '.') {
+ if (Peek() == '.') {
+ Advance();
+ Advance();
+ return Token::ELLIPSIS;
+ }
+ }
+ return Token::PERIOD;
+
+ case '`':
+ Advance();
+ return ScanTemplateSpan();
+
+ case '#':
+ return ScanPrivateName();
+
+ default:
+ if (unicode_cache_->IsIdentifierStart(c0_) ||
+ (CombineSurrogatePair() &&
+ unicode_cache_->IsIdentifierStart(c0_))) {
+ Token::Value token = ScanIdentifierOrKeyword();
+ if (!Token::IsContextualKeyword(token)) return token;
+
+ next().contextual_token = token;
+ return Token::IDENTIFIER;
+ }
+ if (IsDecimalDigit(c0_)) return ScanNumber(false);
+ if (c0_ == kEndOfInput) return Token::EOS;
+ token = SkipWhiteSpace();
+ continue;
+ }
+ // Continue scanning for tokens as long as we're just skipping whitespace.
+ } while (token == Token::WHITESPACE);
+
+ return token;
+}
+
+void Scanner::Scan() {
+ next().literal_chars.Drop();
+ next().raw_literal_chars.Drop();
+ next().contextual_token = Token::UNINITIALIZED;
+ next().invalid_template_escape_message = MessageTemplate::kNone;
+
+ next().token = ScanSingleToken();
+ next().location.end_pos = source_pos();
+
+#ifdef DEBUG
+ SanityCheckTokenDesc(current());
+ SanityCheckTokenDesc(next());
+ SanityCheckTokenDesc(next_next());
+#endif
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 781832c2e6..525b1bc681 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -11,7 +11,6 @@
#include <cmath>
#include "src/ast/ast-value-factory.h"
-#include "src/char-predicates-inl.h"
#include "src/conversions-inl.h"
#include "src/objects/bigint.h"
#include "src/parsing/duplicate-finder.h" // For Scanner::FindSymbol
@@ -134,7 +133,6 @@ const size_t Scanner::BookmarkScope::kBookmarkWasApplied =
void Scanner::BookmarkScope::Set() {
DCHECK_EQ(bookmark_, kNoBookmark);
- DCHECK_EQ(scanner_->next_next().token, Token::UNINITIALIZED);
// The first token is a bit special, since current_ will still be
// uninitialized. In this case, store kBookmarkAtFirstPos and special-case it
@@ -160,11 +158,11 @@ void Scanner::BookmarkScope::Apply() {
bookmark_ = kBookmarkWasApplied;
}
-bool Scanner::BookmarkScope::HasBeenSet() {
+bool Scanner::BookmarkScope::HasBeenSet() const {
return bookmark_ != kNoBookmark && bookmark_ != kBookmarkWasApplied;
}
-bool Scanner::BookmarkScope::HasBeenApplied() {
+bool Scanner::BookmarkScope::HasBeenApplied() const {
return bookmark_ == kBookmarkWasApplied;
}
@@ -175,12 +173,11 @@ Scanner::Scanner(UnicodeCache* unicode_cache, Utf16CharacterStream* source,
bool is_module)
: unicode_cache_(unicode_cache),
source_(source),
- octal_pos_(Location::invalid()),
- octal_message_(MessageTemplate::kNone),
found_html_comment_(false),
- allow_harmony_bigint_(false),
allow_harmony_numeric_separator_(false),
- is_module_(is_module) {
+ is_module_(is_module),
+ octal_pos_(Location::invalid()),
+ octal_message_(MessageTemplate::kNone) {
DCHECK_NOT_NULL(source);
}
@@ -234,146 +231,7 @@ uc32 Scanner::ScanUnlimitedLengthHexNumber(int max_value, int beg_pos) {
return x;
}
-
-// Ensure that tokens can be stored in a byte.
-STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
-
-// Table of one-character tokens, by character (0x00..0x7F only).
-// clang-format off
-static const byte one_char_tokens[] = {
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::LPAREN, // 0x28
- Token::RPAREN, // 0x29
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::COMMA, // 0x2C
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::COLON, // 0x3A
- Token::SEMICOLON, // 0x3B
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::CONDITIONAL, // 0x3F
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::LBRACK, // 0x5B
- Token::ILLEGAL,
- Token::RBRACK, // 0x5D
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::LBRACE, // 0x7B
- Token::ILLEGAL,
- Token::RBRACE, // 0x7D
- Token::BIT_NOT, // 0x7E
- Token::ILLEGAL
-};
-// clang-format on
-
Token::Value Scanner::Next() {
- if (next().token == Token::EOS) next().location = current().location;
// Rotate through tokens.
TokenDesc* previous = current_;
current_ = next_;
@@ -395,7 +253,6 @@ Token::Value Scanner::Next() {
return current().token;
}
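Next() cycles three TokenDesc slots (current, next, next_next) so one-token lookahead, plus the extra token PeekAhead() may have scanned, costs a few pointer swaps rather than copies. The rotation in isolation, assuming the scan step then fills whichever slot was just recycled:

    struct TokenDesc { int token = 0; };

    struct Lookahead {
      TokenDesc slots[3];
      TokenDesc* current = &slots[0];
      TokenDesc* next = &slots[1];
      TokenDesc* next_next = &slots[2];
      bool have_next_next = false;  // set when PeekAhead() scanned one ahead

      void Rotate() {
        TokenDesc* previous = current;
        current = next;
        if (have_next_next) {  // consume the pre-scanned token first
          next = next_next;
          next_next = previous;
          have_next_next = false;
        } else {
          next = previous;  // recycled slot; the scanner fills it next
        }
      }
    };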
-
Token::Value Scanner::PeekAhead() {
DCHECK(next().token != Token::DIV);
DCHECK(next().token != Token::ASSIGN_DIV);
@@ -534,250 +391,6 @@ Token::Value Scanner::ScanHtmlComment() {
return SkipSingleHTMLComment();
}
-void Scanner::Scan() {
- next().literal_chars.Drop();
- next().raw_literal_chars.Drop();
- next().invalid_template_escape_message = MessageTemplate::kNone;
-
- Token::Value token;
- do {
- if (static_cast<unsigned>(c0_) <= 0x7F) {
- Token::Value token = static_cast<Token::Value>(one_char_tokens[c0_]);
- if (token != Token::ILLEGAL) {
- int pos = source_pos();
- next().token = token;
- next().contextual_token = Token::UNINITIALIZED;
- next().location.beg_pos = pos;
- next().location.end_pos = pos + 1;
- Advance();
- return;
- }
- }
-
- // Remember the position of the next token
- next().location.beg_pos = source_pos();
-
- switch (c0_) {
- case '"':
- case '\'':
- token = ScanString();
- break;
-
- case '<':
- // < <= << <<= <!--
- Advance();
- if (c0_ == '=') {
- token = Select(Token::LTE);
- } else if (c0_ == '<') {
- token = Select('=', Token::ASSIGN_SHL, Token::SHL);
- } else if (c0_ == '!') {
- token = ScanHtmlComment();
- } else {
- token = Token::LT;
- }
- break;
-
- case '>':
- // > >= >> >>= >>> >>>=
- Advance();
- if (c0_ == '=') {
- token = Select(Token::GTE);
- } else if (c0_ == '>') {
- // >> >>= >>> >>>=
- Advance();
- if (c0_ == '=') {
- token = Select(Token::ASSIGN_SAR);
- } else if (c0_ == '>') {
- token = Select('=', Token::ASSIGN_SHR, Token::SHR);
- } else {
- token = Token::SAR;
- }
- } else {
- token = Token::GT;
- }
- break;
-
- case '=':
- // = == === =>
- Advance();
- if (c0_ == '=') {
- token = Select('=', Token::EQ_STRICT, Token::EQ);
- } else if (c0_ == '>') {
- token = Select(Token::ARROW);
- } else {
- token = Token::ASSIGN;
- }
- break;
-
- case '!':
- // ! != !==
- Advance();
- if (c0_ == '=') {
- token = Select('=', Token::NE_STRICT, Token::NE);
- } else {
- token = Token::NOT;
- }
- break;
-
- case '+':
- // + ++ +=
- Advance();
- if (c0_ == '+') {
- token = Select(Token::INC);
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_ADD);
- } else {
- token = Token::ADD;
- }
- break;
-
- case '-':
- // - -- --> -=
- Advance();
- if (c0_ == '-') {
- Advance();
- if (c0_ == '>' && HasLineTerminatorBeforeNext()) {
- // For compatibility with SpiderMonkey, we skip lines that
- // start with an HTML comment end '-->'.
- token = SkipSingleHTMLComment();
- } else {
- token = Token::DEC;
- }
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_SUB);
- } else {
- token = Token::SUB;
- }
- break;
-
- case '*':
- // * *=
- Advance();
- if (c0_ == '*') {
- token = Select('=', Token::ASSIGN_EXP, Token::EXP);
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_MUL);
- } else {
- token = Token::MUL;
- }
- break;
-
- case '%':
- // % %=
- token = Select('=', Token::ASSIGN_MOD, Token::MOD);
- break;
-
- case '/':
- // / // /* /=
- Advance();
- if (c0_ == '/') {
- uc32 c = Peek();
- if (c == '#' || c == '@') {
- Advance();
- Advance();
- token = SkipSourceURLComment();
- } else {
- token = SkipSingleLineComment();
- }
- } else if (c0_ == '*') {
- token = SkipMultiLineComment();
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_DIV);
- } else {
- token = Token::DIV;
- }
- break;
-
- case '&':
- // & && &=
- Advance();
- if (c0_ == '&') {
- token = Select(Token::AND);
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_BIT_AND);
- } else {
- token = Token::BIT_AND;
- }
- break;
-
- case '|':
- // | || |=
- Advance();
- if (c0_ == '|') {
- token = Select(Token::OR);
- } else if (c0_ == '=') {
- token = Select(Token::ASSIGN_BIT_OR);
- } else {
- token = Token::BIT_OR;
- }
- break;
-
- case '^':
- // ^ ^=
- token = Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
- break;
-
- case '.':
- // . Number
- Advance();
- if (IsDecimalDigit(c0_)) {
- token = ScanNumber(true);
- } else {
- token = Token::PERIOD;
- if (c0_ == '.') {
- if (Peek() == '.') {
- Advance();
- Advance();
- token = Token::ELLIPSIS;
- }
- }
- }
- break;
-
- case '`':
- token = ScanTemplateStart();
- break;
-
- case '#':
- token = ScanPrivateName();
- break;
-
- default:
- if (unicode_cache_->IsIdentifierStart(c0_) ||
- (CombineSurrogatePair() &&
- unicode_cache_->IsIdentifierStart(c0_))) {
- token = ScanIdentifierOrKeyword();
- } else if (IsDecimalDigit(c0_)) {
- token = ScanNumber(false);
- } else if (c0_ == kEndOfInput) {
- token = Token::EOS;
- } else {
- token = SkipWhiteSpace();
- if (token == Token::ILLEGAL) Advance();
- }
- break;
- }
-
- // Continue scanning for tokens as long as we're just skipping
- // whitespace.
- } while (token == Token::WHITESPACE);
-
- next().location.end_pos = source_pos();
- if (Token::IsContextualKeyword(token)) {
- next().token = Token::IDENTIFIER;
- next().contextual_token = token;
- } else {
- next().token = token;
- next().contextual_token = Token::UNINITIALIZED;
- }
-
-#ifdef DEBUG
- SanityCheckTokenDesc(current());
- SanityCheckTokenDesc(next());
- SanityCheckTokenDesc(next_next());
-#endif
-}
-
#ifdef DEBUG
void Scanner::SanityCheckTokenDesc(const TokenDesc& token) const {
// Most tokens should not have literal_chars or even raw_literal chars.
@@ -916,7 +529,7 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
// can be reported later (in strict mode).
// We don't report the error immediately, because the octal escape can
// occur before the "use strict" directive.
- if (c != '0' || i > 0 || c0_ == '8' || c0_ == '9') {
+ if (c != '0' || i > 0 || IsNonOctalDecimalDigit(c0_)) {
octal_pos_ = Location(source_pos() - i - 1, source_pos() - 1);
octal_message_ = capture_raw ? MessageTemplate::kTemplateOctalLiteral
: MessageTemplate::kStrictOctalEscape;
@@ -993,7 +606,7 @@ Token::Value Scanner::ScanTemplateSpan() {
Token::Value result = Token::TEMPLATE_SPAN;
LiteralScope literal(this);
- StartRawLiteral();
+ next().raw_literal_chars.Start();
const bool capture_raw = true;
while (true) {
uc32 c = c0_;
@@ -1053,14 +666,6 @@ Token::Value Scanner::ScanTemplateSpan() {
return result;
}
-Token::Value Scanner::ScanTemplateStart() {
- DCHECK_EQ(next_next().token, Token::UNINITIALIZED);
- DCHECK_EQ(c0_, '`');
- next().location.beg_pos = source_pos();
- Advance(); // Consume `
- return ScanTemplateSpan();
-}
-
Handle<String> Scanner::SourceUrl(Isolate* isolate) const {
Handle<String> tmp;
if (source_url_.length() > 0) {
@@ -1200,11 +805,11 @@ bool Scanner::ScanImplicitOctalDigits(int start_pos,
while (true) {
// (possible) octal number
- if (c0_ == '8' || c0_ == '9') {
+ if (IsNonOctalDecimalDigit(c0_)) {
*kind = DECIMAL_WITH_LEADING_ZERO;
return true;
}
- if (c0_ < '0' || '7' < c0_) {
+ if (!IsOctalDigit(c0_)) {
// Octal literal finished.
octal_pos_ = Location(start_pos, source_pos());
octal_message_ = MessageTemplate::kStrictOctalLiteral;
@@ -1272,7 +877,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
AddLiteralCharAdvance();
kind = BINARY;
if (!ScanBinaryDigits()) return Token::ILLEGAL;
- } else if ('0' <= c0_ && c0_ <= '7') {
+ } else if (IsOctalDigit(c0_)) {
kind = IMPLICIT_OCTAL;
if (!ScanImplicitOctalDigits(start_pos, &kind)) {
return Token::ILLEGAL;
@@ -1280,7 +885,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
if (kind == DECIMAL_WITH_LEADING_ZERO) {
at_start = false;
}
- } else if (c0_ == '8' || c0_ == '9') {
+ } else if (IsNonOctalDecimalDigit(c0_)) {
kind = DECIMAL_WITH_LEADING_ZERO;
} else if (allow_harmony_numeric_separator() && c0_ == '_') {
ReportScannerError(Location(source_pos(), source_pos() + 1),
@@ -1326,7 +931,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
}
bool is_bigint = false;
- if (allow_harmony_bigint() && c0_ == 'n' && !seen_period &&
+ if (c0_ == 'n' && !seen_period &&
(kind == DECIMAL || kind == HEX || kind == OCTAL || kind == BINARY)) {
// Check that the literal is within our limits for BigInt length.
// For simplicity, use 4 bits per character to calculate the maximum
@@ -1399,197 +1004,8 @@ uc32 Scanner::ScanUnicodeEscape() {
return ScanHexNumber<capture_raw, unicode>(4);
}
-
-// ----------------------------------------------------------------------------
-// Keyword Matcher
-
-#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
- KEYWORD_GROUP('a') \
- KEYWORD("arguments", Token::ARGUMENTS) \
- KEYWORD("as", Token::AS) \
- KEYWORD("async", Token::ASYNC) \
- KEYWORD("await", Token::AWAIT) \
- KEYWORD("anonymous", Token::ANONYMOUS) \
- KEYWORD_GROUP('b') \
- KEYWORD("break", Token::BREAK) \
- KEYWORD_GROUP('c') \
- KEYWORD("case", Token::CASE) \
- KEYWORD("catch", Token::CATCH) \
- KEYWORD("class", Token::CLASS) \
- KEYWORD("const", Token::CONST) \
- KEYWORD("constructor", Token::CONSTRUCTOR) \
- KEYWORD("continue", Token::CONTINUE) \
- KEYWORD_GROUP('d') \
- KEYWORD("debugger", Token::DEBUGGER) \
- KEYWORD("default", Token::DEFAULT) \
- KEYWORD("delete", Token::DELETE) \
- KEYWORD("do", Token::DO) \
- KEYWORD_GROUP('e') \
- KEYWORD("else", Token::ELSE) \
- KEYWORD("enum", Token::ENUM) \
- KEYWORD("eval", Token::EVAL) \
- KEYWORD("export", Token::EXPORT) \
- KEYWORD("extends", Token::EXTENDS) \
- KEYWORD_GROUP('f') \
- KEYWORD("false", Token::FALSE_LITERAL) \
- KEYWORD("finally", Token::FINALLY) \
- KEYWORD("for", Token::FOR) \
- KEYWORD("from", Token::FROM) \
- KEYWORD("function", Token::FUNCTION) \
- KEYWORD_GROUP('g') \
- KEYWORD("get", Token::GET) \
- KEYWORD_GROUP('i') \
- KEYWORD("if", Token::IF) \
- KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("import", Token::IMPORT) \
- KEYWORD("in", Token::IN) \
- KEYWORD("instanceof", Token::INSTANCEOF) \
- KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('l') \
- KEYWORD("let", Token::LET) \
- KEYWORD_GROUP('m') \
- KEYWORD("meta", Token::META) \
- KEYWORD_GROUP('n') \
- KEYWORD("name", Token::NAME) \
- KEYWORD("new", Token::NEW) \
- KEYWORD("null", Token::NULL_LITERAL) \
- KEYWORD_GROUP('o') \
- KEYWORD("of", Token::OF) \
- KEYWORD_GROUP('p') \
- KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("prototype", Token::PROTOTYPE) \
- KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('r') \
- KEYWORD("return", Token::RETURN) \
- KEYWORD_GROUP('s') \
- KEYWORD("set", Token::SET) \
- KEYWORD("static", Token::STATIC) \
- KEYWORD("super", Token::SUPER) \
- KEYWORD("switch", Token::SWITCH) \
- KEYWORD_GROUP('t') \
- KEYWORD("target", Token::TARGET) \
- KEYWORD("this", Token::THIS) \
- KEYWORD("throw", Token::THROW) \
- KEYWORD("true", Token::TRUE_LITERAL) \
- KEYWORD("try", Token::TRY) \
- KEYWORD("typeof", Token::TYPEOF) \
- KEYWORD_GROUP('u') \
- KEYWORD("undefined", Token::UNDEFINED) \
- KEYWORD_GROUP('v') \
- KEYWORD("var", Token::VAR) \
- KEYWORD("void", Token::VOID) \
- KEYWORD_GROUP('w') \
- KEYWORD("while", Token::WHILE) \
- KEYWORD("with", Token::WITH) \
- KEYWORD_GROUP('y') \
- KEYWORD("yield", Token::YIELD) \
- KEYWORD_GROUP('_') \
- KEYWORD("__proto__", Token::PROTO_UNDERSCORED) \
- KEYWORD_GROUP('#') \
- KEYWORD("#constructor", Token::PRIVATE_CONSTRUCTOR)
-
-static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
- int input_length) {
- DCHECK_GE(input_length, 1);
- const int kMinLength = 2;
- const int kMaxLength = 12;
- if (input_length < kMinLength || input_length > kMaxLength) {
- return Token::IDENTIFIER;
- }
- switch (input[0]) {
- default:
-#define KEYWORD_GROUP_CASE(ch) \
- break; \
- case ch:
-#define KEYWORD(keyword, token) \
- { \
- /* 'keyword' is a char array, so sizeof(keyword) is */ \
- /* strlen(keyword) plus 1 for the NUL char. */ \
- const int keyword_length = sizeof(keyword) - 1; \
- STATIC_ASSERT(keyword_length >= kMinLength); \
- STATIC_ASSERT(keyword_length <= kMaxLength); \
- DCHECK_EQ(input[0], keyword[0]); \
- DCHECK(token == Token::FUTURE_STRICT_RESERVED_WORD || \
- 0 == strncmp(keyword, Token::String(token), sizeof(keyword))); \
- if (input_length == keyword_length && input[1] == keyword[1] && \
- (keyword_length <= 2 || input[2] == keyword[2]) && \
- (keyword_length <= 3 || input[3] == keyword[3]) && \
- (keyword_length <= 4 || input[4] == keyword[4]) && \
- (keyword_length <= 5 || input[5] == keyword[5]) && \
- (keyword_length <= 6 || input[6] == keyword[6]) && \
- (keyword_length <= 7 || input[7] == keyword[7]) && \
- (keyword_length <= 8 || input[8] == keyword[8]) && \
- (keyword_length <= 9 || input[9] == keyword[9]) && \
- (keyword_length <= 10 || input[10] == keyword[10])) { \
- return token; \
- } \
- }
- KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
- }
- return Token::IDENTIFIER;
-#undef KEYWORDS
-#undef KEYWORD
-#undef KEYWORD_GROUP_CASE
-}
-
-Token::Value Scanner::ScanIdentifierOrKeyword() {
- LiteralScope literal(this);
- return ScanIdentifierOrKeywordInner(&literal);
-}
-
-Token::Value Scanner::ScanIdentifierOrKeywordInner(LiteralScope* literal) {
- DCHECK(unicode_cache_->IsIdentifierStart(c0_));
- bool escaped = false;
- if (IsInRange(c0_, 'a', 'z') || c0_ == '_') {
- do {
- AddLiteralChar(static_cast<char>(c0_));
- Advance();
- } while (IsInRange(c0_, 'a', 'z') || c0_ == '_');
-
- if (IsDecimalDigit(c0_) || IsInRange(c0_, 'A', 'Z') || c0_ == '$') {
- // Identifier starting with lowercase or _.
- do {
- AddLiteralChar(static_cast<char>(c0_));
- Advance();
- } while (IsAsciiIdentifier(c0_));
-
- if (c0_ <= kMaxAscii && c0_ != '\\') {
- literal->Complete();
- return Token::IDENTIFIER;
- }
- } else if (c0_ <= kMaxAscii && c0_ != '\\') {
- // Only a-z+ or _: could be a keyword or identifier.
- Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
- Token::Value token =
- KeywordOrIdentifierToken(chars.start(), chars.length());
- if (token == Token::IDENTIFIER ||
- token == Token::FUTURE_STRICT_RESERVED_WORD ||
- Token::IsContextualKeyword(token))
- literal->Complete();
- return token;
- }
- } else if (IsInRange(c0_, 'A', 'Z') || c0_ == '$') {
- do {
- AddLiteralChar(static_cast<char>(c0_));
- Advance();
- } while (IsAsciiIdentifier(c0_));
-
- if (c0_ <= kMaxAscii && c0_ != '\\') {
- literal->Complete();
- return Token::IDENTIFIER;
- }
- } else if (c0_ == '\\') {
- escaped = true;
- uc32 c = ScanIdentifierUnicodeEscape();
- DCHECK(!unicode_cache_->IsIdentifierStart(-1));
- if (c == '\\' || !unicode_cache_->IsIdentifierStart(c)) {
- return Token::ILLEGAL;
- }
- AddLiteralChar(c);
- }
-
+Token::Value Scanner::ScanIdentifierOrKeywordInnerSlow(LiteralScope* literal,
+ bool escaped) {
while (true) {
if (c0_ == '\\') {
escaped = true;
@@ -1645,18 +1061,12 @@ bool Scanner::ScanRegExpPattern() {
// Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
bool in_character_class = false;
- bool seen_equal = (next().token == Token::ASSIGN_DIV);
-
- // Previous token is either '/' or '/=', in the second case, the
- // pattern starts at =.
- next().location.beg_pos = source_pos() - (seen_equal ? 2 : 1);
- next().location.end_pos = source_pos() - (seen_equal ? 1 : 0);
// Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
// the scanner should pass uninterpreted bodies to the RegExp
// constructor.
LiteralScope literal(this);
- if (seen_equal) {
+ if (next().token == Token::ASSIGN_DIV) {
AddLiteralChar('=');
}
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index e592debd8e..83002b53c8 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -21,13 +21,13 @@
namespace v8 {
namespace internal {
-
class AstRawString;
class AstValueFactory;
class DuplicateFinder;
class ExternalOneByteString;
class ExternalTwoByteString;
class ParserRecorder;
+class RuntimeCallStats;
class UnicodeCache;
// ---------------------------------------------------------------------
@@ -38,7 +38,7 @@ class Utf16CharacterStream {
public:
static const uc32 kEndOfInput = -1;
- virtual ~Utf16CharacterStream() {}
+ virtual ~Utf16CharacterStream() = default;
inline uc32 Peek() {
if (V8_LIKELY(buffer_cursor_ < buffer_end_)) {
@@ -109,8 +109,21 @@ class Utf16CharacterStream {
}
}
+ // Returns true if the stream can be cloned with Clone.
+ // TODO(rmcilroy): Remove this once ChunkedStreams can be cloned.
+ virtual bool can_be_cloned() const = 0;
+
+ // Clones the character stream to enable another independent scanner to access
+ // the same underlying stream.
+ virtual std::unique_ptr<Utf16CharacterStream> Clone() const = 0;
+
// Returns true if the stream could access the V8 heap after construction.
- virtual bool can_access_heap() = 0;
+ virtual bool can_access_heap() const = 0;
+
+ RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
+ void set_runtime_call_stats(RuntimeCallStats* runtime_call_stats) {
+ runtime_call_stats_ = runtime_call_stats;
+ }
protected:
Utf16CharacterStream(const uint16_t* buffer_start,
@@ -172,6 +185,7 @@ class Utf16CharacterStream {
const uint16_t* buffer_cursor_;
const uint16_t* buffer_end_;
size_t buffer_pos_;
+ RuntimeCallStats* runtime_call_stats_;
};
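Two interface additions meet in this class: RuntimeCallStats is now carried by the stream (set after construction via the accessor above) instead of threaded through every constructor, and cloning is a first-class, capability-gated operation so a second scanner can read the same bytes independently. The new surface in miniature:

    #include <memory>

    class Stream {
     public:
      virtual ~Stream() = default;
      // Query before cloning; not every backing store supports it yet.
      virtual bool can_be_cloned() const = 0;
      virtual std::unique_ptr<Stream> Clone() const = 0;
    };

    class VectorBackedStream final : public Stream {
     public:
      bool can_be_cloned() const override { return true; }
      std::unique_ptr<Stream> Clone() const override {
        return std::unique_ptr<Stream>(new VectorBackedStream(*this));
      }
    };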
// ----------------------------------------------------------------------------
@@ -186,12 +200,12 @@ class Scanner {
: scanner_(scanner), bookmark_(kNoBookmark) {
DCHECK_NOT_NULL(scanner_);
}
- ~BookmarkScope() {}
+ ~BookmarkScope() = default;
void Set();
void Apply();
- bool HasBeenSet();
- bool HasBeenApplied();
+ bool HasBeenSet() const;
+ bool HasBeenApplied() const;
private:
static const size_t kNoBookmark;
@@ -233,19 +247,21 @@ class Scanner {
// Returns the token following peek()
Token::Value PeekAhead();
// Returns the current token again.
- Token::Value current_token() { return current().token; }
+ Token::Value current_token() const { return current().token; }
- Token::Value current_contextual_token() { return current().contextual_token; }
- Token::Value next_contextual_token() { return next().contextual_token; }
+ Token::Value current_contextual_token() const {
+ return current().contextual_token;
+ }
+ Token::Value next_contextual_token() const { return next().contextual_token; }
// Returns the location information for the current token
// (the token last returned by Next()).
- Location location() const { return current().location; }
+ const Location& location() const { return current().location; }
// This error is specifically an invalid hex or unicode escape sequence.
bool has_error() const { return scanner_error_ != MessageTemplate::kNone; }
MessageTemplate::Template error() const { return scanner_error_; }
- Location error_location() const { return scanner_error_location_; }
+ const Location& error_location() const { return scanner_error_location_; }
bool has_invalid_template_escape() const {
return current().invalid_template_escape_message != MessageTemplate::kNone;
@@ -264,13 +280,14 @@ class Scanner {
// One token look-ahead (past the token returned by Next()).
Token::Value peek() const { return next().token; }
- Location peek_location() const { return next().location; }
+ const Location& peek_location() const { return next().location; }
bool literal_contains_escapes() const {
return LiteralContainsEscapes(current());
}
const AstRawString* CurrentSymbol(AstValueFactory* ast_value_factory) const;
+
const AstRawString* NextSymbol(AstValueFactory* ast_value_factory) const;
const AstRawString* CurrentRawSymbol(
AstValueFactory* ast_value_factory) const;
@@ -286,7 +303,7 @@ class Scanner {
inline bool CurrentMatchesContextual(Token::Value token) const {
DCHECK(Token::IsContextualKeyword(token));
- return current().contextual_token == token;
+ return current_contextual_token() == token;
}
// Match the token against the contextual keyword or literal buffer.
@@ -297,7 +314,7 @@ class Scanner {
// (which was escape-processed already).
// Conveniently, !current().literal_chars.is_used() for all proper
// keywords, so this second condition should exit early in common cases.
- return (current().contextual_token == token) ||
+ return (current_contextual_token() == token) ||
(current().literal_chars.is_used() &&
current().literal_chars.Equals(Vector<const char>(
Token::String(token), Token::StringLength(token))));
@@ -308,11 +325,11 @@ class Scanner {
current().literal_chars.Equals(
Vector<const char>("use strict", strlen("use strict")));
}
- bool IsGetOrSet(bool* is_get, bool* is_set) const {
- *is_get = CurrentMatchesContextual(Token::GET);
- *is_set = CurrentMatchesContextual(Token::SET);
- return *is_get || *is_set;
- }
+
+ bool IsGet() { return CurrentMatchesContextual(Token::GET); }
+
+ bool IsSet() { return CurrentMatchesContextual(Token::SET); }
+
bool IsLet() const {
return CurrentMatches(Token::LET) ||
CurrentMatchesContextualEscaped(Token::LET);
@@ -324,7 +341,7 @@ class Scanner {
bool IsDuplicateSymbol(DuplicateFinder* duplicate_finder,
AstValueFactory* ast_value_factory) const;
- UnicodeCache* unicode_cache() { return unicode_cache_; }
+ UnicodeCache* unicode_cache() const { return unicode_cache_; }
// Returns the location of the last seen octal literal.
Location octal_position() const { return octal_pos_; }
@@ -362,10 +379,9 @@ class Scanner {
Maybe<RegExp::Flags> ScanRegExpFlags();
// Scans the input as a template literal
- Token::Value ScanTemplateStart();
Token::Value ScanTemplateContinuation() {
DCHECK_EQ(next().token, Token::RBRACE);
- next().location.beg_pos = source_pos() - 1; // We already consumed }
+ DCHECK_EQ(source_pos() - 1, next().location.beg_pos);
return ScanTemplateSpan();
}
@@ -374,8 +390,6 @@ class Scanner {
bool FoundHtmlComment() const { return found_html_comment_; }
- bool allow_harmony_bigint() const { return allow_harmony_bigint_; }
- void set_allow_harmony_bigint(bool allow) { allow_harmony_bigint_ = allow; }
bool allow_harmony_private_fields() const {
return allow_harmony_private_fields_;
}
@@ -389,34 +403,19 @@ class Scanner {
allow_harmony_numeric_separator_ = allow;
}
+ const Utf16CharacterStream* stream() const { return source_; }
+
private:
// Scoped helper for saving & restoring scanner error state.
// This is used for tagged template literals, in which normally forbidden
// escape sequences are allowed.
class ErrorState;
- // Scoped helper for literal recording. Automatically drops the literal
- // if aborting the scanning before it's complete.
- class LiteralScope {
- public:
- explicit LiteralScope(Scanner* self) : scanner_(self), complete_(false) {
- scanner_->StartLiteral();
- }
- ~LiteralScope() {
- if (!complete_) scanner_->DropLiteral();
- }
- void Complete() { complete_ = true; }
-
- private:
- Scanner* scanner_;
- bool complete_;
- };
-
// LiteralBuffer - Collector of chars of literals.
class LiteralBuffer {
public:
LiteralBuffer()
- : position_(0), is_one_byte_(true), is_used_(false), backing_store_() {}
+ : backing_store_(), position_(0), is_one_byte_(true), is_used_(false) {}
~LiteralBuffer() { backing_store_.Dispose(); }
@@ -506,14 +505,32 @@ class Scanner {
void ExpandBuffer();
void ConvertToTwoByte();
+ Vector<byte> backing_store_;
int position_;
bool is_one_byte_;
bool is_used_;
- Vector<byte> backing_store_;
DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
};
+ // Scoped helper for literal recording. Automatically drops the literal
+ // if aborting the scanning before it's complete.
+ class LiteralScope {
+ public:
+ explicit LiteralScope(Scanner* scanner)
+ : buffer_(&scanner->next().literal_chars), complete_(false) {
+ buffer_->Start();
+ }
+ ~LiteralScope() {
+ if (!complete_) buffer_->Drop();
+ }
+ void Complete() { complete_ = true; }
+
+ private:
+ LiteralBuffer* buffer_;
+ bool complete_;
+ };
+
// The current and look-ahead token.
struct TokenDesc {
Location location = {0, 0};
@@ -538,7 +555,7 @@ class Scanner {
};
static const int kCharacterLookaheadBufferSize = 1;
- const int kMaxAscii = 127;
+ static const int kMaxAscii = 127;
// Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
template <bool capture_raw>
@@ -574,11 +591,6 @@ class Scanner {
// Seek to the next_ token at the given position.
void SeekNext(size_t position);
- // Literal buffer support
- inline void StartLiteral() { next().literal_chars.Start(); }
-
- inline void StartRawLiteral() { next().raw_literal_chars.Start(); }
-
V8_INLINE void AddLiteralChar(uc32 c) { next().literal_chars.AddChar(c); }
V8_INLINE void AddLiteralChar(char c) { next().literal_chars.AddChar(c); }
@@ -587,14 +599,7 @@ class Scanner {
next().raw_literal_chars.AddChar(c);
}
- // Stops scanning of a literal and drop the collected characters,
- // e.g., due to an encountered error.
- inline void DropLiteral() {
- next().literal_chars.Drop();
- next().raw_literal_chars.Drop();
- }
-
- inline void AddLiteralCharAdvance() {
+ V8_INLINE void AddLiteralCharAdvance() {
AddLiteralChar(c0_);
Advance();
}
@@ -714,7 +719,8 @@ class Scanner {
uc32 ScanUnlimitedLengthHexNumber(int max_value, int beg_pos);
// Scans a single JavaScript token.
- void Scan();
+ V8_INLINE Token::Value ScanSingleToken();
+ V8_INLINE void Scan();
V8_INLINE Token::Value SkipWhiteSpace();
Token::Value SkipSingleHTMLComment();
@@ -738,8 +744,10 @@ class Scanner {
bool ScanImplicitOctalDigits(int start_pos, NumberKind* kind);
Token::Value ScanNumber(bool seen_period);
- Token::Value ScanIdentifierOrKeyword();
- Token::Value ScanIdentifierOrKeywordInner(LiteralScope* literal);
+ V8_INLINE Token::Value ScanIdentifierOrKeyword();
+ V8_INLINE Token::Value ScanIdentifierOrKeywordInner(LiteralScope* literal);
+ Token::Value ScanIdentifierOrKeywordInnerSlow(LiteralScope* literal,
+ bool escaped);
Token::Value ScanString();
Token::Value ScanPrivateName();
@@ -779,13 +787,7 @@ class Scanner {
void SanityCheckTokenDesc(const TokenDesc&) const;
#endif
- UnicodeCache* unicode_cache_;
-
- // Values parsed from magic comments.
- LiteralBuffer source_url_;
- LiteralBuffer source_mapping_url_;
-
- TokenDesc token_storage_[3];
+ UnicodeCache* const unicode_cache_;
TokenDesc& next() { return *next_; }
@@ -800,23 +802,28 @@ class Scanner {
// Input stream. Must be initialized to an Utf16CharacterStream.
Utf16CharacterStream* const source_;
- // Last-seen positions of potentially problematic tokens.
- Location octal_pos_;
- MessageTemplate::Template octal_message_;
-
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
+ TokenDesc token_storage_[3];
+
// Whether this scanner encountered an HTML comment.
bool found_html_comment_;
// Harmony flags to allow ESNext features.
- bool allow_harmony_bigint_;
bool allow_harmony_private_fields_;
bool allow_harmony_numeric_separator_;
const bool is_module_;
+ // Values parsed from magic comments.
+ LiteralBuffer source_url_;
+ LiteralBuffer source_mapping_url_;
+
+ // Last-seen positions of potentially problematic tokens.
+ Location octal_pos_;
+ MessageTemplate::Template octal_message_;
+
MessageTemplate::Template scanner_error_;
Location scanner_error_location_;
};
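
The LiteralScope rewrite above is a straightforward RAII improvement: the scope now caches the target LiteralBuffer directly instead of routing through Scanner::StartLiteral/DropLiteral, so an aborted scan automatically discards partial characters. A minimal standalone sketch of the same idea (the buffer type and names here are illustrative stand-ins, not V8's):

#include <iostream>
#include <string>

// Stand-in for Scanner::LiteralBuffer: collects the characters of one literal.
struct LiteralBuffer {
  std::string chars;
  void Start() { chars.clear(); }
  void Drop() { chars.clear(); }
};

// RAII helper: drops the collected literal unless Complete() was called,
// e.g. when scanning bails out on a malformed escape sequence.
class LiteralScope {
 public:
  explicit LiteralScope(LiteralBuffer* buffer) : buffer_(buffer) {
    buffer_->Start();
  }
  ~LiteralScope() {
    if (!complete_) buffer_->Drop();
  }
  void Complete() { complete_ = true; }

 private:
  LiteralBuffer* buffer_;
  bool complete_ = false;
};

int main() {
  LiteralBuffer buffer;
  {
    LiteralScope scope(&buffer);
    buffer.chars += "use strict";
    scope.Complete();  // literal survives the scope
  }
  std::cout << buffer.chars << "\n";  // prints: use strict
}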
diff --git a/deps/v8/src/parsing/token.cc b/deps/v8/src/parsing/token.cc
index 258c7b5d09..4cbf244a2b 100644
--- a/deps/v8/src/parsing/token.cc
+++ b/deps/v8/src/parsing/token.cc
@@ -29,7 +29,6 @@ const uint8_t Token::string_length_[NUM_TOKENS] = {TOKEN_LIST(T, T, T)};
const int8_t Token::precedence_[NUM_TOKENS] = {TOKEN_LIST(T, T, T)};
#undef T
-
#define KT(a, b, c) 'T',
#define KK(a, b, c) 'K',
#define KC(a, b, c) 'C',
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index 660f24361c..e1c6239e36 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -7,6 +7,7 @@
#include "src/base/logging.h"
#include "src/globals.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -32,6 +33,27 @@ namespace internal {
#define IGNORE_TOKEN(name, string, precedence)
+/* Binary operators sorted by precedence */
+#define BINARY_OP_TOKEN_LIST(T, E) \
+ E(T, BIT_OR, "|", 6) \
+ E(T, BIT_XOR, "^", 7) \
+ E(T, BIT_AND, "&", 8) \
+ E(T, SHL, "<<", 11) \
+ E(T, SAR, ">>", 11) \
+ E(T, SHR, ">>>", 11) \
+ E(T, ADD, "+", 12) \
+ E(T, SUB, "-", 12) \
+ E(T, MUL, "*", 13) \
+ E(T, DIV, "/", 13) \
+ E(T, MOD, "%", 13) \
+ E(T, EXP, "**", 14)
+
+#define EXPAND_BINOP_ASSIGN_TOKEN(T, name, string, precedence) \
+ T(ASSIGN_##name, string "=", 2)
+
+#define EXPAND_BINOP_TOKEN(T, name, string, precedence) \
+ T(name, string, precedence)
+
#define TOKEN_LIST(T, K, C) \
/* End of source indicator. */ \
T(EOS, "EOS", 0) \
@@ -57,18 +79,7 @@ namespace internal {
/* contiguous and sorted in the same order! */ \
T(INIT, "=init", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
- T(ASSIGN_BIT_OR, "|=", 2) \
- T(ASSIGN_BIT_XOR, "^=", 2) \
- T(ASSIGN_BIT_AND, "&=", 2) \
- T(ASSIGN_SHL, "<<=", 2) \
- T(ASSIGN_SAR, ">>=", 2) \
- T(ASSIGN_SHR, ">>>=", 2) \
- T(ASSIGN_ADD, "+=", 2) \
- T(ASSIGN_SUB, "-=", 2) \
- T(ASSIGN_MUL, "*=", 2) \
- T(ASSIGN_DIV, "/=", 2) \
- T(ASSIGN_MOD, "%=", 2) \
- T(ASSIGN_EXP, "**=", 2) \
+ BINARY_OP_TOKEN_LIST(T, EXPAND_BINOP_ASSIGN_TOKEN) \
\
/* Binary operators sorted by precedence. */ \
/* IsBinaryOp() relies on this block of enum values */ \
@@ -76,25 +87,14 @@ namespace internal {
T(COMMA, ",", 1) \
T(OR, "||", 4) \
T(AND, "&&", 5) \
- T(BIT_OR, "|", 6) \
- T(BIT_XOR, "^", 7) \
- T(BIT_AND, "&", 8) \
- T(SHL, "<<", 11) \
- T(SAR, ">>", 11) \
- T(SHR, ">>>", 11) \
- T(ADD, "+", 12) \
- T(SUB, "-", 12) \
- T(MUL, "*", 13) \
- T(DIV, "/", 13) \
- T(MOD, "%", 13) \
- T(EXP, "**", 14) \
+ BINARY_OP_TOKEN_LIST(T, EXPAND_BINOP_TOKEN) \
\
/* Compare operators sorted by precedence. */ \
/* IsCompareOp() relies on this block of enum values */ \
/* being contiguous and sorted in the same order! */ \
T(EQ, "==", 9) \
- T(NE, "!=", 9) \
T(EQ_STRICT, "===", 9) \
+ T(NE, "!=", 9) \
T(NE_STRICT, "!==", 9) \
T(LT, "<", 10) \
T(GT, ">", 10) \
@@ -131,7 +131,6 @@ namespace internal {
K(NEW, "new", 0) \
K(RETURN, "return", 0) \
K(SWITCH, "switch", 0) \
- K(THIS, "this", 0) \
K(THROW, "throw", 0) \
K(TRY, "try", 0) \
/* TYPEOF */ \
@@ -139,6 +138,7 @@ namespace internal {
/* VOID */ \
K(WHILE, "while", 0) \
K(WITH, "with", 0) \
+ K(THIS, "this", 0) \
\
/* Literals (ECMA-262, section 7.8, page 16). */ \
K(NULL_LITERAL, "null", 0) \
@@ -146,33 +146,34 @@ namespace internal {
K(FALSE_LITERAL, "false", 0) \
T(NUMBER, nullptr, 0) \
T(SMI, nullptr, 0) \
- T(STRING, nullptr, 0) \
T(BIGINT, nullptr, 0) \
+ T(STRING, nullptr, 0) \
\
+ /* BEGIN AnyIdentifier */ \
/* Identifiers (not keywords or future reserved words). */ \
T(IDENTIFIER, nullptr, 0) \
- T(PRIVATE_NAME, nullptr, 0) \
- \
- /* Future reserved words (ECMA-262, section 7.6.1.2). */ \
- T(FUTURE_STRICT_RESERVED_WORD, nullptr, 0) \
K(ASYNC, "async", 0) \
/* `await` is a reserved word in module code only */ \
K(AWAIT, "await", 0) \
+ K(YIELD, "yield", 0) \
+ K(LET, "let", 0) \
+ K(STATIC, "static", 0) \
+ /* Future reserved words (ECMA-262, section 7.6.1.2). */ \
+ T(FUTURE_STRICT_RESERVED_WORD, nullptr, 0) \
+ T(ESCAPED_STRICT_RESERVED_WORD, nullptr, 0) \
+ K(ENUM, "enum", 0) \
+ /* END AnyIdentifier */ \
K(CLASS, "class", 0) \
K(CONST, "const", 0) \
- K(ENUM, "enum", 0) \
K(EXPORT, "export", 0) \
K(EXTENDS, "extends", 0) \
K(IMPORT, "import", 0) \
- K(LET, "let", 0) \
- K(STATIC, "static", 0) \
- K(YIELD, "yield", 0) \
K(SUPER, "super", 0) \
+ T(PRIVATE_NAME, nullptr, 0) \
\
/* Illegal token - not able to scan. */ \
T(ILLEGAL, "ILLEGAL", 0) \
T(ESCAPED_KEYWORD, nullptr, 0) \
- T(ESCAPED_STRICT_RESERVED_WORD, nullptr, 0) \
\
/* Scanner-internal use only. */ \
T(WHITESPACE, nullptr, 0) \
@@ -205,127 +206,103 @@ class Token {
public:
// All token values.
#define T(name, string, precedence) name,
- enum Value { TOKEN_LIST(T, T, T) NUM_TOKENS };
+ enum Value : uint8_t { TOKEN_LIST(T, T, T) NUM_TOKENS };
#undef T
// Returns a string corresponding to the C++ token name
// (e.g. "LT" for the token LT).
- static const char* Name(Value tok) {
- DCHECK(tok < NUM_TOKENS); // tok is unsigned
- return name_[tok];
+ static const char* Name(Value token) {
+ DCHECK_GT(NUM_TOKENS, token); // token is unsigned
+ return name_[token];
}
+ static char TypeForTesting(Value token) { return token_type[token]; }
+
// Predicates
- static bool IsKeyword(Value tok) {
- return token_type[tok] == 'K';
+ static bool IsKeyword(Value token) { return token_type[token] == 'K'; }
+ static bool IsContextualKeyword(Value token) {
+ return IsInRange(token, GET, ANONYMOUS);
}
- static bool IsContextualKeyword(Value tok) { return token_type[tok] == 'C'; }
- static bool IsIdentifier(Value tok, LanguageMode language_mode,
+ static bool IsIdentifier(Value token, LanguageMode language_mode,
bool is_generator, bool disallow_await) {
- switch (tok) {
- case IDENTIFIER:
- case ASYNC:
- return true;
- case ESCAPED_STRICT_RESERVED_WORD:
- case FUTURE_STRICT_RESERVED_WORD:
- case LET:
- case STATIC:
- return is_sloppy(language_mode);
- case YIELD:
- return !is_generator && is_sloppy(language_mode);
- case AWAIT:
- return !disallow_await;
- default:
- return false;
+ if (IsInRange(token, IDENTIFIER, ASYNC)) return true;
+ if (IsInRange(token, LET, ESCAPED_STRICT_RESERVED_WORD)) {
+ return is_sloppy(language_mode);
}
- UNREACHABLE();
+ if (token == AWAIT) return !disallow_await;
+ if (token == YIELD) return !is_generator && is_sloppy(language_mode);
+ return false;
}
- static bool IsAssignmentOp(Value tok) {
- return INIT <= tok && tok <= ASSIGN_EXP;
+ static bool IsAnyIdentifier(Value token) {
+ return IsInRange(token, IDENTIFIER, ENUM);
}
- static bool IsBinaryOp(Value op) { return COMMA <= op && op <= EXP; }
+ static bool IsStrictReservedWord(Value token) {
+ return IsInRange(token, LET, ESCAPED_STRICT_RESERVED_WORD);
+ }
- static bool IsCompareOp(Value op) {
- return EQ <= op && op <= IN;
+ static bool IsLiteral(Value token) {
+ return IsInRange(token, NULL_LITERAL, STRING);
}
- static bool IsOrderedRelationalCompareOp(Value op) {
- return op == LT || op == LTE || op == GT || op == GTE;
+ static bool IsAssignmentOp(Value token) {
+ return IsInRange(token, INIT, ASSIGN_EXP);
}
+ static bool IsGetOrSet(Value op) { return IsInRange(op, GET, SET); }
+
+ static bool IsBinaryOp(Value op) { return IsInRange(op, COMMA, EXP); }
- static bool IsEqualityOp(Value op) {
- return op == EQ || op == EQ_STRICT;
+ static bool IsCompareOp(Value op) { return IsInRange(op, EQ, IN); }
+
+ static bool IsOrderedRelationalCompareOp(Value op) {
+ return IsInRange(op, LT, GTE);
}
+ static bool IsEqualityOp(Value op) { return IsInRange(op, EQ, EQ_STRICT); }
+
static Value BinaryOpForAssignment(Value op) {
- DCHECK(IsAssignmentOp(op));
- switch (op) {
- case Token::ASSIGN_BIT_OR:
- return Token::BIT_OR;
- case Token::ASSIGN_BIT_XOR:
- return Token::BIT_XOR;
- case Token::ASSIGN_BIT_AND:
- return Token::BIT_AND;
- case Token::ASSIGN_SHL:
- return Token::SHL;
- case Token::ASSIGN_SAR:
- return Token::SAR;
- case Token::ASSIGN_SHR:
- return Token::SHR;
- case Token::ASSIGN_ADD:
- return Token::ADD;
- case Token::ASSIGN_SUB:
- return Token::SUB;
- case Token::ASSIGN_MUL:
- return Token::MUL;
- case Token::ASSIGN_DIV:
- return Token::DIV;
- case Token::ASSIGN_MOD:
- return Token::MOD;
- case Token::ASSIGN_EXP:
- return Token::EXP;
- default:
- UNREACHABLE();
- }
+ DCHECK(IsInRange(op, ASSIGN_BIT_OR, ASSIGN_EXP));
+ Value result = static_cast<Value>(op - ASSIGN_BIT_OR + BIT_OR);
+ DCHECK(IsBinaryOp(result));
+ return result;
}
static bool IsBitOp(Value op) {
- return (BIT_OR <= op && op <= SHR) || op == BIT_NOT;
+ return IsInRange(op, BIT_OR, SHR) || op == BIT_NOT;
}
static bool IsUnaryOp(Value op) {
- return (NOT <= op && op <= VOID) || op == ADD || op == SUB;
+ return IsInRange(op, NOT, VOID) || IsInRange(op, ADD, SUB);
}
- static bool IsCountOp(Value op) {
- return op == INC || op == DEC;
- }
+ static bool IsCountOp(Value op) { return IsInRange(op, INC, DEC); }
+
+ static bool IsShiftOp(Value op) { return IsInRange(op, SHL, SHR); }
- static bool IsShiftOp(Value op) {
- return (SHL <= op) && (op <= SHR);
+ static bool IsTrivialExpressionToken(Value op) {
+ return IsInRange(op, THIS, IDENTIFIER);
}
// Returns a string corresponding to the JS token string
  // (i.e., "<" for the token LT) or nullptr if the token doesn't
// have a (unique) string (e.g. an IDENTIFIER).
- static const char* String(Value tok) {
- DCHECK(tok < NUM_TOKENS); // tok is unsigned.
- return string_[tok];
+ static const char* String(Value token) {
+ DCHECK_GT(NUM_TOKENS, token); // token is unsigned
+ return string_[token];
}
- static uint8_t StringLength(Value tok) {
- DCHECK(tok < NUM_TOKENS);
- return string_length_[tok];
+ static uint8_t StringLength(Value token) {
+ DCHECK_GT(NUM_TOKENS, token); // token is unsigned
+ return string_length_[token];
}
// Returns the precedence > 0 for binary and compare
// operators; returns 0 otherwise.
- static int Precedence(Value tok) {
- DCHECK(tok < NUM_TOKENS); // tok is unsigned.
- return precedence_[tok];
+ static int Precedence(Value token) {
+ DCHECK_GT(NUM_TOKENS, token); // token is unsigned
+ return precedence_[token];
}
private:
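
The token.h rewrite trades per-token switch statements for range checks over a carefully ordered enum: BINARY_OP_TOKEN_LIST emits the ASSIGN_* tokens and the plain binary operators in the same relative order, so IsInRange reduces each predicate to one unsigned comparison and BinaryOpForAssignment becomes plain offset arithmetic. A hedged sketch of the mechanism with a toy enum (enumerators are illustrative, not V8's full token list):

#include <cassert>
#include <cstdint>

enum Token : uint8_t {
  ASSIGN_BIT_OR, ASSIGN_BIT_XOR, ASSIGN_BIT_AND, ASSIGN_ADD,  // "op=" forms
  BIT_OR, BIT_XOR, BIT_AND, ADD,  // plain forms, same relative order
  NUM_TOKENS
};

// One unsigned comparison covers a contiguous range: (v - lo) wraps around
// for v < lo, so a single <= check suffices.
constexpr bool IsInRange(Token v, Token lo, Token hi) {
  return static_cast<unsigned>(v) - static_cast<unsigned>(lo) <=
         static_cast<unsigned>(hi) - static_cast<unsigned>(lo);
}

constexpr bool IsAssignmentOp(Token t) {
  return IsInRange(t, ASSIGN_BIT_OR, ASSIGN_ADD);
}

// Because both blocks share one ordering, the mapping is pure arithmetic.
constexpr Token BinaryOpForAssignment(Token t) {
  return static_cast<Token>(t - ASSIGN_BIT_OR + BIT_OR);
}

int main() {
  static_assert(BinaryOpForAssignment(ASSIGN_ADD) == ADD, "ordering holds");
  assert(IsAssignmentOp(ASSIGN_BIT_XOR) && !IsAssignmentOp(BIT_XOR));
}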
diff --git a/deps/v8/src/pending-compilation-error-handler.h b/deps/v8/src/pending-compilation-error-handler.h
index b828fff17c..f18a8369e4 100644
--- a/deps/v8/src/pending-compilation-error-handler.h
+++ b/deps/v8/src/pending-compilation-error-handler.h
@@ -62,6 +62,12 @@ class PendingCompilationErrorHandler {
Handle<String> FormatErrorMessageForTest(Isolate* isolate) const;
+ bool SetUnidentifiableError() { return unidentifiable_error_ = true; }
+
+ bool ResetUnidentifiableError() { return unidentifiable_error_ = false; }
+
+ bool ErrorUnidentifiableByPreParser() { return unidentifiable_error_; }
+
private:
class MessageDetails {
public:
@@ -97,6 +103,7 @@ class PendingCompilationErrorHandler {
bool has_pending_error_;
bool stack_overflow_;
+ bool unidentifiable_error_ = false;
MessageDetails error_details_;
ParseErrorType error_type_;
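
The new unidentifiable_error_ bit lets the preparser signal "something failed, but I cannot produce a precise message," so a caller can fall back to a slower path with proper diagnostics. The surrounding control flow below is a guess at a call site, not V8's actual code:

#include <iostream>

// Minimal stand-in for PendingCompilationErrorHandler's new flag.
class PendingErrors {
 public:
  bool SetUnidentifiableError() { return unidentifiable_error_ = true; }
  bool ResetUnidentifiableError() { return unidentifiable_error_ = false; }
  bool ErrorUnidentifiableByPreParser() const { return unidentifiable_error_; }

 private:
  bool unidentifiable_error_ = false;
};

int main() {
  PendingErrors errors;
  errors.SetUnidentifiableError();  // preparser: "failed, no usable message"
  if (errors.ErrorUnidentifiableByPreParser()) {
    errors.ResetUnidentifiableError();
    std::cout << "fall back to a full parse for precise diagnostics\n";
  }
}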
diff --git a/deps/v8/src/perf-jit.h b/deps/v8/src/perf-jit.h
index 3b11cf30c2..91f0dca10f 100644
--- a/deps/v8/src/perf-jit.h
+++ b/deps/v8/src/perf-jit.h
@@ -39,7 +39,7 @@ namespace internal {
class PerfJitLogger : public CodeEventLogger {
public:
explicit PerfJitLogger(Isolate* isolate);
- virtual ~PerfJitLogger();
+ ~PerfJitLogger() override;
void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
void CodeDisableOptEvent(AbstractCode* code,
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 24d9d2b8f3..5daa55604e 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -44,6 +44,7 @@
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc-inl.h"
+#include "src/string-constants.h"
namespace v8 {
namespace internal {
@@ -211,6 +212,13 @@ Operand Operand::EmbeddedCode(CodeStub* stub) {
return result;
}
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(str);
+ return result;
+}
+
MemOperand::MemOperand(Register rn, int32_t offset)
: ra_(rn), offset_(offset), rb_(no_reg) {}
@@ -218,22 +226,30 @@ MemOperand::MemOperand(Register ra, Register rb)
: ra_(ra), offset_(0), rb_(rb) {}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
- case HeapObjectRequest::kHeapNumber:
+ case HeapObjectRequest::kHeapNumber: {
object =
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
- case HeapObjectRequest::kCodeStub:
+ }
+ case HeapObjectRequest::kCodeStub: {
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
+ }
+ case HeapObjectRequest::kStringConstant: {
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ object = str->AllocateStringConstant(isolate);
+ break;
+ }
}
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
Address constant_pool = kNullAddress;
- set_target_address_at(pc, constant_pool,
- reinterpret_cast<Address>(object.location()),
+ set_target_address_at(pc, constant_pool, object.address(),
SKIP_ICACHE_FLUSH);
}
}
@@ -2070,13 +2086,7 @@ void Assembler::dp(uintptr_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- if (options().disable_reloc_info_for_patching) return;
- if (RelocInfo::IsNone(rmode) ||
- // Don't record external references unless the heap will be serialized.
- (RelocInfo::IsOnlyForSerializer(rmode) &&
- !options().record_reloc_info_for_serialization && !emit_debug_code())) {
- return;
- }
+ if (!ShouldRecordRelocInfo(rmode)) return;
DeferredRelocInfo rinfo(pc_offset(), rmode, data);
relocations_.push_back(rinfo);
}
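
EmbeddedStringConstant follows the assembler's existing two-phase pattern: while assembling (possibly without an Isolate), the operand only records a request plus the patch offset; AllocateAndInstallRequestedHeapObjects later materializes the object and rewrites the target address. A simplified model of that deferral (types and names are illustrative):

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Each request remembers what to allocate and where to patch it in.
struct HeapObjectRequest {
  std::string payload;  // e.g. the string constant's contents
  size_t offset;        // slot in the emitted code to patch
};

class MiniAssembler {
 public:
  void EmitStringConstant(std::string s) {
    requests_.push_back({std::move(s), code_.size()});
    code_.push_back(0);  // placeholder "address" slot
  }
  // Phase two: once allocation is possible, resolve every placeholder.
  void InstallRequestedObjects(
      const std::function<uintptr_t(const std::string&)>& allocate) {
    for (const auto& r : requests_) code_[r.offset] = allocate(r.payload);
  }

 private:
  std::vector<uintptr_t> code_;
  std::vector<HeapObjectRequest> requests_;
};

int main() {
  MiniAssembler masm;
  masm.EmitStringConstant("hello");
  masm.InstallRequestedObjects([](const std::string& s) {
    std::cout << "allocating \"" << s << "\" late\n";
    return uintptr_t{0xDEADBEEF};  // pretend heap address
  });
}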
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index b737320cbb..9f3ff0dc7e 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -217,7 +217,7 @@ const int kNumSafepointRegisters = 32;
// The following constants describe the stack frame linkage area as
// defined by the ABI. Note that kNumRequiredStackFrameSlots must
// satisfy alignment requirements (rounding up if required).
-#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
+#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN // ppc64le linux
// [0] back chain
// [1] condition register save area
// [2] link register save area
@@ -230,7 +230,7 @@ const int kNumSafepointRegisters = 32;
const int kNumRequiredStackFrameSlots = 12;
const int kStackFrameLRSlot = 2;
const int kStackFrameExtraParamSlot = 12;
-#elif V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#else // AIX
// [0] back chain
// [1] condition register save area
// [2] link register save area
@@ -242,21 +242,9 @@ const int kStackFrameExtraParamSlot = 12;
// [13] Parameter8 save area
// [14] Parameter9 slot (if necessary)
// ...
-#if V8_TARGET_ARCH_PPC64
const int kNumRequiredStackFrameSlots = 14;
-#else
-const int kNumRequiredStackFrameSlots = 16;
-#endif
const int kStackFrameLRSlot = 2;
const int kStackFrameExtraParamSlot = 14;
-#else
-// [0] back chain
-// [1] link register save area
-// [2] Parameter9 slot (if necessary)
-// ...
-const int kNumRequiredStackFrameSlots = 4;
-const int kStackFrameLRSlot = 1;
-const int kStackFrameExtraParamSlot = 2;
#endif
// Define the list of registers actually saved at safepoints.
@@ -373,7 +361,7 @@ C_REGISTERS(DECLARE_C_REGISTER)
// Machine instruction Operands
// Class Operand represents a shifter operand in data processing instructions
-class Operand BASE_EMBEDDED {
+class Operand {
public:
// immediate
V8_INLINE explicit Operand(intptr_t immediate,
@@ -394,6 +382,7 @@ class Operand BASE_EMBEDDED {
V8_INLINE explicit Operand(Register rm);
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedStringConstant(const StringConstantBase* str);
static Operand EmbeddedCode(CodeStub* stub);
// Return true if this is a register operand.
@@ -442,7 +431,7 @@ class Operand BASE_EMBEDDED {
// Class MemOperand represents a memory operand in load and store instructions
// On PowerPC we have base register + 16bit signed value
// Alternatively we can have a 16bit signed value immediate
-class MemOperand BASE_EMBEDDED {
+class MemOperand {
public:
explicit MemOperand(Register rn, int32_t offset = 0);
@@ -1632,8 +1621,7 @@ class Assembler : public AssemblerBase {
friend class EnsureSpace;
};
-
-class EnsureSpace BASE_EMBEDDED {
+class EnsureSpace {
public:
explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
};
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index cfa2709fd5..7e287b08b8 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -115,7 +115,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
IsolateAddressId::kPendingExceptionAddress, isolate())));
__ StoreP(r3, MemOperand(ip));
- __ LoadRoot(r3, Heap::kExceptionRootIndex);
+ __ LoadRoot(r3, RootIndex::kException);
__ b(&exit);
// Invoke: Link this frame into the handler chain.
@@ -439,7 +439,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ LeaveExitFrame(false, r14, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
- __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r14, RootIndex::kTheHoleValue);
__ Move(r15, ExternalReference::scheduled_exception_address(isolate));
__ LoadP(r15, MemOperand(r15));
__ cmp(r14, r15);
@@ -490,13 +490,13 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
// new target
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// call data
__ push(call_data);
Register scratch = call_data;
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
// return value
__ push(scratch);
// return value default
@@ -577,7 +577,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Push data from AccessorInfo.
__ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ push(scratch);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(isolate()));
__ Push(scratch, holder);
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 65963b9af6..b27890d1f5 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -7,7 +7,6 @@
#include <memory>
#include "src/codegen.h"
-#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/ppc/simulator-ppc.h"
@@ -16,16 +15,17 @@ namespace internal {
#define __ masm.
-UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
return nullptr;
#else
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
// Called from C
__ function_descriptor();
@@ -36,13 +36,14 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
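
CreateSqrtFunction now follows an isolate-free codegen lifecycle: take a writable page from the platform page allocator, assemble into it, then flip it to read+execute before handing out the function pointer. A sketch of the same W^X sequence using POSIX primitives as stand-ins for V8's PageAllocator and SetPermissions (illustrative, not the real implementation):

#include <sys/mman.h>
#include <cstdio>
#include <cstring>

int main() {
  const size_t size = 4096;
  // 1. Allocate a writable (not yet executable) page.
  void* buffer = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buffer == MAP_FAILED) return 1;

  // 2. "Assemble": fill in code bytes while the page is writable.
  //    (Real code would run a MacroAssembler over this buffer.)
  unsigned char nop = 0x90;  // x86 NOP, purely as sample bytes
  std::memset(buffer, nop, 16);

  // 3. Flip to read+execute; the page is never writable and executable
  //    at the same time -- the W^X discipline SetPermissions enforces.
  if (mprotect(buffer, size, PROT_READ | PROT_EXEC) != 0) return 1;

  std::printf("code page sealed at %p\n", buffer);
  munmap(buffer, size);
}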
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 0f2679008c..a6cecf7dc2 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -1201,13 +1201,15 @@ typedef uint32_t Instr;
/* Compare Logical */ \
V(cmpl, CMPL, 0x7C000040)
-#define PPC_X_OPCODE_EH_S_FORM_LIST(V) \
- /* Store Byte Conditional Indexed */ \
- V(stbcx, STBCX, 0x7C00056D) \
- /* Store Halfword Conditional Indexed Xform */ \
- V(sthcx, STHCX, 0x7C0005AD) \
- /* Store Word Conditional Indexed & record CR0 */ \
- V(stwcx, STWCX, 0x7C00012D)
+#define PPC_X_OPCODE_EH_S_FORM_LIST(V) \
+ /* Store Byte Conditional Indexed */ \
+ V(stbcx, STBCX, 0x7C00056D) \
+ /* Store Halfword Conditional Indexed Xform */ \
+ V(sthcx, STHCX, 0x7C0005AD) \
+ /* Store Word Conditional Indexed & record CR0 */ \
+ V(stwcx, STWCX, 0x7C00012D) \
+ /* Store Doubleword Conditional Indexed & record CR0 */ \
+ V(stdcx, STDCX, 0x7C0001AD)
#define PPC_X_OPCODE_EH_L_FORM_LIST(V) \
/* Load Byte And Reserve Indexed */ \
@@ -1215,15 +1217,15 @@ typedef uint32_t Instr;
/* Load Halfword And Reserve Indexed Xform */ \
V(lharx, LHARX, 0x7C0000E8) \
/* Load Word and Reserve Indexed */ \
- V(lwarx, LWARX, 0x7C000028)
+ V(lwarx, LWARX, 0x7C000028) \
+ /* Load Doubleword And Reserve Indexed */ \
+ V(ldarx, LDARX, 0x7C0000A8)
#define PPC_X_OPCODE_UNUSED_LIST(V) \
/* Bit Permute Doubleword */ \
V(bpermd, BPERMD, 0x7C0001F8) \
/* Extend Sign Word */ \
V(extsw, EXTSW, 0x7C0007B4) \
- /* Load Doubleword And Reserve Indexed */ \
- V(ldarx, LDARX, 0x7C0000A8) \
/* Load Word Algebraic with Update Indexed */ \
V(lwaux, LWAUX, 0x7C0002EA) \
/* Load Word Algebraic Indexed */ \
@@ -1232,8 +1234,6 @@ typedef uint32_t Instr;
V(prtyd, PRTYD, 0x7C000174) \
/* Store Doubleword Byte-Reverse Indexed */ \
V(stdbrx, STDBRX, 0x7C000528) \
- /* Store Doubleword Conditional Indexed & record CR0 */ \
- V(stdcx, STDCX, 0x7C0001AD) \
/* Trap Doubleword */ \
V(td, TD, 0x7C000088) \
/* Branch Conditional to Branch Target Address Register */ \
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 1b8a1139a3..ae56f3616d 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -665,6 +665,10 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "stwcx 'rs, 'ra, 'rb");
return;
}
+ case STDCX: {
+ Format(instr, "stdcx 'rs, 'ra, 'rb");
+ return;
+ }
}
// ?? are all of these xo_form?
@@ -898,6 +902,10 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "ldux 'rt, 'ra, 'rb");
return;
}
+ case LDARX: {
+ Format(instr, "ldarx 'rt, 'ra, 'rb");
+ return;
+ }
case STDX: {
Format(instr, "stdx 'rt, 'ra, 'rb");
return;
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 857ab7a883..505aaef93d 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -88,9 +88,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments (on the stack, not including receiver)
// r4 : the target to call
- // r5 : arguments list (FixedArray)
// r7 : arguments list length (untagged)
- Register registers[] = {r4, r3, r5, r7};
+ // r5 : arguments list (FixedArray)
+ Register registers[] = {r4, r3, r7, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -125,9 +125,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific(
// r3 : number of arguments (on the stack, not including receiver)
// r4 : the target to call
// r6 : the new target
- // r5 : arguments list (FixedArray)
// r7 : arguments list length (untagged)
- Register registers[] = {r4, r6, r3, r5, r7};
+ // r5 : arguments list (FixedArray)
+ Register registers[] = {r4, r6, r3, r7, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -194,7 +194,7 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r4, // JSFunction
@@ -238,10 +238,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // argument count (not including receiver)
- r6, // new target
+ r7, // address of the first argument
r4, // constructor to call
+ r6, // new target
r5, // allocation site feedback if available, undefined otherwise
- r7 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 9ae1e7139a..9565d04a4d 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -128,14 +128,14 @@ void TurboAssembler::Jump(Register target) {
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- Heap::kBuiltinsConstantsTableRootIndex));
+ RootIndex::kBuiltinsConstantsTable));
const uint32_t offset =
FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
- LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
LoadP(destination, MemOperand(destination, offset), r0);
}
@@ -395,7 +395,7 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
addi(location, location, Operand(stack_offset));
}
-void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
DCHECK(cond == al);
LoadP(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), r0);
@@ -483,8 +483,6 @@ void TurboAssembler::CallRecordWriteStub(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register isolate_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
@@ -496,7 +494,6 @@ void TurboAssembler::CallRecordWriteStub(
pop(slot_parameter);
pop(object_parameter);
- Move(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -1322,7 +1319,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
- LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ LoadRoot(r6, RootIndex::kUndefinedValue);
}
Label done;
@@ -1446,8 +1443,7 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
cmpi(type_reg, Operand(type));
}
-
-void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
DCHECK(obj != r0);
LoadRoot(r0, index);
cmp(obj, r0);
@@ -1894,7 +1890,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
if (emit_debug_code()) {
Label done_checking;
AssertNotSmi(object);
- CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ CompareRoot(object, RootIndex::kUndefinedValue);
beq(&done_checking);
LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
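
The Heap::RootListIndex to RootIndex migration also makes the access pattern explicit: roots live in a table addressed off the dedicated kRootRegister, so LoadRoot is a single load at a statically computed offset, as the LoadP in the hunk above shows. A toy model of that indexed lookup (enumerators and the flat table are illustrative):

#include <cstddef>
#include <iostream>

enum class RootIndex : int { kUndefinedValue, kTheHoleValue, kException };

// Mirrors RootRegisterOffset: each root occupies one pointer-sized slot.
constexpr ptrdiff_t RootRegisterOffset(RootIndex index) {
  return static_cast<int>(index) * static_cast<ptrdiff_t>(sizeof(void*));
}

int main() {
  // Fake "root table", the way kRootRegister points at the real one.
  const char* roots[] = {"undefined", "the_hole", "exception"};
  auto* base = reinterpret_cast<const char*>(roots);  // kRootRegister
  auto load_root = [&](RootIndex i) {
    return *reinterpret_cast<const char* const*>(base + RootRegisterOffset(i));
  };
  std::cout << load_root(RootIndex::kTheHoleValue) << "\n";  // the_hole
}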
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 364b60d037..897ac5553e 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -90,6 +90,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
+ TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
+ : TurboAssemblerBase(options, buffer, buffer_size) {}
+
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
@@ -321,11 +324,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register exclusion3 = no_reg);
// Load an object from the root table.
- void LoadRoot(Register destination, Heap::RootListIndex index) override {
+ void LoadRoot(Register destination, RootIndex index) override {
LoadRoot(destination, index, al);
}
- void LoadRoot(Register destination, Heap::RootListIndex index,
- Condition cond);
+ void LoadRoot(Register destination, RootIndex index, Condition cond);
void SwapP(Register src, Register dst, Register scratch);
void SwapP(Register src, MemOperand dst, Register scratch);
@@ -662,10 +664,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
+ MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
+ : TurboAssembler(options, buffer, size) {}
+
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
+
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
@@ -846,21 +852,20 @@ class MacroAssembler : public TurboAssembler {
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
- void CompareRoot(Register obj, Heap::RootListIndex index);
- void PushRoot(Heap::RootListIndex index) {
+ void CompareRoot(Register obj, RootIndex index);
+ void PushRoot(RootIndex index) {
LoadRoot(r0, index);
Push(r0);
}
// Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
CompareRoot(with, index);
beq(if_equal);
}
// Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(Register with, Heap::RootListIndex index,
- Label* if_not_equal) {
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
CompareRoot(with, index);
bne(if_not_equal);
}
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 0fd03df30c..900e03f6bb 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -870,6 +870,27 @@ void Simulator::TrashCallerSaveRegisters() {
#endif
}
+int Simulator::WriteExDW(intptr_t addr, uint64_t value, Instruction* instr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
+ global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ addr, &global_monitor_processor_)) {
+ uint64_t* ptr = reinterpret_cast<uint64_t*>(addr);
+ *ptr = value;
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+uint64_t Simulator::ReadExDWU(intptr_t addr, Instruction* instr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
+ global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+ &global_monitor_processor_);
+ uint64_t* ptr = reinterpret_cast<uint64_t*>(addr);
+ return *ptr;
+}
uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
// All supported PPC targets allow unaligned accesses, so we don't need to
@@ -2320,6 +2341,16 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
SetCR0(WriteExW(ra_val + rb_val, rs_val, instr));
break;
}
+ case STDCX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int64_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ SetCR0(WriteExDW(ra_val + rb_val, rs_val, instr));
+ break;
+ }
case TW: {
// used for call redirection in simulation mode
SoftwareInterrupt(instr);
@@ -3087,6 +3118,15 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
set_register(rt, ReadExWU(ra_val + rb_val, instr));
break;
}
+ case LDARX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadExDWU(ra_val + rb_val, instr));
+ break;
+ }
case DCBF: {
// todo - simulate dcbf
break;
@@ -3305,11 +3345,11 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
return;
}
case FSQRT: {
- lazily_initialize_fast_sqrt(isolate_);
+ lazily_initialize_fast_sqrt();
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
- double frt_val = fast_sqrt(frb_val, isolate_);
+ double frt_val = fast_sqrt(frb_val);
set_d_register_from_double(frt, frt_val);
return;
}
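
ldarx/stdcx. are PowerPC's load-reserve/store-conditional pair: the simulator's local and global monitors above model the reservation, and SetCR0 publishes whether the conditional store won. In real code the pair sits in a retry loop; a portable sketch of the same atomicity contract via compare-exchange, which is what such a loop compiles down to on PPC (the function name is illustrative):

#include <atomic>
#include <cstdint>
#include <iostream>

// What a PPC "retry: ldarx; add; stdcx.; bne- retry" loop achieves:
// reload on a lost reservation until the store-conditional succeeds.
uint64_t AtomicAddDW(std::atomic<uint64_t>* cell, uint64_t delta) {
  uint64_t old_value = cell->load(std::memory_order_relaxed);  // ldarx
  while (!cell->compare_exchange_weak(                         // stdcx.
      old_value, old_value + delta, std::memory_order_acq_rel)) {
    // compare_exchange_weak refreshed old_value; retry, like bne-.
  }
  return old_value;
}

int main() {
  std::atomic<uint64_t> counter{41};
  AtomicAddDW(&counter, 1);
  std::cout << counter.load() << "\n";  // 42
}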
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index 22f34015a6..7b26906c29 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -265,6 +265,8 @@ class Simulator : public SimulatorBase {
intptr_t* ReadDW(intptr_t addr);
void WriteDW(intptr_t addr, int64_t value);
+ inline int WriteExDW(intptr_t addr, uint64_t value, Instruction* instr);
+ inline uint64_t ReadExDWU(intptr_t addr, Instruction* instr);
void Trace(Instruction* instr);
void SetCR0(intptr_t result, bool setSO = false);
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index e5b3139785..51cb0eb47f 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -75,11 +75,6 @@ AllocationTraceTree::AllocationTraceTree()
root_(this, 0) {
}
-
-AllocationTraceTree::~AllocationTraceTree() {
-}
-
-
AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
const Vector<unsigned>& path) {
AllocationTraceNode* node = root();
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index cd9e120db2..bff9a62750 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -57,7 +57,7 @@ class AllocationTraceNode {
class AllocationTraceTree {
public:
AllocationTraceTree();
- ~AllocationTraceTree();
+ ~AllocationTraceTree() = default;
AllocationTraceNode* AddPathFromEnd(const Vector<unsigned>& path);
AllocationTraceNode* root() { return &root_; }
unsigned next_node_id() { return next_node_id_++; }
diff --git a/deps/v8/src/profiler/circular-queue-inl.h b/deps/v8/src/profiler/circular-queue-inl.h
index 413b236d37..855e217805 100644
--- a/deps/v8/src/profiler/circular-queue-inl.h
+++ b/deps/v8/src/profiler/circular-queue-inl.h
@@ -16,11 +16,8 @@ SamplingCircularQueue<T, L>::SamplingCircularQueue()
dequeue_pos_(buffer_) {
}
-
-template<typename T, unsigned L>
-SamplingCircularQueue<T, L>::~SamplingCircularQueue() {
-}
-
+template <typename T, unsigned L>
+SamplingCircularQueue<T, L>::~SamplingCircularQueue() = default;
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::Peek() {
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 78bb3b4a25..6e2acdfde7 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -106,7 +106,7 @@ class TickSampleEventRecord {
public:
// The parameterless constructor is used when we dequeue data from
// the ticks buffer.
- TickSampleEventRecord() { }
+ TickSampleEventRecord() = default;
explicit TickSampleEventRecord(unsigned order) : order(order) { }
unsigned order;
@@ -135,10 +135,10 @@ class ProfilerEventsProcessor : public base::Thread {
public:
ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
base::TimeDelta period);
- virtual ~ProfilerEventsProcessor();
+ ~ProfilerEventsProcessor() override;
// Thread control.
- virtual void Run();
+ void Run() override;
void StopSynchronously();
V8_INLINE bool running() { return !!base::Relaxed_Load(&running_); }
void Enqueue(const CodeEventsContainer& event);
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 3a1df29bd4..0978e76cff 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -23,14 +23,9 @@ HeapProfiler::~HeapProfiler() = default;
void HeapProfiler::DeleteAllSnapshots() {
snapshots_.clear();
- MaybeClearStringsStorage();
+ names_.reset(new StringsStorage());
}
-void HeapProfiler::MaybeClearStringsStorage() {
- if (snapshots_.empty() && !sampling_heap_profiler_ && !allocation_tracker_) {
- names_.reset(new StringsStorage());
- }
-}
void HeapProfiler::RemoveSnapshot(HeapSnapshot* snapshot) {
snapshots_.erase(
@@ -131,7 +126,6 @@ bool HeapProfiler::StartSamplingHeapProfiler(
void HeapProfiler::StopSamplingHeapProfiler() {
sampling_heap_profiler_.reset();
- MaybeClearStringsStorage();
}
@@ -165,7 +159,6 @@ void HeapProfiler::StopHeapObjectsTracking() {
ids_->StopHeapObjectsTracking();
if (allocation_tracker_) {
allocation_tracker_.reset();
- MaybeClearStringsStorage();
heap()->RemoveHeapObjectAllocationTracker(this);
}
}
@@ -236,10 +229,7 @@ void HeapProfiler::QueryObjects(Handle<Context> context,
PersistentValueVector<v8::Object>* objects) {
// We should return accurate information about live objects, so we need to
// collect all garbage first.
- heap()->CollectAllAvailableGarbage(
- GarbageCollectionReason::kLowMemoryNotification);
- heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- GarbageCollectionReason::kHeapProfiler);
+ heap()->CollectAllAvailableGarbage(GarbageCollectionReason::kHeapProfiler);
HeapIterator heap_iterator(heap());
HeapObject* heap_obj;
while ((heap_obj = heap_iterator.next()) != nullptr) {
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 099c0e24fa..acbdc6aa7a 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -27,7 +27,7 @@ class StringsStorage;
class HeapProfiler : public HeapObjectAllocationTracker {
public:
explicit HeapProfiler(Heap* heap);
- ~HeapProfiler();
+ ~HeapProfiler() override;
HeapSnapshot* TakeSnapshot(
v8::ActivityControl* control,
@@ -92,8 +92,6 @@ class HeapProfiler : public HeapObjectAllocationTracker {
v8::PersistentValueVector<v8::Object>* objects);
private:
- void MaybeClearStringsStorage();
-
Heap* heap() const;
// Mapping from HeapObject addresses to objects' uids.
diff --git a/deps/v8/src/profiler/heap-snapshot-generator-inl.h b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
index edf6559706..6ddb6d4658 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator-inl.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
@@ -13,51 +13,41 @@
namespace v8 {
namespace internal {
-
HeapEntry* HeapGraphEdge::from() const {
return &snapshot()->entries()[from_index()];
}
-
-Isolate* HeapGraphEdge::isolate() const {
- return snapshot()->profiler()->isolate();
-}
-
+Isolate* HeapGraphEdge::isolate() const { return to_entry_->isolate(); }
HeapSnapshot* HeapGraphEdge::snapshot() const {
return to_entry_->snapshot();
}
-
-int HeapEntry::index() const {
- return static_cast<int>(this - &snapshot_->entries().front());
-}
-
-
int HeapEntry::set_children_index(int index) {
- children_index_ = index;
+ // Note: children_count_ and children_end_index_ are parts of a union.
int next_index = index + children_count_;
- children_count_ = 0;
+ children_end_index_ = index;
return next_index;
}
void HeapEntry::add_child(HeapGraphEdge* edge) {
- *(children_begin() + children_count_++) = edge;
+ snapshot_->children()[children_end_index_++] = edge;
}
-HeapGraphEdge* HeapEntry::child(int i) { return *(children_begin() + i); }
+HeapGraphEdge* HeapEntry::child(int i) { return children_begin()[i]; }
+
+std::vector<HeapGraphEdge*>::iterator HeapEntry::children_begin() const {
+ return index_ == 0 ? snapshot_->children().begin()
+ : snapshot_->entries()[index_ - 1].children_end();
+}
-std::deque<HeapGraphEdge*>::iterator HeapEntry::children_begin() {
- DCHECK_GE(children_index_, 0);
- SLOW_DCHECK(
- children_index_ < static_cast<int>(snapshot_->children().size()) ||
- (children_index_ == static_cast<int>(snapshot_->children().size()) &&
- children_count_ == 0));
- return snapshot_->children().begin() + children_index_;
+std::vector<HeapGraphEdge*>::iterator HeapEntry::children_end() const {
+ DCHECK_GE(children_end_index_, 0);
+ return snapshot_->children().begin() + children_end_index_;
}
-std::deque<HeapGraphEdge*>::iterator HeapEntry::children_end() {
- return children_begin() + children_count_;
+int HeapEntry::children_count() const {
+ return static_cast<int>(children_end() - children_begin());
}
Isolate* HeapEntry::isolate() const { return snapshot_->profiler()->isolate(); }
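
The children accessors now exploit a flat layout: all edges live in one snapshot-wide vector, each entry stores only its exclusive end index, and entry i's children begin where entry i-1's end. That drops a per-entry begin index and count. A compact model of the layout (field and type names are simplified):

#include <cstdio>
#include <vector>

struct Entry {
  int children_end_index;  // exclusive end into the shared children vector
};

struct Snapshot {
  std::vector<Entry> entries;
  std::vector<int> children;  // edge targets, grouped by owning entry

  int children_begin(int i) const {
    return i == 0 ? 0 : entries[i - 1].children_end_index;
  }
  int children_count(int i) const {
    return entries[i].children_end_index - children_begin(i);
  }
};

int main() {
  // Entry 0 owns children [0,2), entry 1 owns [2,5).
  Snapshot s{{{2}, {5}}, {10, 11, 20, 21, 22}};
  std::printf("entry 1 has %d children, first is %d\n", s.children_count(1),
              s.children[s.children_begin(1)]);  // 3 children, first is 20
}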
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 5d98a98b8e..57f620f4ec 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -33,10 +33,11 @@
namespace v8 {
namespace internal {
-
-HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
- : bit_field_(TypeField::encode(type) | FromIndexField::encode(from)),
- to_index_(to),
+HeapGraphEdge::HeapGraphEdge(Type type, const char* name, HeapEntry* from,
+ HeapEntry* to)
+ : bit_field_(TypeField::encode(type) |
+ FromIndexField::encode(from->index())),
+ to_entry_(to),
name_(name) {
DCHECK(type == kContextVariable
|| type == kProperty
@@ -45,55 +46,53 @@ HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
|| type == kWeak);
}
-
-HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
- : bit_field_(TypeField::encode(type) | FromIndexField::encode(from)),
- to_index_(to),
+HeapGraphEdge::HeapGraphEdge(Type type, int index, HeapEntry* from,
+ HeapEntry* to)
+ : bit_field_(TypeField::encode(type) |
+ FromIndexField::encode(from->index())),
+ to_entry_(to),
index_(index) {
DCHECK(type == kElement || type == kHidden);
}
-
-void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) {
- to_entry_ = &snapshot->entries()[to_index_];
-}
-
-
-const int HeapEntry::kNoEntry = -1;
-
-HeapEntry::HeapEntry(HeapSnapshot* snapshot,
- Type type,
- const char* name,
- SnapshotObjectId id,
- size_t self_size,
+HeapEntry::HeapEntry(HeapSnapshot* snapshot, int index, Type type,
+ const char* name, SnapshotObjectId id, size_t self_size,
unsigned trace_node_id)
: type_(type),
+ index_(index),
children_count_(0),
- children_index_(-1),
self_size_(self_size),
snapshot_(snapshot),
name_(name),
id_(id),
- trace_node_id_(trace_node_id) { }
-
+ trace_node_id_(trace_node_id) {
+ DCHECK_GE(index, 0);
+}
void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
const char* name,
HeapEntry* entry) {
- HeapGraphEdge edge(type, name, this->index(), entry->index());
- snapshot_->edges().push_back(edge);
++children_count_;
+ snapshot_->edges().emplace_back(type, name, this, entry);
}
-
void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
int index,
HeapEntry* entry) {
- HeapGraphEdge edge(type, index, this->index(), entry->index());
- snapshot_->edges().push_back(edge);
++children_count_;
+ snapshot_->edges().emplace_back(type, index, this, entry);
}
+void HeapEntry::SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+ const char* description,
+ HeapEntry* child,
+ StringsStorage* names) {
+ int index = children_count_ + 1;
+ const char* name = description
+ ? names->GetFormatted("%d / %s", index, description)
+ : names->GetName(index);
+ SetNamedReference(type, name, child);
+}
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
@@ -154,7 +153,6 @@ void HeapEntry::Print(
}
}
-
const char* HeapEntry::TypeAsString() {
switch (type()) {
case kHidden: return "/hidden/";
@@ -176,34 +174,24 @@ const char* HeapEntry::TypeAsString() {
}
}
-
-HeapSnapshot::HeapSnapshot(HeapProfiler* profiler)
- : profiler_(profiler),
- root_index_(HeapEntry::kNoEntry),
- gc_roots_index_(HeapEntry::kNoEntry),
- max_snapshot_js_object_id_(0) {
+HeapSnapshot::HeapSnapshot(HeapProfiler* profiler) : profiler_(profiler) {
// It is very important to keep objects that form a heap snapshot
// as small as possible. Check assumptions about data structure sizes.
- STATIC_ASSERT(((kPointerSize == 4) && (sizeof(HeapGraphEdge) == 12)) ||
- ((kPointerSize == 8) && (sizeof(HeapGraphEdge) == 24)));
- STATIC_ASSERT(((kPointerSize == 4) && (sizeof(HeapEntry) == 28)) ||
- ((kPointerSize == 8) && (sizeof(HeapEntry) == 40)));
- for (int i = 0; i < static_cast<int>(Root::kNumberOfRoots); ++i) {
- gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
- }
+ STATIC_ASSERT((kPointerSize == 4 && sizeof(HeapGraphEdge) == 12) ||
+ (kPointerSize == 8 && sizeof(HeapGraphEdge) == 24));
+ STATIC_ASSERT((kPointerSize == 4 && sizeof(HeapEntry) == 28) ||
+ (kPointerSize == 8 && sizeof(HeapEntry) == 40));
+ memset(&gc_subroot_entries_, 0, sizeof(gc_subroot_entries_));
}
-
void HeapSnapshot::Delete() {
profiler_->RemoveSnapshot(this);
}
-
void HeapSnapshot::RememberLastJSObjectId() {
max_snapshot_js_object_id_ = profiler_->heap_object_map()->last_assigned_id();
}
-
void HeapSnapshot::AddSyntheticRootEntries() {
AddRootEntry();
AddGcRootsEntry();
@@ -215,42 +203,30 @@ void HeapSnapshot::AddSyntheticRootEntries() {
DCHECK_EQ(HeapObjectsMap::kFirstAvailableObjectId, id);
}
-
-HeapEntry* HeapSnapshot::AddRootEntry() {
- DCHECK_EQ(root_index_, HeapEntry::kNoEntry);
+void HeapSnapshot::AddRootEntry() {
+ DCHECK_NULL(root_entry_);
DCHECK(entries_.empty()); // Root entry must be the first one.
- HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
- "",
- HeapObjectsMap::kInternalRootObjectId,
- 0,
- 0);
- root_index_ = entry->index();
- DCHECK_EQ(root_index_, 0);
- return entry;
-}
-
-
-HeapEntry* HeapSnapshot::AddGcRootsEntry() {
- DCHECK_EQ(gc_roots_index_, HeapEntry::kNoEntry);
- HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
- "(GC roots)",
- HeapObjectsMap::kGcRootsObjectId,
- 0,
- 0);
- gc_roots_index_ = entry->index();
- return entry;
-}
-
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(Root root, SnapshotObjectId id) {
- DCHECK_EQ(gc_subroot_indexes_[static_cast<int>(root)], HeapEntry::kNoEntry);
- HeapEntry* entry =
+ root_entry_ = AddEntry(HeapEntry::kSynthetic, "",
+ HeapObjectsMap::kInternalRootObjectId, 0, 0);
+ DCHECK_EQ(1u, entries_.size());
+ DCHECK_EQ(root_entry_, &entries_.front());
+}
+
+void HeapSnapshot::AddGcRootsEntry() {
+ DCHECK_NULL(gc_roots_entry_);
+ gc_roots_entry_ = AddEntry(HeapEntry::kSynthetic, "(GC roots)",
+ HeapObjectsMap::kGcRootsObjectId, 0, 0);
+}
+
+void HeapSnapshot::AddGcSubrootEntry(Root root, SnapshotObjectId id) {
+ DCHECK_NULL(gc_subroot_entries_[static_cast<int>(root)]);
+ gc_subroot_entries_[static_cast<int>(root)] =
AddEntry(HeapEntry::kSynthetic, RootVisitor::RootName(root), id, 0, 0);
- gc_subroot_indexes_[static_cast<int>(root)] = entry->index();
- return entry;
}
-void HeapSnapshot::AddLocation(int entry, int scriptId, int line, int col) {
- locations_.emplace_back(entry, scriptId, line, col);
+void HeapSnapshot::AddLocation(HeapEntry* entry, int scriptId, int line,
+ int col) {
+ locations_.emplace_back(entry->index(), scriptId, line, col);
}
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
@@ -258,52 +234,35 @@ HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
SnapshotObjectId id,
size_t size,
unsigned trace_node_id) {
- DCHECK(sorted_entries_.empty());
- entries_.emplace_back(this, type, name, id, size, trace_node_id);
+ DCHECK(!is_complete());
+ entries_.emplace_back(this, static_cast<int>(entries_.size()), type, name, id,
+ size, trace_node_id);
return &entries_.back();
}
-
void HeapSnapshot::FillChildren() {
DCHECK(children().empty());
- children().resize(edges().size());
int children_index = 0;
for (HeapEntry& entry : entries()) {
children_index = entry.set_children_index(children_index);
}
DCHECK_EQ(edges().size(), static_cast<size_t>(children_index));
+ children().resize(edges().size());
for (HeapGraphEdge& edge : edges()) {
- edge.ReplaceToIndexWithEntry(this);
edge.from()->add_child(&edge);
}
}
HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
- std::vector<HeapEntry*>* entries_by_id = GetSortedEntriesList();
-
- auto it = std::lower_bound(
- entries_by_id->begin(), entries_by_id->end(), id,
- [](HeapEntry* first, SnapshotObjectId val) { return first->id() < val; });
-
- if (it == entries_by_id->end() || (*it)->id() != id) return nullptr;
- return *it;
-}
-
-struct SortByIds {
- bool operator()(const HeapEntry* entry1_ptr, const HeapEntry* entry2_ptr) {
- return entry1_ptr->id() < entry2_ptr->id();
- }
-};
-
-std::vector<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
- if (sorted_entries_.empty()) {
- sorted_entries_.reserve(entries_.size());
+ if (entries_by_id_cache_.empty()) {
+ CHECK(is_complete());
+ entries_by_id_cache_.reserve(entries_.size());
for (HeapEntry& entry : entries_) {
- sorted_entries_.push_back(&entry);
+ entries_by_id_cache_.emplace(entry.id(), &entry);
}
- std::sort(sorted_entries_.begin(), sorted_entries_.end(), SortByIds());
}
- return &sorted_entries_;
+ auto it = entries_by_id_cache_.find(id);
+ return it != entries_by_id_cache_.end() ? it->second : nullptr;
}
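// The lookup above replaces the old sorted-vector-plus-binary-search scheme
// with a lazily built hash map. A usage sketch (hypothetical id value):
//
//   HeapEntry* e = snapshot->GetEntryById(42);  // first call builds the map
//   if (e != nullptr) { /* later calls are plain O(1) lookups */ }
//
// CHECK(is_complete()) guards the laziness: AddEntry() DCHECKs
// !is_complete(), so no entry can appear after the cache is populated.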
void HeapSnapshot::Print(int max_depth) {
@@ -427,8 +386,8 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
PrintF("Begin HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
entries_map_.occupancy());
}
- heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- GarbageCollectionReason::kHeapProfiler);
+ heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
+ GarbageCollectionReason::kHeapProfiler);
HeapIterator iterator(heap_);
for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
@@ -540,61 +499,6 @@ SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
return id << 1;
}
-HeapEntriesMap::HeapEntriesMap() : entries_() {}
-
-int HeapEntriesMap::Map(HeapThing thing) {
- base::HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing));
- if (cache_entry == nullptr) return HeapEntry::kNoEntry;
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
-}
-
-
-void HeapEntriesMap::Pair(HeapThing thing, int entry) {
- base::HashMap::Entry* cache_entry =
- entries_.LookupOrInsert(thing, Hash(thing));
- DCHECK_NULL(cache_entry->value);
- cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
-}
-
-HeapObjectsSet::HeapObjectsSet() : entries_() {}
-
-void HeapObjectsSet::Clear() {
- entries_.Clear();
-}
-
-
-bool HeapObjectsSet::Contains(Object* obj) {
- if (!obj->IsHeapObject()) return false;
- HeapObject* object = HeapObject::cast(obj);
- return entries_.Lookup(object, HeapEntriesMap::Hash(object)) != nullptr;
-}
-
-
-void HeapObjectsSet::Insert(Object* obj) {
- if (!obj->IsHeapObject()) return;
- HeapObject* object = HeapObject::cast(obj);
- entries_.LookupOrInsert(object, HeapEntriesMap::Hash(object));
-}
-
-
-const char* HeapObjectsSet::GetTag(Object* obj) {
- HeapObject* object = HeapObject::cast(obj);
- base::HashMap::Entry* cache_entry =
- entries_.Lookup(object, HeapEntriesMap::Hash(object));
- return cache_entry != nullptr
- ? reinterpret_cast<const char*>(cache_entry->value)
- : nullptr;
-}
-
-
-V8_NOINLINE void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
- if (!obj->IsHeapObject()) return;
- HeapObject* object = HeapObject::cast(obj);
- base::HashMap::Entry* cache_entry =
- entries_.LookupOrInsert(object, HeapEntriesMap::Hash(object));
- cache_entry->value = const_cast<char*>(tag);
-}
-
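// With these wrappers gone, callers use standard containers directly. The
// presumed member declarations (they live in the header, outside this diff)
// look roughly like:
//
//   std::unordered_map<JSGlobalObject*, const char*> objects_tags_;
//   std::unordered_map<Object*, const char*> strong_gc_subroot_names_;
//   std::unordered_set<JSGlobalObject*> user_roots_;
//   std::unordered_set<HeapObject*> in_groups_;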
V8HeapExplorer::V8HeapExplorer(HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress,
v8::HeapProfiler::ObjectNameResolver* resolver)
@@ -603,18 +507,14 @@ V8HeapExplorer::V8HeapExplorer(HeapSnapshot* snapshot,
names_(snapshot_->profiler()->names()),
heap_object_map_(snapshot_->profiler()->heap_object_map()),
progress_(progress),
- filler_(nullptr),
+ generator_(nullptr),
global_object_name_resolver_(resolver) {}
-V8HeapExplorer::~V8HeapExplorer() {
-}
-
-
HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
return AddEntry(reinterpret_cast<HeapObject*>(ptr));
}
-void V8HeapExplorer::ExtractLocation(int entry, HeapObject* object) {
+void V8HeapExplorer::ExtractLocation(HeapEntry* entry, HeapObject* object) {
if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
ExtractLocationForJSFunction(entry, func);
@@ -632,7 +532,8 @@ void V8HeapExplorer::ExtractLocation(int entry, HeapObject* object) {
}
}
-void V8HeapExplorer::ExtractLocationForJSFunction(int entry, JSFunction* func) {
+void V8HeapExplorer::ExtractLocationForJSFunction(HeapEntry* entry,
+ JSFunction* func) {
if (!func->shared()->script()->IsScript()) return;
Script* script = Script::cast(func->shared()->script());
int scriptId = script->id();
@@ -659,25 +560,22 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
const char* name = names_->GetName(
GetConstructorName(JSObject::cast(object)));
if (object->IsJSGlobalObject()) {
- const char* tag = objects_tags_.GetTag(object);
- if (tag != nullptr) {
- name = names_->GetFormatted("%s / %s", name, tag);
+ auto it = objects_tags_.find(JSGlobalObject::cast(object));
+ if (it != objects_tags_.end()) {
+ name = names_->GetFormatted("%s / %s", name, it->second);
}
}
return AddEntry(object, HeapEntry::kObject, name);
} else if (object->IsString()) {
String* string = String::cast(object);
- if (string->IsConsString())
- return AddEntry(object,
- HeapEntry::kConsString,
- "(concatenated string)");
- if (string->IsSlicedString())
- return AddEntry(object,
- HeapEntry::kSlicedString,
- "(sliced string)");
- return AddEntry(object,
- HeapEntry::kString,
- names_->GetName(String::cast(object)));
+ if (string->IsConsString()) {
+ return AddEntry(object, HeapEntry::kConsString, "(concatenated string)");
+ } else if (string->IsSlicedString()) {
+ return AddEntry(object, HeapEntry::kSlicedString, "(sliced string)");
+ } else {
+ return AddEntry(object, HeapEntry::kString,
+ names_->GetName(String::cast(object)));
+ }
} else if (object->IsSymbol()) {
if (Symbol::cast(object)->is_private())
return AddEntry(object, HeapEntry::kHidden, "private symbol");
@@ -689,16 +587,12 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
return AddEntry(object, HeapEntry::kCode, "");
} else if (object->IsSharedFunctionInfo()) {
String* name = SharedFunctionInfo::cast(object)->Name();
- return AddEntry(object,
- HeapEntry::kCode,
- names_->GetName(name));
+ return AddEntry(object, HeapEntry::kCode, names_->GetName(name));
} else if (object->IsScript()) {
Object* name = Script::cast(object)->name();
- return AddEntry(object,
- HeapEntry::kCode,
- name->IsString()
- ? names_->GetName(String::cast(name))
- : "");
+ return AddEntry(
+ object, HeapEntry::kCode,
+ name->IsString() ? names_->GetName(String::cast(name)) : "");
} else if (object->IsNativeContext()) {
return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
} else if (object->IsContext()) {
@@ -712,14 +606,12 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
}
-
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name) {
return AddEntry(object->address(), type, name, object->Size());
}
-
HeapEntry* V8HeapExplorer::AddEntry(Address address,
HeapEntry::Type type,
const char* name,
@@ -735,66 +627,6 @@ HeapEntry* V8HeapExplorer::AddEntry(Address address,
return snapshot_->AddEntry(type, name, object_id, size, trace_node_id);
}
-
-class SnapshotFiller {
- public:
- explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
- : snapshot_(snapshot),
- names_(snapshot->profiler()->names()),
- entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = allocator->AllocateEntry(ptr);
- entries_->Pair(ptr, entry->index());
- return entry;
- }
- HeapEntry* FindEntry(HeapThing ptr) {
- int index = entries_->Map(ptr);
- return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index]
- : nullptr;
- }
- HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = FindEntry(ptr);
- return entry != nullptr ? entry : AddEntry(ptr, allocator);
- }
- void SetIndexedReference(HeapGraphEdge::Type type,
- int parent,
- int index,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetNamedReference(HeapGraphEdge::Type type,
- int parent,
- const char* reference_name,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetNamedReference(type, reference_name, child_entry);
- }
- void SetNamedAutoIndexReference(HeapGraphEdge::Type type, int parent,
- const char* description,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- const char* name = description
- ? names_->GetFormatted("%d / %s", index, description)
- : names_->GetName(index);
- parent_entry->SetNamedReference(type, name, child_entry);
- }
-
- private:
- HeapSnapshot* snapshot_;
- StringsStorage* names_;
- HeapEntriesMap* entries_;
-};
-
-
const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
switch (object->map()->instance_type()) {
case MAP_TYPE:
@@ -811,9 +643,10 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
case ODDBALL_TYPE: return "system / Oddball";
case ALLOCATION_SITE_TYPE:
return "system / AllocationSite";
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: return "system / "#Name;
- STRUCT_LIST(MAKE_STRUCT_CASE)
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
+ return "system / " #Name;
+ STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
default: return "system";
}
@@ -829,7 +662,7 @@ int V8HeapExplorer::EstimateObjectsCount() {
class IndexedReferencesExtractor : public ObjectVisitor {
public:
IndexedReferencesExtractor(V8HeapExplorer* generator, HeapObject* parent_obj,
- int parent)
+ HeapEntry* parent)
: generator_(generator),
parent_obj_(parent_obj),
parent_start_(HeapObject::RawField(parent_obj_, 0)),
@@ -855,8 +688,8 @@ class IndexedReferencesExtractor : public ObjectVisitor {
continue;
}
HeapObject* heap_object;
- if ((*p)->ToWeakHeapObject(&heap_object) ||
- (*p)->ToStrongHeapObject(&heap_object)) {
+ if ((*p)->GetHeapObjectIfWeak(&heap_object) ||
+ (*p)->GetHeapObjectIfStrong(&heap_object)) {
generator_->SetHiddenReference(parent_obj_, parent_, next_index,
heap_object, index * kPointerSize);
}
@@ -868,10 +701,10 @@ class IndexedReferencesExtractor : public ObjectVisitor {
HeapObject* parent_obj_;
Object** parent_start_;
Object** parent_end_;
- int parent_;
+ HeapEntry* parent_;
};
-void V8HeapExplorer::ExtractReferences(int entry, HeapObject* obj) {
+void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject* obj) {
if (obj->IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
} else if (obj->IsJSArrayBuffer()) {
@@ -935,38 +768,35 @@ void V8HeapExplorer::ExtractReferences(int entry, HeapObject* obj) {
}
}
-
-void V8HeapExplorer::ExtractJSGlobalProxyReferences(
- int entry, JSGlobalProxy* proxy) {
- SetInternalReference(proxy, entry,
- "native_context", proxy->native_context(),
+void V8HeapExplorer::ExtractJSGlobalProxyReferences(HeapEntry* entry,
+ JSGlobalProxy* proxy) {
+ SetInternalReference(entry, "native_context", proxy->native_context(),
JSGlobalProxy::kNativeContextOffset);
}
-
-void V8HeapExplorer::ExtractJSObjectReferences(
- int entry, JSObject* js_obj) {
+void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
+ JSObject* js_obj) {
HeapObject* obj = js_obj;
ExtractPropertyReferences(js_obj, entry);
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
PrototypeIterator iter(heap_->isolate(), js_obj);
ReadOnlyRoots roots(heap_);
- SetPropertyReference(obj, entry, roots.proto_string(), iter.GetCurrent());
+ SetPropertyReference(entry, roots.proto_string(), iter.GetCurrent());
if (obj->IsJSBoundFunction()) {
JSBoundFunction* js_fun = JSBoundFunction::cast(obj);
TagObject(js_fun->bound_arguments(), "(bound arguments)");
- SetInternalReference(js_fun, entry, "bindings", js_fun->bound_arguments(),
+ SetInternalReference(entry, "bindings", js_fun->bound_arguments(),
JSBoundFunction::kBoundArgumentsOffset);
- SetInternalReference(js_obj, entry, "bound_this", js_fun->bound_this(),
+ SetInternalReference(entry, "bound_this", js_fun->bound_this(),
JSBoundFunction::kBoundThisOffset);
- SetInternalReference(js_obj, entry, "bound_function",
+ SetInternalReference(entry, "bound_function",
js_fun->bound_target_function(),
JSBoundFunction::kBoundTargetFunctionOffset);
FixedArray* bindings = js_fun->bound_arguments();
for (int i = 0; i < bindings->length(); i++) {
const char* reference_name = names_->GetFormatted("bound_argument_%d", i);
- SetNativeBindReference(js_obj, entry, reference_name, bindings->get(i));
+ SetNativeBindReference(entry, reference_name, bindings->get(i));
}
} else if (obj->IsJSFunction()) {
JSFunction* js_fun = JSFunction::cast(js_obj);
@@ -974,124 +804,123 @@ void V8HeapExplorer::ExtractJSObjectReferences(
Object* proto_or_map = js_fun->prototype_or_initial_map();
if (!proto_or_map->IsTheHole(heap_->isolate())) {
if (!proto_or_map->IsMap()) {
- SetPropertyReference(obj, entry, roots.prototype_string(),
- proto_or_map, nullptr,
+ SetPropertyReference(entry, roots.prototype_string(), proto_or_map,
+ nullptr,
JSFunction::kPrototypeOrInitialMapOffset);
} else {
- SetPropertyReference(obj, entry, roots.prototype_string(),
+ SetPropertyReference(entry, roots.prototype_string(),
js_fun->prototype());
- SetInternalReference(obj, entry, "initial_map", proto_or_map,
+ SetInternalReference(entry, "initial_map", proto_or_map,
JSFunction::kPrototypeOrInitialMapOffset);
}
}
}
SharedFunctionInfo* shared_info = js_fun->shared();
TagObject(js_fun->feedback_cell(), "(function feedback cell)");
- SetInternalReference(js_fun, entry, "feedback_cell",
- js_fun->feedback_cell(),
+ SetInternalReference(entry, "feedback_cell", js_fun->feedback_cell(),
JSFunction::kFeedbackCellOffset);
TagObject(shared_info, "(shared function info)");
- SetInternalReference(js_fun, entry,
- "shared", shared_info,
+ SetInternalReference(entry, "shared", shared_info,
JSFunction::kSharedFunctionInfoOffset);
TagObject(js_fun->context(), "(context)");
- SetInternalReference(js_fun, entry,
- "context", js_fun->context(),
+ SetInternalReference(entry, "context", js_fun->context(),
JSFunction::kContextOffset);
TagCodeObject(js_fun->code());
- SetInternalReference(js_fun, entry, "code", js_fun->code(),
+ SetInternalReference(entry, "code", js_fun->code(),
JSFunction::kCodeOffset);
} else if (obj->IsJSGlobalObject()) {
JSGlobalObject* global_obj = JSGlobalObject::cast(obj);
- SetInternalReference(global_obj, entry, "native_context",
- global_obj->native_context(),
+ SetInternalReference(entry, "native_context", global_obj->native_context(),
JSGlobalObject::kNativeContextOffset);
- SetInternalReference(global_obj, entry, "global_proxy",
- global_obj->global_proxy(),
+ SetInternalReference(entry, "global_proxy", global_obj->global_proxy(),
JSGlobalObject::kGlobalProxyOffset);
STATIC_ASSERT(JSGlobalObject::kSize - JSObject::kHeaderSize ==
2 * kPointerSize);
} else if (obj->IsJSArrayBufferView()) {
JSArrayBufferView* view = JSArrayBufferView::cast(obj);
- SetInternalReference(view, entry, "buffer", view->buffer(),
+ SetInternalReference(entry, "buffer", view->buffer(),
JSArrayBufferView::kBufferOffset);
}
TagObject(js_obj->raw_properties_or_hash(), "(object properties)");
- SetInternalReference(obj, entry, "properties",
- js_obj->raw_properties_or_hash(),
+ SetInternalReference(entry, "properties", js_obj->raw_properties_or_hash(),
JSObject::kPropertiesOrHashOffset);
TagObject(js_obj->elements(), "(object elements)");
- SetInternalReference(obj, entry,
- "elements", js_obj->elements(),
+ SetInternalReference(entry, "elements", js_obj->elements(),
JSObject::kElementsOffset);
}
-
-void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
+void V8HeapExplorer::ExtractStringReferences(HeapEntry* entry, String* string) {
if (string->IsConsString()) {
ConsString* cs = ConsString::cast(string);
- SetInternalReference(cs, entry, "first", cs->first(),
- ConsString::kFirstOffset);
- SetInternalReference(cs, entry, "second", cs->second(),
+ SetInternalReference(entry, "first", cs->first(), ConsString::kFirstOffset);
+ SetInternalReference(entry, "second", cs->second(),
ConsString::kSecondOffset);
} else if (string->IsSlicedString()) {
SlicedString* ss = SlicedString::cast(string);
- SetInternalReference(ss, entry, "parent", ss->parent(),
+ SetInternalReference(entry, "parent", ss->parent(),
SlicedString::kParentOffset);
} else if (string->IsThinString()) {
ThinString* ts = ThinString::cast(string);
- SetInternalReference(ts, entry, "actual", ts->actual(),
+ SetInternalReference(entry, "actual", ts->actual(),
ThinString::kActualOffset);
}
}
-
-void V8HeapExplorer::ExtractSymbolReferences(int entry, Symbol* symbol) {
- SetInternalReference(symbol, entry,
- "name", symbol->name(),
- Symbol::kNameOffset);
+void V8HeapExplorer::ExtractSymbolReferences(HeapEntry* entry, Symbol* symbol) {
+ SetInternalReference(entry, "name", symbol->name(), Symbol::kNameOffset);
}
-
-void V8HeapExplorer::ExtractJSCollectionReferences(int entry,
+void V8HeapExplorer::ExtractJSCollectionReferences(HeapEntry* entry,
JSCollection* collection) {
- SetInternalReference(collection, entry, "table", collection->table(),
+ SetInternalReference(entry, "table", collection->table(),
JSCollection::kTableOffset);
}
-void V8HeapExplorer::ExtractJSWeakCollectionReferences(int entry,
+void V8HeapExplorer::ExtractJSWeakCollectionReferences(HeapEntry* entry,
JSWeakCollection* obj) {
- SetInternalReference(obj, entry, "table", obj->table(),
+ SetInternalReference(entry, "table", obj->table(),
JSWeakCollection::kTableOffset);
}
void V8HeapExplorer::ExtractEphemeronHashTableReferences(
- int entry, EphemeronHashTable* table) {
+ HeapEntry* entry, EphemeronHashTable* table) {
for (int i = 0, capacity = table->Capacity(); i < capacity; ++i) {
int key_index = EphemeronHashTable::EntryToIndex(i) +
EphemeronHashTable::kEntryKeyIndex;
int value_index = EphemeronHashTable::EntryToValueIndex(i);
Object* key = table->get(key_index);
Object* value = table->get(value_index);
- SetWeakReference(table, entry, key_index, key,
+ SetWeakReference(entry, key_index, key,
table->OffsetOfElementAt(key_index));
- SetInternalReference(table, entry, value_index, value,
- table->OffsetOfElementAt(value_index));
+ SetWeakReference(entry, value_index, value,
+ table->OffsetOfElementAt(value_index));
HeapEntry* key_entry = GetEntry(key);
- int key_entry_index = key_entry->index();
HeapEntry* value_entry = GetEntry(value);
if (key_entry && value_entry) {
const char* edge_name =
names_->GetFormatted("key %s in WeakMap", key_entry->name());
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kInternal, key_entry_index, edge_name, value_entry);
+ key_entry->SetNamedAutoIndexReference(HeapGraphEdge::kInternal, edge_name,
+ value_entry, names_);
}
}
}
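// Net effect of the change above (illustrative, not part of the patch): both
// ephemeron slots now hang off the table as weak edges, and the actual
// retention path is expressed as a named internal edge from key to value:
//
//   table --weak--> key
//   table --weak--> value
//   key --internal "key <name> in WeakMap"--> value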
-void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
+// This static array is used to prevent excessive code size in
+// ExtractContextReferences below, which would happen if we called
+// SetInternalReference for every native context field in a macro.
+static const struct {
+ int index;
+ const char* name;
+} native_context_names[] = {
+#define CONTEXT_FIELD_INDEX_NAME(index, _, name) {Context::index, #name},
+ NATIVE_CONTEXT_FIELDS(CONTEXT_FIELD_INDEX_NAME)
+#undef CONTEXT_FIELD_INDEX_NAME
+};
+
+void V8HeapExplorer::ExtractContextReferences(HeapEntry* entry,
+ Context* context) {
if (!context->IsNativeContext() && context->is_declaration_context()) {
ScopeInfo* scope_info = context->scope_info();
// Add context allocated locals.
@@ -1099,39 +928,49 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
for (int i = 0; i < context_locals; ++i) {
String* local_name = scope_info->ContextLocalName(i);
int idx = Context::MIN_CONTEXT_SLOTS + i;
- SetContextReference(context, entry, local_name, context->get(idx),
+ SetContextReference(entry, local_name, context->get(idx),
Context::OffsetOfElementAt(idx));
}
if (scope_info->HasFunctionName()) {
String* name = String::cast(scope_info->FunctionName());
int idx = scope_info->FunctionContextSlotIndex(name);
if (idx >= 0) {
- SetContextReference(context, entry, name, context->get(idx),
+ SetContextReference(entry, name, context->get(idx),
Context::OffsetOfElementAt(idx));
}
}
}
-#define EXTRACT_CONTEXT_FIELD(index, type, name) \
- if (Context::index < Context::FIRST_WEAK_SLOT || \
- Context::index == Context::MAP_CACHE_INDEX) { \
- SetInternalReference(context, entry, #name, context->get(Context::index), \
- FixedArray::OffsetOfElementAt(Context::index)); \
- } else { \
- SetWeakReference(context, entry, #name, context->get(Context::index), \
- FixedArray::OffsetOfElementAt(Context::index)); \
- }
- EXTRACT_CONTEXT_FIELD(SCOPE_INFO_INDEX, ScopeInfo, scope_info);
- EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
- EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, HeapObject, extension);
- EXTRACT_CONTEXT_FIELD(NATIVE_CONTEXT_INDEX, Context, native_context);
+ SetInternalReference(
+ entry, "scope_info", context->get(Context::SCOPE_INFO_INDEX),
+ FixedArray::OffsetOfElementAt(Context::SCOPE_INFO_INDEX));
+ SetInternalReference(entry, "previous", context->get(Context::PREVIOUS_INDEX),
+ FixedArray::OffsetOfElementAt(Context::PREVIOUS_INDEX));
+ SetInternalReference(entry, "extension",
+ context->get(Context::EXTENSION_INDEX),
+ FixedArray::OffsetOfElementAt(Context::EXTENSION_INDEX));
+ SetInternalReference(
+ entry, "native_context", context->get(Context::NATIVE_CONTEXT_INDEX),
+ FixedArray::OffsetOfElementAt(Context::NATIVE_CONTEXT_INDEX));
+
if (context->IsNativeContext()) {
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->embedder_data(), "(context data)");
- NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD)
- EXTRACT_CONTEXT_FIELD(OPTIMIZED_CODE_LIST, unused, optimized_code_list);
- EXTRACT_CONTEXT_FIELD(DEOPTIMIZED_CODE_LIST, unused, deoptimized_code_list);
-#undef EXTRACT_CONTEXT_FIELD
+ for (size_t i = 0; i < arraysize(native_context_names); i++) {
+ int index = native_context_names[i].index;
+ const char* name = native_context_names[i].name;
+ SetInternalReference(entry, name, context->get(index),
+ FixedArray::OffsetOfElementAt(index));
+ }
+
+ SetWeakReference(
+ entry, "optimized_code_list",
+ context->get(Context::OPTIMIZED_CODE_LIST),
+ FixedArray::OffsetOfElementAt(Context::OPTIMIZED_CODE_LIST));
+ SetWeakReference(
+ entry, "deoptimized_code_list",
+ context->get(Context::DEOPTIMIZED_CODE_LIST),
+ FixedArray::OffsetOfElementAt(Context::DEOPTIMIZED_CODE_LIST));
STATIC_ASSERT(Context::OPTIMIZED_CODE_LIST == Context::FIRST_WEAK_SLOT);
STATIC_ASSERT(Context::NEXT_CONTEXT_LINK + 1 ==
Context::NATIVE_CONTEXT_SLOTS);
@@ -1140,17 +979,15 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
}
}
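// How the native_context_names table further up is generated (illustrative
// expansion, not part of the patch): NATIVE_CONTEXT_FIELDS is an X-macro
// list, so applying CONTEXT_FIELD_INDEX_NAME stamps out one initializer per
// field, roughly:
//
//   {Context::ARRAY_FUNCTION_INDEX, "array_function"},
//   {Context::OBJECT_FUNCTION_INDEX, "object_function"},
//   ...
//
// A plain data-driven loop then emits the references, rather than expanding
// a SetInternalReference call for every field at the use site.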
-
-void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
+void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map* map) {
MaybeObject* maybe_raw_transitions_or_prototype_info = map->raw_transitions();
HeapObject* raw_transitions_or_prototype_info;
- if (maybe_raw_transitions_or_prototype_info->ToWeakHeapObject(
+ if (maybe_raw_transitions_or_prototype_info->GetHeapObjectIfWeak(
&raw_transitions_or_prototype_info)) {
DCHECK(raw_transitions_or_prototype_info->IsMap());
- SetWeakReference(map, entry, "transition",
- raw_transitions_or_prototype_info,
+ SetWeakReference(entry, "transition", raw_transitions_or_prototype_info,
Map::kTransitionsOrPrototypeInfoOffset);
- } else if (maybe_raw_transitions_or_prototype_info->ToStrongHeapObject(
+ } else if (maybe_raw_transitions_or_prototype_info->GetHeapObjectIfStrong(
&raw_transitions_or_prototype_info)) {
if (raw_transitions_or_prototype_info->IsTransitionArray()) {
TransitionArray* transitions =
@@ -1160,55 +997,52 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
"(prototype transitions)");
}
TagObject(transitions, "(transition array)");
- SetInternalReference(map, entry, "transitions", transitions,
+ SetInternalReference(entry, "transitions", transitions,
Map::kTransitionsOrPrototypeInfoOffset);
} else if (raw_transitions_or_prototype_info->IsTuple3() ||
raw_transitions_or_prototype_info->IsFixedArray()) {
TagObject(raw_transitions_or_prototype_info, "(transition)");
- SetInternalReference(map, entry, "transition",
+ SetInternalReference(entry, "transition",
raw_transitions_or_prototype_info,
Map::kTransitionsOrPrototypeInfoOffset);
} else if (map->is_prototype_map()) {
TagObject(raw_transitions_or_prototype_info, "prototype_info");
- SetInternalReference(map, entry, "prototype_info",
+ SetInternalReference(entry, "prototype_info",
raw_transitions_or_prototype_info,
Map::kTransitionsOrPrototypeInfoOffset);
}
}
DescriptorArray* descriptors = map->instance_descriptors();
TagObject(descriptors, "(map descriptors)");
- SetInternalReference(map, entry, "descriptors", descriptors,
+ SetInternalReference(entry, "descriptors", descriptors,
Map::kDescriptorsOffset);
- SetInternalReference(map, entry, "prototype", map->prototype(),
+ SetInternalReference(entry, "prototype", map->prototype(),
Map::kPrototypeOffset);
if (FLAG_unbox_double_fields) {
- SetInternalReference(map, entry, "layout_descriptor",
- map->layout_descriptor(),
+ SetInternalReference(entry, "layout_descriptor", map->layout_descriptor(),
Map::kLayoutDescriptorOffset);
}
Object* constructor_or_backpointer = map->constructor_or_backpointer();
if (constructor_or_backpointer->IsMap()) {
TagObject(constructor_or_backpointer, "(back pointer)");
- SetInternalReference(map, entry, "back_pointer", constructor_or_backpointer,
+ SetInternalReference(entry, "back_pointer", constructor_or_backpointer,
Map::kConstructorOrBackPointerOffset);
} else if (constructor_or_backpointer->IsFunctionTemplateInfo()) {
TagObject(constructor_or_backpointer, "(constructor function data)");
- SetInternalReference(map, entry, "constructor_function_data",
+ SetInternalReference(entry, "constructor_function_data",
constructor_or_backpointer,
Map::kConstructorOrBackPointerOffset);
} else {
- SetInternalReference(map, entry, "constructor", constructor_or_backpointer,
+ SetInternalReference(entry, "constructor", constructor_or_backpointer,
Map::kConstructorOrBackPointerOffset);
}
TagObject(map->dependent_code(), "(dependent code)");
- SetInternalReference(map, entry, "dependent_code", map->dependent_code(),
+ SetInternalReference(entry, "dependent_code", map->dependent_code(),
Map::kDependentCodeOffset);
}
-
void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
- int entry, SharedFunctionInfo* shared) {
- HeapObject* obj = shared;
+ HeapEntry* entry, SharedFunctionInfo* shared) {
String* shared_name = shared->DebugName();
const char* name = nullptr;
if (shared_name != ReadOnlyRoots(heap_).empty_string()) {
@@ -1223,59 +1057,51 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
if (shared->name_or_scope_info()->IsScopeInfo()) {
TagObject(shared->name_or_scope_info(), "(function scope info)");
}
- SetInternalReference(obj, entry, "name_or_scope_info",
+ SetInternalReference(entry, "name_or_scope_info",
shared->name_or_scope_info(),
SharedFunctionInfo::kNameOrScopeInfoOffset);
- SetInternalReference(obj, entry, "script_or_debug_info",
+ SetInternalReference(entry, "script_or_debug_info",
shared->script_or_debug_info(),
SharedFunctionInfo::kScriptOrDebugInfoOffset);
- SetInternalReference(obj, entry,
- "function_data", shared->function_data(),
+ SetInternalReference(entry, "function_data", shared->function_data(),
SharedFunctionInfo::kFunctionDataOffset);
SetInternalReference(
- obj, entry, "raw_outer_scope_info_or_feedback_metadata",
+ entry, "raw_outer_scope_info_or_feedback_metadata",
shared->raw_outer_scope_info_or_feedback_metadata(),
SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset);
}
-void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
- HeapObject* obj = script;
- SetInternalReference(obj, entry,
- "source", script->source(),
+void V8HeapExplorer::ExtractScriptReferences(HeapEntry* entry, Script* script) {
+ SetInternalReference(entry, "source", script->source(),
Script::kSourceOffset);
- SetInternalReference(obj, entry,
- "name", script->name(),
- Script::kNameOffset);
- SetInternalReference(obj, entry,
- "context_data", script->context_data(),
+ SetInternalReference(entry, "name", script->name(), Script::kNameOffset);
+ SetInternalReference(entry, "context_data", script->context_data(),
Script::kContextOffset);
TagObject(script->line_ends(), "(script line ends)");
- SetInternalReference(obj, entry,
- "line_ends", script->line_ends(),
+ SetInternalReference(entry, "line_ends", script->line_ends(),
Script::kLineEndsOffset);
}
-
void V8HeapExplorer::ExtractAccessorInfoReferences(
- int entry, AccessorInfo* accessor_info) {
- SetInternalReference(accessor_info, entry, "name", accessor_info->name(),
+ HeapEntry* entry, AccessorInfo* accessor_info) {
+ SetInternalReference(entry, "name", accessor_info->name(),
AccessorInfo::kNameOffset);
- SetInternalReference(accessor_info, entry, "expected_receiver_type",
+ SetInternalReference(entry, "expected_receiver_type",
accessor_info->expected_receiver_type(),
AccessorInfo::kExpectedReceiverTypeOffset);
- SetInternalReference(accessor_info, entry, "getter", accessor_info->getter(),
+ SetInternalReference(entry, "getter", accessor_info->getter(),
AccessorInfo::kGetterOffset);
- SetInternalReference(accessor_info, entry, "setter", accessor_info->setter(),
+ SetInternalReference(entry, "setter", accessor_info->setter(),
AccessorInfo::kSetterOffset);
- SetInternalReference(accessor_info, entry, "data", accessor_info->data(),
+ SetInternalReference(entry, "data", accessor_info->data(),
AccessorInfo::kDataOffset);
}
-void V8HeapExplorer::ExtractAccessorPairReferences(
- int entry, AccessorPair* accessors) {
- SetInternalReference(accessors, entry, "getter", accessors->getter(),
+void V8HeapExplorer::ExtractAccessorPairReferences(HeapEntry* entry,
+ AccessorPair* accessors) {
+ SetInternalReference(entry, "getter", accessors->getter(),
AccessorPair::kGetterOffset);
- SetInternalReference(accessors, entry, "setter", accessors->setter(),
+ SetInternalReference(entry, "setter", accessors->setter(),
AccessorPair::kSetterOffset);
}
@@ -1291,58 +1117,56 @@ void V8HeapExplorer::TagCodeObject(Code* code) {
}
}
-void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
+void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code* code) {
TagCodeObject(code);
TagObject(code->relocation_info(), "(code relocation info)");
- SetInternalReference(code, entry,
- "relocation_info", code->relocation_info(),
+ SetInternalReference(entry, "relocation_info", code->relocation_info(),
Code::kRelocationInfoOffset);
TagObject(code->deoptimization_data(), "(code deopt data)");
- SetInternalReference(code, entry,
- "deoptimization_data", code->deoptimization_data(),
+ SetInternalReference(entry, "deoptimization_data",
+ code->deoptimization_data(),
Code::kDeoptimizationDataOffset);
TagObject(code->source_position_table(), "(source position table)");
- SetInternalReference(code, entry, "source_position_table",
+ SetInternalReference(entry, "source_position_table",
code->source_position_table(),
Code::kSourcePositionTableOffset);
}
-void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
- SetInternalReference(cell, entry, "value", cell->value(), Cell::kValueOffset);
+void V8HeapExplorer::ExtractCellReferences(HeapEntry* entry, Cell* cell) {
+ SetInternalReference(entry, "value", cell->value(), Cell::kValueOffset);
}
void V8HeapExplorer::ExtractFeedbackCellReferences(
- int entry, FeedbackCell* feedback_cell) {
+ HeapEntry* entry, FeedbackCell* feedback_cell) {
TagObject(feedback_cell, "(feedback cell)");
- SetInternalReference(feedback_cell, entry, "value", feedback_cell->value(),
+ SetInternalReference(entry, "value", feedback_cell->value(),
FeedbackCell::kValueOffset);
}
-void V8HeapExplorer::ExtractPropertyCellReferences(int entry,
+void V8HeapExplorer::ExtractPropertyCellReferences(HeapEntry* entry,
PropertyCell* cell) {
- SetInternalReference(cell, entry, "value", cell->value(),
+ SetInternalReference(entry, "value", cell->value(),
PropertyCell::kValueOffset);
TagObject(cell->dependent_code(), "(dependent code)");
- SetInternalReference(cell, entry, "dependent_code", cell->dependent_code(),
+ SetInternalReference(entry, "dependent_code", cell->dependent_code(),
PropertyCell::kDependentCodeOffset);
}
-void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
+void V8HeapExplorer::ExtractAllocationSiteReferences(HeapEntry* entry,
AllocationSite* site) {
- SetInternalReference(site, entry, "transition_info",
+ SetInternalReference(entry, "transition_info",
site->transition_info_or_boilerplate(),
AllocationSite::kTransitionInfoOrBoilerplateOffset);
- SetInternalReference(site, entry, "nested_site", site->nested_site(),
+ SetInternalReference(entry, "nested_site", site->nested_site(),
AllocationSite::kNestedSiteOffset);
TagObject(site->dependent_code(), "(dependent code)");
- SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
+ SetInternalReference(entry, "dependent_code", site->dependent_code(),
AllocationSite::kDependentCodeOffset);
}
void V8HeapExplorer::ExtractArrayBoilerplateDescriptionReferences(
- int entry, ArrayBoilerplateDescription* value) {
- SetInternalReference(value, entry, "constant_elements",
- value->constant_elements(),
+ HeapEntry* entry, ArrayBoilerplateDescription* value) {
+ SetInternalReference(entry, "constant_elements", value->constant_elements(),
ArrayBoilerplateDescription::kConstantElementsOffset);
}
@@ -1352,7 +1176,7 @@ class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
: size_(size)
, explorer_(explorer) {
}
- virtual HeapEntry* AllocateEntry(HeapThing ptr) {
+ HeapEntry* AllocateEntry(HeapThing ptr) override {
return explorer_->AddEntry(reinterpret_cast<Address>(ptr),
HeapEntry::kNative, "system / JSArrayBufferData",
size_);
@@ -1362,73 +1186,74 @@ class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
V8HeapExplorer* explorer_;
};
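// Usage sketch for the allocator above (hypothetical variables): a backing
// store is raw native memory, so it gets a synthetic kNative entry keyed by
// its address, and FindOrAddEntry() deduplicates ArrayBuffers that share one
// backing store:
//
//   JSArrayBufferDataEntryAllocator alloc(data_size, this);
//   HeapEntry* e1 = generator_->FindOrAddEntry(store, &alloc);
//   HeapEntry* e2 = generator_->FindOrAddEntry(store, &alloc);  // same entry
//   DCHECK_EQ(e1, e2);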
-void V8HeapExplorer::ExtractJSArrayBufferReferences(
- int entry, JSArrayBuffer* buffer) {
+void V8HeapExplorer::ExtractJSArrayBufferReferences(HeapEntry* entry,
+ JSArrayBuffer* buffer) {
   // Set up a reference to a native memory backing_store object.
if (!buffer->backing_store())
return;
- size_t data_size = NumberToSize(buffer->byte_length());
+ size_t data_size = buffer->byte_length();
JSArrayBufferDataEntryAllocator allocator(data_size, this);
HeapEntry* data_entry =
- filler_->FindOrAddEntry(buffer->backing_store(), &allocator);
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- entry, "backing_store", data_entry);
+ generator_->FindOrAddEntry(buffer->backing_store(), &allocator);
+ entry->SetNamedReference(HeapGraphEdge::kInternal, "backing_store",
+ data_entry);
}
-void V8HeapExplorer::ExtractJSPromiseReferences(int entry, JSPromise* promise) {
- SetInternalReference(promise, entry, "reactions_or_result",
+void V8HeapExplorer::ExtractJSPromiseReferences(HeapEntry* entry,
+ JSPromise* promise) {
+ SetInternalReference(entry, "reactions_or_result",
promise->reactions_or_result(),
JSPromise::kReactionsOrResultOffset);
}
void V8HeapExplorer::ExtractJSGeneratorObjectReferences(
- int entry, JSGeneratorObject* generator) {
- SetInternalReference(generator, entry, "function", generator->function(),
+ HeapEntry* entry, JSGeneratorObject* generator) {
+ SetInternalReference(entry, "function", generator->function(),
JSGeneratorObject::kFunctionOffset);
- SetInternalReference(generator, entry, "context", generator->context(),
+ SetInternalReference(entry, "context", generator->context(),
JSGeneratorObject::kContextOffset);
- SetInternalReference(generator, entry, "receiver", generator->receiver(),
+ SetInternalReference(entry, "receiver", generator->receiver(),
JSGeneratorObject::kReceiverOffset);
- SetInternalReference(generator, entry, "parameters_and_registers",
+ SetInternalReference(entry, "parameters_and_registers",
generator->parameters_and_registers(),
JSGeneratorObject::kParametersAndRegistersOffset);
}
-void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
+void V8HeapExplorer::ExtractFixedArrayReferences(HeapEntry* entry,
+ FixedArray* array) {
for (int i = 0, l = array->length(); i < l; ++i) {
DCHECK(!HasWeakHeapObjectTag(array->get(i)));
- SetInternalReference(array, entry, i, array->get(i),
- array->OffsetOfElementAt(i));
+ SetInternalReference(entry, i, array->get(i), array->OffsetOfElementAt(i));
}
}
void V8HeapExplorer::ExtractFeedbackVectorReferences(
- int entry, FeedbackVector* feedback_vector) {
+ HeapEntry* entry, FeedbackVector* feedback_vector) {
MaybeObject* code = feedback_vector->optimized_code_weak_or_smi();
HeapObject* code_heap_object;
- if (code->ToWeakHeapObject(&code_heap_object)) {
- SetWeakReference(feedback_vector, entry, "optimized code", code_heap_object,
+ if (code->GetHeapObjectIfWeak(&code_heap_object)) {
+ SetWeakReference(entry, "optimized code", code_heap_object,
FeedbackVector::kOptimizedCodeOffset);
}
}
template <typename T>
-void V8HeapExplorer::ExtractWeakArrayReferences(int header_size, int entry,
- T* array) {
+void V8HeapExplorer::ExtractWeakArrayReferences(int header_size,
+ HeapEntry* entry, T* array) {
for (int i = 0; i < array->length(); ++i) {
MaybeObject* object = array->Get(i);
HeapObject* heap_object;
- if (object->ToWeakHeapObject(&heap_object)) {
- SetWeakReference(array, entry, i, heap_object,
- header_size + i * kPointerSize);
- } else if (object->ToStrongHeapObject(&heap_object)) {
- SetInternalReference(array, entry, i, heap_object,
+ if (object->GetHeapObjectIfWeak(&heap_object)) {
+ SetWeakReference(entry, i, heap_object, header_size + i * kPointerSize);
+ } else if (object->GetHeapObjectIfStrong(&heap_object)) {
+ SetInternalReference(entry, i, heap_object,
header_size + i * kPointerSize);
}
}
}
-void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
+void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
+ HeapEntry* entry) {
Isolate* isolate = js_obj->GetIsolate();
if (js_obj->HasFastProperties()) {
DescriptorArray* descs = js_obj->map()->instance_descriptors();
@@ -1446,12 +1271,12 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
int field_offset =
field_index.is_inobject() ? field_index.offset() : -1;
- SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
- value, nullptr, field_offset);
+ SetDataOrAccessorPropertyReference(details.kind(), entry, k, value,
+ nullptr, field_offset);
break;
}
case kDescriptor:
- SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
+ SetDataOrAccessorPropertyReference(details.kind(), entry,
descs->GetKey(i),
descs->GetStrongValue(i));
break;
@@ -1464,14 +1289,12 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
int length = dictionary->Capacity();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < length; ++i) {
- if (dictionary->IsKey(roots, dictionary->KeyAt(i))) {
- PropertyCell* cell = dictionary->CellAt(i);
- Name* name = cell->name();
- Object* value = cell->value();
- PropertyDetails details = cell->property_details();
- SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, name,
- value);
- }
+ if (!dictionary->IsKey(roots, dictionary->KeyAt(i))) continue;
+ PropertyCell* cell = dictionary->CellAt(i);
+ Name* name = cell->name();
+ Object* value = cell->value();
+ PropertyDetails details = cell->property_details();
+ SetDataOrAccessorPropertyReference(details.kind(), entry, name, value);
}
} else {
NameDictionary* dictionary = js_obj->property_dictionary();
@@ -1479,36 +1302,33 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
ReadOnlyRoots roots(isolate);
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(roots, k)) {
- Object* value = dictionary->ValueAt(i);
- PropertyDetails details = dictionary->DetailsAt(i);
- SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
- Name::cast(k), value);
- }
+ if (!dictionary->IsKey(roots, k)) continue;
+ Object* value = dictionary->ValueAt(i);
+ PropertyDetails details = dictionary->DetailsAt(i);
+ SetDataOrAccessorPropertyReference(details.kind(), entry, Name::cast(k),
+ value);
}
}
}
-
-void V8HeapExplorer::ExtractAccessorPairProperty(JSObject* js_obj, int entry,
- Name* key,
+void V8HeapExplorer::ExtractAccessorPairProperty(HeapEntry* entry, Name* key,
Object* callback_obj,
int field_offset) {
if (!callback_obj->IsAccessorPair()) return;
AccessorPair* accessors = AccessorPair::cast(callback_obj);
- SetPropertyReference(js_obj, entry, key, accessors, nullptr, field_offset);
+ SetPropertyReference(entry, key, accessors, nullptr, field_offset);
Object* getter = accessors->getter();
if (!getter->IsOddball()) {
- SetPropertyReference(js_obj, entry, key, getter, "get %s");
+ SetPropertyReference(entry, key, getter, "get %s");
}
Object* setter = accessors->setter();
if (!setter->IsOddball()) {
- SetPropertyReference(js_obj, entry, key, setter, "set %s");
+ SetPropertyReference(entry, key, setter, "set %s");
}
}
-
-void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
+void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
+ HeapEntry* entry) {
ReadOnlyRoots roots = js_obj->GetReadOnlyRoots();
if (js_obj->HasObjectElements()) {
FixedArray* elements = FixedArray::cast(js_obj->elements());
@@ -1517,7 +1337,7 @@ void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
: elements->length();
for (int i = 0; i < length; ++i) {
if (!elements->get(i)->IsTheHole(roots)) {
- SetElementReference(js_obj, entry, i, elements->get(i));
+ SetElementReference(entry, i, elements->get(i));
}
}
} else if (js_obj->HasDictionaryElements()) {
@@ -1525,22 +1345,20 @@ void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(roots, k)) {
- DCHECK(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
- }
+ if (!dictionary->IsKey(roots, k)) continue;
+ DCHECK(k->IsNumber());
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ SetElementReference(entry, index, dictionary->ValueAt(i));
}
}
}
-
-void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
+void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj,
+ HeapEntry* entry) {
int length = js_obj->GetEmbedderFieldCount();
for (int i = 0; i < length; ++i) {
Object* o = js_obj->GetEmbedderField(i);
- SetInternalReference(js_obj, entry, i, o,
- js_obj->GetEmbedderFieldOffset(i));
+ SetInternalReference(entry, i, o, js_obj->GetEmbedderFieldOffset(i));
}
}
@@ -1564,10 +1382,8 @@ String* V8HeapExplorer::GetConstructorName(JSObject* object) {
return *JSReceiver::GetConstructorName(handle(object, isolate));
}
-
HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
- if (!obj->IsHeapObject()) return nullptr;
- return filler_->FindOrAddEntry(obj, this);
+ return obj->IsHeapObject() ? generator_->FindOrAddEntry(obj, this) : nullptr;
}
class RootsReferencesExtractor : public RootVisitor {
@@ -1597,8 +1413,9 @@ class RootsReferencesExtractor : public RootVisitor {
bool visiting_weak_roots_;
};
-bool V8HeapExplorer::IterateAndExtractReferences(SnapshotFiller* filler) {
- filler_ = filler;
+bool V8HeapExplorer::IterateAndExtractReferences(
+ HeapSnapshotGenerator* generator) {
+ generator_ = generator;
// Create references to the synthetic roots.
SetRootGcRootsReference();
@@ -1610,7 +1427,7 @@ bool V8HeapExplorer::IterateAndExtractReferences(SnapshotFiller* filler) {
// first. Otherwise a particular JSFunction object could set
// its custom name to a generic builtin.
RootsReferencesExtractor extractor(this);
- heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG_FOR_SERIALIZATION);
extractor.SetVisitingWeakRoots();
heap_->IterateWeakGlobalHandles(&extractor);
@@ -1630,10 +1447,9 @@ bool V8HeapExplorer::IterateAndExtractReferences(SnapshotFiller* filler) {
visited_fields_.resize(max_pointer, false);
}
- HeapEntry* heap_entry = GetEntry(obj);
- int entry = heap_entry->index();
+ HeapEntry* entry = GetEntry(obj);
ExtractReferences(entry, obj);
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+ SetInternalReference(entry, "map", obj->map(), HeapObject::kMapOffset);
// Extract unvisited fields as hidden references and restore tags
// of visited fields.
IndexedReferencesExtractor refs_extractor(this, obj, entry);
@@ -1650,7 +1466,7 @@ bool V8HeapExplorer::IterateAndExtractReferences(SnapshotFiller* filler) {
if (!progress_->ProgressReport(false)) interrupted = true;
}
- filler_ = nullptr;
+ generator_ = nullptr;
return interrupted ? false : progress_->ProgressReport(true);
}
@@ -1684,16 +1500,13 @@ bool V8HeapExplorer::IsEssentialHiddenReference(Object* parent,
return true;
}
-void V8HeapExplorer::SetContextReference(HeapObject* parent_obj,
- int parent_entry,
+void V8HeapExplorer::SetContextReference(HeapEntry* parent_entry,
String* reference_name,
- Object* child_obj,
- int field_offset) {
- DCHECK(parent_entry == GetEntry(parent_obj)->index());
+ Object* child_obj, int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
- filler_->SetNamedReference(HeapGraphEdge::kContextVariable, parent_entry,
- names_->GetName(reference_name), child_entry);
+ parent_entry->SetNamedReference(HeapGraphEdge::kContextVariable,
+ names_->GetName(reference_name), child_entry);
MarkVisitedField(field_offset);
}
@@ -1704,135 +1517,98 @@ void V8HeapExplorer::MarkVisitedField(int offset) {
visited_fields_[index] = true;
}
-
-void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
- int parent_entry,
+void V8HeapExplorer::SetNativeBindReference(HeapEntry* parent_entry,
const char* reference_name,
Object* child_obj) {
- DCHECK(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
- filler_->SetNamedReference(HeapGraphEdge::kShortcut, parent_entry,
- reference_name, child_entry);
+ parent_entry->SetNamedReference(HeapGraphEdge::kShortcut, reference_name,
+ child_entry);
}
-
-void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
+void V8HeapExplorer::SetElementReference(HeapEntry* parent_entry, int index,
Object* child_obj) {
- DCHECK(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
- filler_->SetIndexedReference(HeapGraphEdge::kElement, parent_entry, index,
- child_entry);
+ parent_entry->SetIndexedReference(HeapGraphEdge::kElement, index,
+ child_entry);
}
-
-void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- int parent_entry,
+void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry,
const char* reference_name,
- Object* child_obj,
- int field_offset) {
- DCHECK(parent_entry == GetEntry(parent_obj)->index());
+ Object* child_obj, int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
if (IsEssentialObject(child_obj)) {
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_entry,
- reference_name,
- child_entry);
+ parent_entry->SetNamedReference(HeapGraphEdge::kInternal, reference_name,
+ child_entry);
}
MarkVisitedField(field_offset);
}
-
-void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj,
- int field_offset) {
- DCHECK(parent_entry == GetEntry(parent_obj)->index());
+void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry, int index,
+ Object* child_obj, int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
if (IsEssentialObject(child_obj)) {
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_entry,
- names_->GetName(index),
- child_entry);
+ parent_entry->SetNamedReference(HeapGraphEdge::kInternal,
+ names_->GetName(index), child_entry);
}
MarkVisitedField(field_offset);
}
void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
- int parent_entry, int index,
+ HeapEntry* parent_entry, int index,
Object* child_obj, int field_offset) {
- DCHECK(parent_entry == GetEntry(parent_obj)->index());
+ DCHECK_EQ(parent_entry, GetEntry(parent_obj));
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != nullptr && IsEssentialObject(child_obj) &&
IsEssentialHiddenReference(parent_obj, field_offset)) {
- filler_->SetIndexedReference(HeapGraphEdge::kHidden, parent_entry, index,
- child_entry);
+ parent_entry->SetIndexedReference(HeapGraphEdge::kHidden, index,
+ child_entry);
}
}
-
-void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
- int parent_entry,
+void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry,
const char* reference_name,
- Object* child_obj,
- int field_offset) {
- DCHECK(parent_entry == GetEntry(parent_obj)->index());
+ Object* child_obj, int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
if (IsEssentialObject(child_obj)) {
- filler_->SetNamedReference(HeapGraphEdge::kWeak,
- parent_entry,
- reference_name,
- child_entry);
+ parent_entry->SetNamedReference(HeapGraphEdge::kWeak, reference_name,
+ child_entry);
}
MarkVisitedField(field_offset);
}
-
-void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj,
- int field_offset) {
- DCHECK(parent_entry == GetEntry(parent_obj)->index());
+void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
+ Object* child_obj, int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
if (IsEssentialObject(child_obj)) {
- filler_->SetNamedReference(HeapGraphEdge::kWeak,
- parent_entry,
- names_->GetFormatted("%d", index),
- child_entry);
+ parent_entry->SetNamedReference(
+ HeapGraphEdge::kWeak, names_->GetFormatted("%d", index), child_entry);
}
MarkVisitedField(field_offset);
}
void V8HeapExplorer::SetDataOrAccessorPropertyReference(
- PropertyKind kind, JSObject* parent_obj, int parent_entry,
- Name* reference_name, Object* child_obj, const char* name_format_string,
- int field_offset) {
+ PropertyKind kind, HeapEntry* parent_entry, Name* reference_name,
+ Object* child_obj, const char* name_format_string, int field_offset) {
if (kind == kAccessor) {
- ExtractAccessorPairProperty(parent_obj, parent_entry, reference_name,
- child_obj, field_offset);
+ ExtractAccessorPairProperty(parent_entry, reference_name, child_obj,
+ field_offset);
} else {
- SetPropertyReference(parent_obj, parent_entry, reference_name, child_obj,
+ SetPropertyReference(parent_entry, reference_name, child_obj,
name_format_string, field_offset);
}
}
-
-void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
- int parent_entry,
+void V8HeapExplorer::SetPropertyReference(HeapEntry* parent_entry,
Name* reference_name,
Object* child_obj,
const char* name_format_string,
int field_offset) {
- DCHECK(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
HeapGraphEdge::Type type =
@@ -1848,29 +1624,25 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
.get())
: names_->GetName(reference_name);
- filler_->SetNamedReference(type, parent_entry, name, child_entry);
+ parent_entry->SetNamedReference(type, name, child_entry);
MarkVisitedField(field_offset);
}
void V8HeapExplorer::SetRootGcRootsReference() {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->root()->index(),
- snapshot_->gc_roots());
+ snapshot_->root()->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
+ snapshot_->gc_roots());
}
void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
DCHECK_NOT_NULL(child_entry);
- filler_->SetNamedAutoIndexReference(HeapGraphEdge::kShortcut,
- snapshot_->root()->index(), nullptr,
- child_entry);
+ snapshot_->root()->SetNamedAutoIndexReference(HeapGraphEdge::kShortcut,
+ nullptr, child_entry, names_);
}
void V8HeapExplorer::SetGcRootsReference(Root root) {
- filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- snapshot_->gc_roots()->index(),
- snapshot_->gc_subroot(root));
+ snapshot_->gc_roots()->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement, snapshot_->gc_subroot(root));
}
void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
@@ -1881,12 +1653,11 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
HeapGraphEdge::Type edge_type =
is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kInternal;
if (name != nullptr) {
- filler_->SetNamedReference(edge_type, snapshot_->gc_subroot(root)->index(),
- name, child_entry);
+ snapshot_->gc_subroot(root)->SetNamedReference(edge_type, name,
+ child_entry);
} else {
- filler_->SetNamedAutoIndexReference(edge_type,
- snapshot_->gc_subroot(root)->index(),
- description, child_entry);
+ snapshot_->gc_subroot(root)->SetNamedAutoIndexReference(
+ edge_type, description, child_entry, names_);
}
// Add a shortcut to JS global object reference at snapshot root.
@@ -1897,53 +1668,34 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
JSGlobalObject* global = Context::cast(child_obj)->global_object();
if (!global->IsJSGlobalObject()) return;
-  if (user_roots_.Contains(global)) return;
-  user_roots_.Insert(global);
+  if (!user_roots_.insert(global).second) return;
SetUserGlobalReference(global);
}
-const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
- ReadOnlyRoots roots(heap_);
- if (strong_gc_subroot_names_.is_empty()) {
-#define NAME_ENTRY(name) strong_gc_subroot_names_.SetTag(heap_->name(), #name);
-#define RO_NAME_ENTRY(name) \
- strong_gc_subroot_names_.SetTag(roots.name(), #name);
-#define ROOT_NAME(type, name, camel_name) NAME_ENTRY(name)
- STRONG_MUTABLE_ROOT_LIST(ROOT_NAME)
+// This static array is used to prevent excessive code size in
+// GetStrongGcSubrootName below, which would happen if we called emplace() for
+// every root in a macro.
+static const char* root_names[] = {
+#define ROOT_NAME(type, name, CamelName) #name,
+ READ_ONLY_ROOT_LIST(ROOT_NAME) MUTABLE_ROOT_LIST(ROOT_NAME)
#undef ROOT_NAME
-#define ROOT_NAME(type, name, camel_name) RO_NAME_ENTRY(name)
- STRONG_READ_ONLY_ROOT_LIST(ROOT_NAME)
-#undef ROOT_NAME
-#define STRUCT_MAP_NAME(NAME, Name, name) RO_NAME_ENTRY(name##_map)
- STRUCT_LIST(STRUCT_MAP_NAME)
-#undef STRUCT_MAP_NAME
-#define ALLOCATION_SITE_MAP_NAME(NAME, Name, Size, name) \
- RO_NAME_ENTRY(name##_map)
- ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_NAME)
-#undef ALLOCATION_SITE_MAP_NAME
-#define DATA_HANDLER_MAP_NAME(NAME, Name, Size, name) NAME_ENTRY(name##_map)
- DATA_HANDLER_LIST(DATA_HANDLER_MAP_NAME)
-#undef DATA_HANDLER_MAP_NAME
-#define STRING_NAME(name, str) RO_NAME_ENTRY(name)
- INTERNALIZED_STRING_LIST(STRING_NAME)
-#undef STRING_NAME
-#define SYMBOL_NAME(name) RO_NAME_ENTRY(name)
- PRIVATE_SYMBOL_LIST(SYMBOL_NAME)
-#undef SYMBOL_NAME
-#define SYMBOL_NAME(name, description) RO_NAME_ENTRY(name)
- PUBLIC_SYMBOL_LIST(SYMBOL_NAME)
- WELL_KNOWN_SYMBOL_LIST(SYMBOL_NAME)
-#undef SYMBOL_NAME
-#define ACCESSOR_NAME(accessor_name, AccessorName) \
- NAME_ENTRY(accessor_name##_accessor)
- ACCESSOR_INFO_LIST(ACCESSOR_NAME)
-#undef ACCESSOR_NAME
-#undef NAME_ENTRY
-#undef RO_NAME_ENTRY
- CHECK(!strong_gc_subroot_names_.is_empty());
- }
- return strong_gc_subroot_names_.GetTag(object);
+};
+STATIC_ASSERT(static_cast<uint16_t>(RootIndex::kRootListLength) ==
+ arraysize(root_names));
+
+const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
+ if (strong_gc_subroot_names_.empty()) {
+ for (uint16_t i = 0; i < static_cast<uint16_t>(RootIndex::kRootListLength);
+ i++) {
+ const char* name = root_names[i];
+ RootIndex index = static_cast<RootIndex>(i);
+ strong_gc_subroot_names_.emplace(heap_->root(index), name);
+ }
+ CHECK(!strong_gc_subroot_names_.empty());
+ }
+ auto it = strong_gc_subroot_names_.find(object);
+ return it != strong_gc_subroot_names_.end() ? it->second : nullptr;
}
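// The table-driven lookup above mirrors native_context_names: ROOT_NAME
// stamps out one string per root in RootIndex order, and the STATIC_ASSERT
// keeps table and enum in sync. A hypothetical use:
//
//   Object* root = heap_->root(RootIndex::kEmptyString);
//   const char* n = GetStrongGcSubrootName(root);  // "empty_string"
//   // Objects that are not strong roots yield nullptr.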
void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
@@ -1993,7 +1745,7 @@ void V8HeapExplorer::TagGlobalObjects() {
DisallowHeapAllocation no_allocation;
for (int i = 0, l = enumerator.count(); i < l; ++i) {
- objects_tags_.SetTag(*enumerator.at(i), urls[i]);
+ if (urls[i]) objects_tags_.emplace(*enumerator.at(i), urls[i]);
}
}
@@ -2055,7 +1807,7 @@ class GlobalHandlesExtractor : public PersistentHandleVisitor {
public:
explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
: explorer_(explorer) {}
- ~GlobalHandlesExtractor() override {}
+ ~GlobalHandlesExtractor() override = default;
void VisitPersistentHandle(Persistent<Value>* value,
uint16_t class_id) override {
Handle<Object> object = Utils::OpenPersistent(value);
@@ -2077,7 +1829,7 @@ class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
heap_object_map_(snapshot_->profiler()->heap_object_map()),
entries_type_(entries_type) {
}
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
+ HeapEntry* AllocateEntry(HeapThing ptr) override;
private:
HeapSnapshot* snapshot_;
StringsStorage* names_;
@@ -2108,7 +1860,7 @@ class EmbedderGraphEntriesAllocator : public HeapEntriesAllocator {
: snapshot_(snapshot),
names_(snapshot_->profiler()->names()),
heap_object_map_(snapshot_->profiler()->heap_object_map()) {}
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
+ HeapEntry* AllocateEntry(HeapThing ptr) override;
private:
HeapSnapshot* snapshot_;
@@ -2135,12 +1887,9 @@ HeapEntry::Type EmbedderGraphNodeType(EmbedderGraphImpl::Node* node) {
// Otherwise, the result is the embedder node name.
const char* MergeNames(StringsStorage* names, const char* embedder_name,
const char* wrapper_name) {
- for (const char* suffix = wrapper_name; *suffix; suffix++) {
- if (*suffix == '/') {
- return names->GetFormatted("%s %s", embedder_name, suffix);
- }
- }
- return embedder_name;
+ const char* suffix = strchr(wrapper_name, '/');
+ return suffix ? names->GetFormatted("%s %s", embedder_name, suffix)
+ : embedder_name;
}
} // anonymous namespace
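
MergeNames now leans on strchr from <cstring>: it returns a pointer to the first matching character, or nullptr when there is none, which maps directly onto the ternary. The same rule in a standalone sketch (names hypothetical):

```
#include <cstdio>
#include <cstring>

// Reduced merge rule: keep the embedder name, and append the wrapper
// name's "/ suffix" part only when the wrapper name contains a '/'.
void Merge(const char* embedder_name, const char* wrapper_name) {
  const char* suffix = strchr(wrapper_name, '/');  // nullptr if absent
  if (suffix) {
    printf("%s %s\n", embedder_name, suffix);
  } else {
    printf("%s\n", embedder_name);
  }
}

int main() {
  Merge("HTMLDivElement", "Wrapper / detached");  // HTMLDivElement / detached
  Merge("HTMLDivElement", "Wrapper");             // HTMLDivElement
}
```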
@@ -2163,17 +1912,17 @@ class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
hash_(reinterpret_cast<intptr_t>(label)),
label_(label) {}
- virtual ~NativeGroupRetainedObjectInfo() {}
- virtual void Dispose() {
+ ~NativeGroupRetainedObjectInfo() override = default;
+ void Dispose() override {
CHECK(!disposed_);
disposed_ = true;
delete this;
}
- virtual bool IsEquivalent(RetainedObjectInfo* other) {
+ bool IsEquivalent(RetainedObjectInfo* other) override {
return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel());
}
- virtual intptr_t GetHash() { return hash_; }
- virtual const char* GetLabel() { return label_; }
+ intptr_t GetHash() override { return hash_; }
+ const char* GetLabel() override { return label_; }
private:
bool disposed_;
@@ -2193,8 +1942,7 @@ NativeObjectsExplorer::NativeObjectsExplorer(
native_entries_allocator_(
new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative)),
embedder_graph_entries_allocator_(
- new EmbedderGraphEntriesAllocator(snapshot)),
- filler_(nullptr) {}
+ new EmbedderGraphEntriesAllocator(snapshot)) {}
NativeObjectsExplorer::~NativeObjectsExplorer() {
for (auto map_entry : objects_by_info_) {
@@ -2231,7 +1979,7 @@ void NativeObjectsExplorer::FillRetainedObjects() {
DCHECK(!object.is_null());
HeapObject* heap_object = HeapObject::cast(*object);
info->push_back(heap_object);
- in_groups_.Insert(heap_object);
+ in_groups_.insert(heap_object);
}
}
@@ -2252,25 +2000,23 @@ void NativeObjectsExplorer::FillEdges() {
Handle<Object> parent_object = v8::Utils::OpenHandle(
*pair.first->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
HeapObject* parent = HeapObject::cast(*parent_object);
- int parent_entry =
- filler_->FindOrAddEntry(parent, native_entries_allocator_.get())
- ->index();
- DCHECK_NE(parent_entry, HeapEntry::kNoEntry);
+ HeapEntry* parent_entry =
+ generator_->FindOrAddEntry(parent, native_entries_allocator_.get());
+ DCHECK_NOT_NULL(parent_entry);
Handle<Object> child_object = v8::Utils::OpenHandle(
*pair.second->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
HeapObject* child = HeapObject::cast(*child_object);
HeapEntry* child_entry =
- filler_->FindOrAddEntry(child, native_entries_allocator_.get());
- filler_->SetNamedReference(HeapGraphEdge::kInternal, parent_entry, "native",
- child_entry);
+ generator_->FindOrAddEntry(child, native_entries_allocator_.get());
+ parent_entry->SetNamedReference(HeapGraphEdge::kInternal, "native",
+ child_entry);
}
edges_.clear();
}
std::vector<HeapObject*>* NativeObjectsExplorer::GetVectorMaybeDisposeInfo(
v8::RetainedObjectInfo* info) {
- auto map_entry = objects_by_info_.find(info);
- if (map_entry != objects_by_info_.end()) {
+ if (objects_by_info_.count(info)) {
info->Dispose();
} else {
objects_by_info_[info] = new std::vector<HeapObject*>();
@@ -2285,21 +2031,20 @@ HeapEntry* NativeObjectsExplorer::EntryForEmbedderGraphNode(
node = wrapper;
}
if (node->IsEmbedderNode()) {
- return filler_->FindOrAddEntry(node,
- embedder_graph_entries_allocator_.get());
+ return generator_->FindOrAddEntry(node,
+ embedder_graph_entries_allocator_.get());
} else {
EmbedderGraphImpl::V8NodeImpl* v8_node =
static_cast<EmbedderGraphImpl::V8NodeImpl*>(node);
Object* object = v8_node->GetObject();
if (object->IsSmi()) return nullptr;
- HeapEntry* entry = filler_->FindEntry(HeapObject::cast(object));
- return entry;
+ return generator_->FindEntry(HeapObject::cast(object));
}
}
bool NativeObjectsExplorer::IterateAndExtractReferences(
- SnapshotFiller* filler) {
- filler_ = filler;
+ HeapSnapshotGenerator* generator) {
+ generator_ = generator;
if (FLAG_heap_profiler_use_embedder_graph &&
snapshot_->profiler()->HasBuildEmbedderGraphCallback()) {
@@ -2309,9 +2054,8 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
snapshot_->profiler()->BuildEmbedderGraph(isolate_, &graph);
for (const auto& node : graph.nodes()) {
if (node->IsRootNode()) {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement, snapshot_->root()->index(),
- EntryForEmbedderGraphNode(node.get()));
+ snapshot_->root()->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement, EntryForEmbedderGraphNode(node.get()));
}
// Adjust the name and the type of the V8 wrapper node.
auto wrapper = node->WrapperNode();
@@ -2326,21 +2070,15 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
// Fill edges of the graph.
for (const auto& edge : graph.edges()) {
HeapEntry* from = EntryForEmbedderGraphNode(edge.from);
- // The |from| and |to| can nullptr if the corrsponding node is a V8 node
+ // |from| and |to| can be nullptr if the corresponding node is a V8 node
// pointing to a Smi.
if (!from) continue;
- // Adding an entry for |edge.to| can invalidate the |from| entry because
- // it is an address in std::vector. Use index instead of pointer.
- int from_index = from->index();
HeapEntry* to = EntryForEmbedderGraphNode(edge.to);
- if (to) {
- if (edge.name == nullptr) {
- filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- from_index, to);
- } else {
- filler_->SetNamedReference(HeapGraphEdge::kInternal, from_index,
- edge.name, to);
- }
+ if (!to) continue;
+ if (edge.name == nullptr) {
+ from->SetIndexedAutoIndexReference(HeapGraphEdge::kElement, to);
+ } else {
+ from->SetNamedReference(HeapGraphEdge::kInternal, edge.name, to);
}
}
} else {
@@ -2358,15 +2096,14 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
SetRootNativeRootsReference();
}
}
- filler_ = nullptr;
+ generator_ = nullptr;
return true;
}
NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
const char* label) {
const char* label_copy = names_->GetCopy(label);
- auto map_entry = native_groups_.find(label_copy);
- if (map_entry == native_groups_.end()) {
+ if (!native_groups_.count(label_copy)) {
native_groups_[label_copy] = new NativeGroupRetainedObjectInfo(label);
}
return native_groups_[label_copy];
@@ -2375,61 +2112,48 @@ NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
void NativeObjectsExplorer::SetNativeRootReference(
v8::RetainedObjectInfo* info) {
HeapEntry* child_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_.get());
+ generator_->FindOrAddEntry(info, native_entries_allocator_.get());
DCHECK_NOT_NULL(child_entry);
NativeGroupRetainedObjectInfo* group_info =
FindOrAddGroupInfo(info->GetGroupLabel());
- HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_.get());
- // |FindOrAddEntry| can move and resize the entries backing store. Reload
- // potentially-stale pointer.
- child_entry = filler_->FindEntry(info);
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kInternal, group_entry->index(), nullptr, child_entry);
+ HeapEntry* group_entry = generator_->FindOrAddEntry(
+ group_info, synthetic_entries_allocator_.get());
+ group_entry->SetNamedAutoIndexReference(HeapGraphEdge::kInternal, nullptr,
+ child_entry, names_);
}
-
void NativeObjectsExplorer::SetWrapperNativeReferences(
HeapObject* wrapper, v8::RetainedObjectInfo* info) {
- HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
+ HeapEntry* wrapper_entry = generator_->FindEntry(wrapper);
DCHECK_NOT_NULL(wrapper_entry);
HeapEntry* info_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_.get());
+ generator_->FindOrAddEntry(info, native_entries_allocator_.get());
DCHECK_NOT_NULL(info_entry);
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- wrapper_entry->index(),
- "native",
- info_entry);
- filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- info_entry->index(),
- wrapper_entry);
+ wrapper_entry->SetNamedReference(HeapGraphEdge::kInternal, "native",
+ info_entry);
+ info_entry->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
+ wrapper_entry);
}
-
void NativeObjectsExplorer::SetRootNativeRootsReference() {
for (auto map_entry : native_groups_) {
NativeGroupRetainedObjectInfo* group_info = map_entry.second;
HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, native_entries_allocator_.get());
+ generator_->FindOrAddEntry(group_info, native_entries_allocator_.get());
DCHECK_NOT_NULL(group_entry);
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->root()->index(),
- group_entry);
+ snapshot_->root()->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
+ group_entry);
}
}
-
void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
- if (in_groups_.Contains(*p)) return;
- Isolate* isolate = isolate_;
+ if (in_groups_.count(*p)) return;
v8::RetainedObjectInfo* info =
- isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
+ isolate_->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
if (info == nullptr) return;
GetVectorMaybeDisposeInfo(info)->push_back(HeapObject::cast(*p));
}
-
HeapSnapshotGenerator::HeapSnapshotGenerator(
HeapSnapshot* snapshot,
v8::ActivityControl* control,
@@ -2464,10 +2188,10 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
// full GC is reachable from the root when computing dominators.
// This is not true for weakly reachable objects.
// As a temporary solution we call GC twice.
- heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- GarbageCollectionReason::kHeapProfiler);
- heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- GarbageCollectionReason::kHeapProfiler);
+ heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
+ GarbageCollectionReason::kHeapProfiler);
+ heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
+ GarbageCollectionReason::kHeapProfiler);
NullContextScope null_context_scope(heap_->isolate());
@@ -2525,12 +2249,10 @@ void HeapSnapshotGenerator::InitProgressCounter() {
}
bool HeapSnapshotGenerator::FillReferences() {
- SnapshotFiller filler(snapshot_, &entries_);
- return v8_heap_explorer_.IterateAndExtractReferences(&filler) &&
- dom_explorer_.IterateAndExtractReferences(&filler);
+ return v8_heap_explorer_.IterateAndExtractReferences(this) &&
+ dom_explorer_.IterateAndExtractReferences(this);
}
-
template<int bytes> struct MaxDecimalDigitsIn;
template<> struct MaxDecimalDigitsIn<4> {
static const int kSigned = 11;
@@ -2541,7 +2263,6 @@ template<> struct MaxDecimalDigitsIn<8> {
static const int kUnsigned = 20;
};
-
class OutputStreamWriter {
public:
explicit OutputStreamWriter(v8::OutputStream* stream)
@@ -2765,9 +2486,8 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
writer_->AddString(buffer.start());
}
-
void HeapSnapshotJSONSerializer::SerializeEdges() {
- std::deque<HeapGraphEdge*>& edges = snapshot_->children();
+ std::vector<HeapGraphEdge*>& edges = snapshot_->children();
for (size_t i = 0; i < edges.size(); ++i) {
DCHECK(i == 0 ||
edges[i - 1]->from()->index() <= edges[i]->from()->index());
@@ -2803,16 +2523,14 @@ void HeapSnapshotJSONSerializer::SerializeNode(const HeapEntry* entry) {
writer_->AddString(buffer.start());
}
-
void HeapSnapshotJSONSerializer::SerializeNodes() {
- std::vector<HeapEntry>& entries = snapshot_->entries();
+ const std::deque<HeapEntry>& entries = snapshot_->entries();
for (const HeapEntry& entry : entries) {
SerializeNode(&entry);
if (writer_->aborted()) return;
}
}
-
void HeapSnapshotJSONSerializer::SerializeSnapshot() {
writer_->AddString("\"meta\":");
// The object describing node serialization layout.
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index f25bee9f46..1f8f364912 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -7,6 +7,7 @@
#include <deque>
#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "include/v8-profiler.h"
@@ -28,10 +29,14 @@ class HeapEntry;
class HeapIterator;
class HeapProfiler;
class HeapSnapshot;
+class HeapSnapshotGenerator;
class JSArrayBuffer;
class JSCollection;
+class JSGeneratorObject;
+class JSGlobalObject;
+class JSGlobalProxy;
+class JSPromise;
class JSWeakCollection;
-class SnapshotFiller;
struct SourceLocation {
SourceLocation(int entry_index, int scriptId, int line, int col)
@@ -43,7 +48,7 @@ struct SourceLocation {
const int col;
};
-class HeapGraphEdge BASE_EMBEDDED {
+class HeapGraphEdge {
public:
enum Type {
kContextVariable = v8::HeapGraphEdge::kContextVariable,
@@ -55,9 +60,8 @@ class HeapGraphEdge BASE_EMBEDDED {
kWeak = v8::HeapGraphEdge::kWeak
};
- HeapGraphEdge(Type type, const char* name, int from, int to);
- HeapGraphEdge(Type type, int index, int from, int to);
- void ReplaceToIndexWithEntry(HeapSnapshot* snapshot);
+ HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to);
+ HeapGraphEdge(Type type, int index, HeapEntry* from, HeapEntry* to);
Type type() const { return TypeField::decode(bit_field_); }
int index() const {
@@ -81,12 +85,7 @@ class HeapGraphEdge BASE_EMBEDDED {
class TypeField : public BitField<Type, 0, 3> {};
class FromIndexField : public BitField<int, 3, 29> {};
uint32_t bit_field_;
- union {
- // During entries population |to_index_| is used for storing the index,
- // afterwards it is replaced with a pointer to the entry.
- int to_index_;
- HeapEntry* to_entry_;
- };
+ HeapEntry* to_entry_;
union {
int index_;
const char* name_;
@@ -96,7 +95,7 @@ class HeapGraphEdge BASE_EMBEDDED {
// HeapEntry instances represent an entity from the heap (or a special
// virtual node, e.g. root).
-class HeapEntry BASE_EMBEDDED {
+class HeapEntry {
public:
enum Type {
kHidden = v8::HeapGraphNode::kHidden,
@@ -114,15 +113,9 @@ class HeapEntry BASE_EMBEDDED {
kSymbol = v8::HeapGraphNode::kSymbol,
kBigInt = v8::HeapGraphNode::kBigInt
};
- static const int kNoEntry;
- HeapEntry() { }
- HeapEntry(HeapSnapshot* snapshot,
- Type type,
- const char* name,
- SnapshotObjectId id,
- size_t self_size,
- unsigned trace_node_id);
+ HeapEntry(HeapSnapshot* snapshot, int index, Type type, const char* name,
+ SnapshotObjectId id, size_t self_size, unsigned trace_node_id);
HeapSnapshot* snapshot() { return snapshot_; }
Type type() const { return static_cast<Type>(type_); }
@@ -132,8 +125,8 @@ class HeapEntry BASE_EMBEDDED {
SnapshotObjectId id() const { return id_; }
size_t self_size() const { return self_size_; }
unsigned trace_node_id() const { return trace_node_id_; }
- V8_INLINE int index() const;
- int children_count() const { return children_count_; }
+ int index() const { return index_; }
+ V8_INLINE int children_count() const;
V8_INLINE int set_children_index(int index);
V8_INLINE void add_child(HeapGraphEdge* edge);
V8_INLINE HeapGraphEdge* child(int i);
@@ -143,18 +136,30 @@ class HeapEntry BASE_EMBEDDED {
HeapGraphEdge::Type type, int index, HeapEntry* entry);
void SetNamedReference(
HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
+ void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+ HeapEntry* child) {
+ SetIndexedReference(type, children_count_ + 1, child);
+ }
+ void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+ const char* description, HeapEntry* child,
+ StringsStorage* strings);
void Print(
const char* prefix, const char* edge_name, int max_depth, int indent);
private:
- V8_INLINE std::deque<HeapGraphEdge*>::iterator children_begin();
- V8_INLINE std::deque<HeapGraphEdge*>::iterator children_end();
+ V8_INLINE std::vector<HeapGraphEdge*>::iterator children_begin() const;
+ V8_INLINE std::vector<HeapGraphEdge*>::iterator children_end() const;
const char* TypeAsString();
unsigned type_: 4;
- int children_count_: 28;
- int children_index_;
+ unsigned index_ : 28; // Supports up to ~250M objects.
+ union {
+ // The count is used during the snapshot build phase,
+ // then it gets converted into the index by the |FillChildren| function.
+ unsigned children_count_;
+ unsigned children_end_index_;
+ };
size_t self_size_;
HeapSnapshot* snapshot_;
const char* name_;
@@ -163,7 +168,6 @@ class HeapEntry BASE_EMBEDDED {
unsigned trace_node_id_;
};
-
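
The children_count_/children_end_index_ union above is the classic count-then-offset trick: during extraction each entry only accumulates a child count, and FillChildren later converts the counts into end indices into one flat children vector with a running sum, reusing the same field. A simplified illustration of that conversion (not the actual FillChildren code):

```
#include <cstdio>
#include <vector>

int main() {
  // Phase 1 result: each node has only counted its children.
  std::vector<unsigned> count_then_end = {2, 0, 3};
  // Phase 2: a running sum turns counts into end indices into one flat
  // children array; the same storage is reused, mirroring the union.
  unsigned running = 0;
  for (unsigned& c : count_then_end) {
    running += c;
    c = running;
  }
  // Node i's children occupy flat[end[i-1], end[i]) (flat[0, end[0]) for i=0).
  for (unsigned e : count_then_end) printf("%u ", e);  // prints: 2 2 5
  printf("\n");
}
```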
// HeapSnapshot represents a single heap snapshot. It is stored in
// HeapProfiler, which is also a factory for
// HeapSnapshots. All HeapSnapshots share strings copied from JS heap
@@ -174,22 +178,23 @@ class HeapSnapshot {
explicit HeapSnapshot(HeapProfiler* profiler);
void Delete();
- HeapProfiler* profiler() { return profiler_; }
- HeapEntry* root() { return &entries_[root_index_]; }
- HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
- HeapEntry* gc_subroot(Root root) {
- return &entries_[gc_subroot_indexes_[static_cast<int>(root)]];
+ HeapProfiler* profiler() const { return profiler_; }
+ HeapEntry* root() const { return root_entry_; }
+ HeapEntry* gc_roots() const { return gc_roots_entry_; }
+ HeapEntry* gc_subroot(Root root) const {
+ return gc_subroot_entries_[static_cast<int>(root)];
}
- std::vector<HeapEntry>& entries() { return entries_; }
+ std::deque<HeapEntry>& entries() { return entries_; }
std::deque<HeapGraphEdge>& edges() { return edges_; }
- std::deque<HeapGraphEdge*>& children() { return children_; }
+ std::vector<HeapGraphEdge*>& children() { return children_; }
const std::vector<SourceLocation>& locations() const { return locations_; }
void RememberLastJSObjectId();
SnapshotObjectId max_snapshot_js_object_id() const {
return max_snapshot_js_object_id_;
}
+ bool is_complete() const { return !children_.empty(); }
- void AddLocation(int entry, int scriptId, int line, int col);
+ void AddLocation(HeapEntry* entry, int scriptId, int line, int col);
HeapEntry* AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
@@ -197,28 +202,28 @@ class HeapSnapshot {
unsigned trace_node_id);
void AddSyntheticRootEntries();
HeapEntry* GetEntryById(SnapshotObjectId id);
- std::vector<HeapEntry*>* GetSortedEntriesList();
void FillChildren();
void Print(int max_depth);
private:
- HeapEntry* AddRootEntry();
- HeapEntry* AddGcRootsEntry();
- HeapEntry* AddGcSubrootEntry(Root root, SnapshotObjectId id);
+ void AddRootEntry();
+ void AddGcRootsEntry();
+ void AddGcSubrootEntry(Root root, SnapshotObjectId id);
HeapProfiler* profiler_;
- int root_index_;
- int gc_roots_index_;
- int gc_subroot_indexes_[static_cast<int>(Root::kNumberOfRoots)];
- std::vector<HeapEntry> entries_;
+ HeapEntry* root_entry_ = nullptr;
+ HeapEntry* gc_roots_entry_ = nullptr;
+ HeapEntry* gc_subroot_entries_[static_cast<int>(Root::kNumberOfRoots)];
+ // For |entries_| we rely on the deque property, that it never reallocates
+ // backing storage, thus all entry pointers remain valid for the duration
+ // of snapshotting.
+ std::deque<HeapEntry> entries_;
std::deque<HeapGraphEdge> edges_;
- std::deque<HeapGraphEdge*> children_;
- std::vector<HeapEntry*> sorted_entries_;
+ std::vector<HeapGraphEdge*> children_;
+ std::unordered_map<SnapshotObjectId, HeapEntry*> entries_by_id_cache_;
std::vector<SourceLocation> locations_;
- SnapshotObjectId max_snapshot_js_object_id_;
-
- friend class HeapSnapshotTester;
+ SnapshotObjectId max_snapshot_js_object_id_ = -1;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
};
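
The std::deque comment above is the linchpin of this refactor: replacing entry indices with raw HeapEntry* everywhere is only safe because deque growth never moves existing elements, whereas the old std::vector<HeapEntry> could reallocate and invalidate every outstanding pointer. The guarantee is easy to demonstrate:

```
#include <cassert>
#include <deque>

int main() {
  std::deque<int> entries;
  entries.push_back(1);
  int* first = &entries.front();
  // push_back on a deque may invalidate iterators, but never invalidates
  // pointers or references to existing elements, so |first| stays valid
  // no matter how much the container grows.
  for (int i = 0; i < 100000; i++) entries.push_back(i);
  assert(*first == 1);
}
```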
@@ -294,68 +299,28 @@ typedef void* HeapThing;
// An interface that creates HeapEntries by HeapThings.
class HeapEntriesAllocator {
public:
- virtual ~HeapEntriesAllocator() { }
+ virtual ~HeapEntriesAllocator() = default;
virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
};
-// The HeapEntriesMap instance is used to track a mapping between
-// real heap objects and their representations in heap snapshots.
-class HeapEntriesMap {
- public:
- HeapEntriesMap();
-
- int Map(HeapThing thing);
- void Pair(HeapThing thing, int entry);
-
- private:
- static uint32_t Hash(HeapThing thing) {
- return ComputeUnseededHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)));
- }
-
- base::HashMap entries_;
-
- friend class HeapObjectsSet;
-
- DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
-};
-
-
-class HeapObjectsSet {
- public:
- HeapObjectsSet();
- void Clear();
- bool Contains(Object* object);
- void Insert(Object* obj);
- const char* GetTag(Object* obj);
- void SetTag(Object* obj, const char* tag);
- bool is_empty() const { return entries_.occupancy() == 0; }
-
- private:
- base::HashMap entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
-};
-
-
class SnapshottingProgressReportingInterface {
public:
- virtual ~SnapshottingProgressReportingInterface() { }
+ virtual ~SnapshottingProgressReportingInterface() = default;
virtual void ProgressStep() = 0;
virtual bool ProgressReport(bool force) = 0;
};
-
// An implementation of V8 heap graph extractor.
class V8HeapExplorer : public HeapEntriesAllocator {
public:
V8HeapExplorer(HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress,
v8::HeapProfiler::ObjectNameResolver* resolver);
- virtual ~V8HeapExplorer();
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
+ ~V8HeapExplorer() override = default;
+
+ HeapEntry* AllocateEntry(HeapThing ptr) override;
int EstimateObjectsCount();
- bool IterateAndExtractReferences(SnapshotFiller* filler);
+ bool IterateAndExtractReferences(HeapSnapshotGenerator* generator);
void TagGlobalObjects();
void TagCodeObject(Code* code);
void TagBuiltinCodeObject(Code* code, const char* name);
@@ -377,91 +342,74 @@ class V8HeapExplorer : public HeapEntriesAllocator {
const char* GetSystemEntryName(HeapObject* object);
- void ExtractLocation(int entry, HeapObject* object);
- void ExtractLocationForJSFunction(int entry, JSFunction* func);
- void ExtractReferences(int entry, HeapObject* obj);
- void ExtractJSGlobalProxyReferences(int entry, JSGlobalProxy* proxy);
- void ExtractJSObjectReferences(int entry, JSObject* js_obj);
- void ExtractStringReferences(int entry, String* obj);
- void ExtractSymbolReferences(int entry, Symbol* symbol);
- void ExtractJSCollectionReferences(int entry, JSCollection* collection);
- void ExtractJSWeakCollectionReferences(int entry,
+ void ExtractLocation(HeapEntry* entry, HeapObject* object);
+ void ExtractLocationForJSFunction(HeapEntry* entry, JSFunction* func);
+ void ExtractReferences(HeapEntry* entry, HeapObject* obj);
+ void ExtractJSGlobalProxyReferences(HeapEntry* entry, JSGlobalProxy* proxy);
+ void ExtractJSObjectReferences(HeapEntry* entry, JSObject* js_obj);
+ void ExtractStringReferences(HeapEntry* entry, String* obj);
+ void ExtractSymbolReferences(HeapEntry* entry, Symbol* symbol);
+ void ExtractJSCollectionReferences(HeapEntry* entry,
+ JSCollection* collection);
+ void ExtractJSWeakCollectionReferences(HeapEntry* entry,
JSWeakCollection* collection);
- void ExtractEphemeronHashTableReferences(int entry,
+ void ExtractEphemeronHashTableReferences(HeapEntry* entry,
EphemeronHashTable* table);
- void ExtractContextReferences(int entry, Context* context);
- void ExtractMapReferences(int entry, Map* map);
- void ExtractSharedFunctionInfoReferences(int entry,
+ void ExtractContextReferences(HeapEntry* entry, Context* context);
+ void ExtractMapReferences(HeapEntry* entry, Map* map);
+ void ExtractSharedFunctionInfoReferences(HeapEntry* entry,
SharedFunctionInfo* shared);
- void ExtractScriptReferences(int entry, Script* script);
- void ExtractAccessorInfoReferences(int entry, AccessorInfo* accessor_info);
- void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
- void ExtractCodeReferences(int entry, Code* code);
- void ExtractCellReferences(int entry, Cell* cell);
- void ExtractFeedbackCellReferences(int entry, FeedbackCell* feedback_cell);
- void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
- void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
+ void ExtractScriptReferences(HeapEntry* entry, Script* script);
+ void ExtractAccessorInfoReferences(HeapEntry* entry,
+ AccessorInfo* accessor_info);
+ void ExtractAccessorPairReferences(HeapEntry* entry, AccessorPair* accessors);
+ void ExtractCodeReferences(HeapEntry* entry, Code* code);
+ void ExtractCellReferences(HeapEntry* entry, Cell* cell);
+ void ExtractFeedbackCellReferences(HeapEntry* entry,
+ FeedbackCell* feedback_cell);
+ void ExtractPropertyCellReferences(HeapEntry* entry, PropertyCell* cell);
+ void ExtractAllocationSiteReferences(HeapEntry* entry, AllocationSite* site);
void ExtractArrayBoilerplateDescriptionReferences(
- int entry, ArrayBoilerplateDescription* value);
- void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
- void ExtractJSPromiseReferences(int entry, JSPromise* promise);
- void ExtractJSGeneratorObjectReferences(int entry,
+ HeapEntry* entry, ArrayBoilerplateDescription* value);
+ void ExtractJSArrayBufferReferences(HeapEntry* entry, JSArrayBuffer* buffer);
+ void ExtractJSPromiseReferences(HeapEntry* entry, JSPromise* promise);
+ void ExtractJSGeneratorObjectReferences(HeapEntry* entry,
JSGeneratorObject* generator);
- void ExtractFixedArrayReferences(int entry, FixedArray* array);
- void ExtractFeedbackVectorReferences(int entry,
+ void ExtractFixedArrayReferences(HeapEntry* entry, FixedArray* array);
+ void ExtractFeedbackVectorReferences(HeapEntry* entry,
FeedbackVector* feedback_vector);
template <typename T>
- void ExtractWeakArrayReferences(int header_size, int entry, T* array);
- void ExtractPropertyReferences(JSObject* js_obj, int entry);
- void ExtractAccessorPairProperty(JSObject* js_obj, int entry, Name* key,
+ void ExtractWeakArrayReferences(int header_size, HeapEntry* entry, T* array);
+ void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
+ void ExtractAccessorPairProperty(HeapEntry* entry, Name* key,
Object* callback_obj, int field_offset = -1);
- void ExtractElementReferences(JSObject* js_obj, int entry);
- void ExtractInternalReferences(JSObject* js_obj, int entry);
+ void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
+ void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
bool IsEssentialObject(Object* object);
bool IsEssentialHiddenReference(Object* parent, int field_offset);
- void SetContextReference(HeapObject* parent_obj,
- int parent,
- String* reference_name,
- Object* child,
- int field_offset);
- void SetNativeBindReference(HeapObject* parent_obj,
- int parent,
- const char* reference_name,
- Object* child);
- void SetElementReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child);
- void SetInternalReference(HeapObject* parent_obj,
- int parent,
- const char* reference_name,
- Object* child,
- int field_offset = -1);
- void SetInternalReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child,
+ void SetContextReference(HeapEntry* parent_entry, String* reference_name,
+ Object* child, int field_offset);
+ void SetNativeBindReference(HeapEntry* parent_entry,
+ const char* reference_name, Object* child);
+ void SetElementReference(HeapEntry* parent_entry, int index, Object* child);
+ void SetInternalReference(HeapEntry* parent_entry, const char* reference_name,
+ Object* child, int field_offset = -1);
+ void SetInternalReference(HeapEntry* parent_entry, int index, Object* child,
int field_offset = -1);
- void SetHiddenReference(HeapObject* parent_obj, int parent, int index,
- Object* child, int field_offset);
- void SetWeakReference(HeapObject* parent_obj,
- int parent,
- const char* reference_name,
- Object* child_obj,
+ void SetHiddenReference(HeapObject* parent_obj, HeapEntry* parent_entry,
+ int index, Object* child, int field_offset);
+ void SetWeakReference(HeapEntry* parent_entry, const char* reference_name,
+ Object* child_obj, int field_offset);
+ void SetWeakReference(HeapEntry* parent_entry, int index, Object* child_obj,
int field_offset);
- void SetWeakReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child_obj,
- int field_offset);
- void SetPropertyReference(HeapObject* parent_obj, int parent,
- Name* reference_name, Object* child,
+ void SetPropertyReference(HeapEntry* parent_entry, Name* reference_name,
+ Object* child,
const char* name_format_string = nullptr,
int field_offset = -1);
void SetDataOrAccessorPropertyReference(
- PropertyKind kind, JSObject* parent_obj, int parent, Name* reference_name,
+ PropertyKind kind, HeapEntry* parent_entry, Name* reference_name,
Object* child, const char* name_format_string = nullptr,
int field_offset = -1);
@@ -480,10 +428,10 @@ class V8HeapExplorer : public HeapEntriesAllocator {
StringsStorage* names_;
HeapObjectsMap* heap_object_map_;
SnapshottingProgressReportingInterface* progress_;
- SnapshotFiller* filler_;
- HeapObjectsSet objects_tags_;
- HeapObjectsSet strong_gc_subroot_names_;
- HeapObjectsSet user_roots_;
+ HeapSnapshotGenerator* generator_ = nullptr;
+ std::unordered_map<JSGlobalObject*, const char*> objects_tags_;
+ std::unordered_map<Object*, const char*> strong_gc_subroot_names_;
+ std::unordered_set<JSGlobalObject*> user_roots_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
std::vector<bool> visited_fields_;
@@ -505,7 +453,7 @@ class NativeObjectsExplorer {
SnapshottingProgressReportingInterface* progress);
virtual ~NativeObjectsExplorer();
int EstimateObjectsCount();
- bool IterateAndExtractReferences(SnapshotFiller* filler);
+ bool IterateAndExtractReferences(HeapSnapshotGenerator* generator);
private:
void FillRetainedObjects();
@@ -538,7 +486,7 @@ class NativeObjectsExplorer {
HeapSnapshot* snapshot_;
StringsStorage* names_;
bool embedder_queried_;
- HeapObjectsSet in_groups_;
+ std::unordered_set<Object*> in_groups_;
std::unordered_map<v8::RetainedObjectInfo*, std::vector<HeapObject*>*,
RetainedInfoHasher, RetainedInfoEquals>
objects_by_info_;
@@ -549,7 +497,7 @@ class NativeObjectsExplorer {
std::unique_ptr<HeapEntriesAllocator> native_entries_allocator_;
std::unique_ptr<HeapEntriesAllocator> embedder_graph_entries_allocator_;
// Used during references extraction.
- SnapshotFiller* filler_;
+ HeapSnapshotGenerator* generator_ = nullptr;
v8::HeapProfiler::RetainerEdges edges_;
static HeapThing const kNativesRootObject;
@@ -559,27 +507,45 @@ class NativeObjectsExplorer {
DISALLOW_COPY_AND_ASSIGN(NativeObjectsExplorer);
};
-
class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
public:
+ // The HeapEntriesMap instance is used to track a mapping between
+ // real heap objects and their representations in heap snapshots.
+ using HeapEntriesMap = std::unordered_map<HeapThing, HeapEntry*>;
+
HeapSnapshotGenerator(HeapSnapshot* snapshot,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver,
Heap* heap);
bool GenerateSnapshot();
+ HeapEntry* FindEntry(HeapThing ptr) {
+ auto it = entries_map_.find(ptr);
+ return it != entries_map_.end() ? it->second : nullptr;
+ }
+
+ HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ return entries_map_.emplace(ptr, allocator->AllocateEntry(ptr))
+ .first->second;
+ }
+
+ HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = FindEntry(ptr);
+ return entry != nullptr ? entry : AddEntry(ptr, allocator);
+ }
+
private:
bool FillReferences();
- void ProgressStep();
- bool ProgressReport(bool force = false);
+ void ProgressStep() override;
+ bool ProgressReport(bool force = false) override;
void InitProgressCounter();
HeapSnapshot* snapshot_;
v8::ActivityControl* control_;
V8HeapExplorer v8_heap_explorer_;
NativeObjectsExplorer dom_explorer_;
- // Mapping from HeapThing pointers to HeapEntry* pointers.
- HeapEntriesMap entries_;
+ // Mapping from HeapThing pointers to HeapEntry indices.
+ HeapEntriesMap entries_map_;
// Used during snapshot generation.
int progress_counter_;
int progress_total_;
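
FindOrAddEntry above is plain memoization over the new HeapEntriesMap alias: look the key up first, and only invoke the allocator on a miss, with emplace handing back the iterator to the freshly inserted pair. The same shape in isolation (generic names, not V8's):

```
#include <cassert>
#include <string>
#include <unordered_map>

// Find-or-create cache in the style of FindOrAddEntry: the (possibly
// expensive) factory runs only when the key is not yet present.
template <typename K, typename V, typename Factory>
V& FindOrAdd(std::unordered_map<K, V>& map, const K& key, Factory make) {
  auto it = map.find(key);
  if (it != map.end()) return it->second;
  return map.emplace(key, make()).first->second;
}

int main() {
  std::unordered_map<int, std::string> cache;
  int calls = 0;
  auto make = [&] { calls++; return std::string("entry"); };
  FindOrAdd(cache, 1, make);
  FindOrAdd(cache, 1, make);  // cache hit: factory not invoked again
  assert(calls == 1);
}
```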
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 48c3f73958..2e07135d85 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -6,8 +6,10 @@
#include <stdint.h>
#include <memory>
+
#include "src/api-inl.h"
#include "src/base/ieee754.h"
+#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
#include "src/frames-inl.h"
#include "src/heap/heap.h"
@@ -61,7 +63,6 @@ SamplingHeapProfiler::SamplingHeapProfiler(
heap->isolate()->random_number_generator())),
names_(names),
profile_root_(nullptr, "(root)", v8::UnboundScript::kNoScriptId, 0),
- samples_(),
stack_depth_(stack_depth),
rate_(rate),
flags_(flags) {
@@ -75,8 +76,6 @@ SamplingHeapProfiler::SamplingHeapProfiler(
SamplingHeapProfiler::~SamplingHeapProfiler() {
heap_->RemoveAllocationObserversFromAllSpaces(other_spaces_observer_.get(),
new_space_observer_.get());
-
- samples_.clear();
}
@@ -96,9 +95,9 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
AllocationNode* node = AddStack();
node->allocations_[size]++;
- Sample* sample = new Sample(size, node, loc, this);
- samples_.emplace(sample);
- sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
+ auto sample = base::make_unique<Sample>(size, node, loc, this);
+ sample->global.SetWeak(sample.get(), OnWeakCallback,
+ WeakCallbackType::kParameter);
#if __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated"
@@ -109,6 +108,7 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
#if __clang__
#pragma clang diagnostic pop
#endif
+ samples_.emplace(sample.get(), std::move(sample));
}
void SamplingHeapProfiler::OnWeakCallback(
@@ -125,17 +125,10 @@ void SamplingHeapProfiler::OnWeakCallback(
AllocationNode::FunctionId id = AllocationNode::function_id(
node->script_id_, node->script_position_, node->name_);
parent->children_.erase(id);
- delete node;
node = parent;
}
}
- auto it = std::find_if(sample->profiler->samples_.begin(),
- sample->profiler->samples_.end(),
- [&sample](const std::unique_ptr<Sample>& s) {
- return s.get() == sample;
- });
-
- sample->profiler->samples_.erase(it);
+ sample->profiler->samples_.erase(sample);
// sample is deleted because its unique ptr was erased from samples_.
}
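
The erase above got cheaper because of the container change in this patch: samples_ used to be a std::set<std::unique_ptr<Sample>>, so removing by raw pointer required the linear find_if deleted here; keying an owning unordered_map by the raw pointer makes the weak-callback erase a single hash lookup. The ownership pattern in miniature (Sample here is a hypothetical stand-in):

```
#include <cassert>
#include <memory>
#include <unordered_map>

struct Sample { int size; };

int main() {
  // The map owns the samples; the raw pointer doubles as the lookup key,
  // so a callback that only has a Sample* can erase in O(1).
  std::unordered_map<Sample*, std::unique_ptr<Sample>> samples;
  auto s = std::make_unique<Sample>(Sample{64});
  Sample* key = s.get();
  samples.emplace(key, std::move(s));
  samples.erase(key);  // destroys the Sample via its unique_ptr
  assert(samples.empty());
}
```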
@@ -147,11 +140,11 @@ SamplingHeapProfiler::AllocationNode::FindOrAddChildNode(const char* name,
auto it = children_.find(id);
if (it != children_.end()) {
DCHECK_EQ(strcmp(it->second->name_, name), 0);
- return it->second;
+ return it->second.get();
}
- auto child = new AllocationNode(this, name, script_id, start_position);
- children_.insert(std::make_pair(id, child));
- return child;
+ auto child =
+ base::make_unique<AllocationNode>(this, name, script_id, start_position);
+ return children_.emplace(id, std::move(child)).first->second.get();
}
SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
@@ -262,19 +255,19 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
allocations.push_back(ScaleSample(alloc.first, alloc.second));
}
- profile->nodes().push_back(v8::AllocationProfile::Node(
- {ToApiHandle<v8::String>(
- isolate_->factory()->InternalizeUtf8String(node->name_)),
- script_name, node->script_id_, node->script_position_, line, column,
- std::vector<v8::AllocationProfile::Node*>(), allocations}));
+ profile->nodes().push_back(v8::AllocationProfile::Node{
+ ToApiHandle<v8::String>(
+ isolate_->factory()->InternalizeUtf8String(node->name_)),
+ script_name, node->script_id_, node->script_position_, line, column,
+ std::vector<v8::AllocationProfile::Node*>(), allocations});
v8::AllocationProfile::Node* current = &profile->nodes().back();
- // The children map may have nodes inserted into it during translation
+ // The |children_| map may have nodes inserted into it during translation
// because the translation may allocate strings on the JS heap that have
// the potential to be sampled. That's ok since map iterators are not
// invalidated upon std::map insertion.
- for (auto it : node->children_) {
+ for (const auto& it : node->children_) {
current->children.push_back(
- TranslateAllocationNode(profile, it.second, scripts));
+ TranslateAllocationNode(profile, it.second.get(), scripts));
}
node->pinned_ = false;
return current;
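
The comment about |children_| relies on a real standard guarantee: insertion into a std::map never invalidates existing iterators (only erasing the pointed-to element does), which is why translation may allocate strings, trigger sampling, and thereby insert new children mid-iteration. For instance:

```
#include <cstdio>
#include <map>

int main() {
  std::map<int, const char*> m = {{1, "a"}, {3, "c"}};
  auto it = m.find(3);
  // std::map insertion never invalidates existing iterators, so |it|
  // still points at key 3 after the emplace.
  m.emplace(2, "b");
  printf("%d=%s\n", it->first, it->second);  // 3=c
}
```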
@@ -299,6 +292,5 @@ v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
return profile;
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.h b/deps/v8/src/profiler/sampling-heap-profiler.h
index 46fa405279..072c5eb677 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.h
+++ b/deps/v8/src/profiler/sampling-heap-profiler.h
@@ -8,7 +8,7 @@
#include <deque>
#include <map>
#include <memory>
-#include <set>
+#include <unordered_map>
#include "include/v8-profiler.h"
#include "src/heap/heap.h"
#include "src/profiler/strings-storage.h"
@@ -77,13 +77,7 @@ class SamplingHeapProfiler {
: parent_(parent),
script_id_(script_id),
script_position_(start_position),
- name_(name),
- pinned_(false) {}
- ~AllocationNode() {
- for (auto child : children_) {
- delete child.second;
- }
- }
+ name_(name) {}
private:
typedef uint64_t FunctionId;
@@ -107,12 +101,12 @@ class SamplingHeapProfiler {
// TODO(alph): make use of unordered_map's here. Pay attention to
// iterator invalidation during TranslateAllocationNode.
std::map<size_t, unsigned int> allocations_;
- std::map<FunctionId, AllocationNode*> children_;
+ std::map<FunctionId, std::unique_ptr<AllocationNode>> children_;
AllocationNode* const parent_;
const int script_id_;
const int script_position_;
const char* const name_;
- bool pinned_;
+ bool pinned_ = false;
friend class SamplingHeapProfiler;
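
Storing children as std::map<FunctionId, std::unique_ptr<AllocationNode>> is what let the patch drop both the hand-written ~AllocationNode and the bare delete in OnWeakCallback: erasing a map entry now destroys the node, which recursively destroys its own subtree. A sketch of the pattern (hypothetical Node type):

```
#include <cassert>
#include <map>
#include <memory>

struct Node {
  std::map<int, std::unique_ptr<Node>> children;  // owning edges
};

int main() {
  Node root;
  root.children.emplace(1, std::make_unique<Node>());
  root.children[1]->children.emplace(2, std::make_unique<Node>());
  // erase() destroys the child, whose destructor destroys its own
  // children in turn: no explicit destructor or delete needed.
  root.children.erase(1);
  assert(root.children.empty());
}
```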
@@ -146,7 +140,7 @@ class SamplingHeapProfiler {
std::unique_ptr<SamplingAllocationObserver> other_spaces_observer_;
StringsStorage* const names_;
AllocationNode profile_root_;
- std::set<std::unique_ptr<Sample>> samples_;
+ std::unordered_map<Sample*, std::unique_ptr<Sample>> samples_;
const int stack_depth_;
const uint64_t rate_;
v8::HeapProfiler::SamplingFlags flags_;
@@ -166,7 +160,7 @@ class SamplingAllocationObserver : public AllocationObserver {
heap_(heap),
random_(random),
rate_(rate) {}
- virtual ~SamplingAllocationObserver() {}
+ ~SamplingAllocationObserver() override = default;
protected:
void Step(int bytes_allocated, Address soon_object, size_t size) override {
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index e3bd1d9c69..69a6bbf778 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -206,7 +206,7 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
// Check whether we interrupted setup/teardown of a stack frame in JS code.
// Avoid this check for C++ code, as that would trigger false positives.
if (regs->pc &&
- isolate->heap()->memory_allocator()->code_range()->contains(
+ isolate->heap()->memory_allocator()->code_range().contains(
reinterpret_cast<i::Address>(regs->pc)) &&
IsNoFrameRegion(reinterpret_cast<i::Address>(regs->pc))) {
// The frame is not setup, so it'd be hard to iterate the stack. Bailout.
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.h b/deps/v8/src/profiler/tracing-cpu-profiler.h
index d7da209e2e..d5888f54a3 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.h
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.h
@@ -20,7 +20,7 @@ class TracingCpuProfilerImpl final
: private v8::TracingController::TraceStateObserver {
public:
explicit TracingCpuProfilerImpl(Isolate*);
- ~TracingCpuProfilerImpl();
+ ~TracingCpuProfilerImpl() override;
// v8::TracingController::TraceStateObserver
void OnTraceEnabled() final;
diff --git a/deps/v8/src/profiler/unbound-queue.h b/deps/v8/src/profiler/unbound-queue.h
index 0efe95abdf..547ac191b3 100644
--- a/deps/v8/src/profiler/unbound-queue.h
+++ b/deps/v8/src/profiler/unbound-queue.h
@@ -18,8 +18,8 @@ namespace internal {
// elements, so producer never blocks. Implemented after Herb
// Sutter's article:
// http://www.ddj.com/high-performance-computing/210604448
-template<typename Record>
-class UnboundQueue BASE_EMBEDDED {
+template <typename Record>
+class UnboundQueue {
public:
inline UnboundQueue();
inline ~UnboundQueue();
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 4968258860..1e953001eb 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -230,7 +230,7 @@ enum class PropertyCellConstantType {
// PropertyDetails captures type and attributes for a property.
// They are used both in property dictionaries and instance descriptors.
-class PropertyDetails BASE_EMBEDDED {
+class PropertyDetails {
public:
// Property details for dictionary mode properties/elements.
PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 8e56dcf47e..5f41948cfd 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -25,7 +25,7 @@ std::ostream& operator<<(std::ostream& os,
Descriptor::Descriptor() : details_(Smi::kZero) {}
-Descriptor::Descriptor(Handle<Name> key, MaybeObjectHandle value,
+Descriptor::Descriptor(Handle<Name> key, const MaybeObjectHandle& value,
PropertyKind kind, PropertyAttributes attributes,
PropertyLocation location, PropertyConstness constness,
Representation representation, int field_index)
@@ -37,7 +37,7 @@ Descriptor::Descriptor(Handle<Name> key, MaybeObjectHandle value,
DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
}
-Descriptor::Descriptor(Handle<Name> key, MaybeObjectHandle value,
+Descriptor::Descriptor(Handle<Name> key, const MaybeObjectHandle& value,
PropertyDetails details)
: key_(key), value_(value), details_(details) {
DCHECK(key->IsUniqueName());
@@ -55,8 +55,8 @@ Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
PropertyAttributes attributes,
PropertyConstness constness,
Representation representation,
- MaybeObjectHandle wrapped_field_type) {
- DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeakHeapObject());
+ const MaybeObjectHandle& wrapped_field_type) {
+ DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeak());
PropertyDetails details(kData, attributes, kField, constness, representation,
field_index);
return Descriptor(key, wrapped_field_type, details);
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 7a7d485bc3..276a5fd46a 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -22,7 +22,7 @@ namespace internal {
// Each descriptor has a key, property attributes, property type,
// property index (in the actual instance-descriptor array) and
// optionally a piece of data.
-class Descriptor final BASE_EMBEDDED {
+class Descriptor final {
public:
Descriptor();
@@ -40,7 +40,7 @@ class Descriptor final BASE_EMBEDDED {
PropertyAttributes attributes,
PropertyConstness constness,
Representation representation,
- MaybeObjectHandle wrapped_field_type);
+ const MaybeObjectHandle& wrapped_field_type);
static Descriptor DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes);
@@ -58,13 +58,13 @@ class Descriptor final BASE_EMBEDDED {
PropertyDetails details_;
protected:
- Descriptor(Handle<Name> key, MaybeObjectHandle value,
+ Descriptor(Handle<Name> key, const MaybeObjectHandle& value,
PropertyDetails details);
- Descriptor(Handle<Name> key, MaybeObjectHandle value, PropertyKind kind,
- PropertyAttributes attributes, PropertyLocation location,
- PropertyConstness constness, Representation representation,
- int field_index);
+ Descriptor(Handle<Name> key, const MaybeObjectHandle& value,
+ PropertyKind kind, PropertyAttributes attributes,
+ PropertyLocation location, PropertyConstness constness,
+ Representation representation, int field_index);
friend class MapUpdater;
};
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index d09a6c82a6..e8fe06ac44 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -41,7 +41,7 @@ class PrototypeIterator {
inline explicit PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
WhereToEnd where_to_end = END_AT_NULL);
- ~PrototypeIterator() {}
+ ~PrototypeIterator() = default;
inline bool HasAccess() const;
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 3fdc3d98f5..64028d3927 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -2168,10 +2168,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
macro_assembler->Bind(&fall_through);
}
-
-RegExpNode::~RegExpNode() {
-}
-
+RegExpNode::~RegExpNode() = default;
RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
Trace* trace) {
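
The bulk of the regexp changes are mechanical virtual-to-override and empty-body-to-default rewrites, but they are not purely cosmetic: override turns any future signature drift in the base class into a compile error instead of a silently non-overriding method. A two-line demonstration of what it buys:

```
struct Base {
  virtual ~Base() = default;
  virtual int EatsAtLeast(int budget) = 0;
};

struct Node : Base {
  ~Node() override = default;
  // If Base::EatsAtLeast ever changes its signature, |override| makes
  // this declaration fail to compile rather than become a new method.
  int EatsAtLeast(int budget) override { return budget; }
};

int main() {
  Node n;
  return n.EatsAtLeast(0);
}
```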
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index fd2a90521d..ee9d167aa2 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -592,9 +592,9 @@ class SeqRegExpNode: public RegExpNode {
: RegExpNode(on_success->zone()), on_success_(on_success) { }
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
- virtual RegExpNode* FilterOneByte(int depth);
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
+ RegExpNode* FilterOneByte(int depth) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override {
on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
@@ -636,21 +636,22 @@ class ActionNode: public SeqRegExpNode {
int repetition_register,
int repetition_limit,
RegExpNode* on_success);
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start) {
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int filled_in,
+ bool not_at_start) override {
return on_success()->GetQuickCheckDetails(
details, compiler, filled_in, not_at_start);
}
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start);
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
ActionType action_type() { return action_type_; }
// TODO(erikcorry): We should allow some action nodes in greedy loops.
- virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
+ int GreedyLoopTextLength() override {
+ return kNodeIsTooComplexForGreedyLoops;
+ }
private:
union {
@@ -714,23 +715,22 @@ class TextNode: public SeqRegExpNode {
bool read_backward,
RegExpNode* on_success,
JSRegExp::Flags flags);
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
ZoneList<TextElement>* elements() { return elms_; }
bool read_backward() { return read_backward_; }
void MakeCaseIndependent(Isolate* isolate, bool is_one_byte);
- virtual int GreedyLoopTextLength();
- virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
- RegExpCompiler* compiler);
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start);
+ int GreedyLoopTextLength() override;
+ RegExpNode* GetSuccessorOfOmnivorousTextNode(
+ RegExpCompiler* compiler) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
void CalculateOffsets();
- virtual RegExpNode* FilterOneByte(int depth);
+ RegExpNode* FilterOneByte(int depth) override;
private:
enum TextEmitPassType {
@@ -779,15 +779,14 @@ class AssertionNode: public SeqRegExpNode {
static AssertionNode* AfterNewline(RegExpNode* on_success) {
return new(on_success->zone()) AssertionNode(AFTER_NEWLINE, on_success);
}
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start);
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start);
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
AssertionType assertion_type() { return assertion_type_; }
private:
@@ -811,22 +810,20 @@ class BackReferenceNode: public SeqRegExpNode {
end_reg_(end_reg),
flags_(flags),
read_backward_(read_backward) {}
- virtual void Accept(NodeVisitor* visitor);
+ void Accept(NodeVisitor* visitor) override;
int start_register() { return start_reg_; }
int end_register() { return end_reg_; }
bool read_backward() { return read_backward_; }
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int recursion_depth,
+ bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override {
return;
}
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start);
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
private:
int start_reg_;
@@ -840,20 +837,20 @@ class EndNode: public RegExpNode {
public:
enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
EndNode(Action action, Zone* zone) : RegExpNode(zone), action_(action) {}
- virtual void Accept(NodeVisitor* visitor);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) { return 0; }
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int recursion_depth,
+ bool not_at_start) override {
+ return 0;
+ }
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override {
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override {
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
@@ -875,7 +872,7 @@ class NegativeSubmatchSuccess: public EndNode {
current_position_register_(position_reg),
clear_capture_count_(clear_capture_count),
clear_capture_start_(clear_capture_start) { }
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
private:
int stack_pointer_register_;
@@ -930,24 +927,23 @@ class ChoiceNode: public RegExpNode {
table_(nullptr),
not_at_start_(false),
being_calculated_(false) {}
- virtual void Accept(NodeVisitor* visitor);
+ void Accept(NodeVisitor* visitor) override;
void AddAlternative(GuardedAlternative node) {
alternatives()->Add(node, zone());
}
ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
DispatchTable* GetTable(bool ignore_case);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
int EatsAtLeastHelper(int still_to_find,
int budget,
RegExpNode* ignore_this_node,
bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start);
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
bool being_calculated() { return being_calculated_; }
bool not_at_start() { return not_at_start_; }
@@ -956,7 +952,7 @@ class ChoiceNode: public RegExpNode {
virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
return true;
}
- virtual RegExpNode* FilterOneByte(int depth);
+ RegExpNode* FilterOneByte(int depth) override;
virtual bool read_backward() { return false; }
protected:
@@ -1009,13 +1005,12 @@ class NegativeLookaroundChoiceNode : public ChoiceNode {
AddAlternative(this_must_fail);
AddAlternative(then_do_this);
}
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override {
alternatives_->at(1).node()->FillInBMInfo(isolate, offset, budget - 1, bm,
not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
@@ -1025,10 +1020,10 @@ class NegativeLookaroundChoiceNode : public ChoiceNode {
// starts by loading enough characters for the alternative that takes fewest
// characters, but on a negative lookahead the negative branch did not take
// part in that calculation (EatsAtLeast) so the assumptions don't hold.
- virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
+ bool try_to_emit_quick_check_for_alternative(bool is_first) override {
return !is_first;
}
- virtual RegExpNode* FilterOneByte(int depth);
+ RegExpNode* FilterOneByte(int depth) override;
};
@@ -1042,20 +1037,19 @@ class LoopChoiceNode: public ChoiceNode {
read_backward_(read_backward) {}
void AddLoopAlternative(GuardedAlternative alt);
void AddContinueAlternative(GuardedAlternative alt);
- virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start);
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start);
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
RegExpNode* loop_node() { return loop_node_; }
RegExpNode* continue_node() { return continue_node_; }
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
- virtual bool read_backward() { return read_backward_; }
- virtual void Accept(NodeVisitor* visitor);
- virtual RegExpNode* FilterOneByte(int depth);
+ bool read_backward() override { return read_backward_; }
+ void Accept(NodeVisitor* visitor) override;
+ RegExpNode* FilterOneByte(int depth) override;
private:
// AddAlternative is made private for loop nodes because alternatives
@@ -1404,7 +1398,7 @@ struct PreloadState {
class NodeVisitor {
public:
- virtual ~NodeVisitor() { }
+ virtual ~NodeVisitor() = default;
#define DECLARE_VISIT(Type) \
virtual void Visit##Type(Type##Node* that) = 0;
FOR_EACH_NODE_TYPE(DECLARE_VISIT)
@@ -1466,11 +1460,10 @@ class Analysis: public NodeVisitor {
: isolate_(isolate), is_one_byte_(is_one_byte), error_message_(nullptr) {}
void EnsureAnalyzed(RegExpNode* node);
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#define DECLARE_VISIT(Type) void Visit##Type(Type##Node* that) override;
+ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
#undef DECLARE_VISIT
- virtual void VisitLoopChoice(LoopChoiceNode* that);
+ void VisitLoopChoice(LoopChoiceNode* that) override;
bool has_failed() { return error_message_ != nullptr; }
const char* error_message() {
diff --git a/deps/v8/src/regexp/property-sequences.cc b/deps/v8/src/regexp/property-sequences.cc
new file mode 100644
index 0000000000..08194f1e70
--- /dev/null
+++ b/deps/v8/src/regexp/property-sequences.cc
@@ -0,0 +1,1115 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef V8_INTL_SUPPORT
+
+#include "src/regexp/property-sequences.h"
+
+namespace v8 {
+namespace internal {
+
+/*
+Generated from following Node.js source:
+
+package.json
+
+```
+{
+ "private": true,
+ "dependencies": {
+ "unicode-11.0.0": "^0.7.8"
+ }
+}
+```
+
+generate-unicode-sequence-property-data.js
+```
+const toHex = (symbol) => {
+ return '0x' + symbol.codePointAt(0).toString(16)
+ .toUpperCase().padStart(6, '0');
+};
+
+const generateData = (property) => {
+ const sequences =
+ require(`unicode-11.0.0/Sequence_Property/${ property }/index.js`);
+ const id = property.replace(/_/g, '') + 's';
+ const buffer = [];
+ for (const sequence of sequences) {
+ const symbols = [...sequence];
+ const codePoints = symbols.map(symbol => toHex(symbol));
+ buffer.push(' ' + codePoints.join(', ') + ', 0,');
+ }
+ const output =
+ `const uc32 UnicodePropertySequences::k${ id }[] = {\n` +
+ `${ buffer.join('\n') }\n0 // null-terminating the list\n};\n`;
+ return output;
+};
+
+const properties = [
+ 'Emoji_Flag_Sequence',
+ 'Emoji_Tag_Sequence',
+ 'Emoji_ZWJ_Sequence',
+];
+
+for (const property of properties) {
+ console.log(generateData(property));
+}
+```
+*/
+
+const uc32 UnicodePropertySequences::kEmojiFlagSequences[] = {
+ 0x01F1E6, 0x01F1E8, 0,
+ 0x01F1E6, 0x01F1E9, 0,
+ 0x01F1E6, 0x01F1EA, 0,
+ 0x01F1E6, 0x01F1EB, 0,
+ 0x01F1E6, 0x01F1EC, 0,
+ 0x01F1E6, 0x01F1EE, 0,
+ 0x01F1E6, 0x01F1F1, 0,
+ 0x01F1E6, 0x01F1F2, 0,
+ 0x01F1E6, 0x01F1F4, 0,
+ 0x01F1E6, 0x01F1F6, 0,
+ 0x01F1E6, 0x01F1F7, 0,
+ 0x01F1E6, 0x01F1F8, 0,
+ 0x01F1E6, 0x01F1F9, 0,
+ 0x01F1E6, 0x01F1FA, 0,
+ 0x01F1E6, 0x01F1FC, 0,
+ 0x01F1E6, 0x01F1FD, 0,
+ 0x01F1E6, 0x01F1FF, 0,
+ 0x01F1E7, 0x01F1E6, 0,
+ 0x01F1E7, 0x01F1E7, 0,
+ 0x01F1E7, 0x01F1E9, 0,
+ 0x01F1E7, 0x01F1EA, 0,
+ 0x01F1E7, 0x01F1EB, 0,
+ 0x01F1E7, 0x01F1EC, 0,
+ 0x01F1E7, 0x01F1ED, 0,
+ 0x01F1E7, 0x01F1EE, 0,
+ 0x01F1E7, 0x01F1EF, 0,
+ 0x01F1E7, 0x01F1F1, 0,
+ 0x01F1E7, 0x01F1F2, 0,
+ 0x01F1E7, 0x01F1F3, 0,
+ 0x01F1E7, 0x01F1F4, 0,
+ 0x01F1E7, 0x01F1F6, 0,
+ 0x01F1E7, 0x01F1F7, 0,
+ 0x01F1E7, 0x01F1F8, 0,
+ 0x01F1E7, 0x01F1F9, 0,
+ 0x01F1E7, 0x01F1FB, 0,
+ 0x01F1E7, 0x01F1FC, 0,
+ 0x01F1E7, 0x01F1FE, 0,
+ 0x01F1E7, 0x01F1FF, 0,
+ 0x01F1E8, 0x01F1E6, 0,
+ 0x01F1E8, 0x01F1E8, 0,
+ 0x01F1E8, 0x01F1E9, 0,
+ 0x01F1E8, 0x01F1EB, 0,
+ 0x01F1E8, 0x01F1EC, 0,
+ 0x01F1E8, 0x01F1ED, 0,
+ 0x01F1E8, 0x01F1EE, 0,
+ 0x01F1E8, 0x01F1F0, 0,
+ 0x01F1E8, 0x01F1F1, 0,
+ 0x01F1E8, 0x01F1F2, 0,
+ 0x01F1E8, 0x01F1F3, 0,
+ 0x01F1E8, 0x01F1F4, 0,
+ 0x01F1E8, 0x01F1F5, 0,
+ 0x01F1E8, 0x01F1F7, 0,
+ 0x01F1E8, 0x01F1FA, 0,
+ 0x01F1E8, 0x01F1FB, 0,
+ 0x01F1E8, 0x01F1FC, 0,
+ 0x01F1E8, 0x01F1FD, 0,
+ 0x01F1E8, 0x01F1FE, 0,
+ 0x01F1E8, 0x01F1FF, 0,
+ 0x01F1E9, 0x01F1EA, 0,
+ 0x01F1E9, 0x01F1EC, 0,
+ 0x01F1E9, 0x01F1EF, 0,
+ 0x01F1E9, 0x01F1F0, 0,
+ 0x01F1E9, 0x01F1F2, 0,
+ 0x01F1E9, 0x01F1F4, 0,
+ 0x01F1E9, 0x01F1FF, 0,
+ 0x01F1EA, 0x01F1E6, 0,
+ 0x01F1EA, 0x01F1E8, 0,
+ 0x01F1EA, 0x01F1EA, 0,
+ 0x01F1EA, 0x01F1EC, 0,
+ 0x01F1EA, 0x01F1ED, 0,
+ 0x01F1EA, 0x01F1F7, 0,
+ 0x01F1EA, 0x01F1F8, 0,
+ 0x01F1EA, 0x01F1F9, 0,
+ 0x01F1EA, 0x01F1FA, 0,
+ 0x01F1EB, 0x01F1EE, 0,
+ 0x01F1EB, 0x01F1EF, 0,
+ 0x01F1EB, 0x01F1F0, 0,
+ 0x01F1EB, 0x01F1F2, 0,
+ 0x01F1EB, 0x01F1F4, 0,
+ 0x01F1EB, 0x01F1F7, 0,
+ 0x01F1EC, 0x01F1E6, 0,
+ 0x01F1EC, 0x01F1E7, 0,
+ 0x01F1EC, 0x01F1E9, 0,
+ 0x01F1EC, 0x01F1EA, 0,
+ 0x01F1EC, 0x01F1EB, 0,
+ 0x01F1EC, 0x01F1EC, 0,
+ 0x01F1EC, 0x01F1ED, 0,
+ 0x01F1EC, 0x01F1EE, 0,
+ 0x01F1EC, 0x01F1F1, 0,
+ 0x01F1EC, 0x01F1F2, 0,
+ 0x01F1EC, 0x01F1F3, 0,
+ 0x01F1EC, 0x01F1F5, 0,
+ 0x01F1EC, 0x01F1F6, 0,
+ 0x01F1EC, 0x01F1F7, 0,
+ 0x01F1EC, 0x01F1F8, 0,
+ 0x01F1EC, 0x01F1F9, 0,
+ 0x01F1EC, 0x01F1FA, 0,
+ 0x01F1EC, 0x01F1FC, 0,
+ 0x01F1EC, 0x01F1FE, 0,
+ 0x01F1ED, 0x01F1F0, 0,
+ 0x01F1ED, 0x01F1F2, 0,
+ 0x01F1ED, 0x01F1F3, 0,
+ 0x01F1ED, 0x01F1F7, 0,
+ 0x01F1ED, 0x01F1F9, 0,
+ 0x01F1ED, 0x01F1FA, 0,
+ 0x01F1EE, 0x01F1E8, 0,
+ 0x01F1EE, 0x01F1E9, 0,
+ 0x01F1EE, 0x01F1EA, 0,
+ 0x01F1EE, 0x01F1F1, 0,
+ 0x01F1EE, 0x01F1F2, 0,
+ 0x01F1EE, 0x01F1F3, 0,
+ 0x01F1EE, 0x01F1F4, 0,
+ 0x01F1EE, 0x01F1F6, 0,
+ 0x01F1EE, 0x01F1F7, 0,
+ 0x01F1EE, 0x01F1F8, 0,
+ 0x01F1EE, 0x01F1F9, 0,
+ 0x01F1EF, 0x01F1EA, 0,
+ 0x01F1EF, 0x01F1F2, 0,
+ 0x01F1EF, 0x01F1F4, 0,
+ 0x01F1EF, 0x01F1F5, 0,
+ 0x01F1F0, 0x01F1EA, 0,
+ 0x01F1F0, 0x01F1EC, 0,
+ 0x01F1F0, 0x01F1ED, 0,
+ 0x01F1F0, 0x01F1EE, 0,
+ 0x01F1F0, 0x01F1F2, 0,
+ 0x01F1F0, 0x01F1F3, 0,
+ 0x01F1F0, 0x01F1F5, 0,
+ 0x01F1F0, 0x01F1F7, 0,
+ 0x01F1F0, 0x01F1FC, 0,
+ 0x01F1F0, 0x01F1FE, 0,
+ 0x01F1F0, 0x01F1FF, 0,
+ 0x01F1F1, 0x01F1E6, 0,
+ 0x01F1F1, 0x01F1E7, 0,
+ 0x01F1F1, 0x01F1E8, 0,
+ 0x01F1F1, 0x01F1EE, 0,
+ 0x01F1F1, 0x01F1F0, 0,
+ 0x01F1F1, 0x01F1F7, 0,
+ 0x01F1F1, 0x01F1F8, 0,
+ 0x01F1F1, 0x01F1F9, 0,
+ 0x01F1F1, 0x01F1FA, 0,
+ 0x01F1F1, 0x01F1FB, 0,
+ 0x01F1F1, 0x01F1FE, 0,
+ 0x01F1F2, 0x01F1E6, 0,
+ 0x01F1F2, 0x01F1E8, 0,
+ 0x01F1F2, 0x01F1E9, 0,
+ 0x01F1F2, 0x01F1EA, 0,
+ 0x01F1F2, 0x01F1EB, 0,
+ 0x01F1F2, 0x01F1EC, 0,
+ 0x01F1F2, 0x01F1ED, 0,
+ 0x01F1F2, 0x01F1F0, 0,
+ 0x01F1F2, 0x01F1F1, 0,
+ 0x01F1F2, 0x01F1F2, 0,
+ 0x01F1F2, 0x01F1F3, 0,
+ 0x01F1F2, 0x01F1F4, 0,
+ 0x01F1F2, 0x01F1F5, 0,
+ 0x01F1F2, 0x01F1F6, 0,
+ 0x01F1F2, 0x01F1F7, 0,
+ 0x01F1F2, 0x01F1F8, 0,
+ 0x01F1F2, 0x01F1F9, 0,
+ 0x01F1F2, 0x01F1FA, 0,
+ 0x01F1F2, 0x01F1FB, 0,
+ 0x01F1F2, 0x01F1FC, 0,
+ 0x01F1F2, 0x01F1FD, 0,
+ 0x01F1F2, 0x01F1FE, 0,
+ 0x01F1F2, 0x01F1FF, 0,
+ 0x01F1F3, 0x01F1E6, 0,
+ 0x01F1F3, 0x01F1E8, 0,
+ 0x01F1F3, 0x01F1EA, 0,
+ 0x01F1F3, 0x01F1EB, 0,
+ 0x01F1F3, 0x01F1EC, 0,
+ 0x01F1F3, 0x01F1EE, 0,
+ 0x01F1F3, 0x01F1F1, 0,
+ 0x01F1F3, 0x01F1F4, 0,
+ 0x01F1F3, 0x01F1F5, 0,
+ 0x01F1F3, 0x01F1F7, 0,
+ 0x01F1F3, 0x01F1FA, 0,
+ 0x01F1F3, 0x01F1FF, 0,
+ 0x01F1F4, 0x01F1F2, 0,
+ 0x01F1F5, 0x01F1E6, 0,
+ 0x01F1F5, 0x01F1EA, 0,
+ 0x01F1F5, 0x01F1EB, 0,
+ 0x01F1F5, 0x01F1EC, 0,
+ 0x01F1F5, 0x01F1ED, 0,
+ 0x01F1F5, 0x01F1F0, 0,
+ 0x01F1F5, 0x01F1F1, 0,
+ 0x01F1F5, 0x01F1F2, 0,
+ 0x01F1F5, 0x01F1F3, 0,
+ 0x01F1F5, 0x01F1F7, 0,
+ 0x01F1F5, 0x01F1F8, 0,
+ 0x01F1F5, 0x01F1F9, 0,
+ 0x01F1F5, 0x01F1FC, 0,
+ 0x01F1F5, 0x01F1FE, 0,
+ 0x01F1F6, 0x01F1E6, 0,
+ 0x01F1F7, 0x01F1EA, 0,
+ 0x01F1F7, 0x01F1F4, 0,
+ 0x01F1F7, 0x01F1F8, 0,
+ 0x01F1F7, 0x01F1FA, 0,
+ 0x01F1F7, 0x01F1FC, 0,
+ 0x01F1F8, 0x01F1E6, 0,
+ 0x01F1F8, 0x01F1E7, 0,
+ 0x01F1F8, 0x01F1E8, 0,
+ 0x01F1F8, 0x01F1E9, 0,
+ 0x01F1F8, 0x01F1EA, 0,
+ 0x01F1F8, 0x01F1EC, 0,
+ 0x01F1F8, 0x01F1ED, 0,
+ 0x01F1F8, 0x01F1EE, 0,
+ 0x01F1F8, 0x01F1EF, 0,
+ 0x01F1F8, 0x01F1F0, 0,
+ 0x01F1F8, 0x01F1F1, 0,
+ 0x01F1F8, 0x01F1F2, 0,
+ 0x01F1F8, 0x01F1F3, 0,
+ 0x01F1F8, 0x01F1F4, 0,
+ 0x01F1F8, 0x01F1F7, 0,
+ 0x01F1F8, 0x01F1F8, 0,
+ 0x01F1F8, 0x01F1F9, 0,
+ 0x01F1F8, 0x01F1FB, 0,
+ 0x01F1F8, 0x01F1FD, 0,
+ 0x01F1F8, 0x01F1FE, 0,
+ 0x01F1F8, 0x01F1FF, 0,
+ 0x01F1F9, 0x01F1E6, 0,
+ 0x01F1F9, 0x01F1E8, 0,
+ 0x01F1F9, 0x01F1E9, 0,
+ 0x01F1F9, 0x01F1EB, 0,
+ 0x01F1F9, 0x01F1EC, 0,
+ 0x01F1F9, 0x01F1ED, 0,
+ 0x01F1F9, 0x01F1EF, 0,
+ 0x01F1F9, 0x01F1F0, 0,
+ 0x01F1F9, 0x01F1F1, 0,
+ 0x01F1F9, 0x01F1F2, 0,
+ 0x01F1F9, 0x01F1F3, 0,
+ 0x01F1F9, 0x01F1F4, 0,
+ 0x01F1F9, 0x01F1F7, 0,
+ 0x01F1F9, 0x01F1F9, 0,
+ 0x01F1F9, 0x01F1FB, 0,
+ 0x01F1F9, 0x01F1FC, 0,
+ 0x01F1F9, 0x01F1FF, 0,
+ 0x01F1FA, 0x01F1E6, 0,
+ 0x01F1FA, 0x01F1EC, 0,
+ 0x01F1FA, 0x01F1F2, 0,
+ 0x01F1FA, 0x01F1F3, 0,
+ 0x01F1FA, 0x01F1F8, 0,
+ 0x01F1FA, 0x01F1FE, 0,
+ 0x01F1FA, 0x01F1FF, 0,
+ 0x01F1FB, 0x01F1E6, 0,
+ 0x01F1FB, 0x01F1E8, 0,
+ 0x01F1FB, 0x01F1EA, 0,
+ 0x01F1FB, 0x01F1EC, 0,
+ 0x01F1FB, 0x01F1EE, 0,
+ 0x01F1FB, 0x01F1F3, 0,
+ 0x01F1FB, 0x01F1FA, 0,
+ 0x01F1FC, 0x01F1EB, 0,
+ 0x01F1FC, 0x01F1F8, 0,
+ 0x01F1FD, 0x01F1F0, 0,
+ 0x01F1FE, 0x01F1EA, 0,
+ 0x01F1FE, 0x01F1F9, 0,
+ 0x01F1FF, 0x01F1E6, 0,
+ 0x01F1FF, 0x01F1F2, 0,
+ 0x01F1FF, 0x01F1FC, 0,
+ 0 // null-terminating the list
+};
+
+const uc32 UnicodePropertySequences::kEmojiTagSequences[] = {
+ 0x01F3F4, 0x0E0067, 0x0E0062, 0x0E0065, 0x0E006E, 0x0E0067, 0x0E007F, 0,
+ 0x01F3F4, 0x0E0067, 0x0E0062, 0x0E0073, 0x0E0063, 0x0E0074, 0x0E007F, 0,
+ 0x01F3F4, 0x0E0067, 0x0E0062, 0x0E0077, 0x0E006C, 0x0E0073, 0x0E007F, 0,
+ 0 // null-terminating the list
+};
+
+const uc32 UnicodePropertySequences::kEmojiZWJSequences[] = {
+ 0x0026F9, 0x00FE0F, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x0026F9, 0x00FE0F, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x0026F9, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x0026F9, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x0026F9, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x0026F9, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x0026F9, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x0026F9, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x0026F9, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x0026F9, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x0026F9, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x0026F9, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C3, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C3, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C3, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C3, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C3, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C3, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C3, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C3, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C3, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C3, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C3, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C3, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C4, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C4, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C4, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C4, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C4, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C4, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C4, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C4, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C4, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C4, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3C4, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3C4, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CA, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CA, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CA, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CA, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CA, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CA, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CA, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CA, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CA, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CA, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CA, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CA, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CB, 0x00FE0F, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CB, 0x00FE0F, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CB, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CB, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CB, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CB, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CB, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CB, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CB, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CB, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CB, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CB, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CC, 0x00FE0F, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CC, 0x00FE0F, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CC, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CC, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CC, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CC, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CC, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CC, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CC, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CC, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3CC, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F3CC, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F3F3, 0x00FE0F, 0x00200D, 0x01F308, 0,
+ 0x01F3F4, 0x00200D, 0x002620, 0x00FE0F, 0,
+ 0x01F441, 0x00FE0F, 0x00200D, 0x01F5E8, 0x00FE0F, 0,
+ 0x01F468, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F468, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F468, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F468, 0x00200D, 0x002764, 0x00FE0F, 0x00200D, 0x01F468, 0,
+ 0x01F468, 0x00200D, 0x002764, 0x00FE0F, 0x00200D, 0x01F48B, 0x00200D,
+ 0x01F468, 0,
+ 0x01F468, 0x00200D, 0x01F33E, 0,
+ 0x01F468, 0x00200D, 0x01F373, 0,
+ 0x01F468, 0x00200D, 0x01F393, 0,
+ 0x01F468, 0x00200D, 0x01F3A4, 0,
+ 0x01F468, 0x00200D, 0x01F3A8, 0,
+ 0x01F468, 0x00200D, 0x01F3EB, 0,
+ 0x01F468, 0x00200D, 0x01F3ED, 0,
+ 0x01F468, 0x00200D, 0x01F466, 0,
+ 0x01F468, 0x00200D, 0x01F466, 0x00200D, 0x01F466, 0,
+ 0x01F468, 0x00200D, 0x01F467, 0,
+ 0x01F468, 0x00200D, 0x01F467, 0x00200D, 0x01F466, 0,
+ 0x01F468, 0x00200D, 0x01F467, 0x00200D, 0x01F467, 0,
+ 0x01F468, 0x00200D, 0x01F468, 0x00200D, 0x01F466, 0,
+ 0x01F468, 0x00200D, 0x01F468, 0x00200D, 0x01F466, 0x00200D, 0x01F466, 0,
+ 0x01F468, 0x00200D, 0x01F468, 0x00200D, 0x01F467, 0,
+ 0x01F468, 0x00200D, 0x01F468, 0x00200D, 0x01F467, 0x00200D, 0x01F466, 0,
+ 0x01F468, 0x00200D, 0x01F468, 0x00200D, 0x01F467, 0x00200D, 0x01F467, 0,
+ 0x01F468, 0x00200D, 0x01F469, 0x00200D, 0x01F466, 0,
+ 0x01F468, 0x00200D, 0x01F469, 0x00200D, 0x01F466, 0x00200D, 0x01F466, 0,
+ 0x01F468, 0x00200D, 0x01F469, 0x00200D, 0x01F467, 0,
+ 0x01F468, 0x00200D, 0x01F469, 0x00200D, 0x01F467, 0x00200D, 0x01F466, 0,
+ 0x01F468, 0x00200D, 0x01F469, 0x00200D, 0x01F467, 0x00200D, 0x01F467, 0,
+ 0x01F468, 0x00200D, 0x01F4BB, 0,
+ 0x01F468, 0x00200D, 0x01F4BC, 0,
+ 0x01F468, 0x00200D, 0x01F527, 0,
+ 0x01F468, 0x00200D, 0x01F52C, 0,
+ 0x01F468, 0x00200D, 0x01F680, 0,
+ 0x01F468, 0x00200D, 0x01F692, 0,
+ 0x01F468, 0x00200D, 0x01F9B0, 0,
+ 0x01F468, 0x00200D, 0x01F9B1, 0,
+ 0x01F468, 0x00200D, 0x01F9B2, 0,
+ 0x01F468, 0x00200D, 0x01F9B3, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F33E, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F373, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F393, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F3A4, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F3A8, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F3EB, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F3ED, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F4BB, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F4BC, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F527, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F52C, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F680, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F692, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F9B0, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F9B1, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F9B2, 0,
+ 0x01F468, 0x01F3FB, 0x00200D, 0x01F9B3, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F33E, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F373, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F393, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F3A4, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F3A8, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F3EB, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F3ED, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F4BB, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F4BC, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F527, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F52C, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F680, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F692, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F9B0, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F9B1, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F9B2, 0,
+ 0x01F468, 0x01F3FC, 0x00200D, 0x01F9B3, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F33E, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F373, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F393, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F3A4, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F3A8, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F3EB, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F3ED, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F4BB, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F4BC, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F527, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F52C, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F680, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F692, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F9B0, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F9B1, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F9B2, 0,
+ 0x01F468, 0x01F3FD, 0x00200D, 0x01F9B3, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F33E, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F373, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F393, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F3A4, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F3A8, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F3EB, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F3ED, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F4BB, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F4BC, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F527, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F52C, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F680, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F692, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F9B0, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F9B1, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F9B2, 0,
+ 0x01F468, 0x01F3FE, 0x00200D, 0x01F9B3, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F33E, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F373, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F393, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F3A4, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F3A8, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F3EB, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F3ED, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F4BB, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F4BC, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F527, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F52C, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F680, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F692, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F9B0, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F9B1, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F9B2, 0,
+ 0x01F468, 0x01F3FF, 0x00200D, 0x01F9B3, 0,
+ 0x01F469, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F469, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F469, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F469, 0x00200D, 0x002764, 0x00FE0F, 0x00200D, 0x01F468, 0,
+ 0x01F469, 0x00200D, 0x002764, 0x00FE0F, 0x00200D, 0x01F469, 0,
+ 0x01F469, 0x00200D, 0x002764, 0x00FE0F, 0x00200D, 0x01F48B, 0x00200D,
+ 0x01F468, 0,
+ 0x01F469, 0x00200D, 0x002764, 0x00FE0F, 0x00200D, 0x01F48B, 0x00200D,
+ 0x01F469, 0,
+ 0x01F469, 0x00200D, 0x01F33E, 0,
+ 0x01F469, 0x00200D, 0x01F373, 0,
+ 0x01F469, 0x00200D, 0x01F393, 0,
+ 0x01F469, 0x00200D, 0x01F3A4, 0,
+ 0x01F469, 0x00200D, 0x01F3A8, 0,
+ 0x01F469, 0x00200D, 0x01F3EB, 0,
+ 0x01F469, 0x00200D, 0x01F3ED, 0,
+ 0x01F469, 0x00200D, 0x01F466, 0,
+ 0x01F469, 0x00200D, 0x01F466, 0x00200D, 0x01F466, 0,
+ 0x01F469, 0x00200D, 0x01F467, 0,
+ 0x01F469, 0x00200D, 0x01F467, 0x00200D, 0x01F466, 0,
+ 0x01F469, 0x00200D, 0x01F467, 0x00200D, 0x01F467, 0,
+ 0x01F469, 0x00200D, 0x01F469, 0x00200D, 0x01F466, 0,
+ 0x01F469, 0x00200D, 0x01F469, 0x00200D, 0x01F466, 0x00200D, 0x01F466, 0,
+ 0x01F469, 0x00200D, 0x01F469, 0x00200D, 0x01F467, 0,
+ 0x01F469, 0x00200D, 0x01F469, 0x00200D, 0x01F467, 0x00200D, 0x01F466, 0,
+ 0x01F469, 0x00200D, 0x01F469, 0x00200D, 0x01F467, 0x00200D, 0x01F467, 0,
+ 0x01F469, 0x00200D, 0x01F4BB, 0,
+ 0x01F469, 0x00200D, 0x01F4BC, 0,
+ 0x01F469, 0x00200D, 0x01F527, 0,
+ 0x01F469, 0x00200D, 0x01F52C, 0,
+ 0x01F469, 0x00200D, 0x01F680, 0,
+ 0x01F469, 0x00200D, 0x01F692, 0,
+ 0x01F469, 0x00200D, 0x01F9B0, 0,
+ 0x01F469, 0x00200D, 0x01F9B1, 0,
+ 0x01F469, 0x00200D, 0x01F9B2, 0,
+ 0x01F469, 0x00200D, 0x01F9B3, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F33E, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F373, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F393, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F3A4, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F3A8, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F3EB, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F3ED, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F4BB, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F4BC, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F527, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F52C, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F680, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F692, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F9B0, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F9B1, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F9B2, 0,
+ 0x01F469, 0x01F3FB, 0x00200D, 0x01F9B3, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F33E, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F373, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F393, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F3A4, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F3A8, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F3EB, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F3ED, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F4BB, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F4BC, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F527, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F52C, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F680, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F692, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F9B0, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F9B1, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F9B2, 0,
+ 0x01F469, 0x01F3FC, 0x00200D, 0x01F9B3, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F33E, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F373, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F393, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F3A4, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F3A8, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F3EB, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F3ED, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F4BB, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F4BC, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F527, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F52C, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F680, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F692, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F9B0, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F9B1, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F9B2, 0,
+ 0x01F469, 0x01F3FD, 0x00200D, 0x01F9B3, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F33E, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F373, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F393, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F3A4, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F3A8, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F3EB, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F3ED, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F4BB, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F4BC, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F527, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F52C, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F680, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F692, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F9B0, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F9B1, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F9B2, 0,
+ 0x01F469, 0x01F3FE, 0x00200D, 0x01F9B3, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x002695, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x002696, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x002708, 0x00FE0F, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F33E, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F373, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F393, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F3A4, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F3A8, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F3EB, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F3ED, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F4BB, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F4BC, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F527, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F52C, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F680, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F692, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F9B0, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F9B1, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F9B2, 0,
+ 0x01F469, 0x01F3FF, 0x00200D, 0x01F9B3, 0,
+ 0x01F46E, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F46E, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F46E, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F46E, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F46E, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F46E, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F46E, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F46E, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F46E, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F46E, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F46E, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F46E, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F46F, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F46F, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F471, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F471, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F471, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F471, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F471, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F471, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F471, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F471, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F471, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F471, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F471, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F471, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F473, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F473, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F473, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F473, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F473, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F473, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F473, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F473, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F473, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F473, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F473, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F473, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F477, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F477, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F477, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F477, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F477, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F477, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F477, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F477, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F477, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F477, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F477, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F477, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F481, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F481, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F481, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F481, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F481, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F481, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F481, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F481, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F481, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F481, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F481, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F481, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F482, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F482, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F482, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F482, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F482, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F482, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F482, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F482, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F482, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F482, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F482, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F482, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F486, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F486, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F486, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F486, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F486, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F486, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F486, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F486, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F486, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F486, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F486, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F486, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F487, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F487, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F487, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F487, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F487, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F487, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F487, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F487, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F487, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F487, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F487, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F487, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F575, 0x00FE0F, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F575, 0x00FE0F, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F575, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F575, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F575, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F575, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F575, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F575, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F575, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F575, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F575, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F575, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F645, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F645, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F645, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F645, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F645, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F645, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F645, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F645, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F645, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F645, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F645, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F645, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F646, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F646, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F646, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F646, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F646, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F646, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F646, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F646, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F646, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F646, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F646, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F646, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F647, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F647, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F647, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F647, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F647, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F647, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F647, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F647, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F647, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F647, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F647, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F647, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64B, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64B, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64B, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64B, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64B, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64B, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64B, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64B, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64B, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64B, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64B, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64B, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64D, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64D, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64D, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64D, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64D, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64D, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64D, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64D, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64D, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64D, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64D, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64D, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64E, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64E, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64E, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64E, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64E, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64E, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64E, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64E, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64E, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64E, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F64E, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F64E, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6A3, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6A3, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6A3, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6A3, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6A3, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6A3, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6A3, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6A3, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6A3, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6A3, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6A3, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6A3, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B4, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B4, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B4, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B4, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B4, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B4, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B4, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B4, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B4, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B4, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B4, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B4, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B5, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B5, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B5, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B5, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B5, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B5, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B5, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B5, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B5, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B5, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B5, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B5, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B6, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B6, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B6, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B6, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B6, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B6, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B6, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B6, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B6, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B6, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F6B6, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F6B6, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F926, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F926, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F926, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F926, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F926, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F926, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F926, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F926, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F926, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F926, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F926, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F926, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F937, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F937, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F937, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F937, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F937, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F937, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F937, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F937, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F937, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F937, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F937, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F937, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F938, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F938, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F938, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F938, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F938, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F938, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F938, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F938, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F938, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F938, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F938, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F938, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F939, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F939, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F939, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F939, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F939, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F939, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F939, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F939, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F939, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F939, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F939, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F939, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93C, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93C, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93D, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93D, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93D, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93D, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93D, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93D, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93D, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93D, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93D, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93D, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93D, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93D, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93E, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93E, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93E, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93E, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93E, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93E, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93E, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93E, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93E, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93E, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F93E, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F93E, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B8, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B8, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B8, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B8, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B8, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B8, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B8, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B8, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B8, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B8, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B8, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B8, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B9, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B9, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B9, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B9, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B9, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B9, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B9, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B9, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B9, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B9, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9B9, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9B9, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D6, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D6, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D6, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D6, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D6, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D6, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D6, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D6, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D6, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D6, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D6, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D6, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D7, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D7, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D7, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D7, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D7, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D7, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D7, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D7, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D7, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D7, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D7, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D7, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D8, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D8, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D8, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D8, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D8, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D8, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D8, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D8, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D8, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D8, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D8, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D8, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D9, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D9, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D9, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D9, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D9, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D9, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D9, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D9, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D9, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D9, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9D9, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9D9, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DA, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DA, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DA, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DA, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DA, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DA, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DA, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DA, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DA, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DA, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DA, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DA, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DB, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DB, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DB, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DB, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DB, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DB, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DB, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DB, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DB, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DB, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DC, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DC, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DC, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DC, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DC, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DC, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DC, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DC, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DC, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DC, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DD, 0x01F3FB, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DD, 0x01F3FB, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DD, 0x01F3FC, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DD, 0x01F3FC, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DD, 0x01F3FD, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DD, 0x01F3FD, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DD, 0x01F3FE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DD, 0x01F3FE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DD, 0x01F3FF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DD, 0x01F3FF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DE, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DE, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0x01F9DF, 0x00200D, 0x002640, 0x00FE0F, 0,
+ 0x01F9DF, 0x00200D, 0x002642, 0x00FE0F, 0,
+ 0 // null-terminating the list
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTL_SUPPORT
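
All three tables above use the same encoding: each sequence is a run of code points followed by a single 0, and one extra 0 terminates the whole list. The code that consumes these tables is not part of this file, so what follows is only a minimal stand-alone sketch, under that encoding assumption, of how such a table can be scanned; `uc32`, `kFlagSequences` and `MatchesAnySequence` here are local stand-ins, not V8 API.

```cpp
#include <cstdint>
#include <cstdio>

using uc32 = uint32_t;  // stand-in for V8's uc32 code-point type

// Hypothetical two-entry excerpt in the same encoding as the tables above.
const uc32 kFlagSequences[] = {
    0x1F1E6, 0x1F1E8, 0,  // regional indicators A, C
    0x1F1E6, 0x1F1EA, 0,  // regional indicators A, E
    0  // null-terminating the list
};

// Returns true if the 0-terminated `input` equals any sequence in `table`.
bool MatchesAnySequence(const uc32* table, const uc32* input) {
  for (const uc32* seq = table; *seq != 0;) {
    const uc32* p = input;
    const uc32* q = seq;
    while (*q != 0 && *p == *q) {  // walk both while they agree
      ++p;
      ++q;
    }
    if (*q == 0 && *p == 0) return true;  // both ended together: a match
    while (*q != 0) ++q;  // otherwise skip to this sequence's terminator
    seq = q + 1;          // and start over at the next sequence
  }
  return false;
}

int main() {
  const uc32 input[] = {0x1F1E6, 0x1F1E8, 0};
  std::printf("%d\n", MatchesAnySequence(kFlagSequences, input));  // prints 1
}
```
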
diff --git a/deps/v8/src/regexp/property-sequences.h b/deps/v8/src/regexp/property-sequences.h
new file mode 100644
index 0000000000..52d8a855c5
--- /dev/null
+++ b/deps/v8/src/regexp/property-sequences.h
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_PROPERTY_SEQUENCES_H_
+#define V8_REGEXP_PROPERTY_SEQUENCES_H_
+
+#ifdef V8_INTL_SUPPORT
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class UnicodePropertySequences : public AllStatic {
+ public:
+ static const uc32 kEmojiFlagSequences[];
+
+ static const uc32 kEmojiTagSequences[];
+ static const uc32 kEmojiZWJSequences[];
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTL_SUPPORT
+
+#endif // V8_REGEXP_PROPERTY_SEQUENCES_H_
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 1a94832f71..9c39dda64e 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -37,10 +37,9 @@ class RegExpCompiler;
class RegExpNode;
class RegExpTree;
-
-class RegExpVisitor BASE_EMBEDDED {
+class RegExpVisitor {
public:
- virtual ~RegExpVisitor() {}
+ virtual ~RegExpVisitor() = default;
#define MAKE_CASE(Name) \
virtual void* Visit##Name(RegExp##Name*, void* data) = 0;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
@@ -137,8 +136,7 @@ class CharacterRange {
uc32 to_;
};
-
-class CharacterSet final BASE_EMBEDDED {
+class CharacterSet final {
public:
explicit CharacterSet(uc16 standard_set_type)
: ranges_(nullptr), standard_set_type_(standard_set_type) {}
@@ -159,8 +157,7 @@ class CharacterSet final BASE_EMBEDDED {
uc16 standard_set_type_;
};
-
-class TextElement final BASE_EMBEDDED {
+class TextElement final {
public:
enum TextType { ATOM, CHAR_CLASS };
@@ -198,7 +195,7 @@ class TextElement final BASE_EMBEDDED {
class RegExpTree : public ZoneObject {
public:
static const int kInfinity = kMaxInt;
- virtual ~RegExpTree() {}
+ virtual ~RegExpTree() = default;
virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) = 0;
@@ -580,7 +577,7 @@ class RegExpBackReference final : public RegExpTree {
class RegExpEmpty final : public RegExpTree {
public:
- RegExpEmpty() {}
+ RegExpEmpty() = default;
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpEmpty* AsEmpty() override;
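
The pattern applied throughout this commit — `override` instead of repeated `virtual`, and `= default` for empty special members — is mechanical but not cosmetic: `override` makes the compiler verify that a matching base virtual exists. A minimal sketch with hypothetical class names (not V8's real hierarchy):

```cpp
#include <cstdio>

struct RegExpNodeLike {
  virtual ~RegExpNodeLike() = default;  // trivial virtual dtor: = default
  virtual int EatsAtLeast(int still_to_find) { return 0; }
};

struct ChoiceNodeLike : RegExpNodeLike {
  // `override` asks the compiler to confirm a base virtual with this exact
  // signature exists; a misspelled name or parameter type now fails to
  // compile instead of quietly declaring a brand-new, unrelated virtual.
  int EatsAtLeast(int still_to_find) override { return still_to_find; }
};

int main() {
  ChoiceNodeLike node;
  RegExpNodeLike* base = &node;
  std::printf("%d\n", base->EatsAtLeast(3));  // prints 3 via virtual dispatch
}
```

Had the derived method been declared with, say, a `long` parameter, the `override` spelling would be a compile error, whereas the old `virtual` spelling would silently add a second method that is never dispatched.
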
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index d311a09e41..945c6927b5 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -20,10 +20,7 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
-
-RegExpMacroAssemblerTracer::~RegExpMacroAssemblerTracer() {
-}
-
+RegExpMacroAssemblerTracer::~RegExpMacroAssemblerTracer() = default;
void RegExpMacroAssemblerTracer::AbortedCodeGeneration() {
PrintF(" AbortedCodeGeneration\n");
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
index 8a9ebe3683..d0b68bd59d 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
@@ -14,71 +14,62 @@ namespace internal {
class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
public:
RegExpMacroAssemblerTracer(Isolate* isolate, RegExpMacroAssembler* assembler);
- virtual ~RegExpMacroAssemblerTracer();
- virtual void AbortedCodeGeneration();
- virtual int stack_limit_slack() { return assembler_->stack_limit_slack(); }
- virtual bool CanReadUnaligned() { return assembler_->CanReadUnaligned(); }
- virtual void AdvanceCurrentPosition(int by); // Signed cp change.
- virtual void AdvanceRegister(int reg, int by); // r[reg] += by.
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned and_with,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, bool read_backward,
- Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
- Label* on_no_match);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned and_with,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 and_with,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
+ ~RegExpMacroAssemblerTracer() override;
+ void AbortedCodeGeneration() override;
+ int stack_limit_slack() override { return assembler_->stack_limit_slack(); }
+ bool CanReadUnaligned() override { return assembler_->CanReadUnaligned(); }
+ void AdvanceCurrentPosition(int by) override; // Signed cp change.
+ void AdvanceRegister(int reg, int by) override; // r[reg] += by.
+ void Backtrack() override;
+ void Bind(Label* label) override;
+ void CheckAtStart(Label* on_at_start) override;
+ void CheckCharacter(unsigned c, Label* on_equal) override;
+ void CheckCharacterAfterAnd(unsigned c, unsigned and_with,
+ Label* on_equal) override;
+ void CheckCharacterGT(uc16 limit, Label* on_greater) override;
+ void CheckCharacterLT(uc16 limit, Label* on_less) override;
+ void CheckGreedyLoop(Label* on_tos_equals_current_position) override;
+ void CheckNotAtStart(int cp_offset, Label* on_not_at_start) override;
+ void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match) override;
+ void CheckNotBackReferenceIgnoreCase(int start_reg, bool read_backward,
+ bool unicode,
+ Label* on_no_match) override;
+ void CheckNotCharacter(unsigned c, Label* on_not_equal) override;
+ void CheckNotCharacterAfterAnd(unsigned c, unsigned and_with,
+ Label* on_not_equal) override;
+ void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 and_with,
+ Label* on_not_equal) override;
+ void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range) override;
+ void CheckCharacterNotInRange(uc16 from, uc16 to,
+ Label* on_not_in_range) override;
+ void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set) override;
+ void CheckPosition(int cp_offset, Label* on_outside_input) override;
+ bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match) override;
+ void Fail() override;
+ Handle<HeapObject> GetCode(Handle<String> source) override;
+ void GoTo(Label* label) override;
+ void IfRegisterGE(int reg, int comparand, Label* if_ge) override;
+ void IfRegisterLT(int reg, int comparand, Label* if_lt) override;
+ void IfRegisterEqPos(int reg, Label* if_eq) override;
+ IrregexpImplementation Implementation() override;
+ void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1) override;
+ void PopCurrentPosition() override;
+ void PopRegister(int register_index) override;
+ void PushBacktrack(Label* label) override;
+ void PushCurrentPosition() override;
+ void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) override;
+ void ReadCurrentPositionFromRegister(int reg) override;
+ void ReadStackPointerFromRegister(int reg) override;
+ void SetCurrentPositionFromEnd(int by) override;
+ void SetRegister(int register_index, int to) override;
+ bool Succeed() override;
+ void WriteCurrentPositionToRegister(int reg, int cp_offset) override;
+ void ClearRegisters(int reg_from, int reg_to) override;
+ void WriteStackPointerToRegister(int reg) override;
private:
RegExpMacroAssembler* assembler_;
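
The header rewrite above is a mechanical C++11 modernization: 'virtual' on each overriding declaration is replaced by 'override', and empty destructor bodies by '= default'. The payoff is that a signature mismatch becomes a compile error instead of a silently added, never-called method. A minimal standalone sketch of the pattern, with hypothetical class names (not the V8 types):

    #include <cstdio>

    class Assembler {
     public:
      virtual ~Assembler() = default;
      virtual void Bind(int label) = 0;
    };

    class TracingAssembler : public Assembler {
     public:
      ~TracingAssembler() override = default;
      // 'override' guarantees this actually overrides Assembler::Bind;
      // changing the parameter type would now fail to compile.
      void Bind(int label) override { std::printf(" Bind(%d)\n", label); }
    };

    int main() {
      TracingAssembler tracer;
      Assembler* base = &tracer;
      base->Bind(7);  // virtual dispatch reaches the tracing override
      return 0;
    }
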
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 77e8847d68..24bd10c616 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -23,10 +23,7 @@ RegExpMacroAssembler::RegExpMacroAssembler(Isolate* isolate, Zone* zone)
isolate_(isolate),
zone_(zone) {}
-
-RegExpMacroAssembler::~RegExpMacroAssembler() {
-}
-
+RegExpMacroAssembler::~RegExpMacroAssembler() = default;
int RegExpMacroAssembler::CaseInsensitiveCompareUC16(Address byte_offset1,
Address byte_offset2,
@@ -117,10 +114,7 @@ NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Isolate* isolate,
Zone* zone)
: RegExpMacroAssembler(isolate, zone) {}
-
-NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
-}
-
+NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() = default;
bool NativeRegExpMacroAssembler::CanReadUnaligned() {
return FLAG_enable_regexp_unaligned_accesses && !slow_safe();
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 65da431568..e6bdd842c6 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -212,8 +212,8 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
NativeRegExpMacroAssembler(Isolate* isolate, Zone* zone);
- virtual ~NativeRegExpMacroAssembler();
- virtual bool CanReadUnaligned();
+ ~NativeRegExpMacroAssembler() override;
+ bool CanReadUnaligned() override;
static Result Match(Handle<Code> regexp,
Handle<String> subject,
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index c1d2c7d5cd..797424baf8 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -12,6 +12,7 @@
#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
+#include "src/regexp/property-sequences.h"
#include "src/utils.h"
#ifdef V8_INTL_SUPPORT
@@ -344,13 +345,23 @@ RegExpTree* RegExpParser::ParseDisjunction() {
if (unicode()) {
ZoneList<CharacterRange>* ranges =
new (zone()) ZoneList<CharacterRange>(2, zone());
- if (!ParsePropertyClass(ranges, p == 'P')) {
- return ReportError(CStrVector("Invalid property name"));
+ std::vector<char> name_1, name_2;
+ if (ParsePropertyClassName(&name_1, &name_2)) {
+ if (AddPropertyClassRange(ranges, p == 'P', name_1, name_2)) {
+ RegExpCharacterClass* cc = new (zone())
+ RegExpCharacterClass(zone(), ranges, builder->flags());
+ builder->AddCharacterClass(cc);
+ break;
+ }
+ if (p == 'p' && name_2.empty()) {
+ RegExpTree* sequence = GetPropertySequence(name_1);
+ if (sequence != nullptr) {
+ builder->AddAtom(sequence);
+ break;
+ }
+ }
}
- RegExpCharacterClass* cc = new (zone())
- RegExpCharacterClass(zone(), ranges, builder->flags());
- builder->AddCharacterClass(cc);
-
+ return ReportError(CStrVector("Invalid property name"));
} else {
builder->AddCharacter(p);
}
@@ -1346,8 +1357,10 @@ bool IsUnicodePropertyValueCharacter(char c) {
} // anonymous namespace
-bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
- bool negate) {
+bool RegExpParser::ParsePropertyClassName(std::vector<char>* name_1,
+ std::vector<char>* name_2) {
+ DCHECK(name_1->empty());
+ DCHECK(name_2->empty());
// Parse the property class as follows:
// - In \p{name}, 'name' is interpreted
// - either as a general category property value name.
@@ -1356,55 +1369,58 @@ bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
// and 'value' is interpreted as one of the available property value names.
// - Aliases in PropertyAlias.txt and PropertyValueAlias.txt can be used.
// - Loose matching is not applied.
- std::vector<char> first_part;
- std::vector<char> second_part;
if (current() == '{') {
// Parse \p{[PropertyName=]PropertyNameValue}
for (Advance(); current() != '}' && current() != '='; Advance()) {
if (!IsUnicodePropertyValueCharacter(current())) return false;
if (!has_next()) return false;
- first_part.push_back(static_cast<char>(current()));
+ name_1->push_back(static_cast<char>(current()));
}
if (current() == '=') {
for (Advance(); current() != '}'; Advance()) {
if (!IsUnicodePropertyValueCharacter(current())) return false;
if (!has_next()) return false;
- second_part.push_back(static_cast<char>(current()));
+ name_2->push_back(static_cast<char>(current()));
}
- second_part.push_back(0); // null-terminate string.
+ name_2->push_back(0); // null-terminate string.
}
} else {
return false;
}
Advance();
- first_part.push_back(0); // null-terminate string.
+ name_1->push_back(0); // null-terminate string.
- DCHECK(first_part.size() - 1 == std::strlen(first_part.data()));
- DCHECK(second_part.empty() ||
- second_part.size() - 1 == std::strlen(second_part.data()));
+ DCHECK(name_1->size() - 1 == std::strlen(name_1->data()));
+ DCHECK(name_2->empty() || name_2->size() - 1 == std::strlen(name_2->data()));
+ return true;
+}
- if (second_part.empty()) {
+bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
+ bool negate,
+ const std::vector<char>& name_1,
+ const std::vector<char>& name_2) {
+ if (name_2.empty()) {
// First attempt to interpret as general category property value name.
- const char* name = first_part.data();
+ const char* name = name_1.data();
if (LookupPropertyValueName(UCHAR_GENERAL_CATEGORY_MASK, name, negate,
- result, zone())) {
+ add_to, zone())) {
return true;
}
// Interpret "Any", "ASCII", and "Assigned".
- if (LookupSpecialPropertyValueName(name, result, negate, zone())) {
+ if (LookupSpecialPropertyValueName(name, add_to, negate, zone())) {
return true;
}
// Then attempt to interpret as binary property name with value name 'Y'.
UProperty property = u_getPropertyEnum(name);
if (!IsSupportedBinaryProperty(property)) return false;
if (!IsExactPropertyAlias(name, property)) return false;
- return LookupPropertyValueName(property, negate ? "N" : "Y", false, result,
+ return LookupPropertyValueName(property, negate ? "N" : "Y", false, add_to,
zone());
} else {
// Both property name and value name are specified. Attempt to interpret
// the property name as enumerated property.
- const char* property_name = first_part.data();
- const char* value_name = second_part.data();
+ const char* property_name = name_1.data();
+ const char* value_name = name_2.data();
UProperty property = u_getPropertyEnum(property_name);
if (!IsExactPropertyAlias(property_name, property)) return false;
if (property == UCHAR_GENERAL_CATEGORY) {
@@ -1414,18 +1430,93 @@ bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
property != UCHAR_SCRIPT_EXTENSIONS) {
return false;
}
- return LookupPropertyValueName(property, value_name, negate, result,
+ return LookupPropertyValueName(property, value_name, negate, add_to,
zone());
}
}
+RegExpTree* RegExpParser::GetPropertySequence(const std::vector<char>& name_1) {
+ if (!FLAG_harmony_regexp_sequence) return nullptr;
+ const char* name = name_1.data();
+ const uc32* sequence_list = nullptr;
+ JSRegExp::Flags flags = JSRegExp::kUnicode;
+ if (NameEquals(name, "Emoji_Flag_Sequence")) {
+ sequence_list = UnicodePropertySequences::kEmojiFlagSequences;
+ } else if (NameEquals(name, "Emoji_Tag_Sequence")) {
+ sequence_list = UnicodePropertySequences::kEmojiTagSequences;
+ } else if (NameEquals(name, "Emoji_ZWJ_Sequence")) {
+ sequence_list = UnicodePropertySequences::kEmojiZWJSequences;
+ }
+ if (sequence_list != nullptr) {
+    // TODO(yangguo): this creates huge regexp code. An alternative would be
+    // to add a new operator that checks for these sequences at runtime.
+ RegExpBuilder builder(zone(), flags);
+ while (true) { // Iterate through list of sequences.
+ while (*sequence_list != 0) { // Iterate through sequence.
+ builder.AddUnicodeCharacter(*sequence_list);
+ sequence_list++;
+ }
+ sequence_list++;
+ if (*sequence_list == 0) break;
+ builder.NewAlternative();
+ }
+ return builder.ToRegExp();
+ }
+
+ if (NameEquals(name, "Emoji_Keycap_Sequence")) {
+ // https://unicode.org/reports/tr51/#def_emoji_keycap_sequence
+ // emoji_keycap_sequence := [0-9#*] \x{FE0F 20E3}
+ RegExpBuilder builder(zone(), flags);
+ ZoneList<CharacterRange>* prefix_ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ prefix_ranges->Add(CharacterRange::Range('0', '9'), zone());
+ prefix_ranges->Add(CharacterRange::Singleton('#'), zone());
+ prefix_ranges->Add(CharacterRange::Singleton('*'), zone());
+ builder.AddCharacterClass(
+ new (zone()) RegExpCharacterClass(zone(), prefix_ranges, flags));
+ builder.AddCharacter(0xFE0F);
+ builder.AddCharacter(0x20E3);
+ return builder.ToRegExp();
+ } else if (NameEquals(name, "Emoji_Modifier_Sequence")) {
+ // https://unicode.org/reports/tr51/#def_emoji_modifier_sequence
+ // emoji_modifier_sequence := emoji_modifier_base emoji_modifier
+ RegExpBuilder builder(zone(), flags);
+ ZoneList<CharacterRange>* modifier_base_ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ LookupPropertyValueName(UCHAR_EMOJI_MODIFIER_BASE, "Y", false,
+ modifier_base_ranges, zone());
+ builder.AddCharacterClass(
+ new (zone()) RegExpCharacterClass(zone(), modifier_base_ranges, flags));
+ ZoneList<CharacterRange>* modifier_ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ LookupPropertyValueName(UCHAR_EMOJI_MODIFIER, "Y", false, modifier_ranges,
+ zone());
+ builder.AddCharacterClass(
+ new (zone()) RegExpCharacterClass(zone(), modifier_ranges, flags));
+ return builder.ToRegExp();
+ }
+
+ return nullptr;
+}
+
#else // V8_INTL_SUPPORT
-bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
- bool negate) {
+bool RegExpParser::ParsePropertyClassName(std::vector<char>* name_1,
+ std::vector<char>* name_2) {
+ return false;
+}
+
+bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
+ bool negate,
+ const std::vector<char>& name_1,
+ const std::vector<char>& name_2) {
return false;
}
+RegExpTree* RegExpParser::GetPropertySequence(const std::vector<char>& name) {
+ return nullptr;
+}
+
#endif // V8_INTL_SUPPORT
bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
@@ -1591,7 +1682,9 @@ void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
if (unicode()) {
bool negate = Next() == 'P';
Advance(2);
- if (!ParsePropertyClass(ranges, negate)) {
+ std::vector<char> name_1, name_2;
+ if (!ParsePropertyClassName(&name_1, &name_2) ||
+ !AddPropertyClassRange(ranges, negate, name_1, name_2)) {
ReportError(CStrVector("Invalid property name in character class"));
}
*is_class_escape = true;
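
The alternation loop in GetPropertySequence above appears to assume a double-zero encoding for the sequence tables (kEmojiFlagSequences and friends): a single 0 terminates one sequence, and a second consecutive 0 terminates the whole list. A self-contained sketch of walking that encoding, under that assumption and with made-up table contents rather than V8 data:

    #include <cstdint>
    #include <cstdio>

    using uc32 = uint32_t;

    // Hypothetical stand-in for a UnicodePropertySequences table: two
    // sequences, each 0-terminated, with a trailing 0 closing the list.
    constexpr uc32 kDemoSequences[] = {0x1F1E6, 0x1F1E8, 0,          // seq 1
                                       0x0023, 0xFE0F, 0x20E3, 0,    // seq 2
                                       0};

    int main() {
      const uc32* p = kDemoSequences;
      int n = 0;
      while (true) {       // iterate through the list of sequences
        std::printf("alternative %d:", ++n);
        while (*p != 0) {  // iterate through one sequence
          std::printf(" U+%04X", static_cast<unsigned>(*p));
          ++p;
        }
        std::printf("\n");
        ++p;                  // step over the sequence terminator
        if (*p == 0) break;   // second 0 in a row: end of the list
      }
      return 0;
    }

Each inner run becomes one alternative of the generated regexp, which is why the TODO notes that long tables produce large regexp code.
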
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index 56d4ac8599..799017bb1c 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -151,8 +151,7 @@ class RegExpBuilder : public ZoneObject {
#endif
};
-
-class RegExpParser BASE_EMBEDDED {
+class RegExpParser {
public:
RegExpParser(FlatStringReader* in, Handle<String>* error,
JSRegExp::Flags flags, Isolate* isolate, Zone* zone);
@@ -177,7 +176,14 @@ class RegExpParser BASE_EMBEDDED {
bool ParseHexEscape(int length, uc32* value);
bool ParseUnicodeEscape(uc32* value);
bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
- bool ParsePropertyClass(ZoneList<CharacterRange>* result, bool negate);
+
+ bool ParsePropertyClassName(std::vector<char>* name_1,
+ std::vector<char>* name_2);
+ bool AddPropertyClassRange(ZoneList<CharacterRange>* add_to, bool negate,
+ const std::vector<char>& name_1,
+ const std::vector<char>& name_2);
+
+ RegExpTree* GetPropertySequence(const std::vector<char>& name_1);
RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
uc32 ParseOctalLiteral();
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 1cf2f73ac3..59d4b43397 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -19,74 +19,65 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerX64(Isolate* isolate, Zone* zone, Mode mode,
int registers_to_save);
- virtual ~RegExpMacroAssemblerX64();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ ~RegExpMacroAssemblerX64() override;
+ int stack_limit_slack() override;
+ void AdvanceCurrentPosition(int by) override;
+ void AdvanceRegister(int reg, int by) override;
+ void Backtrack() override;
+ void Bind(Label* label) override;
+ void CheckAtStart(Label* on_at_start) override;
+ void CheckCharacter(uint32_t c, Label* on_equal) override;
+ void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_equal) override;
+ void CheckCharacterGT(uc16 limit, Label* on_greater) override;
+ void CheckCharacterLT(uc16 limit, Label* on_less) override;
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, bool read_backward,
- Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
- Label* on_no_match);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+ void CheckGreedyLoop(Label* on_tos_equals_current_position) override;
+ void CheckNotAtStart(int cp_offset, Label* on_not_at_start) override;
+ void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match) override;
+ void CheckNotBackReferenceIgnoreCase(int start_reg, bool read_backward,
+ bool unicode,
+ Label* on_no_match) override;
+ void CheckNotCharacter(uint32_t c, Label* on_not_equal) override;
+ void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_not_equal) override;
+ void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
+ Label* on_not_equal) override;
+ void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range) override;
+ void CheckCharacterNotInRange(uc16 from, uc16 to,
+ Label* on_not_in_range) override;
+ void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set) override;
// Checks whether the given offset from the current position is before
// the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
+ void CheckPosition(int cp_offset, Label* on_outside_input) override;
+ bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match) override;
+ void Fail() override;
+ Handle<HeapObject> GetCode(Handle<String> source) override;
+ void GoTo(Label* label) override;
+ void IfRegisterGE(int reg, int comparand, Label* if_ge) override;
+ void IfRegisterLT(int reg, int comparand, Label* if_lt) override;
+ void IfRegisterEqPos(int reg, Label* if_eq) override;
+ IrregexpImplementation Implementation() override;
+ void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1) override;
+ void PopCurrentPosition() override;
+ void PopRegister(int register_index) override;
+ void PushBacktrack(Label* label) override;
+ void PushCurrentPosition() override;
+ void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) override;
+ void ReadCurrentPositionFromRegister(int reg) override;
+ void ReadStackPointerFromRegister(int reg) override;
+ void SetCurrentPositionFromEnd(int by) override;
+ void SetRegister(int register_index, int to) override;
+ bool Succeed() override;
+ void WriteCurrentPositionToRegister(int reg, int cp_offset) override;
+ void ClearRegisters(int reg_from, int reg_to) override;
+ void WriteStackPointerToRegister(int reg) override;
static Result Match(Handle<Code> regexp,
Handle<String> subject,
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/register-configuration.h
index 538c3331ec..46e2df79e9 100644
--- a/deps/v8/src/register-configuration.h
+++ b/deps/v8/src/register-configuration.h
@@ -144,7 +144,7 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
bool AreAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep, int other_index) const;
- virtual ~RegisterConfiguration() {}
+ virtual ~RegisterConfiguration() = default;
private:
const int num_general_registers_;
diff --git a/deps/v8/src/reloc-info.h b/deps/v8/src/reloc-info.h
index 53d52830a0..5f7071f845 100644
--- a/deps/v8/src/reloc-info.h
+++ b/deps/v8/src/reloc-info.h
@@ -341,7 +341,7 @@ class RelocInfo {
// RelocInfoWriter serializes a stream of relocation info. It writes towards
// lower addresses.
-class RelocInfoWriter BASE_EMBEDDED {
+class RelocInfoWriter {
public:
RelocInfoWriter() : pos_(nullptr), last_pc_(nullptr) {}
@@ -402,7 +402,6 @@ class RelocIterator : public Malloced {
Vector<const byte> reloc_info, Address const_pool,
int mode_mask = -1);
RelocIterator(RelocIterator&&) = default;
- RelocIterator& operator=(RelocIterator&&) = default;
// Iteration
bool done() const { return done_; }
diff --git a/deps/v8/src/roots-inl.h b/deps/v8/src/roots-inl.h
index 4caa9d8f0a..fc6f86c8be 100644
--- a/deps/v8/src/roots-inl.h
+++ b/deps/v8/src/roots-inl.h
@@ -8,89 +8,74 @@
#include "src/roots.h"
#include "src/heap/heap-inl.h"
-#include "src/objects/api-callbacks.h"
namespace v8 {
-
namespace internal {
-ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate) : heap_(isolate->heap()) {}
+V8_INLINE bool operator<(RootIndex lhs, RootIndex rhs) {
+ typedef typename std::underlying_type<RootIndex>::type type;
+ return static_cast<type>(lhs) < static_cast<type>(rhs);
+}
+
+V8_INLINE RootIndex operator++(RootIndex& index) {
+ typedef typename std::underlying_type<RootIndex>::type type;
+ index = static_cast<RootIndex>(static_cast<type>(index) + 1);
+ return index;
+}
+
+ReadOnlyRoots::ReadOnlyRoots(Heap* heap) : roots_table_(heap->roots_table()) {}
-#define ROOT_ACCESSOR(type, name, camel_name) \
- type* ReadOnlyRoots::name() { \
- return type::cast(heap_->roots_[Heap::k##camel_name##RootIndex]); \
- } \
- Handle<type> ReadOnlyRoots::name##_handle() { \
- return Handle<type>( \
- bit_cast<type**>(&heap_->roots_[Heap::k##camel_name##RootIndex])); \
+ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate)
+ : roots_table_(isolate->heap()->roots_table()) {}
+
+#define ROOT_ACCESSOR(type, name, CamelName) \
+ type* ReadOnlyRoots::name() { \
+ return type::cast(roots_table_[RootIndex::k##CamelName]); \
+ } \
+ Handle<type> ReadOnlyRoots::name##_handle() { \
+ return Handle<type>( \
+ bit_cast<type**>(&roots_table_[RootIndex::k##CamelName])); \
}
-STRONG_READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
+
+READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define STRING_ACCESSOR(name, str) \
- String* ReadOnlyRoots::name() { \
- return String::cast(heap_->roots_[Heap::k##name##RootIndex]); \
- } \
- Handle<String> ReadOnlyRoots::name##_handle() { \
- return Handle<String>( \
- bit_cast<String**>(&heap_->roots_[Heap::k##name##RootIndex])); \
- }
-INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name) \
- Symbol* ReadOnlyRoots::name() { \
- return Symbol::cast(heap_->roots_[Heap::k##name##RootIndex]); \
- } \
- Handle<Symbol> ReadOnlyRoots::name##_handle() { \
- return Handle<Symbol>( \
- bit_cast<Symbol**>(&heap_->roots_[Heap::k##name##RootIndex])); \
- }
-PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, description) \
- Symbol* ReadOnlyRoots::name() { \
- return Symbol::cast(heap_->roots_[Heap::k##name##RootIndex]); \
- } \
- Handle<Symbol> ReadOnlyRoots::name##_handle() { \
- return Handle<Symbol>( \
- bit_cast<Symbol**>(&heap_->roots_[Heap::k##name##RootIndex])); \
- }
-PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
-WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- Map* ReadOnlyRoots::name##_map() { \
- return Map::cast(heap_->roots_[Heap::k##Name##MapRootIndex]); \
- } \
- Handle<Map> ReadOnlyRoots::name##_map_handle() { \
- return Handle<Map>( \
- bit_cast<Map**>(&heap_->roots_[Heap::k##Name##MapRootIndex])); \
- }
-STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
-#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
- Map* ReadOnlyRoots::name##_map() { \
- return Map::cast(heap_->roots_[Heap::k##Name##Size##MapRootIndex]); \
- } \
- Handle<Map> ReadOnlyRoots::name##_map_handle() { \
- return Handle<Map>( \
- bit_cast<Map**>(&heap_->roots_[Heap::k##Name##Size##MapRootIndex])); \
- }
-ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
-#undef ALLOCATION_SITE_MAP_ACCESSOR
+Map* ReadOnlyRoots::MapForFixedTypedArray(ExternalArrayType array_type) {
+ RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(array_type);
+ return Map::cast(roots_table_[root_index]);
+}
+
+Map* ReadOnlyRoots::MapForFixedTypedArray(ElementsKind elements_kind) {
+ RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(elements_kind);
+ return Map::cast(roots_table_[root_index]);
+}
FixedTypedArrayBase* ReadOnlyRoots::EmptyFixedTypedArrayForMap(const Map* map) {
- // TODO(delphick): All of these empty fixed type arrays are in RO_SPACE so
- // this the method below can be moved into ReadOnlyRoots.
- return heap_->EmptyFixedTypedArrayForMap(map);
+ RootIndex root_index =
+ RootsTable::RootIndexForEmptyFixedTypedArray(map->elements_kind());
+ return FixedTypedArrayBase::cast(roots_table_[root_index]);
}
-} // namespace internal
+Object** RootsTable::read_only_roots_end() {
+// Enumerate the read-only roots into an expression of the form:
+// (root_1, root_2, root_3, ..., root_n)
+// This evaluates to root_n, but Clang warns that the other values in the
+// list are unused, so we suppress that warning.
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-value"
+#endif
+#define ROOT(type, name, CamelName) , RootIndex::k##CamelName
+ constexpr RootIndex kLastReadOnlyRoot =
+ (RootIndex::kFirstRoot READ_ONLY_ROOT_LIST(ROOT));
+#undef ROOT
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+ return &roots_[static_cast<size_t>(kLastReadOnlyRoot) + 1];
+}
+} // namespace internal
} // namespace v8
#endif // V8_ROOTS_INL_H_
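
The operator< and operator++ overloads at the top of roots-inl.h exist because a scoped enum (enum class) has no implicit integer conversions, so it cannot be compared or incremented directly in a loop. The same trick in isolation, with illustrative enumerator values rather than the full root list:

    #include <cstdint>
    #include <cstdio>
    #include <type_traits>

    enum class RootIndex : uint16_t {
      kFreeSpaceMap,
      kOnePointerFillerMap,
      kEmptyString,
      kRootListLength,
      kFirstRoot = 0
    };

    inline bool operator<(RootIndex lhs, RootIndex rhs) {
      typedef std::underlying_type<RootIndex>::type type;
      return static_cast<type>(lhs) < static_cast<type>(rhs);
    }

    inline RootIndex operator++(RootIndex& index) {  // pre-increment
      typedef std::underlying_type<RootIndex>::type type;
      index = static_cast<RootIndex>(static_cast<type>(index) + 1);
      return index;
    }

    int main() {
      // The overloads make the scoped enum usable as a loop counter.
      for (RootIndex i = RootIndex::kFirstRoot;
           i < RootIndex::kRootListLength; ++i) {
        std::printf("root index %u\n", static_cast<unsigned>(i));
      }
      return 0;
    }
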
diff --git a/deps/v8/src/roots.cc b/deps/v8/src/roots.cc
new file mode 100644
index 0000000000..529d2ec472
--- /dev/null
+++ b/deps/v8/src/roots.cc
@@ -0,0 +1,54 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/roots.h"
+#include "src/elements-kind.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+RootIndex RootsTable::RootIndexForFixedTypedArray(
+ ExternalArrayType array_type) {
+ switch (array_type) {
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
+ return RootIndex::kFixed##Type##ArrayMap;
+
+ TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
+#undef ARRAY_TYPE_TO_ROOT_INDEX
+ }
+ UNREACHABLE();
+}
+
+// static
+RootIndex RootsTable::RootIndexForFixedTypedArray(ElementsKind elements_kind) {
+ switch (elements_kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ return RootIndex::kFixed##Type##ArrayMap;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ default:
+ UNREACHABLE();
+#undef TYPED_ARRAY_CASE
+ }
+}
+
+// static
+RootIndex RootsTable::RootIndexForEmptyFixedTypedArray(
+ ElementsKind elements_kind) {
+ switch (elements_kind) {
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ return RootIndex::kEmptyFixed##Type##Array;
+
+ TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
+#undef ELEMENT_KIND_TO_ROOT_INDEX
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/roots.h b/deps/v8/src/roots.h
index a15cdcea2a..fc2b155604 100644
--- a/deps/v8/src/roots.h
+++ b/deps/v8/src/roots.h
@@ -5,14 +5,24 @@
#ifndef V8_ROOTS_H_
#define V8_ROOTS_H_
+#include "src/accessors.h"
+#include "src/globals.h"
#include "src/handles.h"
#include "src/heap-symbols.h"
#include "src/objects-definitions.h"
namespace v8 {
-
namespace internal {
+// Forward declarations.
+enum ElementsKind : uint8_t;
+class FixedTypedArrayBase;
+class Heap;
+class Isolate;
+class Map;
+class String;
+class Symbol;
+
// Defines all the read-only roots in Heap.
#define STRONG_READ_ONLY_ROOT_LIST(V) \
/* Cluster the most popular ones in a few cache lines here at the top. */ \
@@ -64,6 +74,7 @@ namespace internal {
V(Map, module_context_map, ModuleContextMap) \
V(Map, eval_context_map, EvalContextMap) \
V(Map, script_context_map, ScriptContextMap) \
+ V(Map, await_context_map, AwaitContextMap) \
V(Map, block_context_map, BlockContextMap) \
V(Map, catch_context_map, CatchContextMap) \
V(Map, with_context_map, WithContextMap) \
@@ -120,22 +131,23 @@ namespace internal {
V(Map, external_string_with_one_byte_data_map, \
ExternalStringWithOneByteDataMap) \
V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \
- V(Map, short_external_string_map, ShortExternalStringMap) \
- V(Map, short_external_string_with_one_byte_data_map, \
- ShortExternalStringWithOneByteDataMap) \
+ V(Map, uncached_external_string_map, UncachedExternalStringMap) \
+ V(Map, uncached_external_string_with_one_byte_data_map, \
+ UncachedExternalStringWithOneByteDataMap) \
V(Map, internalized_string_map, InternalizedStringMap) \
V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
V(Map, external_internalized_string_with_one_byte_data_map, \
ExternalInternalizedStringWithOneByteDataMap) \
V(Map, external_one_byte_internalized_string_map, \
ExternalOneByteInternalizedStringMap) \
- V(Map, short_external_internalized_string_map, \
- ShortExternalInternalizedStringMap) \
- V(Map, short_external_internalized_string_with_one_byte_data_map, \
- ShortExternalInternalizedStringWithOneByteDataMap) \
- V(Map, short_external_one_byte_internalized_string_map, \
- ShortExternalOneByteInternalizedStringMap) \
- V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
+ V(Map, uncached_external_internalized_string_map, \
+ UncachedExternalInternalizedStringMap) \
+ V(Map, uncached_external_internalized_string_with_one_byte_data_map, \
+ UncachedExternalInternalizedStringWithOneByteDataMap) \
+ V(Map, uncached_external_one_byte_internalized_string_map, \
+ UncachedExternalOneByteInternalizedStringMap) \
+ V(Map, uncached_external_one_byte_string_map, \
+ UncachedExternalOneByteStringMap) \
/* Array element maps */ \
V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
@@ -222,6 +234,7 @@ namespace internal {
V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
V(Cell, promise_resolve_protector, PromiseResolveProtector) \
V(PropertyCell, promise_then_protector, PromiseThenProtector) \
+ V(PropertyCell, string_iterator_protector, StringIteratorProtector) \
/* Caches */ \
V(FixedArray, number_string_cache, NumberStringCache) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
@@ -235,7 +248,7 @@ namespace internal {
V(WeakArrayList, script_list, ScriptList) \
V(SimpleNumberDictionary, code_stubs, CodeStubs) \
V(FixedArray, materialized_objects, MaterializedObjects) \
- V(FixedArray, microtask_queue, MicrotaskQueue) \
+ V(MicrotaskQueue, default_microtask_queue, DefaultMicrotaskQueue) \
V(WeakArrayList, detached_contexts, DetachedContexts) \
V(WeakArrayList, retaining_path_targets, RetainingPathTargets) \
V(WeakArrayList, retained_maps, RetainedMaps) \
@@ -249,11 +262,6 @@ namespace internal {
V(FixedArray, serialized_objects, SerializedObjects) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
- /* DeserializeLazy handlers for lazy bytecode deserialization */ \
- V(Object, deserialize_lazy_handler, DeserializeLazyHandler) \
- V(Object, deserialize_lazy_handler_wide, DeserializeLazyHandlerWide) \
- V(Object, deserialize_lazy_handler_extra_wide, \
- DeserializeLazyHandlerExtraWide) \
/* Hash seed */ \
V(ByteArray, hash_seed, HashSeed) \
/* JS Entries */ \
@@ -261,10 +269,6 @@ namespace internal {
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(Code, js_run_microtasks_entry_code, JsRunMicrotasksEntryCode)
-#define STRONG_ROOT_LIST(V) \
- STRONG_READ_ONLY_ROOT_LIST(V) \
- STRONG_MUTABLE_ROOT_LIST(V)
-
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
V(Smi, stack_limit, StackLimit) \
@@ -281,69 +285,158 @@ namespace internal {
ConstructStubInvokeDeoptPCOffset) \
V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
-#define MUTABLE_ROOT_LIST(V) \
- STRONG_MUTABLE_ROOT_LIST(V) \
- SMI_ROOT_LIST(V) \
- V(StringTable, string_table, StringTable)
+// Adapts one INTERNALIZED_STRING_LIST_GENERATOR entry to
+// a ROOT_LIST-compatible entry
+#define INTERNALIZED_STRING_LIST_ADAPTER(V, name, ...) V(String, name, name)
-#define ROOT_LIST(V) \
- MUTABLE_ROOT_LIST(V) \
- STRONG_READ_ONLY_ROOT_LIST(V)
+// Produces (String, name, CamelCase) entries
+#define INTERNALIZED_STRING_ROOT_LIST(V) \
+ INTERNALIZED_STRING_LIST_GENERATOR(INTERNALIZED_STRING_LIST_ADAPTER, V)
-class FixedTypedArrayBase;
-class Heap;
-class Isolate;
-class Map;
-class String;
-class Symbol;
+// Adapts one XXX_SYMBOL_LIST_GENERATOR entry to a ROOT_LIST-compatible entry
+#define SYMBOL_ROOT_LIST_ADAPTER(V, name, ...) V(Symbol, name, name)
+
+// Produces (Symbol, name, CamelCase) entries
+#define PRIVATE_SYMBOL_ROOT_LIST(V) \
+ PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_ROOT_LIST_ADAPTER, V)
+#define PUBLIC_SYMBOL_ROOT_LIST(V) \
+ PUBLIC_SYMBOL_LIST_GENERATOR(SYMBOL_ROOT_LIST_ADAPTER, V)
+#define WELL_KNOWN_SYMBOL_ROOT_LIST(V) \
+ WELL_KNOWN_SYMBOL_LIST_GENERATOR(SYMBOL_ROOT_LIST_ADAPTER, V)
+
+// Adapts one ACCESSOR_INFO_LIST_GENERATOR entry to a ROOT_LIST-compatible
+// entry
+#define ACCESSOR_INFO_ROOT_LIST_ADAPTER(V, name, CamelName, ...) \
+ V(AccessorInfo, name##_accessor, CamelName##Accessor)
+
+// Produces (AccessorInfo, name, CamelCase) entries
+#define ACCESSOR_INFO_ROOT_LIST(V) \
+ ACCESSOR_INFO_LIST_GENERATOR(ACCESSOR_INFO_ROOT_LIST_ADAPTER, V)
+
+#define READ_ONLY_ROOT_LIST(V) \
+ STRONG_READ_ONLY_ROOT_LIST(V) \
+ INTERNALIZED_STRING_ROOT_LIST(V) \
+ PRIVATE_SYMBOL_ROOT_LIST(V) \
+ PUBLIC_SYMBOL_ROOT_LIST(V) \
+ WELL_KNOWN_SYMBOL_ROOT_LIST(V) \
+ STRUCT_MAPS_LIST(V) \
+ ALLOCATION_SITE_MAPS_LIST(V) \
+ DATA_HANDLER_MAPS_LIST(V)
+
+#define MUTABLE_ROOT_LIST(V) \
+ STRONG_MUTABLE_ROOT_LIST(V) \
+ ACCESSOR_INFO_ROOT_LIST(V) \
+ V(StringTable, string_table, StringTable) \
+ SMI_ROOT_LIST(V)
+
+#define ROOT_LIST(V) \
+ READ_ONLY_ROOT_LIST(V) \
+ MUTABLE_ROOT_LIST(V)
+
+// Declare all the root indices. This defines the root list order.
+// clang-format off
+enum class RootIndex : uint16_t {
+#define DECL(type, name, CamelName) k##CamelName,
+ ROOT_LIST(DECL)
+#undef DECL
+
+ kRootListLength,
+
+ // Helper aliases for inclusive regions of root indices.
+ kFirstRoot = 0,
+ kLastRoot = kRootListLength - 1,
+
+ // kStringTable is not a strong root.
+ kFirstStrongRoot = kFirstRoot,
+ kLastStrongRoot = kStringTable - 1,
+
+ kFirstSmiRoot = kStringTable + 1,
+ kLastSmiRoot = kLastRoot
+};
+// clang-format on
+
+// Represents a storage of V8 heap roots.
+class RootsTable {
+ public:
+ static constexpr size_t kEntriesCount =
+ static_cast<size_t>(RootIndex::kRootListLength);
+
+ RootsTable() : roots_{} {}
+
+ bool IsRootHandleLocation(Object** handle_location, RootIndex* index) const {
+ if (handle_location >= &roots_[kEntriesCount]) return false;
+ if (handle_location < &roots_[0]) return false;
+ *index = static_cast<RootIndex>(handle_location - &roots_[0]);
+ return true;
+ }
+
+ template <typename T>
+ bool IsRootHandle(Handle<T> handle, RootIndex* index) const {
+ Object** handle_location = bit_cast<Object**>(handle.address());
+ return IsRootHandleLocation(handle_location, index);
+ }
+
+ Object* const& operator[](RootIndex root_index) const {
+ size_t index = static_cast<size_t>(root_index);
+ DCHECK_LT(index, kEntriesCount);
+ return roots_[index];
+ }
+
+ static RootIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
+ static RootIndex RootIndexForFixedTypedArray(ElementsKind elements_kind);
+ static RootIndex RootIndexForEmptyFixedTypedArray(ElementsKind elements_kind);
+
+ private:
+ Object** read_only_roots_begin() {
+ return &roots_[static_cast<size_t>(RootIndex::kFirstStrongRoot)];
+ }
+ inline Object** read_only_roots_end();
+
+ Object** strong_roots_begin() {
+ return &roots_[static_cast<size_t>(RootIndex::kFirstStrongRoot)];
+ }
+ Object** strong_roots_end() {
+ return &roots_[static_cast<size_t>(RootIndex::kLastStrongRoot) + 1];
+ }
+
+ Object** smi_roots_begin() {
+ return &roots_[static_cast<size_t>(RootIndex::kFirstSmiRoot)];
+ }
+ Object** smi_roots_end() {
+ return &roots_[static_cast<size_t>(RootIndex::kLastSmiRoot) + 1];
+ }
+
+ Object*& operator[](RootIndex root_index) {
+ size_t index = static_cast<size_t>(root_index);
+ DCHECK_LT(index, kEntriesCount);
+ return roots_[index];
+ }
+
+ Object* roots_[kEntriesCount];
+
+ friend class Heap;
+ friend class Factory;
+ friend class ReadOnlyRoots;
+};
class ReadOnlyRoots {
public:
- explicit ReadOnlyRoots(Heap* heap) : heap_(heap) {}
- inline explicit ReadOnlyRoots(Isolate* isolate);
+ V8_INLINE explicit ReadOnlyRoots(Heap* heap);
+ V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
+
+#define ROOT_ACCESSOR(type, name, CamelName) \
+ V8_INLINE class type* name(); \
+ V8_INLINE Handle<type> name##_handle();
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline class type* name(); \
- inline Handle<type> name##_handle();
- STRONG_READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
+ READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define STRING_ACCESSOR(name, str) \
- inline String* name(); \
- inline Handle<String> name##_handle();
- INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name) \
- inline Symbol* name(); \
- inline Handle<Symbol> name##_handle();
- PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, description) \
- inline Symbol* name(); \
- inline Handle<Symbol> name##_handle();
- PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
- WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-// Utility type maps.
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- inline Map* name##_map(); \
- inline class Handle<Map> name##_map_handle();
- STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
-#define ALLOCATION_SITE_MAP_ACCESSOR(NAME, Name, Size, name) \
- inline Map* name##_map(); \
- inline class Handle<Map> name##_map_handle();
- ALLOCATION_SITE_LIST(ALLOCATION_SITE_MAP_ACCESSOR)
-#undef ALLOCATION_SITE_MAP_ACCESSOR
-
- inline FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);
+ V8_INLINE Map* MapForFixedTypedArray(ExternalArrayType array_type);
+ V8_INLINE Map* MapForFixedTypedArray(ElementsKind elements_kind);
+ V8_INLINE FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);
private:
- Heap* heap_;
+ const RootsTable& roots_table_;
};
} // namespace internal
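
The adapter macros introduced in roots.h follow the usual X-macro pattern: a list-generator macro applies its first argument to every entry, and the adapter reshapes each entry into the (type, name, CamelName) triple that ROOT_LIST consumers expect. A reduced, self-contained illustration of the mechanism; the generator and its contents here are invented, not the V8 lists:

    #include <cstdio>

    // A list generator in the style of PUBLIC_SYMBOL_LIST_GENERATOR:
    // applies V to each entry, forwarding the extra argument.
    #define DEMO_SYMBOL_LIST_GENERATOR(V, _) \
      V(_, iterator_symbol)                  \
      V(_, to_string_tag_symbol)

    // Adapter producing ROOT_LIST-style (type, name, CamelName) entries.
    #define SYMBOL_ROOT_LIST_ADAPTER(V, name) V(Symbol, name, name)

    #define DEMO_ROOT_LIST(V) \
      DEMO_SYMBOL_LIST_GENERATOR(SYMBOL_ROOT_LIST_ADAPTER, V)

    // Expand the list once to declare the indices...
    enum class DemoRootIndex {
    #define DECL(type, name, CamelName) k##CamelName,
      DEMO_ROOT_LIST(DECL)
    #undef DECL
      kLength
    };

    int main() {
      // ...and once more to print what was declared.
    #define PRINT(type, name, CamelName) std::printf("%s %s\n", #type, #name);
      DEMO_ROOT_LIST(PRINT)
    #undef PRINT
      std::printf("entries: %d\n", static_cast<int>(DemoRootIndex::kLength));
      return 0;
    }

Because RootIndex is built from the same ROOT_LIST expansion, adding an entry to any adapted sub-list automatically extends the enum and the RootsTable storage.
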
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index d72159b0ac..abe3883097 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -30,6 +30,16 @@ RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
return *object;
}
+RUNTIME_FUNCTION(Runtime_TransitionElementsKindWithKind) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, elements_kind_smi, 1);
+ ElementsKind to_kind = static_cast<ElementsKind>(elements_kind_smi->value());
+ JSObject::TransitionElementsKind(object, to_kind);
+ return *object;
+}
+
namespace {
// Find the next free position. undefined and holes are both considered
// free spots. Returns "Nothing" if an exception occurred.
@@ -313,7 +323,7 @@ Maybe<bool> ConditionalCopy(Isolate* isolate, Handle<JSReceiver> source,
Handle<Object> source_element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, source_element, JSReceiver::GetElement(isolate, source, index),
+ isolate, source_element, JSReceiver::GetElement(isolate, target, index),
Nothing<bool>());
Handle<Object> set_result;
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 972e48bae6..3fd07af255 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -17,10 +17,32 @@
namespace v8 {
namespace internal {
+// Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
+
namespace {
#if V8_CC_GNU
+// GCC/Clang helpfully warn us that using 64-bit atomics on 32-bit platforms
+// can be slow. Good to know, but we don't have a choice.
+#ifdef V8_TARGET_ARCH_32_BIT
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpragmas"
+#pragma GCC diagnostic ignored "-Watomic-alignment"
+#endif // V8_TARGET_ARCH_32_BIT
+
+template <typename T>
+inline T LoadSeqCst(T* p) {
+ return __atomic_load_n(p, __ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline void StoreSeqCst(T* p, T value) {
+ __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
+}
+
template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
@@ -58,6 +80,10 @@ inline T XorSeqCst(T* p, T value) {
return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}
+#ifdef V8_TARGET_ARCH_32_BIT
+#pragma GCC diagnostic pop
+#endif // V8_TARGET_ARCH_32_BIT
+
#elif V8_CC_MSVC
#define InterlockedExchange32 _InterlockedExchange
@@ -67,6 +93,7 @@ inline T XorSeqCst(T* p, T value) {
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
#define InterlockedAnd32 _InterlockedAnd
+#define InterlockedOr64 _InterlockedOr64
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor
@@ -107,6 +134,18 @@ ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
+ATOMIC_OPS(int64_t, 64, __int64)
+ATOMIC_OPS(uint64_t, 64, __int64)
+
+template <typename T>
+inline T LoadSeqCst(T* p) {
+ UNREACHABLE();
+}
+
+template <typename T>
+inline void StoreSeqCst(T* p, T value) {
+ UNREACHABLE();
+}
#undef ATOMIC_OPS
@@ -117,6 +156,7 @@ ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
#undef InterlockedExchangeAdd16
#undef InterlockedExchangeAdd8
#undef InterlockedAnd32
+#undef InterlockedOr64
#undef InterlockedOr32
#undef InterlockedXor32
@@ -159,6 +199,15 @@ inline int32_t FromObject<int32_t>(Handle<Object> number) {
return NumberToInt32(*number);
}
+template <>
+inline uint64_t FromObject<uint64_t>(Handle<Object> bigint) {
+ return Handle<BigInt>::cast(bigint)->AsUint64();
+}
+
+template <>
+inline int64_t FromObject<int64_t>(Handle<Object> bigint) {
+ return Handle<BigInt>::cast(bigint)->AsInt64();
+}
inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }
@@ -178,15 +227,42 @@ inline Object* ToObject(Isolate* isolate, uint32_t t) {
return *isolate->factory()->NewNumber(t);
}
-template <typename T>
-inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
- T value = FromObject<T>(obj);
- T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
- return ToObject(isolate, result);
+inline Object* ToObject(Isolate* isolate, int64_t t) {
+ return *BigInt::FromInt64(isolate, t);
+}
+
+inline Object* ToObject(Isolate* isolate, uint64_t t) {
+ return *BigInt::FromUint64(isolate, t);
}
template <typename T>
+struct Load {
+ static inline Object* Do(Isolate* isolate, void* buffer, size_t index) {
+ T result = LoadSeqCst(static_cast<T*>(buffer) + index);
+ return ToObject(isolate, result);
+ }
+};
+
+template <typename T>
+struct Store {
+ static inline void Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ T value = FromObject<T>(obj);
+ StoreSeqCst(static_cast<T*>(buffer) + index, value);
+ }
+};
+
+template <typename T>
+struct Exchange {
+ static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ T value = FromObject<T>(obj);
+ T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
+ }
+};
+
+template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
Handle<Object> oldobj, Handle<Object> newobj) {
T oldval = FromObject<T>(oldobj);
@@ -197,44 +273,54 @@ inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
}
template <typename T>
-inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
- T value = FromObject<T>(obj);
- T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
- return ToObject(isolate, result);
-}
+struct Add {
+ static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ T value = FromObject<T>(obj);
+ T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
+ }
+};
template <typename T>
-inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
- T value = FromObject<T>(obj);
- T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
- return ToObject(isolate, result);
-}
+struct Sub {
+ static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ T value = FromObject<T>(obj);
+ T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
+ }
+};
template <typename T>
-inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
- T value = FromObject<T>(obj);
- T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
- return ToObject(isolate, result);
-}
+struct And {
+ static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ T value = FromObject<T>(obj);
+ T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
+ }
+};
template <typename T>
-inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
- T value = FromObject<T>(obj);
- T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
- return ToObject(isolate, result);
-}
+struct Or {
+ static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ T value = FromObject<T>(obj);
+ T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
+ }
+};
template <typename T>
-inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
- T value = FromObject<T>(obj);
- T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
- return ToObject(isolate, result);
-}
+struct Xor {
+ static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ T value = FromObject<T>(obj);
+ T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
+ }
+};
} // anonymous namespace
@@ -248,22 +334,44 @@ inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
V(Uint32, uint32, UINT32, uint32_t) \
V(Int32, int32, INT32, int32_t)
-RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
+// This is https://tc39.github.io/ecma262/#sec-getmodifysetvalueinbuffer
+// but also includes the ToInteger/ToBigInt conversion that's part of
+// https://tc39.github.io/ecma262/#sec-atomicreadmodifywrite
+template <template <typename> class Op>
+Object* GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
CHECK(sta->GetBuffer()->is_shared());
- CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(sta->byte_offset());
+ sta->byte_offset();
+
+ if (sta->type() >= kExternalBigInt64Array) {
+ Handle<BigInt> bigint;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
+ BigInt::FromObject(isolate, value_obj));
+ // SharedArrayBuffers are not neuterable.
+ CHECK_LT(index, NumberToSize(sta->length()));
+ if (sta->type() == kExternalBigInt64Array) {
+ return Op<int64_t>::Do(isolate, source, index, bigint);
+ }
+ DCHECK(sta->type() == kExternalBigUint64Array);
+ return Op<uint64_t>::Do(isolate, source, index, bigint);
+ }
+
+ Handle<Object> value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToInteger(isolate, value_obj));
+ // SharedArrayBuffers are not neuterable.
+ CHECK_LT(index, NumberToSize(sta->length()));
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
case kExternal##Type##Array: \
- return DoExchange<ctype>(isolate, source, index, value);
+ return Op<ctype>::Do(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -275,81 +383,104 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
UNREACHABLE();
}
-RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
+RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
CHECK(sta->GetBuffer()->is_shared());
- CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(sta->byte_offset());
+ sta->byte_offset();
- switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
- case kExternal##Type##Array: \
- return DoCompareExchange<ctype>(isolate, source, index, oldobj, newobj);
-
- INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- default:
- break;
+ DCHECK(sta->type() == kExternalBigInt64Array ||
+ sta->type() == kExternalBigUint64Array);
+ // SharedArrayBuffers are not neuterable.
+ CHECK_LT(index, NumberToSize(sta->length()));
+ if (sta->type() == kExternalBigInt64Array) {
+ return Load<int64_t>::Do(isolate, source, index);
}
-
- UNREACHABLE();
+ DCHECK(sta->type() == kExternalBigUint64Array);
+ return Load<uint64_t>::Do(isolate, source, index);
}
-// ES #sec-atomics.add
-// Atomics.add( typedArray, index, value )
-RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
+RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
CHECK(sta->GetBuffer()->is_shared());
- CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(sta->byte_offset());
+ sta->byte_offset();
- switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
- case kExternal##Type##Array: \
- return DoAdd<ctype>(isolate, source, index, value);
+ Handle<BigInt> bigint;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
+ BigInt::FromObject(isolate, value_obj));
- INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- default:
- break;
+ DCHECK(sta->type() == kExternalBigInt64Array ||
+ sta->type() == kExternalBigUint64Array);
+ // SharedArrayBuffers are not neuterable.
+ CHECK_LT(index, NumberToSize(sta->length()));
+ if (sta->type() == kExternalBigInt64Array) {
+ Store<int64_t>::Do(isolate, source, index, bigint);
+ return *bigint;
}
+ DCHECK(sta->type() == kExternalBigUint64Array);
+ Store<uint64_t>::Do(isolate, source, index, bigint);
+ return *bigint;
+}
- UNREACHABLE();
+RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
+ return GetModifySetValueInBuffer<Exchange>(args, isolate);
}
-// ES #sec-atomics.sub
-// Atomics.sub( typedArray, index, value )
-RUNTIME_FUNCTION(Runtime_AtomicsSub) {
+RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, old_value_obj, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, new_value_obj, 3);
CHECK(sta->GetBuffer()->is_shared());
CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(sta->byte_offset());
+ sta->byte_offset();
+
+ if (sta->type() >= kExternalBigInt64Array) {
+ Handle<BigInt> old_bigint;
+ Handle<BigInt> new_bigint;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, old_bigint, BigInt::FromObject(isolate, old_value_obj));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj));
+ // SharedArrayBuffers are not neuterable.
+ CHECK_LT(index, NumberToSize(sta->length()));
+ if (sta->type() == kExternalBigInt64Array) {
+ return DoCompareExchange<int64_t>(isolate, source, index, old_bigint,
+ new_bigint);
+ }
+ DCHECK(sta->type() == kExternalBigUint64Array);
+ return DoCompareExchange<uint64_t>(isolate, source, index, old_bigint,
+ new_bigint);
+ }
+
+ Handle<Object> old_value;
+ Handle<Object> new_value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, old_value,
+ Object::ToInteger(isolate, old_value_obj));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value,
+ Object::ToInteger(isolate, new_value_obj));
+ // SharedArrayBuffers are not neuterable.
+ CHECK_LT(index, NumberToSize(sta->length()));
switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
- case kExternal##Type##Array: \
- return DoSub<ctype>(isolate, source, index, value);
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
+ case kExternal##Type##Array: \
+ return DoCompareExchange<ctype>(isolate, source, index, old_value, \
+ new_value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -361,94 +492,60 @@ RUNTIME_FUNCTION(Runtime_AtomicsSub) {
UNREACHABLE();
}
+// ES #sec-atomics.add
+// Atomics.add( typedArray, index, value )
+RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
+ return GetModifySetValueInBuffer<Add>(args, isolate);
+}
+
+// ES #sec-atomics.sub
+// Atomics.sub( typedArray, index, value )
+RUNTIME_FUNCTION(Runtime_AtomicsSub) {
+ return GetModifySetValueInBuffer<Sub>(args, isolate);
+}
+
// ES #sec-atomics.and
// Atomics.and( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
- CHECK(sta->GetBuffer()->is_shared());
- CHECK_LT(index, NumberToSize(sta->length()));
-
- uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(sta->byte_offset());
-
- switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
- case kExternal##Type##Array: \
- return DoAnd<ctype>(isolate, source, index, value);
-
- INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- default:
- break;
- }
-
- UNREACHABLE();
+ return GetModifySetValueInBuffer<And>(args, isolate);
}
// ES #sec-atomics.or
// Atomics.or( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
- CHECK(sta->GetBuffer()->is_shared());
- CHECK_LT(index, NumberToSize(sta->length()));
+ return GetModifySetValueInBuffer<Or>(args, isolate);
+}
- uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(sta->byte_offset());
+// ES #sec-atomics.xor
+// Atomics.xor( typedArray, index, value )
+RUNTIME_FUNCTION(Runtime_AtomicsXor) {
+ return GetModifySetValueInBuffer<Xor>(args, isolate);
+}
- switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
- case kExternal##Type##Array: \
- return DoOr<ctype>(isolate, source, index, value);
+#undef INTEGER_TYPED_ARRAYS
- INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+#else
- default:
- break;
- }
+RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { UNREACHABLE(); }
- UNREACHABLE();
-}
+RUNTIME_FUNCTION(Runtime_AtomicsStore64) { UNREACHABLE(); }
-// ES #sec-atomics.xor
-// Atomics.xor( typedArray, index, value )
-RUNTIME_FUNCTION(Runtime_AtomicsXor) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
- CHECK(sta->GetBuffer()->is_shared());
- CHECK_LT(index, NumberToSize(sta->length()));
+RUNTIME_FUNCTION(Runtime_AtomicsExchange) { UNREACHABLE(); }
- uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(sta->byte_offset());
+RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { UNREACHABLE(); }
- switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
- case kExternal##Type##Array: \
- return DoXor<ctype>(isolate, source, index, value);
+RUNTIME_FUNCTION(Runtime_AtomicsAdd) { UNREACHABLE(); }
- INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+RUNTIME_FUNCTION(Runtime_AtomicsSub) { UNREACHABLE(); }
- default:
- break;
- }
+RUNTIME_FUNCTION(Runtime_AtomicsAnd) { UNREACHABLE(); }
- UNREACHABLE();
-}
+RUNTIME_FUNCTION(Runtime_AtomicsOr) { UNREACHABLE(); }
-#undef INTEGER_TYPED_ARRAYS
+RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
+
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
+ // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
} // namespace internal
} // namespace v8
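
The consolidated GetModifySetValueInBuffer<Op> template above gives every read-modify-write Atomics operation (Add, Sub, And, Or, Xor, Exchange) one shared body. A minimal standalone sketch of the pattern those runtime functions implement, using plain C++11 atomics instead of V8's internal Load/Store helpers (AtomicModify and its parameters are illustrative names, not V8 API):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Apply 'op' atomically to the typed-array element at 'index' and return the
// value seen before the update -- the value that Atomics.add/sub/and/or/xor/
// exchange hand back to JavaScript.
template <typename T, typename Op>
T AtomicModify(uint8_t* backing_store, size_t byte_offset, size_t index,
               T operand, Op op) {
  auto* cell =
      reinterpret_cast<std::atomic<T>*>(backing_store + byte_offset) + index;
  T old = cell->load();
  // CAS loop: compare_exchange_weak refreshes 'old' on failure, so the loop
  // retries until no other thread raced with us.
  while (!cell->compare_exchange_weak(old, op(old, operand))) {
  }
  return old;
}

An Atomics.add over an Int32Array would then look like AtomicModify<int32_t>(store, offset, i, v, [](int32_t a, int32_t b) { return a + b; }).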
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index d4fb0df3c3..0aea983f41 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -731,9 +731,9 @@ MaybeHandle<Object> StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
SuperMode::kStore, name, 0),
Object);
LookupIterator it(receiver, name, holder);
- MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED),
- MaybeHandle<Object>());
+ MAYBE_RETURN(
+ Object::SetSuperProperty(&it, value, language_mode, StoreOrigin::kNamed),
+ MaybeHandle<Object>());
return value;
}
@@ -750,7 +750,7 @@ MaybeHandle<Object> StoreElementToSuper(Isolate* isolate,
Object);
LookupIterator it(isolate, receiver, index, holder);
MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
- Object::MAY_BE_STORE_FROM_KEYED),
+ StoreOrigin::kMaybeKeyed),
MaybeHandle<Object>());
return value;
}
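
These hunks are part of a mechanical migration visible throughout this diff: the old Object::CERTAINLY_NOT_STORE_FROM_KEYED / Object::MAY_BE_STORE_FROM_KEYED flags become a StoreOrigin enum. A sketch of the correspondence the call sites imply (the enum's real definition lives elsewhere in V8; this is only the mapping):

// Assumed shape of the new parameter type, inferred from the call sites:
enum class StoreOrigin { kMaybeKeyed, kNamed };
// Object::CERTAINLY_NOT_STORE_FROM_KEYED  ->  StoreOrigin::kNamed
// Object::MAY_BE_STORE_FROM_KEYED         ->  StoreOrigin::kMaybeKeyed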
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 6c64802963..03a24139f3 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -39,16 +39,6 @@ RUNTIME_FUNCTION(Runtime_SetShrink) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_SetIteratorClone) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
- return *isolate->factory()->NewJSSetIterator(
- handle(holder->map(), isolate),
- handle(OrderedHashSet::cast(holder->table()), isolate),
- Smi::ToInt(holder->index()));
-}
-
RUNTIME_FUNCTION(Runtime_MapShrink) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -69,25 +59,6 @@ RUNTIME_FUNCTION(Runtime_MapGrow) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_MapIteratorClone) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
- return *isolate->factory()->NewJSMapIterator(
- handle(holder->map(), isolate),
- handle(OrderedHashMap::cast(holder->table()), isolate),
- Smi::ToInt(holder->index()));
-}
-
-RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
- CONVERT_NUMBER_CHECKED(int, max_entries, Int32, args[1]);
- CHECK_GE(max_entries, 0);
- return *JSWeakCollection::GetEntries(holder, max_entries);
-}
-
RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -110,15 +81,6 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
return isolate->heap()->ToBoolean(was_present);
}
-RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
- CONVERT_NUMBER_CHECKED(int, max_values, Int32, args[1]);
- CHECK_GE(max_values, 0);
- return *JSWeakCollection::GetEntries(holder, max_values);
-}
-
RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index e459da4da3..102f89ac14 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -14,13 +14,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_IsDate) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSDate());
-}
-
RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index c1dc4ec9df..4381fa6dcf 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -450,8 +450,8 @@ RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
RUNTIME_FUNCTION(Runtime_CollectGarbage) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask,
- GarbageCollectionReason::kRuntime);
+ isolate->heap()->PreciseCollectAllGarbage(Heap::kNoGCFlags,
+ GarbageCollectionReason::kRuntime);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -641,11 +641,6 @@ RUNTIME_FUNCTION(Runtime_DebugPopPromise) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DebugIsActive) {
- SealHandleScope shs(isolate);
- return Smi::FromInt(isolate->debug()->is_active());
-}
-
namespace {
Handle<JSObject> MakeRangeObject(Isolate* isolate, const CoverageBlock& range) {
Factory* factory = isolate->factory();
@@ -810,5 +805,19 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchScript) {
}
return ReadOnlyRoots(isolate).undefined_value();
}
+
+RUNTIME_FUNCTION(Runtime_PerformSideEffectCheckForObject) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+
+ DCHECK_EQ(isolate->debug_execution_mode(), DebugInfo::kSideEffects);
+ if (!isolate->debug()->PerformSideEffectCheckForObject(object)) {
+ DCHECK(isolate->has_pending_exception());
+ return ReadOnlyRoots(isolate).exception();
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 22f4a4fb48..769ccc528b 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -12,20 +12,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_FunctionGetName) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
- if (function->IsJSBoundFunction()) {
- RETURN_RESULT_OR_FAILURE(
- isolate, JSBoundFunction::GetName(
- isolate, Handle<JSBoundFunction>::cast(function)));
- } else {
- return *JSFunction::GetName(isolate, Handle<JSFunction>::cast(function));
- }
-}
-
// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_FunctionGetScriptSource) {
HandleScope scope(isolate);
@@ -87,64 +73,6 @@ RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
}
-RUNTIME_FUNCTION(Runtime_SetCode) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, source, 1);
-
- Handle<SharedFunctionInfo> target_shared(target->shared(), isolate);
- Handle<SharedFunctionInfo> source_shared(source->shared(), isolate);
-
- if (!source->is_compiled() &&
- !Compiler::Compile(source, Compiler::KEEP_EXCEPTION)) {
- return ReadOnlyRoots(isolate).exception();
- }
-
- // Set the function data, scope info, formal parameter count, and the length
- // of the target shared function info.
- target_shared->set_function_data(source_shared->function_data());
- target_shared->set_length(source_shared->GetLength());
- target_shared->set_raw_outer_scope_info_or_feedback_metadata(
- source_shared->raw_outer_scope_info_or_feedback_metadata());
- target_shared->set_internal_formal_parameter_count(
- source_shared->internal_formal_parameter_count());
- bool was_native = target_shared->native();
- target_shared->set_flags(source_shared->flags());
- target_shared->set_native(was_native);
- target_shared->set_scope_info(source_shared->scope_info());
-
- Handle<Object> source_script(source_shared->script(), isolate);
- int function_literal_id = source_shared->FunctionLiteralId(isolate);
- if (source_script->IsScript()) {
- SharedFunctionInfo::SetScript(source_shared,
- isolate->factory()->undefined_value(),
- function_literal_id);
- }
- SharedFunctionInfo::SetScript(target_shared, source_script,
- function_literal_id);
-
- // Set the code of the target function.
- target->set_code(source_shared->GetCode());
- Handle<Context> context(source->context(), isolate);
- target->set_context(*context);
-
- // Make sure we get a fresh copy of the feedback vector to avoid cross
- // context contamination, and that the feedback vector makes it's way into
- // the target_shared optimized code map.
- JSFunction::EnsureFeedbackVector(target);
-
- if (isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling()) {
- isolate->logger()->LogExistingFunction(
- source_shared, handle(source_shared->abstract_code(), isolate));
- }
-
- return *target;
-}
-
-
// Set the native flag on the function.
// This is used to decide if we should transform null and undefined
// into the global object when doing call and apply.
@@ -162,14 +90,6 @@ RUNTIME_FUNCTION(Runtime_SetNativeFlag) {
}
-RUNTIME_FUNCTION(Runtime_IsConstructor) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, object, 0);
- return isolate->heap()->ToBoolean(object->IsConstructor());
-}
-
-
RUNTIME_FUNCTION(Runtime_Call) {
HandleScope scope(isolate);
DCHECK_LE(2, args.length());
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index 3c9a90fbbd..c891b6582c 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -23,12 +23,13 @@ RUNTIME_FUNCTION(Runtime_AtomicsNumWaitersForTesting) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CHECK(!sta->WasNeutered());
CHECK(sta->GetBuffer()->is_shared());
CHECK_LT(index, NumberToSize(sta->length()));
CHECK_EQ(sta->type(), kExternalInt32Array);
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = (index << 2) + NumberToSize(sta->byte_offset());
+ size_t addr = (index << 2) + sta->byte_offset();
return FutexEmulation::NumWaitersForTesting(array_buffer, addr);
}
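
The dropped NumberToSize call matches the rest of this diff, where typed-array byte offsets now come back as plain size_t. The address arithmetic itself is unchanged; every Int32Array element is four bytes, hence the shift. As a sketch:

#include <cstddef>

// Byte address of element 'index' in an Int32Array view: each lane is
// 4 bytes, so (index << 2) is exactly index * sizeof(int32_t).
size_t FutexAddr(size_t byte_offset, size_t index) {
  return (index << 2) + byte_offset;
}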
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 636aa63879..9d652599c1 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -53,12 +53,6 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
return generator->function();
}
-RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
- // Runtime call is implemented in InterpreterIntrinsics and lowered in
- // JSIntrinsicLowering
- UNREACHABLE();
-}
-
RUNTIME_FUNCTION(Runtime_AsyncGeneratorResolve) {
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index c98b27da27..8c227a1703 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -19,6 +19,7 @@
#include "src/parsing/parsing.h"
#include "src/runtime/runtime-utils.h"
#include "src/snapshot/snapshot.h"
+#include "src/string-builder-inl.h"
namespace v8 {
namespace internal {
@@ -343,6 +344,31 @@ bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
return false;
}
+Handle<String> BuildDefaultCallSite(Isolate* isolate, Handle<Object> object) {
+ IncrementalStringBuilder builder(isolate);
+
+ builder.AppendString(Object::TypeOf(isolate, object));
+ if (object->IsString()) {
+ builder.AppendCString(" \"");
+ builder.AppendString(Handle<String>::cast(object));
+ builder.AppendCString("\"");
+ } else if (object->IsNull(isolate)) {
+ builder.AppendCString(" ");
+ builder.AppendString(isolate->factory()->null_string());
+ } else if (object->IsTrue(isolate)) {
+ builder.AppendCString(" ");
+ builder.AppendString(isolate->factory()->true_string());
+ } else if (object->IsFalse(isolate)) {
+ builder.AppendCString(" ");
+ builder.AppendString(isolate->factory()->false_string());
+ } else if (object->IsNumber()) {
+ builder.AppendCString(" ");
+ builder.AppendString(isolate->factory()->NumberToString(object));
+ }
+
+ return builder.Finish().ToHandleChecked();
+}
+
Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
CallPrinter::ErrorHint* hint) {
MessageLocation location;
@@ -358,7 +384,7 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
isolate->clear_pending_exception();
}
}
- return Object::TypeOf(isolate, object);
+ return BuildDefaultCallSite(isolate, object);
}
MessageTemplate::Template UpdateErrorTemplate(
@@ -388,11 +414,11 @@ MaybeHandle<Object> Runtime::ThrowIteratorError(Isolate* isolate,
Handle<Object> object) {
CallPrinter::ErrorHint hint = CallPrinter::kNone;
Handle<String> callsite = RenderCallSite(isolate, object, &hint);
- MessageTemplate::Template id = MessageTemplate::kNonObjectPropertyLoad;
+ MessageTemplate::Template id = MessageTemplate::kNotIterableNoSymbolLoad;
if (hint == CallPrinter::kNone) {
Handle<Symbol> iterator_symbol = isolate->factory()->iterator_symbol();
- THROW_NEW_ERROR(isolate, NewTypeError(id, iterator_symbol, callsite),
+ THROW_NEW_ERROR(isolate, NewTypeError(id, callsite, iterator_symbol),
Object);
}
@@ -400,6 +426,14 @@ MaybeHandle<Object> Runtime::ThrowIteratorError(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewTypeError(id, callsite), Object);
}
+RUNTIME_FUNCTION(Runtime_ThrowIteratorError) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ Runtime::ThrowIteratorError(isolate, object));
+}
+
RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -476,6 +510,12 @@ RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
HandleScope scope(isolate);
+
+ // Append any worker thread runtime call stats to the main table before
+ // printing.
+ isolate->counters()->worker_thread_runtime_call_stats()->AddToMainTable(
+ isolate->counters()->runtime_call_stats());
+
if (args.length() == 0) {
// Without arguments, the result is returned as a string.
DCHECK_EQ(0, args.length());
@@ -591,5 +631,14 @@ RUNTIME_FUNCTION(Runtime_ReportMessage) {
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_GetInitializerFunction) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, constructor, 0);
+ Handle<Symbol> key = isolate->factory()->class_fields_symbol();
+ Handle<Object> initializer = JSReceiver::GetDataProperty(constructor, key);
+ return *initializer;
+}
} // namespace internal
} // namespace v8
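
BuildDefaultCallSite above enriches the old Object::TypeOf fallback: for primitives, the rendered call site now carries the value as well as its type. Reading the branches, a string "foo" would render as string "foo", null as object null (typeof null being "object"), true as boolean true, and 42 as number 42; other objects stay type-only. A plain-C++ sketch of that shape (illustrative only; V8 builds these with IncrementalStringBuilder over heap strings):

#include <string>

// Mirror of the branch structure above: strings are quoted, the other
// rendered primitives are appended after a space, everything else is
// reported by type alone.
std::string DefaultCallSite(const std::string& type_of,
                            const std::string& value, bool quote) {
  if (value.empty()) return type_of;
  return quote ? type_of + " \"" + value + "\"" : type_of + " " + value;
}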
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 7f07d084a1..e87feac361 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -23,7 +23,6 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_InterpreterDeserializeLazy) {
HandleScope scope(isolate);
- DCHECK(FLAG_lazy_handler_deserialization);
DCHECK(FLAG_lazy_deserialization);
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(bytecode_int, 0);
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index ad75952824..32e7a46b6e 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -22,8 +22,10 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collator-inl.h"
+#include "src/objects/js-date-time-format-inl.h"
#include "src/objects/js-list-format-inl.h"
#include "src/objects/js-list-format.h"
+#include "src/objects/js-number-format-inl.h"
#include "src/objects/js-plural-rules-inl.h"
#include "src/objects/managed.h"
#include "src/runtime/runtime-utils.h"
@@ -42,7 +44,6 @@
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
#include "unicode/plurrule.h"
-#include "unicode/rbbi.h"
#include "unicode/smpdtfmt.h"
#include "unicode/timezone.h"
#include "unicode/uchar.h"
@@ -77,40 +78,6 @@ RUNTIME_FUNCTION(Runtime_FormatListToParts) {
isolate, JSListFormat::FormatListToParts(isolate, list_format, list));
}
-RUNTIME_FUNCTION(Runtime_GetNumberOption) {
- HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, options, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, property, 1);
- CONVERT_SMI_ARG_CHECKED(min, 2);
- CONVERT_SMI_ARG_CHECKED(max, 3);
- CONVERT_SMI_ARG_CHECKED(fallback, 4);
-
- Maybe<int> num =
- Intl::GetNumberOption(isolate, options, property, min, max, fallback);
- if (num.IsNothing()) {
- return ReadOnlyRoots(isolate).exception();
- }
- return Smi::FromInt(num.FromJust());
-}
-
-RUNTIME_FUNCTION(Runtime_DefaultNumberOption) {
- HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- CONVERT_SMI_ARG_CHECKED(min, 1);
- CONVERT_SMI_ARG_CHECKED(max, 2);
- CONVERT_SMI_ARG_CHECKED(fallback, 3);
- CONVERT_ARG_HANDLE_CHECKED(String, property, 4);
-
- Maybe<int> num =
- Intl::DefaultNumberOption(isolate, value, min, max, fallback, property);
- if (num.IsNothing()) {
- return ReadOnlyRoots(isolate).exception();
- }
- return Smi::FromInt(num.FromJust());
-}
-
// ECMA 402 6.2.3
RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
HandleScope scope(isolate);
@@ -143,340 +110,6 @@ RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
Intl::DefaultLocale(isolate).c_str());
}
-RUNTIME_FUNCTION(Runtime_IsWellFormedCurrencyCode) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, currency, 0);
- return *(isolate->factory()->ToBoolean(
- Intl::IsWellFormedCurrencyCode(isolate, currency)));
-}
-
-RUNTIME_FUNCTION(Runtime_DefineWEProperty) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- Intl::DefineWEProperty(isolate, target, key, value);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- CONVERT_SMI_ARG_CHECKED(expected_type_int, 1);
-
- Intl::Type expected_type = Intl::TypeFromInt(expected_type_int);
-
- return isolate->heap()->ToBoolean(
- Intl::IsObjectOfType(isolate, input, expected_type));
-}
-
-RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, type, 1);
-
-#ifdef DEBUG
- // TypeFromSmi does correctness checks.
- Intl::Type type_intl = Intl::TypeFromSmi(*type);
- USE(type_intl);
-#endif
-
- Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
- JSObject::SetProperty(isolate, input, marker, type, LanguageMode::kStrict)
- .Assert();
-
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(3, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
-
- Handle<JSFunction> constructor(
- isolate->native_context()->intl_date_time_format_function(), isolate);
-
- Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
- JSObject::New(constructor, constructor));
-
- // Set date time formatter as embedder field of the resulting JS object.
- icu::SimpleDateFormat* date_format =
- DateFormat::InitializeDateTimeFormat(isolate, locale, options, resolved);
- CHECK_NOT_NULL(date_format);
-
- local_object->SetEmbedderField(DateFormat::kSimpleDateFormatIndex,
- reinterpret_cast<Smi*>(date_format));
-
- // Make object handle weak so we can delete the data format once GC kicks in.
- Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
- DateFormat::DeleteDateFormat,
- WeakCallbackType::kInternalFields);
- return *local_object;
-}
-
-RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(3, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
- RETURN_RESULT_OR_FAILURE(
- isolate, Intl::CreateNumberFormat(isolate, locale, options, resolved));
-}
-
-RUNTIME_FUNCTION(Runtime_CurrencyDigits) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, currency, 0);
- return *Intl::CurrencyDigits(isolate, currency);
-}
-
-RUNTIME_FUNCTION(Runtime_CollatorResolvedOptions) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, collator_obj, 0);
-
- // 3. If pr does not have an [[InitializedCollator]] internal
- // slot, throw a TypeError exception.
- if (!collator_obj->IsJSCollator()) {
- Handle<String> method_str = isolate->factory()->NewStringFromStaticChars(
- "Intl.Collator.prototype.resolvedOptions");
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- method_str, collator_obj));
- }
-
- Handle<JSCollator> collator = Handle<JSCollator>::cast(collator_obj);
-
- return *JSCollator::ResolvedOptions(isolate, collator);
-}
-
-RUNTIME_FUNCTION(Runtime_PluralRulesResolvedOptions) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, plural_rules_obj, 0);
-
- // 3. If pr does not have an [[InitializedPluralRules]] internal
- // slot, throw a TypeError exception.
- if (!plural_rules_obj->IsJSPluralRules()) {
- Handle<String> method_str = isolate->factory()->NewStringFromStaticChars(
- "Intl.PluralRules.prototype.resolvedOptions");
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- method_str, plural_rules_obj));
- }
-
- Handle<JSPluralRules> plural_rules =
- Handle<JSPluralRules>::cast(plural_rules_obj);
-
- return *JSPluralRules::ResolvedOptions(isolate, plural_rules);
-}
-
-RUNTIME_FUNCTION(Runtime_ParseExtension) {
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, extension, 0);
- std::map<std::string, std::string> map;
- Intl::ParseExtension(isolate, std::string(extension->ToCString().get()), map);
- Handle<JSObject> extension_map =
- isolate->factory()->NewJSObjectWithNullProto();
- for (std::map<std::string, std::string>::iterator it = map.begin();
- it != map.end(); it++) {
- JSObject::AddProperty(
- isolate, extension_map,
- factory->NewStringFromAsciiChecked(it->first.c_str()),
- factory->NewStringFromAsciiChecked(it->second.c_str()), NONE);
- }
- return *extension_map;
-}
-
-RUNTIME_FUNCTION(Runtime_PluralRulesSelect) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, plural_rules_obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
-
- // 3. If pr does not have an [[InitializedPluralRules]] internal
- // slot, throw a TypeError exception.
- if (!plural_rules_obj->IsJSPluralRules()) {
- Handle<String> method_str = isolate->factory()->NewStringFromStaticChars(
- "Intl.PluralRules.prototype.select");
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- method_str, plural_rules_obj));
- }
-
- Handle<JSPluralRules> plural_rules =
- Handle<JSPluralRules>::cast(plural_rules_obj);
-
- // 4. Return ? ResolvePlural(pr, n).
-
- RETURN_RESULT_OR_FAILURE(
- isolate, JSPluralRules::ResolvePlural(isolate, plural_rules, number));
-}
-
-RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(3, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
-
- Handle<JSFunction> constructor(
- isolate->native_context()->intl_v8_break_iterator_function(), isolate);
-
- Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
- JSObject::New(constructor, constructor));
-
- // Set break iterator as embedder field of the resulting JS object.
- icu::BreakIterator* break_iterator = V8BreakIterator::InitializeBreakIterator(
- isolate, locale, options, resolved);
- CHECK_NOT_NULL(break_iterator);
-
- if (!break_iterator) return isolate->ThrowIllegalOperation();
-
- local_object->SetEmbedderField(V8BreakIterator::kBreakIteratorIndex,
- reinterpret_cast<Smi*>(break_iterator));
- // Make sure that the pointer to adopted text is nullptr.
- local_object->SetEmbedderField(V8BreakIterator::kUnicodeStringIndex,
- static_cast<Smi*>(nullptr));
-
- // Make object handle weak so we can delete the break iterator once GC kicks
- // in.
- Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
- V8BreakIterator::DeleteBreakIterator,
- WeakCallbackType::kInternalFields);
- return *local_object;
-}
-
-RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
-
- icu::BreakIterator* break_iterator =
- V8BreakIterator::UnpackBreakIterator(break_iterator_holder);
- CHECK_NOT_NULL(break_iterator);
-
- return *isolate->factory()->NewNumberFromInt(break_iterator->first());
-}
-
-RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
-
- icu::BreakIterator* break_iterator =
- V8BreakIterator::UnpackBreakIterator(break_iterator_holder);
- CHECK_NOT_NULL(break_iterator);
-
- return *isolate->factory()->NewNumberFromInt(break_iterator->next());
-}
-
-RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
-
- icu::BreakIterator* break_iterator =
- V8BreakIterator::UnpackBreakIterator(break_iterator_holder);
- CHECK_NOT_NULL(break_iterator);
-
- return *isolate->factory()->NewNumberFromInt(break_iterator->current());
-}
-
-RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
-
- icu::BreakIterator* break_iterator =
- V8BreakIterator::UnpackBreakIterator(break_iterator_holder);
- CHECK_NOT_NULL(break_iterator);
-
- // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
- icu::RuleBasedBreakIterator* rule_based_iterator =
- static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
- int32_t status = rule_based_iterator->getRuleStatus();
- // Keep return values in sync with JavaScript BreakType enum.
- if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
- return *isolate->factory()->NewStringFromStaticChars("none");
- } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
- return ReadOnlyRoots(isolate).number_string();
- } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
- return *isolate->factory()->NewStringFromStaticChars("letter");
- } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
- return *isolate->factory()->NewStringFromStaticChars("kana");
- } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
- return *isolate->factory()->NewStringFromStaticChars("ideo");
- } else {
- return *isolate->factory()->NewStringFromStaticChars("unknown");
- }
-}
-
-RUNTIME_FUNCTION(Runtime_ToLocaleDateTime) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(6, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(Object, date, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, locales, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, options, 2);
- CONVERT_ARG_HANDLE_CHECKED(String, required, 3);
- CONVERT_ARG_HANDLE_CHECKED(String, defaults, 4);
- CONVERT_ARG_HANDLE_CHECKED(String, service, 5);
-
- RETURN_RESULT_OR_FAILURE(
- isolate, DateFormat::ToLocaleDateTime(
- isolate, date, locales, options, required->ToCString().get(),
- defaults->ToCString().get(), service->ToCString().get()));
-}
-
-RUNTIME_FUNCTION(Runtime_ToDateTimeOptions) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 3);
- CONVERT_ARG_HANDLE_CHECKED(Object, options, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, required, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, defaults, 2);
- RETURN_RESULT_OR_FAILURE(
- isolate, DateFormat::ToDateTimeOptions(isolate, options,
- required->ToCString().get(),
- defaults->ToCString().get()));
-}
-
RUNTIME_FUNCTION(Runtime_StringToLowerCaseIntl) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
@@ -511,33 +144,5 @@ RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
return date_cache_version->get(0);
}
-RUNTIME_FUNCTION(Runtime_IntlUnwrapReceiver) {
- HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_SMI_ARG_CHECKED(type_int, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 2);
- CONVERT_ARG_HANDLE_CHECKED(String, method, 3);
- CONVERT_BOOLEAN_ARG_CHECKED(check_legacy_constructor, 4);
-
- RETURN_RESULT_OR_FAILURE(
- isolate, Intl::UnwrapReceiver(isolate, receiver, constructor,
- Intl::TypeFromInt(type_int), method,
- check_legacy_constructor));
-}
-
-RUNTIME_FUNCTION(Runtime_SupportedLocalesOf) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(args.length(), 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, locales, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, options, 2);
-
- RETURN_RESULT_OR_FAILURE(
- isolate, Intl::SupportedLocalesOf(isolate, service, locales, options));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index d5111f7efa..8632388388 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/allocation-site-scopes.h"
+#include "src/allocation-site-scopes-inl.h"
#include "src/arguments-inl.h"
#include "src/ast/ast.h"
#include "src/isolate-inl.h"
@@ -497,7 +497,8 @@ MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
Handle<HeapObject> description, int flags) {
FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
CHECK(literals_slot.ToInt() < vector->length());
- Handle<Object> literal_site(vector->Get(literals_slot)->ToObject(), isolate);
+ Handle<Object> literal_site(vector->Get(literals_slot)->cast<Object>(),
+ isolate);
DeepCopyHints copy_hints = DecodeCopyHints(flags);
Handle<AllocationSite> site;
@@ -597,7 +598,8 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
FeedbackSlot literal_slot(FeedbackVector::ToSlot(index));
// Check if boilerplate exists. If not, create it first.
- Handle<Object> literal_site(vector->Get(literal_slot)->ToObject(), isolate);
+ Handle<Object> literal_site(vector->Get(literal_slot)->cast<Object>(),
+ isolate);
Handle<Object> boilerplate;
if (!HasBoilerplate(literal_site)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
deleted file mode 100644
index 7695c14657..0000000000
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/base/utils/random-number-generator.h"
-#include "src/bootstrapper.h"
-#include "src/counters.h"
-#include "src/double.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
-
- Handle<Context> native_context = isolate->native_context();
- DCHECK_EQ(0, native_context->math_random_index()->value());
-
- static const int kCacheSize = 64;
- static const int kState0Offset = kCacheSize - 1;
- static const int kState1Offset = kState0Offset - 1;
- // The index is decremented before used to access the cache.
- static const int kInitialIndex = kState1Offset;
-
- Handle<FixedDoubleArray> cache;
- uint64_t state0 = 0;
- uint64_t state1 = 0;
- if (native_context->math_random_cache()->IsFixedDoubleArray()) {
- cache = Handle<FixedDoubleArray>(
- FixedDoubleArray::cast(native_context->math_random_cache()), isolate);
- state0 = double_to_uint64(cache->get_scalar(kState0Offset));
- state1 = double_to_uint64(cache->get_scalar(kState1Offset));
- } else {
- cache = Handle<FixedDoubleArray>::cast(
- isolate->factory()->NewFixedDoubleArray(kCacheSize, TENURED));
- native_context->set_math_random_cache(*cache);
- // Initialize state if not yet initialized. If a fixed random seed was
- // requested, use it to reset our state the first time a script asks for
- // random numbers in this context. This ensures the script sees a consistent
- // sequence.
- if (FLAG_random_seed != 0) {
- state0 = FLAG_random_seed;
- state1 = FLAG_random_seed;
- } else {
- while (state0 == 0 || state1 == 0) {
- isolate->random_number_generator()->NextBytes(&state0, sizeof(state0));
- isolate->random_number_generator()->NextBytes(&state1, sizeof(state1));
- }
- }
- }
-
- DisallowHeapAllocation no_gc;
- FixedDoubleArray* raw_cache = *cache;
- // Create random numbers.
- for (int i = 0; i < kInitialIndex; i++) {
- // Generate random numbers using xorshift128+.
- base::RandomNumberGenerator::XorShift128(&state0, &state1);
- raw_cache->set(i, base::RandomNumberGenerator::ToDouble(state0, state1));
- }
-
- // Persist current state.
- raw_cache->set(kState0Offset, uint64_to_double(state0));
- raw_cache->set(kState1Offset, uint64_to_double(state1));
- return Smi::FromInt(kInitialIndex);
-}
-} // namespace internal
-} // namespace v8
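
The deleted Runtime_GenerateRandomNumbers refilled Math.random's per-context cache with xorshift128+ and stashed the generator state in the last two cache slots. For reference, a self-contained rendering of that generator; the step function uses the classic xorshift128+ constants, and ToDouble shows one common variant of the fill-the-mantissa trick (my sketch of the base::RandomNumberGenerator helpers, not a verbatim copy):

#include <cstdint>
#include <cstring>

// One xorshift128+ step, as invoked by the deleted cache-refill loop.
inline void XorShift128(uint64_t* state0, uint64_t* state1) {
  uint64_t s1 = *state0;
  uint64_t s0 = *state1;
  *state0 = s0;
  s1 ^= s1 << 23;
  s1 ^= s1 >> 17;
  s1 ^= s0;
  s1 ^= s0 >> 26;
  *state1 = s1;
}

// Map the state into [0, 1): keep 52 random mantissa bits under the exponent
// of the [1.0, 2.0) range (0x3FF), then shift down by subtracting 1.0.
inline double ToDouble(uint64_t state0, uint64_t state1) {
  uint64_t bits = ((state0 + state1) & uint64_t{0xFFFFFFFFFFFFF}) |
                  uint64_t{0x3FF0000000000000};
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result - 1.0;
}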
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 14b91c8f1b..a8f62099a4 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -82,80 +82,16 @@ RUNTIME_FUNCTION(Runtime_NumberToString) {
// -1 if x < y
// 0 if x == y
// 1 if x > y
+// TODO(szuend): Remove once the call-site in src/js/array.js is gone.
RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(x_value, 0);
- CONVERT_SMI_ARG_CHECKED(y_value, 1);
-
- // If the integers are equal so are the string representations.
- if (x_value == y_value) return Smi::FromInt(0);
-
- // If one of the integers is zero the normal integer order is the
- // same as the lexicographic order of the string representations.
- if (x_value == 0 || y_value == 0)
- return Smi::FromInt(x_value < y_value ? -1 : 1);
-
- // If only one of the integers is negative the negative number is
- // smallest because the char code of '-' is less than the char code
- // of any digit. Otherwise, we make both values positive.
-
- // Use unsigned values otherwise the logic is incorrect for -MIN_INT on
- // architectures using 32-bit Smis.
- uint32_t x_scaled = x_value;
- uint32_t y_scaled = y_value;
- if (x_value < 0 || y_value < 0) {
- if (y_value >= 0) return Smi::FromInt(-1);
- if (x_value >= 0) return Smi::FromInt(1);
- x_scaled = -x_value;
- y_scaled = -y_value;
- }
-
- static const uint32_t kPowersOf10[] = {
- 1, 10, 100, 1000,
- 10 * 1000, 100 * 1000, 1000 * 1000, 10 * 1000 * 1000,
- 100 * 1000 * 1000, 1000 * 1000 * 1000};
-
- // If the integers have the same number of decimal digits they can be
- // compared directly as the numeric order is the same as the
- // lexicographic order. If one integer has fewer digits, it is scaled
- // by some power of 10 to have the same number of digits as the longer
- // integer. If the scaled integers are equal it means the shorter
- // integer comes first in the lexicographic order.
-
- // From http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
- int x_log2 = 31 - base::bits::CountLeadingZeros(x_scaled);
- int x_log10 = ((x_log2 + 1) * 1233) >> 12;
- x_log10 -= x_scaled < kPowersOf10[x_log10];
-
- int y_log2 = 31 - base::bits::CountLeadingZeros(y_scaled);
- int y_log10 = ((y_log2 + 1) * 1233) >> 12;
- y_log10 -= y_scaled < kPowersOf10[y_log10];
-
- int tie = 0;
-
- if (x_log10 < y_log10) {
- // X has fewer digits. We would like to simply scale up X but that
- // might overflow, e.g when comparing 9 with 1_000_000_000, 9 would
- // be scaled up to 9_000_000_000. So we scale up by the next
- // smallest power and scale down Y to drop one digit. It is OK to
- // drop one digit from the longer integer since the final digit is
- // past the length of the shorter integer.
- x_scaled *= kPowersOf10[y_log10 - x_log10 - 1];
- y_scaled /= 10;
- tie = -1;
- } else if (y_log10 < x_log10) {
- y_scaled *= kPowersOf10[x_log10 - y_log10 - 1];
- x_scaled /= 10;
- tie = 1;
- }
+ CONVERT_ARG_CHECKED(Smi, x_value, 0);
+ CONVERT_ARG_CHECKED(Smi, y_value, 1);
- if (x_scaled < y_scaled) return Smi::FromInt(-1);
- if (x_scaled > y_scaled) return Smi::FromInt(1);
- return Smi::FromInt(tie);
+ return Smi::LexicographicCompare(isolate, x_value, y_value);
}
-
RUNTIME_FUNCTION(Runtime_MaxSmi) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
@@ -184,6 +120,5 @@ RUNTIME_FUNCTION(Runtime_GetHoleNaNLower) {
return *isolate->factory()->NewNumberFromUint(kHoleNanLower32);
}
-
} // namespace internal
} // namespace v8
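
The comparison body moves into Smi::LexicographicCompare, but the deleted code is the best documentation of the trick: numbers with the same digit count compare lexicographically exactly as they compare numerically; otherwise the shorter number is scaled up by one power of ten less than the gap (and the longer one drops its last digit), so a tie afterwards means the shorter decimal string is a prefix, and a prefix sorts first. A standalone sketch for positive 32-bit values, following the deleted logic:

#include <cstdint>

// Lexicographic compare of the decimal representations of two positive
// 32-bit ints, per the digit-scaling scheme above. Returns -1, 0, or 1.
int LexCompare(uint32_t x, uint32_t y) {
  if (x == y) return 0;
  auto digits = [](uint32_t v) {
    int n = 1;
    while (v >= 10) { v /= 10; ++n; }
    return n;
  };
  static const uint32_t kPow10[] = {1, 10, 100, 1000, 10000, 100000,
                                    1000000, 10000000, 100000000, 1000000000};
  int dx = digits(x), dy = digits(y);
  int tie = 0;
  if (dx < dy) {
    // Scale x by one power less than the gap and drop y's final digit, so
    // the multiply cannot overflow and a tie means x is a prefix of y.
    x *= kPow10[dy - dx - 1];
    y /= 10;
    tie = -1;  // the prefix sorts first
  } else if (dy < dx) {
    y *= kPow10[dx - dy - 1];
    x /= 10;
    tie = 1;
  }
  if (x < y) return -1;
  if (x > y) return 1;
  return tie;
}

For example, LexCompare(9, 1000000000) returns 1 because "9" sorts after "1000000000", while LexCompare(12, 123) returns -1 because "12" is a prefix of "123".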
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 81478b0e1b..3778e0576c 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -49,95 +49,6 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
return result;
}
-static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
- Handle<Object> receiver_obj,
- Handle<Object> key_obj) {
- // Fast cases for getting named properties of the receiver JSObject
- // itself.
- //
- // The global proxy objects has to be excluded since LookupOwn on
- // the global proxy object can return a valid result even though the
- // global proxy object never has properties. This is the case
- // because the global proxy object forwards everything to its hidden
- // prototype including own lookups.
- //
- // Additionally, we need to make sure that we do not cache results
- // for objects that require access checks.
-
- // Convert string-index keys to their number variant to avoid internalization
- // below; and speed up subsequent conversion to index.
- uint32_t index;
- if (key_obj->IsString() && String::cast(*key_obj)->AsArrayIndex(&index)) {
- key_obj = isolate->factory()->NewNumberFromUint(index);
- }
- if (receiver_obj->IsJSObject()) {
- if (!receiver_obj->IsJSGlobalProxy() &&
- !receiver_obj->IsAccessCheckNeeded() && key_obj->IsName()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
- Handle<Name> key = Handle<Name>::cast(key_obj);
- key_obj = key = isolate->factory()->InternalizeName(key);
-
- DisallowHeapAllocation no_allocation;
- if (receiver->IsJSGlobalObject()) {
- // Attempt dictionary lookup.
- GlobalDictionary* dictionary =
- JSGlobalObject::cast(*receiver)->global_dictionary();
- int entry = dictionary->FindEntry(isolate, key);
- if (entry != GlobalDictionary::kNotFound) {
- PropertyCell* cell = dictionary->CellAt(entry);
- if (cell->property_details().kind() == kData) {
- Object* value = cell->value();
- if (!value->IsTheHole(isolate)) {
- return Handle<Object>(value, isolate);
- }
- // If value is the hole (meaning, absent) do the general lookup.
- }
- }
- } else if (!receiver->HasFastProperties()) {
- // Attempt dictionary lookup.
- NameDictionary* dictionary = receiver->property_dictionary();
- int entry = dictionary->FindEntry(isolate, key);
- if ((entry != NameDictionary::kNotFound) &&
- (dictionary->DetailsAt(entry).kind() == kData)) {
- Object* value = dictionary->ValueAt(entry);
- return Handle<Object>(value, isolate);
- }
- }
- } else if (key_obj->IsSmi()) {
- // JSObject without a name key. If the key is a Smi, check for a
- // definite out-of-bounds access to elements, which is a strong indicator
- // that subsequent accesses will also call the runtime. Proactively
- // transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
- // doubles for those future calls in the case that the elements would
- // become PACKED_DOUBLE_ELEMENTS.
- Handle<JSObject> js_object = Handle<JSObject>::cast(receiver_obj);
- ElementsKind elements_kind = js_object->GetElementsKind();
- if (IsDoubleElementsKind(elements_kind)) {
- if (Smi::ToInt(*key_obj) >= js_object->elements()->length()) {
- elements_kind = IsHoleyElementsKind(elements_kind) ? HOLEY_ELEMENTS
- : PACKED_ELEMENTS;
- JSObject::TransitionElementsKind(js_object, elements_kind);
- }
- } else {
- DCHECK(IsSmiOrObjectElementsKind(elements_kind) ||
- !IsFastElementsKind(elements_kind));
- }
- }
- } else if (receiver_obj->IsString() && key_obj->IsSmi()) {
- // Fast case for string indexing using [] with a smi index.
- Handle<String> str = Handle<String>::cast(receiver_obj);
- int index = Handle<Smi>::cast(key_obj)->value();
- if (index >= 0 && index < str->length()) {
- Factory* factory = isolate->factory();
- return factory->LookupSingleCharacterStringFromCode(
- String::Flatten(isolate, str)->Get(index));
- }
- }
-
- // Fall back to GetObjectProperty.
- return Runtime::GetObjectProperty(isolate, receiver_obj, key_obj);
-}
-
namespace {
bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
@@ -431,7 +342,8 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
- LanguageMode language_mode) {
+ LanguageMode language_mode,
+ StoreOrigin store_origin) {
if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR(
isolate,
@@ -453,17 +365,10 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Object);
}
- MAYBE_RETURN_NULL(Object::SetProperty(&it, value, language_mode,
- Object::MAY_BE_STORE_FROM_KEYED));
- return value;
-}
-
+ MAYBE_RETURN_NULL(
+ Object::SetProperty(&it, value, language_mode, store_origin));
-RUNTIME_FUNCTION(Runtime_GetPrototype) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
- RETURN_RESULT_OR_FAILURE(isolate, JSReceiver::GetPrototype(isolate, obj));
+ return value;
}
@@ -561,24 +466,91 @@ RUNTIME_FUNCTION(Runtime_ObjectEntriesSkipFastPath) {
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
-
- RETURN_RESULT_OR_FAILURE(isolate,
- Runtime::GetObjectProperty(isolate, object, key));
-}
+ // Fast cases for getting named properties of the receiver JSObject
+ // itself.
+ //
+ // The global proxy object has to be excluded since LookupOwn on
+ // the global proxy object can return a valid result even though the
+ // global proxy object never has properties. This is the case
+ // because the global proxy object forwards everything to its hidden
+ // prototype including own lookups.
+ //
+ // Additionally, we need to make sure that we do not cache results
+ // for objects that require access checks.
-// KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
-RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ // Convert string-index keys to their number variant to avoid internalization
+ // below; and speed up subsequent conversion to index.
+ uint32_t index;
+ if (key_obj->IsString() && String::cast(*key_obj)->AsArrayIndex(&index)) {
+ key_obj = isolate->factory()->NewNumberFromUint(index);
+ }
+ if (receiver_obj->IsJSObject()) {
+ if (!receiver_obj->IsJSGlobalProxy() &&
+ !receiver_obj->IsAccessCheckNeeded() && key_obj->IsName()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
+ Handle<Name> key = Handle<Name>::cast(key_obj);
+ key_obj = key = isolate->factory()->InternalizeName(key);
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
+ DisallowHeapAllocation no_allocation;
+ if (receiver->IsJSGlobalObject()) {
+ // Attempt dictionary lookup.
+ GlobalDictionary* dictionary =
+ JSGlobalObject::cast(*receiver)->global_dictionary();
+ int entry = dictionary->FindEntry(isolate, key);
+ if (entry != GlobalDictionary::kNotFound) {
+ PropertyCell* cell = dictionary->CellAt(entry);
+ if (cell->property_details().kind() == kData) {
+ Object* value = cell->value();
+ if (!value->IsTheHole(isolate)) return value;
+ // If value is the hole (meaning, absent) do the general lookup.
+ }
+ }
+ } else if (!receiver->HasFastProperties()) {
+ // Attempt dictionary lookup.
+ NameDictionary* dictionary = receiver->property_dictionary();
+ int entry = dictionary->FindEntry(isolate, key);
+ if ((entry != NameDictionary::kNotFound) &&
+ (dictionary->DetailsAt(entry).kind() == kData)) {
+ return dictionary->ValueAt(entry);
+ }
+ }
+ } else if (key_obj->IsSmi()) {
+ // JSObject without a name key. If the key is a Smi, check for a
+ // definite out-of-bounds access to elements, which is a strong indicator
+ // that subsequent accesses will also call the runtime. Proactively
+ // transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
+ // doubles for those future calls in the case that the elements would
+ // become PACKED_DOUBLE_ELEMENTS.
+ Handle<JSObject> js_object = Handle<JSObject>::cast(receiver_obj);
+ ElementsKind elements_kind = js_object->GetElementsKind();
+ if (IsDoubleElementsKind(elements_kind)) {
+ if (Smi::ToInt(*key_obj) >= js_object->elements()->length()) {
+ elements_kind = IsHoleyElementsKind(elements_kind) ? HOLEY_ELEMENTS
+ : PACKED_ELEMENTS;
+ JSObject::TransitionElementsKind(js_object, elements_kind);
+ }
+ } else {
+ DCHECK(IsSmiOrObjectElementsKind(elements_kind) ||
+ !IsFastElementsKind(elements_kind));
+ }
+ }
+ } else if (receiver_obj->IsString() && key_obj->IsSmi()) {
+ // Fast case for string indexing using [] with a smi index.
+ Handle<String> str = Handle<String>::cast(receiver_obj);
+ int index = Handle<Smi>::cast(key_obj)->value();
+ if (index >= 0 && index < str->length()) {
+ Factory* factory = isolate->factory();
+ return *factory->LookupSingleCharacterStringFromCode(
+ String::Flatten(isolate, str)->Get(index));
+ }
+ }
+ // Fall back to GetObjectProperty.
RETURN_RESULT_OR_FAILURE(
- isolate, KeyedGetObjectProperty(isolate, receiver_obj, key_obj));
+ isolate, Runtime::GetObjectProperty(isolate, receiver_obj, key_obj));
}
RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
@@ -634,8 +606,7 @@ RUNTIME_FUNCTION(Runtime_AddElement) {
object, index, value, NONE));
}
-
-RUNTIME_FUNCTION(Runtime_SetProperty) {
+RUNTIME_FUNCTION(Runtime_SetKeyedProperty) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -646,9 +617,48 @@ RUNTIME_FUNCTION(Runtime_SetProperty) {
RETURN_RESULT_OR_FAILURE(
isolate,
- Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
+ Runtime::SetObjectProperty(isolate, object, key, value, language_mode,
+ StoreOrigin::kMaybeKeyed));
+}
+
+RUNTIME_FUNCTION(Runtime_SetNamedProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Runtime::SetObjectProperty(isolate, object, key, value,
+ language_mode, StoreOrigin::kNamed));
}
+// Similar to DefineDataPropertyInLiteral, but does not update feedback, and
+// does not have a flags parameter for performing SetFunctionName().
+//
+// Currently, this is used for ObjectLiteral spread properties.
+RUNTIME_FUNCTION(Runtime_StoreDataPropertyInLiteral) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, key, &success, LookupIterator::OWN);
+
+ Maybe<bool> result =
+ JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE, kDontThrow);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ DCHECK(result.IsJust());
+ USE(result);
+
+ return *value;
+}
namespace {
@@ -755,7 +765,9 @@ RUNTIME_FUNCTION(Runtime_NewObject) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, 1);
- RETURN_RESULT_OR_FAILURE(isolate, JSObject::New(target, new_target));
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
}
RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
@@ -905,15 +917,6 @@ RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
}
-RUNTIME_FUNCTION(Runtime_ValueOf) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- if (!obj->IsJSValue()) return obj;
- return JSValue::cast(obj)->value();
-}
-
-
RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -1114,22 +1117,6 @@ RUNTIME_FUNCTION(Runtime_ToObject) {
UNREACHABLE();
}
-RUNTIME_FUNCTION(Runtime_ToPrimitive) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- RETURN_RESULT_OR_FAILURE(isolate, Object::ToPrimitive(input));
-}
-
-
-RUNTIME_FUNCTION(Runtime_ToPrimitive_Number) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- RETURN_RESULT_OR_FAILURE(
- isolate, Object::ToPrimitive(input, ToPrimitiveHint::kNumber));
-}
-
RUNTIME_FUNCTION(Runtime_ToNumber) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -1144,13 +1131,6 @@ RUNTIME_FUNCTION(Runtime_ToNumeric) {
RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumeric(isolate, input));
}
-RUNTIME_FUNCTION(Runtime_ToInteger) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- RETURN_RESULT_OR_FAILURE(isolate, Object::ToInteger(isolate, input));
-}
-
RUNTIME_FUNCTION(Runtime_ToLength) {
HandleScope scope(isolate);
@@ -1175,24 +1155,6 @@ RUNTIME_FUNCTION(Runtime_ToName) {
RETURN_RESULT_OR_FAILURE(isolate, Object::ToName(isolate, input));
}
-
-RUNTIME_FUNCTION(Runtime_SameValue) {
- SealHandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(Object, x, 0);
- CONVERT_ARG_CHECKED(Object, y, 1);
- return isolate->heap()->ToBoolean(x->SameValue(y));
-}
-
-
-RUNTIME_FUNCTION(Runtime_SameValueZero) {
- SealHandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(Object, x, 0);
- CONVERT_ARG_CHECKED(Object, y, 1);
- return isolate->heap()->ToBoolean(x->SameValueZero(y));
-}
-
RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -1264,7 +1226,7 @@ RUNTIME_FUNCTION(Runtime_AddPrivateField) {
}
CHECK(Object::AddDataProperty(&it, value, NONE, kDontThrow,
- Object::MAY_BE_STORE_FROM_KEYED)
+ StoreOrigin::kMaybeKeyed)
.FromJust());
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 7eeee631be..69b7c9795c 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -77,8 +77,8 @@ RUNTIME_FUNCTION(Runtime_SetPropertyWithReceiver) {
DCHECK(isolate->has_pending_exception());
return ReadOnlyRoots(isolate).exception();
}
- Maybe<bool> result = Object::SetSuperProperty(
- &it, value, language_mode, Object::MAY_BE_STORE_FROM_KEYED);
+ Maybe<bool> result = Object::SetSuperProperty(&it, value, language_mode,
+ StoreOrigin::kMaybeKeyed);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 3e77bf1f3b..e66319bfb5 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -38,7 +38,7 @@ uint32_t GetArgcForReplaceCallable(uint32_t num_captures,
// Looks up the capture of the given name. Returns the (1-based) numbered
// capture index or -1 on failure.
-int LookupNamedCapture(std::function<bool(String*)> name_matches,
+int LookupNamedCapture(const std::function<bool(String*)>& name_matches,
FixedArray* capture_name_map) {
// TODO(jgruber): Sort capture_name_map and do binary search via
// internalized strings.
@@ -1097,7 +1097,7 @@ class VectorBackedMatch : public String::Match {
// RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo).
Handle<JSObject> ConstructNamedCaptureGroupsObject(
Isolate* isolate, Handle<FixedArray> capture_map,
- std::function<Object*(int)> f_get_capture) {
+ const std::function<Object*(int)>& f_get_capture) {
Handle<JSObject> groups = isolate->factory()->NewJSObjectWithNullProto();
const int capture_count = capture_map->length() >> 1;
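
Both signature changes in this file swap std::function parameters from pass-by-value to pass-by-const-reference; copying a std::function may heap-allocate when its captures outgrow the small-buffer optimization, so taking a reference avoids that cost on every named-capture lookup (my reading of the motivation). A minimal illustration of the pattern:

#include <functional>
#include <string>
#include <vector>

// Taking the predicate by const reference means no std::function copy (and
// no potential allocation) per call, unlike a by-value parameter.
int CountMatches(const std::vector<std::string>& names,
                 const std::function<bool(const std::string&)>& matches) {
  int count = 0;
  for (const auto& name : names) {
    if (matches(name)) ++count;
  }
  return count;
}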
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 4772f400b3..7a24b066c1 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -158,8 +158,8 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
FeedbackSlot feedback_cells_slot(
Smi::ToInt(*possibly_feedback_cell_slot));
Handle<FeedbackCell> feedback_cell(
- FeedbackCell::cast(
- feedback_vector->Get(feedback_cells_slot)->ToStrongHeapObject()),
+ FeedbackCell::cast(feedback_vector->Get(feedback_cells_slot)
+ ->GetHeapObjectAssumeStrong()),
isolate);
Handle<JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -461,8 +461,7 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
return result;
}
-
-class HandleArguments BASE_EMBEDDED {
+class HandleArguments {
public:
explicit HandleArguments(Handle<Object>* array) : array_(array) {}
Object* operator[](int index) { return *array_[index]; }
@@ -471,8 +470,7 @@ class HandleArguments BASE_EMBEDDED {
Handle<Object>* array_;
};
-
-class ParameterArguments BASE_EMBEDDED {
+class ParameterArguments {
public:
explicit ParameterArguments(Object** parameters) : parameters_(parameters) {}
Object*& operator[](int index) { return *(parameters_ - index - 1); }
@@ -803,6 +801,8 @@ MaybeHandle<Object> LoadLookupSlot(Isolate* isolate, Handle<String> name,
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
if (!holder.is_null() && holder->IsModule()) {
+ Handle<Object> receiver = isolate->factory()->undefined_value();
+ if (receiver_return) *receiver_return = receiver;
return Module::LoadVariable(isolate, Handle<Module>::cast(holder), index);
}
if (index != Context::kNotFound) {
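
The LoadLookupSlot fix above makes the module-variable path populate the receiver out-parameter (with undefined) instead of returning early without touching it. A standalone sketch of the invariant being restored, with purely illustrative names:

    #include <string>

    // Every successful exit path must fill the receiver out-parameter; the bug
    // was an early return that skipped it. Names here are hypothetical.
    bool LoadVariable(const std::string& name, std::string* value_out,
                      std::string* receiver_out) {
      if (name == "module_var") {
        if (receiver_out) *receiver_out = "undefined";  // previously missing
        *value_out = "42";
        return true;
      }
      return false;
    }
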
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index f6537fd073..d57959687c 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -581,7 +581,8 @@ static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
elements->set(i, value, mode);
}
if (i < length) {
- static_assert(Smi::kZero == 0, "Can use memset since Smi::kZero is 0");
+ static_assert(Smi::kZero == nullptr,
+ "Can use memset since Smi::kZero is 0");
memset(elements->data_start() + i, 0, kPointerSize * (length - i));
}
#ifdef DEBUG
@@ -693,14 +694,6 @@ RUNTIME_FUNCTION(Runtime_StringEqual) {
return isolate->heap()->ToBoolean(String::Equals(isolate, x, y));
}
-RUNTIME_FUNCTION(Runtime_StringNotEqual) {
- HandleScope handle_scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
- return isolate->heap()->ToBoolean(!String::Equals(isolate, x, y));
-}
-
RUNTIME_FUNCTION(Runtime_FlattenString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -708,17 +701,6 @@ RUNTIME_FUNCTION(Runtime_FlattenString) {
return *String::Flatten(isolate, str);
}
-RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
- HandleScope handlescope(isolate);
- DCHECK_EQ(1, args.length());
- if (args[0]->IsNumber()) {
- CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
- code &= 0xFFFF;
- return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
- }
- return ReadOnlyRoots(isolate).empty_string();
-}
-
RUNTIME_FUNCTION(Runtime_StringMaxLength) {
SealHandleScope shs(isolate);
return Smi::FromInt(String::kMaxLength);
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 94376e1364..bcc36e9d87 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -10,6 +10,7 @@
#include "src/api-inl.h"
#include "src/arguments-inl.h"
#include "src/assembler-inl.h"
+#include "src/base/platform/mutex.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
#include "src/deoptimizer.h"
@@ -25,6 +26,9 @@
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-serialization.h"
+namespace v8 {
+namespace internal {
+
namespace {
struct WasmCompileControls {
uint32_t MaxWasmBufferSize = std::numeric_limits<uint32_t>::max();
@@ -32,14 +36,16 @@ struct WasmCompileControls {
};
// We need per-isolate controls, because we sometimes run tests in multiple
-// isolates
-// concurrently.
+// isolates concurrently. Methods need to hold the accompanying mutex on access.
// To avoid upsetting the static initializer count, we lazy initialize this.
-v8::base::LazyInstance<std::map<v8::Isolate*, WasmCompileControls>>::type
+base::LazyInstance<std::map<v8::Isolate*, WasmCompileControls>>::type
g_PerIsolateWasmControls = LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<base::Mutex>::type g_PerIsolateWasmControlsMutex =
+ LAZY_INSTANCE_INITIALIZER;
bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
bool is_async) {
+ base::LockGuard<base::Mutex> guard(g_PerIsolateWasmControlsMutex.Pointer());
DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
return (is_async && ctrls.AllowAnySizeForAsync) ||
@@ -52,6 +58,7 @@ bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
bool IsWasmInstantiateAllowed(v8::Isolate* isolate,
v8::Local<v8::Value> module_or_bytes,
bool is_async) {
+ base::LockGuard<base::Mutex> guard(g_PerIsolateWasmControlsMutex.Pointer());
DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
if (is_async && ctrls.AllowAnySizeForAsync) return true;
@@ -91,9 +98,6 @@ bool WasmInstanceOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
} // namespace
-namespace v8 {
-namespace internal {
-
RUNTIME_FUNCTION(Runtime_ConstructDouble) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -477,6 +481,7 @@ RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
CHECK_EQ(args.length(), 2);
CONVERT_ARG_HANDLE_CHECKED(Smi, block_size, 0);
CONVERT_BOOLEAN_ARG_CHECKED(allow_async, 1);
+ base::LockGuard<base::Mutex> guard(g_PerIsolateWasmControlsMutex.Pointer());
WasmCompileControls& ctrl = (*g_PerIsolateWasmControls.Pointer())[v8_isolate];
ctrl.AllowAnySizeForAsync = allow_async;
ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
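
The hunks above guard the per-isolate WasmCompileControls map with a lazily initialized mutex, since tests may run several isolates concurrently. A standalone sketch of the locking discipline, with std types standing in for the v8::base equivalents:

    #include <map>
    #include <mutex>

    std::map<int, int> g_controls;  // stands in for g_PerIsolateWasmControls
    std::mutex g_controls_mutex;    // stands in for the LazyInstance<Mutex>

    // Every reader and writer takes the same lock, so concurrent isolates
    // cannot race on the shared map.
    void SetControl(int isolate_id, int value) {
      std::lock_guard<std::mutex> guard(g_controls_mutex);
      g_controls[isolate_id] = value;
    }

    int GetControl(int isolate_id) {
      std::lock_guard<std::mutex> guard(g_controls_mutex);
      return g_controls.at(isolate_id);
    }
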
@@ -533,17 +538,18 @@ RUNTIME_FUNCTION(Runtime_DebugPrint) {
MaybeObject* maybe_object = reinterpret_cast<MaybeObject*>(args[0]);
StdoutStream os;
- if (maybe_object->IsClearedWeakHeapObject()) {
+ if (maybe_object->IsCleared()) {
os << "[weak cleared]";
} else {
Object* object;
+ HeapObject* heap_object;
bool weak = false;
- if (maybe_object->IsWeakHeapObject()) {
+ if (maybe_object->GetHeapObjectIfWeak(&heap_object)) {
weak = true;
- object = maybe_object->ToWeakHeapObject();
+ object = heap_object;
} else {
// Strong reference or SMI.
- object = maybe_object->ToObject();
+ object = maybe_object->cast<Object>();
}
#ifdef DEBUG
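
The DebugPrint hunk above tracks the MaybeObject API rename: separate Is.../To... pairs become combined query-and-extract calls (GetHeapObjectIfWeak) plus IsCleared. A standalone analogy using std::weak_ptr:

    #include <memory>

    // Combined test-and-extract, mirroring GetHeapObjectIfWeak: the caller
    // receives the strong reference and the liveness answer in one step.
    template <typename T>
    bool GetIfAlive(const std::weak_ptr<T>& maybe, std::shared_ptr<T>* out) {
      if (auto strong = maybe.lock()) {  // cleared weak refs lock() to null
        *out = std::move(strong);
        return true;
      }
      return false;  // analogous to the "[weak cleared]" branch above
    }
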
@@ -830,12 +836,39 @@ RUNTIME_FUNCTION(Runtime_IsWasmTrapHandlerEnabled) {
}
RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
- HandleScope shs(isolate);
+ HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
size_t trap_count = trap_handler::GetRecoveredTrapCount();
return *isolate->factory()->NewNumberFromSize(trap_count);
}
+RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, exception, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 1);
+ Handle<Object> tag;
+ if (JSReceiver::GetProperty(isolate, exception,
+ isolate->factory()->wasm_exception_tag_symbol())
+ .ToHandle(&tag)) {
+ Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate);
+ for (int index = 0; index < exceptions_table->length(); ++index) {
+ if (exceptions_table->get(index) == *tag) return Smi::FromInt(index);
+ }
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, exception, 0);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSReceiver::GetProperty(
+ isolate, exception,
+ isolate->factory()->wasm_exception_values_symbol()));
+}
+
namespace {
bool EnableWasmThreads(v8::Local<v8::Context> context) { return true; }
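
Runtime_GetWasmExceptionId above recovers an exception's index by scanning the instance's exceptions table for an identical tag object. A standalone sketch of that lookup:

    #include <vector>

    // Linear scan comparing identity (pointer equality), not structural
    // equality -- matching the `exceptions_table->get(index) == *tag` test.
    int LookupTagIndex(const std::vector<const void*>& exceptions_table,
                       const void* tag) {
      for (int i = 0; i < static_cast<int>(exceptions_table.size()); ++i) {
        if (exceptions_table[i] == tag) return i;
      }
      return -1;  // the runtime function maps this case to 'undefined'
    }
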
@@ -902,6 +935,13 @@ RUNTIME_FUNCTION(Runtime_PromiseSpeciesProtector) {
isolate->IsPromiseSpeciesLookupChainIntact());
}
+RUNTIME_FUNCTION(Runtime_StringIteratorProtector) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(
+ isolate->IsStringIteratorLookupChainIntact());
+}
+
// Take a compiled wasm module and serialize it into an array buffer, which is
// then returned.
RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
@@ -937,9 +977,9 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
wasm::DeserializeNativeModule(
isolate,
{reinterpret_cast<uint8_t*>(buffer->backing_store()),
- static_cast<size_t>(buffer->byte_length()->Number())},
+ buffer->byte_length()},
{reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
- static_cast<size_t>(wire_bytes->byte_length()->Number())});
+ wire_bytes->byte_length()});
Handle<WasmModuleObject> module_object;
if (!maybe_module_object.ToHandle(&module_object)) {
return ReadOnlyRoots(isolate).undefined_value();
@@ -971,7 +1011,7 @@ RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) {
int instance_count = 0;
WeakArrayList* weak_instance_list = module_obj->weak_instance_list();
for (int i = 0; i < weak_instance_list->length(); ++i) {
- if (weak_instance_list->Get(i)->IsWeakHeapObject()) instance_count++;
+ if (weak_instance_list->Get(i)->IsWeak()) instance_count++;
}
return Smi::FromInt(instance_count);
}
@@ -980,7 +1020,7 @@ RUNTIME_FUNCTION(Runtime_WasmNumInterpretedCalls) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- if (!instance->has_debug_info()) return 0;
+ if (!instance->has_debug_info()) return nullptr;
uint64_t num = instance->debug_info()->NumInterpretedCalls();
return *isolate->factory()->NewNumberFromSize(static_cast<size_t>(num));
}
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index c101219d2c..8a9d6fe366 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -30,14 +30,14 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
return ReadOnlyRoots(isolate).undefined_value();
}
if (array_buffer->backing_store() == nullptr) {
- CHECK_EQ(Smi::kZero, array_buffer->byte_length());
+ CHECK_EQ(0, array_buffer->byte_length());
return ReadOnlyRoots(isolate).undefined_value();
}
// Shared array buffers should never be neutered.
CHECK(!array_buffer->is_shared());
DCHECK(!array_buffer->is_external());
void* backing_store = array_buffer->backing_store();
- size_t byte_length = NumberToSize(array_buffer->byte_length());
+ size_t byte_length = array_buffer->byte_length();
array_buffer->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*array_buffer);
array_buffer->Neuter();
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 5a6c782292..f852df0d85 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -24,41 +24,28 @@ namespace internal {
namespace {
-WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
+Context* GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
StackFrameIterator it(isolate, isolate->thread_local_top());
// On top: C entry stub.
DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
it.Advance();
- // Next: the wasm (compiled or interpreted) frame.
- WasmInstanceObject* result = nullptr;
- if (it.frame()->is_wasm_compiled()) {
- result = WasmCompiledFrame::cast(it.frame())->wasm_instance();
- } else {
- DCHECK(it.frame()->is_wasm_interpreter_entry());
- result = WasmInterpreterEntryFrame::cast(it.frame())->wasm_instance();
- }
- return result;
-}
-
-Context* GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
- return GetWasmInstanceOnStackTop(isolate)->native_context();
+ // Next: the wasm compiled frame.
+ DCHECK(it.frame()->is_wasm_compiled());
+ WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
+ return frame->wasm_instance()->native_context();
}
class ClearThreadInWasmScope {
public:
- explicit ClearThreadInWasmScope(bool coming_from_wasm)
- : coming_from_wasm_(coming_from_wasm) {
- DCHECK_EQ(trap_handler::IsTrapHandlerEnabled() && coming_from_wasm,
+ ClearThreadInWasmScope() {
+ DCHECK_EQ(trap_handler::IsTrapHandlerEnabled(),
trap_handler::IsThreadInWasm());
- if (coming_from_wasm) trap_handler::ClearThreadInWasm();
+ trap_handler::ClearThreadInWasm();
}
~ClearThreadInWasmScope() {
DCHECK(!trap_handler::IsThreadInWasm());
- if (coming_from_wasm_) trap_handler::SetThreadInWasm();
+ trap_handler::SetThreadInWasm();
}
-
- private:
- const bool coming_from_wasm_;
};
} // namespace
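
ClearThreadInWasmScope above loses its coming_from_wasm parameter: these runtime entries are now always reached from wasm code, so the guard flips the thread-in-wasm flag unconditionally. A standalone sketch of the RAII shape:

    // Clear on entry, restore on every exit path, with no conditional state
    // left to carry around.
    struct ClearFlagScope {
      explicit ClearFlagScope(bool* flag) : flag_(flag) { *flag_ = false; }
      ~ClearFlagScope() { *flag_ = true; }
      bool* const flag_;
    };

    void RuntimeEntry(bool* thread_in_wasm) {
      ClearFlagScope scope(thread_in_wasm);  // flag cleared here...
      // ... do runtime work outside of wasm ...
    }                                        // ...and restored here
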
@@ -72,11 +59,7 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
CONVERT_UINT32_ARG_CHECKED(delta_pages, 1);
// This runtime function is always being called from wasm code.
- ClearThreadInWasmScope flag_scope(true);
-
- // Set the current isolate's context.
- DCHECK_NULL(isolate->context());
- isolate->set_context(instance->native_context());
+ ClearThreadInWasmScope flag_scope;
int ret = WasmMemoryObject::Grow(
isolate, handle(instance->memory_object(), isolate), delta_pages);
@@ -88,11 +71,9 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(message_id, 0);
- ClearThreadInWasmScope clear_wasm_flag(isolate->context() == nullptr);
+ ClearThreadInWasmScope clear_wasm_flag;
HandleScope scope(isolate);
- DCHECK_NULL(isolate->context());
- isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
static_cast<MessageTemplate::Template>(message_id));
return isolate->Throw(*error_obj);
@@ -116,81 +97,73 @@ RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
// TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
DCHECK_NULL(isolate->context());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(HeapObject, tag_raw, 0);
+ CONVERT_SMI_ARG_CHECKED(size, 1);
+ // TODO(mstarzinger): Manually box because parameters are not visited yet.
+ Handle<Object> tag(tag_raw, isolate);
Handle<Object> exception = isolate->factory()->NewWasmRuntimeError(
static_cast<MessageTemplate::Template>(
MessageTemplate::kWasmExceptionError));
- isolate->set_wasm_caught_exception(*exception);
- CONVERT_ARG_HANDLE_CHECKED(Smi, id, 0);
- CHECK(!JSReceiver::SetProperty(isolate, exception,
- isolate->factory()->InternalizeUtf8String(
- wasm::WasmException::kRuntimeIdStr),
- id, LanguageMode::kStrict)
- .is_null());
- CONVERT_SMI_ARG_CHECKED(size, 1);
+ CHECK(
+ !JSReceiver::SetProperty(isolate, exception,
+ isolate->factory()->wasm_exception_tag_symbol(),
+ tag, LanguageMode::kStrict)
+ .is_null());
Handle<JSTypedArray> values =
isolate->factory()->NewJSTypedArray(ElementsKind::UINT16_ELEMENTS, size);
- CHECK(!JSReceiver::SetProperty(isolate, exception,
- isolate->factory()->InternalizeUtf8String(
- wasm::WasmException::kRuntimeValuesStr),
- values, LanguageMode::kStrict)
+ CHECK(!JSReceiver::SetProperty(
+ isolate, exception,
+ isolate->factory()->wasm_exception_values_symbol(), values,
+ LanguageMode::kStrict)
.is_null());
- return ReadOnlyRoots(isolate).undefined_value();
+ return *exception;
}
-RUNTIME_FUNCTION(Runtime_WasmThrow) {
- // TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
- HandleScope scope(isolate);
- DCHECK_NULL(isolate->context());
- isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- DCHECK_EQ(0, args.length());
- Handle<Object> exception(isolate->get_wasm_caught_exception(), isolate);
- CHECK(!exception.is_null());
- isolate->clear_wasm_caught_exception();
- return isolate->Throw(*exception);
-}
-
-RUNTIME_FUNCTION(Runtime_WasmGetExceptionRuntimeId) {
+RUNTIME_FUNCTION(Runtime_WasmExceptionGetTag) {
// TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
DCHECK_NULL(isolate->context());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- Handle<Object> except_obj(isolate->get_wasm_caught_exception(), isolate);
+ CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
+ // TODO(mstarzinger): Manually box because parameters are not visited yet.
+ Handle<Object> except_obj(except_obj_raw, isolate);
if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
Handle<JSReceiver> exception(JSReceiver::cast(*except_obj), isolate);
Handle<Object> tag;
if (JSReceiver::GetProperty(isolate, exception,
- isolate->factory()->InternalizeUtf8String(
- wasm::WasmException::kRuntimeIdStr))
+ isolate->factory()->wasm_exception_tag_symbol())
.ToHandle(&tag)) {
- if (tag->IsSmi()) {
- return *tag;
- }
+ return *tag;
}
}
- return Smi::FromInt(wasm::kInvalidExceptionTag);
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_WasmExceptionGetElement) {
// TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
DCHECK_NULL(isolate->context());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- DCHECK_EQ(1, args.length());
- Handle<Object> except_obj(isolate->get_wasm_caught_exception(), isolate);
+ CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
+ // TODO(mstarzinger): Manually box because parameters are not visited yet.
+ Handle<Object> except_obj(except_obj_raw, isolate);
if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
Handle<JSReceiver> exception(JSReceiver::cast(*except_obj), isolate);
Handle<Object> values_obj;
- if (JSReceiver::GetProperty(isolate, exception,
- isolate->factory()->InternalizeUtf8String(
- wasm::WasmException::kRuntimeValuesStr))
+ if (JSReceiver::GetProperty(
+ isolate, exception,
+ isolate->factory()->wasm_exception_values_symbol())
.ToHandle(&values_obj)) {
if (values_obj->IsJSTypedArray()) {
Handle<JSTypedArray> values = Handle<JSTypedArray>::cast(values_obj);
CHECK_EQ(values->type(), kExternalUint16Array);
- CONVERT_SMI_ARG_CHECKED(index, 0);
+ CONVERT_SMI_ARG_CHECKED(index, 1);
+ CHECK(!values->WasNeutered());
CHECK_LT(index, Smi::ToInt(values->length()));
auto* vals =
reinterpret_cast<uint16_t*>(values->GetBuffer()->backing_store());
@@ -204,23 +177,26 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetElement) {
RUNTIME_FUNCTION(Runtime_WasmExceptionSetElement) {
// TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(3, args.length());
DCHECK_NULL(isolate->context());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- Handle<Object> except_obj(isolate->get_wasm_caught_exception(), isolate);
+ CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
+ // TODO(mstarzinger): Manually box because parameters are not visited yet.
+ Handle<Object> except_obj(except_obj_raw, isolate);
if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
Handle<JSReceiver> exception(JSReceiver::cast(*except_obj), isolate);
Handle<Object> values_obj;
- if (JSReceiver::GetProperty(isolate, exception,
- isolate->factory()->InternalizeUtf8String(
- wasm::WasmException::kRuntimeValuesStr))
+ if (JSReceiver::GetProperty(
+ isolate, exception,
+ isolate->factory()->wasm_exception_values_symbol())
.ToHandle(&values_obj)) {
if (values_obj->IsJSTypedArray()) {
Handle<JSTypedArray> values = Handle<JSTypedArray>::cast(values_obj);
CHECK_EQ(values->type(), kExternalUint16Array);
- CONVERT_SMI_ARG_CHECKED(index, 0);
+ CONVERT_SMI_ARG_CHECKED(index, 1);
+ CHECK(!values->WasNeutered());
CHECK_LT(index, Smi::ToInt(values->length()));
- CONVERT_SMI_ARG_CHECKED(value, 1);
+ CONVERT_SMI_ARG_CHECKED(value, 2);
auto* vals =
reinterpret_cast<uint16_t*>(values->GetBuffer()->backing_store());
vals[index] = static_cast<uint16_t>(value);
@@ -235,8 +211,6 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
HandleScope scope(isolate);
CONVERT_NUMBER_CHECKED(int32_t, func_index, Int32, args[0]);
CONVERT_ARG_HANDLE_CHECKED(Object, arg_buffer_obj, 1);
- Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate),
- isolate);
// The arg buffer is the raw pointer to the caller's stack. It looks like a
  // Smi (lowest bit not set, as checked by IsSmi), but is not a valid Smi. We
@@ -245,13 +219,10 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
CHECK(arg_buffer_obj->IsSmi());
Address arg_buffer = reinterpret_cast<Address>(*arg_buffer_obj);
- ClearThreadInWasmScope wasm_flag(true);
-
- // Set the current isolate's context.
- DCHECK_NULL(isolate->context());
- isolate->set_context(instance->native_context());
+ ClearThreadInWasmScope wasm_flag;
- // Find the frame pointer of the interpreter entry.
+ // Find the frame pointer and instance of the interpreter frame on the stack.
+ Handle<WasmInstanceObject> instance;
Address frame_pointer = 0;
{
StackFrameIterator it(isolate, isolate->thread_local_top());
@@ -260,9 +231,15 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
it.Advance();
// Next: the wasm interpreter entry.
DCHECK_EQ(StackFrame::WASM_INTERPRETER_ENTRY, it.frame()->type());
+ instance = handle(
+ WasmInterpreterEntryFrame::cast(it.frame())->wasm_instance(), isolate);
frame_pointer = it.frame()->fp();
}
+ // Set the current isolate's context.
+ DCHECK_NULL(isolate->context());
+ isolate->set_context(instance->native_context());
+
// Run the function in the interpreter. Note that neither the {WasmDebugInfo}
// nor the {InterpreterHandle} have to exist, because interpretation might
// have been triggered by another Isolate sharing the same WasmEngine.
@@ -284,11 +261,7 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
DCHECK(!trap_handler::IsTrapHandlerEnabled() ||
trap_handler::IsThreadInWasm());
- ClearThreadInWasmScope wasm_flag(true);
-
- // Set the current isolate's context.
- DCHECK_NULL(isolate->context());
- isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
+ ClearThreadInWasmScope wasm_flag;
// Check if this is a real stack overflow.
StackLimitCheck check(isolate);
@@ -303,7 +276,7 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_SMI_ARG_CHECKED(func_index, 1);
- ClearThreadInWasmScope wasm_flag(true);
+ ClearThreadInWasmScope wasm_flag;
#ifdef DEBUG
StackFrameIterator it(isolate, isolate->thread_local_top());
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index ec35131c90..d9d7d85664 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -45,9 +45,7 @@ FOR_EACH_INTRINSIC_RETURN_PAIR(P)
,
static const Runtime::Function kIntrinsicFunctions[] = {
- FOR_EACH_INTRINSIC(F)
- FOR_EACH_INTRINSIC(I)
-};
+ FOR_EACH_INTRINSIC(F) FOR_EACH_INLINE_INTRINSIC(I)};
#undef I
#undef F
@@ -98,6 +96,44 @@ void InitializeIntrinsicFunctionNames() {
} // namespace
+bool Runtime::NeedsExactContext(FunctionId id) {
+ switch (id) {
+ case Runtime::kAddPrivateField:
+ case Runtime::kCopyDataProperties:
+ case Runtime::kCreateDataProperty:
+ case Runtime::kCreatePrivateFieldSymbol:
+ case Runtime::kReThrow:
+ case Runtime::kThrow:
+ case Runtime::kThrowApplyNonFunction:
+ case Runtime::kThrowCalledNonCallable:
+ case Runtime::kThrowConstAssignError:
+ case Runtime::kThrowConstructorNonCallableError:
+ case Runtime::kThrowConstructedNonConstructable:
+ case Runtime::kThrowConstructorReturnedNonObject:
+ case Runtime::kThrowInvalidStringLength:
+ case Runtime::kThrowInvalidTypedArrayAlignment:
+ case Runtime::kThrowIteratorError:
+ case Runtime::kThrowIteratorResultNotAnObject:
+ case Runtime::kThrowNotConstructor:
+ case Runtime::kThrowRangeError:
+ case Runtime::kThrowReferenceError:
+ case Runtime::kThrowStackOverflow:
+ case Runtime::kThrowStaticPrototypeError:
+ case Runtime::kThrowSuperAlreadyCalledError:
+ case Runtime::kThrowSuperNotCalled:
+ case Runtime::kThrowSymbolAsyncIteratorInvalid:
+ case Runtime::kThrowSymbolIteratorInvalid:
+ case Runtime::kThrowThrowMethodMissing:
+ case Runtime::kThrowTypeError:
+ case Runtime::kThrowUnsupportedSuperError:
+ case Runtime::kThrowWasmError:
+ case Runtime::kThrowWasmStackOverflow:
+ return false;
+ default:
+ return true;
+ }
+}
+
bool Runtime::IsNonReturning(FunctionId id) {
switch (id) {
case Runtime::kThrowUnsupportedSuperError:
@@ -113,6 +149,7 @@ bool Runtime::IsNonReturning(FunctionId id) {
case Runtime::kThrowConstructorReturnedNonObject:
case Runtime::kThrowInvalidStringLength:
case Runtime::kThrowInvalidTypedArrayAlignment:
+ case Runtime::kThrowIteratorError:
case Runtime::kThrowIteratorResultNotAnObject:
case Runtime::kThrowThrowMethodMissing:
case Runtime::kThrowSymbolIteratorInvalid:
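
NeedsExactContext above is an allowlist-style predicate: the conservative default is true, and only intrinsics known to ignore the exact context (mostly throwers) are enumerated as false. A standalone sketch of the shape:

    enum class Id { kThrowTypeError, kLoadLookupSlot, kOther };

    // Defaulting to 'true' keeps newly added intrinsics safe until someone
    // proves they tolerate an arbitrary context in the same native context.
    bool NeedsExactContext(Id id) {
      switch (id) {
        case Id::kThrowTypeError:
          return false;  // raising an error works from any sibling context
        default:
          return true;   // conservative default (scoped lookups, etc.)
      }
    }
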
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 5a6364f644..f091d99092 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -17,10 +17,10 @@
namespace v8 {
namespace internal {
-// * Each intrinsic is consistently exposed in JavaScript via 2 names:
+// * Each intrinsic is exposed in JavaScript via:
// * %#name, which is always a runtime call.
-// * %_#name, which can be inlined or just a runtime call, the compiler in
-// question decides.
+// * (optionally) %_#name, which can be inlined or just a runtime call, the
+// compiler in question decides.
//
// * IntrinsicTypes are Runtime::RUNTIME and Runtime::INLINE, respectively.
//
@@ -31,53 +31,57 @@ namespace internal {
// * Each compiler has an explicit list of intrinsics it supports, falling back
// to a simple runtime call if necessary.
-
// Entries have the form F(name, number of arguments, number of values):
// A variable number of arguments is specified by a -1, additional restrictions
-// are specified by inline comments
-
-#define FOR_EACH_INTRINSIC_ARRAY(F) \
- F(ArrayIncludes_Slow, 3, 1) \
- F(ArrayIndexOf, 3, 1) \
- F(ArrayIsArray, 1, 1) \
- F(ArraySpeciesConstructor, 1, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- F(GetArrayKeys, 2, 1) \
- F(GrowArrayElements, 2, 1) \
- F(HasComplexElements, 1, 1) \
- F(IsArray, 1, 1) \
- F(MoveArrayContents, 2, 1) \
- F(NewArray, -1 /* >= 3 */, 1) \
- F(NormalizeElements, 1, 1) \
- F(PrepareElementsForSort, 2, 1) \
- F(TransitionElementsKind, 2, 1) \
+// are specified by inline comments. To declare only the runtime version (no
+// inline), use the F macro below. To declare the runtime version and the inline
+// version simultaneously, use the I macro below.
+
+#define FOR_EACH_INTRINSIC_ARRAY(F, I) \
+ F(ArrayIncludes_Slow, 3, 1) \
+ F(ArrayIndexOf, 3, 1) \
+ F(ArrayIsArray, 1, 1) \
+ F(ArraySpeciesConstructor, 1, 1) \
+ F(EstimateNumberOfElements, 1, 1) \
+ F(GetArrayKeys, 2, 1) \
+ F(GrowArrayElements, 2, 1) \
+ F(HasComplexElements, 1, 1) \
+ I(IsArray, 1, 1) \
+ F(MoveArrayContents, 2, 1) \
+ F(NewArray, -1 /* >= 3 */, 1) \
+ F(NormalizeElements, 1, 1) \
+ F(PrepareElementsForSort, 2, 1) \
+ F(TransitionElementsKind, 2, 1) \
+ F(TransitionElementsKindWithKind, 2, 1) \
F(TrySliceSimpleNonFastElements, 3, 1)
-#define FOR_EACH_INTRINSIC_ATOMICS(F) \
- F(AtomicsAdd, 3, 1) \
- F(AtomicsAnd, 3, 1) \
- F(AtomicsCompareExchange, 4, 1) \
- F(AtomicsExchange, 3, 1) \
- F(AtomicsNumWaitersForTesting, 2, 1) \
- F(AtomicsOr, 3, 1) \
- F(AtomicsSub, 3, 1) \
- F(AtomicsXor, 3, 1) \
+#define FOR_EACH_INTRINSIC_ATOMICS(F, I) \
+ F(AtomicsLoad64, 2, 1) \
+ F(AtomicsStore64, 3, 1) \
+ F(AtomicsAdd, 3, 1) \
+ F(AtomicsAnd, 3, 1) \
+ F(AtomicsCompareExchange, 4, 1) \
+ F(AtomicsExchange, 3, 1) \
+ F(AtomicsNumWaitersForTesting, 2, 1) \
+ F(AtomicsOr, 3, 1) \
+ F(AtomicsSub, 3, 1) \
+ F(AtomicsXor, 3, 1) \
F(SetAllowAtomicsWait, 1, 1)
-#define FOR_EACH_INTRINSIC_BIGINT(F) \
- F(BigIntBinaryOp, 3, 1) \
- F(BigIntCompareToBigInt, 3, 1) \
- F(BigIntCompareToNumber, 3, 1) \
- F(BigIntCompareToString, 3, 1) \
- F(BigIntEqualToBigInt, 2, 1) \
- F(BigIntEqualToNumber, 2, 1) \
- F(BigIntEqualToString, 2, 1) \
- F(BigIntToBoolean, 1, 1) \
- F(BigIntToNumber, 1, 1) \
- F(BigIntUnaryOp, 2, 1) \
+#define FOR_EACH_INTRINSIC_BIGINT(F, I) \
+ F(BigIntBinaryOp, 3, 1) \
+ F(BigIntCompareToBigInt, 3, 1) \
+ F(BigIntCompareToNumber, 3, 1) \
+ F(BigIntCompareToString, 3, 1) \
+ F(BigIntEqualToBigInt, 2, 1) \
+ F(BigIntEqualToNumber, 2, 1) \
+ F(BigIntEqualToString, 2, 1) \
+ F(BigIntToBoolean, 1, 1) \
+ F(BigIntToNumber, 1, 1) \
+ F(BigIntUnaryOp, 2, 1) \
F(ToBigInt, 1, 1)
-#define FOR_EACH_INTRINSIC_CLASSES(F) \
+#define FOR_EACH_INTRINSIC_CLASSES(F, I) \
F(DefineClass, -1 /* >= 3 */, 1) \
F(HomeObjectSymbol, 0, 1) \
F(LoadFromSuper, 3, 1) \
@@ -93,20 +97,16 @@ namespace internal {
F(ThrowSuperNotCalled, 0, 1) \
F(ThrowUnsupportedSuperError, 0, 1)
-#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
- F(GetWeakMapEntries, 2, 1) \
- F(GetWeakSetValues, 2, 1) \
- F(MapGrow, 1, 1) \
- F(MapIteratorClone, 1, 1) \
- F(MapShrink, 1, 1) \
- F(SetGrow, 1, 1) \
- F(SetIteratorClone, 1, 1) \
- F(SetShrink, 1, 1) \
- F(TheHole, 0, 1) \
- F(WeakCollectionDelete, 3, 1) \
+#define FOR_EACH_INTRINSIC_COLLECTIONS(F, I) \
+ F(MapGrow, 1, 1) \
+ F(MapShrink, 1, 1) \
+ F(SetGrow, 1, 1) \
+ F(SetShrink, 1, 1) \
+ F(TheHole, 0, 1) \
+ F(WeakCollectionDelete, 3, 1) \
F(WeakCollectionSet, 4, 1)
-#define FOR_EACH_INTRINSIC_COMPILER(F) \
+#define FOR_EACH_INTRINSIC_COMPILER(F, I) \
F(CompileForOnStackReplacement, 1, 1) \
F(CompileLazy, 1, 1) \
F(CompileOptimized_Concurrent, 1, 1) \
@@ -117,17 +117,14 @@ namespace internal {
F(NotifyDeoptimized, 0, 1) \
F(ResolvePossiblyDirectEval, 6, 1)
-#define FOR_EACH_INTRINSIC_DATE(F) \
- F(DateCurrentTime, 0, 1) \
- F(IsDate, 1, 1)
+#define FOR_EACH_INTRINSIC_DATE(F, I) F(DateCurrentTime, 0, 1)
-#define FOR_EACH_INTRINSIC_DEBUG(F) \
+#define FOR_EACH_INTRINSIC_DEBUG(F, I) \
F(ClearStepping, 0, 1) \
F(CollectGarbage, 1, 1) \
F(DebugBreakAtEntry, 1, 1) \
F(DebugCollectCoverage, 0, 1) \
F(DebugGetLoadedScriptIds, 0, 1) \
- F(DebugIsActive, 0, 1) \
F(DebugOnFunctionCall, 2, 1) \
F(DebugPopPromise, 0, 1) \
F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
@@ -149,164 +146,136 @@ namespace internal {
F(SetGeneratorScopeVariableValue, 4, 1) \
F(LiveEditPatchScript, 2, 1)
-#define FOR_EACH_INTRINSIC_FORIN(F) \
- F(ForInEnumerate, 1, 1) \
+#define FOR_EACH_INTRINSIC_FORIN(F, I) \
+ F(ForInEnumerate, 1, 1) \
F(ForInHasProperty, 2, 1)
#ifdef V8_TRACE_IGNITION
-#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
- F(InterpreterTraceBytecodeEntry, 3, 1) \
+#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F, I) \
+ F(InterpreterTraceBytecodeEntry, 3, 1) \
F(InterpreterTraceBytecodeExit, 3, 1)
#else
-#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F)
+#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F, I)
#endif
#ifdef V8_TRACE_FEEDBACK_UPDATES
-#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F) \
+#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F, I) \
F(InterpreterTraceUpdateFeedback, 3, 1)
#else
-#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F)
+#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F, I)
#endif
-#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
- FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
- FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F) \
+#define FOR_EACH_INTRINSIC_INTERPRETER(F, I) \
+ FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F, I) \
+ FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F, I) \
F(InterpreterDeserializeLazy, 2, 1)
-#define FOR_EACH_INTRINSIC_FUNCTION(F) \
- F(Call, -1 /* >= 2 */, 1) \
- F(FunctionGetName, 1, 1) \
+#define FOR_EACH_INTRINSIC_FUNCTION(F, I) \
+ I(Call, -1 /* >= 2 */, 1) \
F(FunctionGetScriptSource, 1, 1) \
F(FunctionGetScriptId, 1, 1) \
F(FunctionGetScriptSourcePosition, 1, 1) \
F(FunctionGetSourceCode, 1, 1) \
F(FunctionIsAPIFunction, 1, 1) \
- F(IsConstructor, 1, 1) \
F(IsFunction, 1, 1) \
- F(SetCode, 2, 1) \
F(SetNativeFlag, 1, 1)
-#define FOR_EACH_INTRINSIC_GENERATOR(F) \
+#define FOR_EACH_INTRINSIC_GENERATOR(F, I) \
F(AsyncGeneratorHasCatchHandlerForPC, 1, 1) \
- F(AsyncGeneratorReject, 2, 1) \
- F(AsyncGeneratorResolve, 3, 1) \
- F(AsyncGeneratorYield, 3, 1) \
- F(CreateJSGeneratorObject, 2, 1) \
- F(GeneratorClose, 1, 1) \
+ I(AsyncGeneratorReject, 2, 1) \
+ I(AsyncGeneratorResolve, 3, 1) \
+ I(AsyncGeneratorYield, 3, 1) \
+ I(CreateJSGeneratorObject, 2, 1) \
+ I(GeneratorClose, 1, 1) \
F(GeneratorGetFunction, 1, 1) \
- F(GeneratorGetInputOrDebugPos, 1, 1) \
- F(GeneratorGetResumeMode, 1, 1)
+ I(GeneratorGetResumeMode, 1, 1)
#ifdef V8_INTL_SUPPORT
-#define FOR_EACH_INTRINSIC_INTL(F) \
- F(AvailableLocalesOf, 1, 1) \
- F(BreakIteratorBreakType, 1, 1) \
- F(BreakIteratorCurrent, 1, 1) \
- F(BreakIteratorFirst, 1, 1) \
- F(BreakIteratorNext, 1, 1) \
- F(CanonicalizeLanguageTag, 1, 1) \
- F(CollatorResolvedOptions, 1, 1) \
- F(CreateBreakIterator, 3, 1) \
- F(CreateDateTimeFormat, 3, 1) \
- F(CreateNumberFormat, 3, 1) \
- F(CurrencyDigits, 1, 1) \
- F(DateCacheVersion, 0, 1) \
- F(DefaultNumberOption, 5, 1) \
- F(DefineWEProperty, 3, 1) \
- F(FormatList, 2, 1) \
- F(FormatListToParts, 2, 1) \
- F(GetDefaultICULocale, 0, 1) \
- F(GetNumberOption, 5, 1) \
- F(IntlUnwrapReceiver, 5, 1) \
- F(IsInitializedIntlObjectOfType, 2, 1) \
- F(IsWellFormedCurrencyCode, 1, 1) \
- F(MarkAsInitializedIntlObjectOfType, 2, 1) \
- F(ParseExtension, 1, 1) \
- F(PluralRulesResolvedOptions, 1, 1) \
- F(PluralRulesSelect, 2, 1) \
- F(ToDateTimeOptions, 3, 1) \
- F(ToLocaleDateTime, 6, 1) \
- F(StringToLowerCaseIntl, 1, 1) \
- F(StringToUpperCaseIntl, 1, 1) \
- F(SupportedLocalesOf, 3, 1) \
-// End of macro.
+#define FOR_EACH_INTRINSIC_INTL(F, I) \
+ F(AvailableLocalesOf, 1, 1) \
+ F(CanonicalizeLanguageTag, 1, 1) \
+ F(DateCacheVersion, 0, 1) \
+ F(FormatList, 2, 1) \
+ F(FormatListToParts, 2, 1) \
+ F(GetDefaultICULocale, 0, 1) \
+ F(StringToLowerCaseIntl, 1, 1) \
+ F(StringToUpperCaseIntl, 1, 1) // End of macro.
#else
-#define FOR_EACH_INTRINSIC_INTL(F)
+#define FOR_EACH_INTRINSIC_INTL(F, I)
#endif // V8_INTL_SUPPORT
-#define FOR_EACH_INTRINSIC_INTERNAL(F) \
- F(AllocateInNewSpace, 1, 1) \
- F(AllocateInTargetSpace, 2, 1) \
- F(AllocateSeqOneByteString, 1, 1) \
- F(AllocateSeqTwoByteString, 1, 1) \
- F(AllowDynamicFunction, 1, 1) \
- F(CheckIsBootstrapping, 0, 1) \
- F(CreateAsyncFromSyncIterator, 1, 1) \
- F(CreateListFromArrayLike, 1, 1) \
- F(CreateTemplateObject, 1, 1) \
- F(DeserializeLazy, 1, 1) \
- F(ExportFromRuntime, 1, 1) \
- F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
- F(IncrementUseCounter, 1, 1) \
- F(InstallToContext, 1, 1) \
- F(Interrupt, 0, 1) \
- F(IS_VAR, 1, 1) \
- F(NewReferenceError, 2, 1) \
- F(NewSyntaxError, 2, 1) \
- F(NewTypeError, 2, 1) \
- F(OrdinaryHasInstance, 2, 1) \
- F(PromoteScheduledException, 0, 1) \
- F(ReportMessage, 1, 1) \
- F(ReThrow, 1, 1) \
- F(RunMicrotaskCallback, 2, 1) \
- F(RunMicrotasks, 0, 1) \
- F(StackGuard, 0, 1) \
- F(Throw, 1, 1) \
- F(ThrowApplyNonFunction, 1, 1) \
- F(ThrowCalledNonCallable, 1, 1) \
- F(ThrowConstructedNonConstructable, 1, 1) \
- F(ThrowConstructorReturnedNonObject, 0, 1) \
- F(ThrowInvalidStringLength, 0, 1) \
- F(ThrowInvalidTypedArrayAlignment, 2, 1) \
- F(ThrowIteratorResultNotAnObject, 1, 1) \
- F(ThrowNotConstructor, 1, 1) \
- F(ThrowRangeError, -1 /* >= 1 */, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowStackOverflow, 0, 1) \
- F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
- F(ThrowSymbolIteratorInvalid, 0, 1) \
- F(ThrowThrowMethodMissing, 0, 1) \
- F(ThrowTypeError, -1 /* >= 1 */, 1) \
- F(Typeof, 1, 1) \
+#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
+ F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
+ F(AllocateSeqOneByteString, 1, 1) \
+ F(AllocateSeqTwoByteString, 1, 1) \
+ F(AllowDynamicFunction, 1, 1) \
+ F(CheckIsBootstrapping, 0, 1) \
+ I(CreateAsyncFromSyncIterator, 1, 1) \
+ F(CreateListFromArrayLike, 1, 1) \
+ F(CreateTemplateObject, 1, 1) \
+ F(DeserializeLazy, 1, 1) \
+ F(ExportFromRuntime, 1, 1) \
+ F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
+ F(IncrementUseCounter, 1, 1) \
+ F(InstallToContext, 1, 1) \
+ F(Interrupt, 0, 1) \
+ F(IS_VAR, 1, 1) \
+ F(NewReferenceError, 2, 1) \
+ F(NewSyntaxError, 2, 1) \
+ F(NewTypeError, 2, 1) \
+ F(OrdinaryHasInstance, 2, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ F(ReportMessage, 1, 1) \
+ F(ReThrow, 1, 1) \
+ F(RunMicrotaskCallback, 2, 1) \
+ F(RunMicrotasks, 0, 1) \
+ F(StackGuard, 0, 1) \
+ F(Throw, 1, 1) \
+ F(ThrowApplyNonFunction, 1, 1) \
+ F(ThrowCalledNonCallable, 1, 1) \
+ F(ThrowConstructedNonConstructable, 1, 1) \
+ F(ThrowConstructorReturnedNonObject, 0, 1) \
+ F(ThrowInvalidStringLength, 0, 1) \
+ F(ThrowInvalidTypedArrayAlignment, 2, 1) \
+ F(ThrowIteratorError, 1, 1) \
+ F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowNotConstructor, 1, 1) \
+ F(ThrowRangeError, -1 /* >= 1 */, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowStackOverflow, 0, 1) \
+ F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
+ F(ThrowSymbolIteratorInvalid, 0, 1) \
+ F(ThrowThrowMethodMissing, 0, 1) \
+ F(ThrowTypeError, -1 /* >= 1 */, 1) \
+ F(Typeof, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1)
-#define FOR_EACH_INTRINSIC_LITERALS(F) \
+#define FOR_EACH_INTRINSIC_LITERALS(F, I) \
F(CreateArrayLiteral, 4, 1) \
F(CreateArrayLiteralWithoutAllocationSite, 2, 1) \
F(CreateObjectLiteral, 4, 1) \
F(CreateObjectLiteralWithoutAllocationSite, 2, 1) \
F(CreateRegExpLiteral, 4, 1)
-#define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 0, 1)
-
-#define FOR_EACH_INTRINSIC_MODULE(F) \
- F(DynamicImportCall, 2, 1) \
- F(GetImportMetaObject, 0, 1) \
+#define FOR_EACH_INTRINSIC_MODULE(F, I) \
+ F(DynamicImportCall, 2, 1) \
+ I(GetImportMetaObject, 0, 1) \
F(GetModuleNamespace, 1, 1)
-#define FOR_EACH_INTRINSIC_NUMBERS(F) \
- F(GetHoleNaNLower, 0, 1) \
- F(GetHoleNaNUpper, 0, 1) \
- F(IsSmi, 1, 1) \
- F(IsValidSmi, 1, 1) \
- F(MaxSmi, 0, 1) \
- F(NumberToString, 1, 1) \
- F(SmiLexicographicCompare, 2, 1) \
- F(StringParseFloat, 1, 1) \
- F(StringParseInt, 2, 1) \
+#define FOR_EACH_INTRINSIC_NUMBERS(F, I) \
+ F(GetHoleNaNLower, 0, 1) \
+ F(GetHoleNaNUpper, 0, 1) \
+ I(IsSmi, 1, 1) \
+ F(IsValidSmi, 1, 1) \
+ F(MaxSmi, 0, 1) \
+ F(NumberToString, 1, 1) \
+ F(SmiLexicographicCompare, 2, 1) \
+ F(StringParseFloat, 1, 1) \
+ F(StringParseInt, 2, 1) \
F(StringToNumber, 1, 1)
-#define FOR_EACH_INTRINSIC_OBJECT(F) \
+#define FOR_EACH_INTRINSIC_OBJECT(F, I) \
F(AddDictionaryProperty, 3, 1) \
F(AddElement, 3, 1) \
F(AddNamedProperty, 4, 1) \
@@ -317,8 +286,8 @@ namespace internal {
F(CompleteInobjectSlackTrackingForMap, 1, 1) \
F(CopyDataProperties, 2, 1) \
F(CopyDataPropertiesWithExcludedProperties, -1 /* >= 1 */, 1) \
- F(CreateDataProperty, 3, 1) \
- F(CreateIterResultObject, 2, 1) \
+ I(CreateDataProperty, 3, 1) \
+ I(CreateIterResultObject, 2, 1) \
F(DefineAccessorPropertyUnchecked, 5, 1) \
F(DefineDataPropertyInLiteral, 6, 1) \
F(DefineGetterPropertyUnchecked, 4, 1) \
@@ -329,13 +298,11 @@ namespace internal {
F(GetOwnPropertyDescriptor, 2, 1) \
F(GetOwnPropertyKeys, 2, 1) \
F(GetProperty, 2, 1) \
- F(GetPrototype, 1, 1) \
F(HasFastPackedElements, 1, 1) \
F(HasInPrototypeChain, 2, 1) \
- F(HasProperty, 2, 1) \
+ I(HasProperty, 2, 1) \
F(InternalSetPrototype, 2, 1) \
- F(IsJSReceiver, 1, 1) \
- F(KeyedGetProperty, 2, 1) \
+ I(IsJSReceiver, 1, 1) \
F(NewObject, 2, 1) \
F(ObjectCreate, 2, 1) \
F(ObjectEntries, 1, 1) \
@@ -347,62 +314,59 @@ namespace internal {
F(ObjectValues, 1, 1) \
F(ObjectValuesSkipFastPath, 1, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- F(SameValue, 2, 1) \
- F(SameValueZero, 2, 1) \
+ F(PerformSideEffectCheckForObject, 1, 1) \
F(SetDataProperties, 2, 1) \
- F(SetProperty, 4, 1) \
+ F(SetKeyedProperty, 4, 1) \
+ F(SetNamedProperty, 4, 1) \
+ F(StoreDataPropertyInLiteral, 3, 1) \
F(ShrinkPropertyDictionary, 1, 1) \
F(ToFastProperties, 1, 1) \
- F(ToInteger, 1, 1) \
- F(ToLength, 1, 1) \
+ I(ToLength, 1, 1) \
F(ToName, 1, 1) \
- F(ToNumber, 1, 1) \
+ I(ToNumber, 1, 1) \
F(ToNumeric, 1, 1) \
- F(ToObject, 1, 1) \
- F(ToPrimitive, 1, 1) \
- F(ToPrimitive_Number, 1, 1) \
- F(ToString, 1, 1) \
- F(TryMigrateInstance, 1, 1) \
- F(ValueOf, 1, 1)
-
-#define FOR_EACH_INTRINSIC_OPERATORS(F) \
- F(Add, 2, 1) \
- F(Equal, 2, 1) \
- F(GreaterThan, 2, 1) \
- F(GreaterThanOrEqual, 2, 1) \
- F(LessThan, 2, 1) \
- F(LessThanOrEqual, 2, 1) \
- F(NotEqual, 2, 1) \
- F(StrictEqual, 2, 1) \
+ I(ToObject, 1, 1) \
+ I(ToString, 1, 1) \
+ F(TryMigrateInstance, 1, 1)
+
+#define FOR_EACH_INTRINSIC_OPERATORS(F, I) \
+ F(Add, 2, 1) \
+ F(Equal, 2, 1) \
+ F(GreaterThan, 2, 1) \
+ F(GreaterThanOrEqual, 2, 1) \
+ F(LessThan, 2, 1) \
+ F(LessThanOrEqual, 2, 1) \
+ F(NotEqual, 2, 1) \
+ F(StrictEqual, 2, 1) \
F(StrictNotEqual, 2, 1)
-#define FOR_EACH_INTRINSIC_PROMISE(F) \
- F(EnqueueMicrotask, 1, 1) \
- F(PromiseHookAfter, 1, 1) \
- F(PromiseHookBefore, 1, 1) \
- F(PromiseHookInit, 2, 1) \
- F(AwaitPromisesInit, 3, 1) \
- F(PromiseMarkAsHandled, 1, 1) \
- F(PromiseRejectEventFromStack, 2, 1) \
- F(PromiseResult, 1, 1) \
- F(PromiseRevokeReject, 1, 1) \
- F(PromiseStatus, 1, 1) \
- F(RejectPromise, 3, 1) \
- F(ResolvePromise, 2, 1) \
- F(PromiseRejectAfterResolved, 2, 1) \
+#define FOR_EACH_INTRINSIC_PROMISE(F, I) \
+ F(EnqueueMicrotask, 1, 1) \
+ F(PromiseHookAfter, 1, 1) \
+ F(PromiseHookBefore, 1, 1) \
+ F(PromiseHookInit, 2, 1) \
+ F(AwaitPromisesInit, 3, 1) \
+ F(PromiseMarkAsHandled, 1, 1) \
+ F(PromiseRejectEventFromStack, 2, 1) \
+ F(PromiseResult, 1, 1) \
+ F(PromiseRevokeReject, 1, 1) \
+ F(PromiseStatus, 1, 1) \
+ I(RejectPromise, 3, 1) \
+ I(ResolvePromise, 2, 1) \
+ F(PromiseRejectAfterResolved, 2, 1) \
F(PromiseResolveAfterResolved, 2, 1)
-#define FOR_EACH_INTRINSIC_PROXY(F) \
- F(CheckProxyGetSetTrapResult, 2, 1) \
- F(CheckProxyHasTrap, 2, 1) \
- F(GetPropertyWithReceiver, 3, 1) \
- F(IsJSProxy, 1, 1) \
- F(JSProxyGetHandler, 1, 1) \
- F(JSProxyGetTarget, 1, 1) \
+#define FOR_EACH_INTRINSIC_PROXY(F, I) \
+ F(CheckProxyGetSetTrapResult, 2, 1) \
+ F(CheckProxyHasTrap, 2, 1) \
+ F(GetPropertyWithReceiver, 3, 1) \
+ F(IsJSProxy, 1, 1) \
+ F(JSProxyGetHandler, 1, 1) \
+ F(JSProxyGetTarget, 1, 1) \
F(SetPropertyWithReceiver, 5, 1)
-#define FOR_EACH_INTRINSIC_REGEXP(F) \
- F(IsRegExp, 1, 1) \
+#define FOR_EACH_INTRINSIC_REGEXP(F, I) \
+ I(IsRegExp, 1, 1) \
F(RegExpExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
@@ -412,7 +376,7 @@ namespace internal {
F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
F(StringSplit, 3, 1)
-#define FOR_EACH_INTRINSIC_SCOPES(F) \
+#define FOR_EACH_INTRINSIC_SCOPES(F, I) \
F(DeclareEvalFunction, 2, 1) \
F(DeclareEvalVar, 1, 1) \
F(DeclareGlobals, 3, 1) \
@@ -438,7 +402,7 @@ namespace internal {
F(StoreLookupSlot_Strict, 2, 1) \
F(ThrowConstAssignError, 0, 1)
-#define FOR_EACH_INTRINSIC_STRINGS(F) \
+#define FOR_EACH_INTRINSIC_STRINGS(F, I) \
F(FlattenString, 1, 1) \
F(GetSubstitution, 5, 1) \
F(InternalizeString, 1, 1) \
@@ -447,7 +411,6 @@ namespace internal {
F(StringBuilderConcat, 3, 1) \
F(StringBuilderJoin, 3, 1) \
F(StringCharCodeAt, 2, 1) \
- F(StringCharFromCode, 1, 1) \
F(StringEqual, 2, 1) \
F(StringGreaterThan, 2, 1) \
F(StringGreaterThanOrEqual, 2, 1) \
@@ -458,19 +421,18 @@ namespace internal {
F(StringLessThan, 2, 1) \
F(StringLessThanOrEqual, 2, 1) \
F(StringMaxLength, 0, 1) \
- F(StringNotEqual, 2, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringSubstring, 3, 1) \
F(StringToArray, 2, 1) \
F(StringTrim, 2, 1)
-#define FOR_EACH_INTRINSIC_SYMBOL(F) \
+#define FOR_EACH_INTRINSIC_SYMBOL(F, I) \
F(CreatePrivateFieldSymbol, 0, 1) \
F(CreatePrivateSymbol, -1 /* <= 1 */, 1) \
F(SymbolDescriptiveString, 1, 1) \
F(SymbolIsPrivate, 1, 1)
-#define FOR_EACH_INTRINSIC_TEST(F) \
+#define FOR_EACH_INTRINSIC_TEST(F, I) \
F(Abort, 1, 1) \
F(AbortJS, 1, 1) \
F(ClearFunctionFeedback, 1, 1) \
@@ -482,7 +444,7 @@ namespace internal {
F(DebugTrace, 0, 1) \
F(DebugTrackRetainingPath, -1, 1) \
F(DeoptimizeFunction, 1, 1) \
- F(DeoptimizeNow, 0, 1) \
+ I(DeoptimizeNow, 0, 1) \
F(DeserializeWasmModule, 2, 1) \
F(DisallowCodegenFromStrings, 1, 1) \
F(DisallowWasmCodegen, 1, 1) \
@@ -490,8 +452,11 @@ namespace internal {
F(FreezeWasmLazyCompilation, 1, 1) \
F(GetCallable, 0, 1) \
F(GetDeoptCount, 1, 1) \
+ F(GetInitializerFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
F(GetUndetectable, 0, 1) \
+ F(GetWasmExceptionId, 2, 1) \
+ F(GetWasmExceptionValues, 1, 1) \
F(GetWasmRecoveredTrapCount, 0, 1) \
F(GlobalPrint, 1, 1) \
F(HasDictionaryElements, 1, 1) \
@@ -538,6 +503,7 @@ namespace internal {
F(ArraySpeciesProtector, 0, 1) \
F(TypedArraySpeciesProtector, 0, 1) \
F(PromiseSpeciesProtector, 0, 1) \
+ F(StringIteratorProtector, 0, 1) \
F(SystemBreak, 0, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
@@ -548,37 +514,36 @@ namespace internal {
F(WasmMemoryHasFullGuardRegion, 1, 1) \
F(SetWasmThreadsEnabled, 1, 1)
-#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
- F(ArrayBufferNeuter, 1, 1) \
- F(ArrayBufferViewWasNeutered, 1, 1) \
- F(IsTypedArray, 1, 1) \
- F(TypedArrayCopyElements, 3, 1) \
- F(TypedArrayGetBuffer, 1, 1) \
- F(TypedArrayGetLength, 1, 1) \
- F(TypedArraySet, 2, 1) \
+#define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
+ F(ArrayBufferNeuter, 1, 1) \
+ F(ArrayBufferViewWasNeutered, 1, 1) \
+ I(IsTypedArray, 1, 1) \
+ F(TypedArrayCopyElements, 3, 1) \
+ F(TypedArrayGetBuffer, 1, 1) \
+ F(TypedArrayGetLength, 1, 1) \
+ F(TypedArraySet, 2, 1) \
F(TypedArraySortFast, 1, 1)
-#define FOR_EACH_INTRINSIC_WASM(F) \
- F(ThrowWasmError, 1, 1) \
- F(ThrowWasmStackOverflow, 0, 1) \
- F(WasmExceptionGetElement, 1, 1) \
- F(WasmExceptionSetElement, 2, 1) \
- F(WasmGetExceptionRuntimeId, 0, 1) \
- F(WasmGrowMemory, 2, 1) \
- F(WasmRunInterpreter, 2, 1) \
- F(WasmStackGuard, 0, 1) \
- F(WasmThrow, 0, 1) \
- F(WasmThrowCreate, 2, 1) \
- F(WasmThrowTypeError, 0, 1) \
+#define FOR_EACH_INTRINSIC_WASM(F, I) \
+ F(ThrowWasmError, 1, 1) \
+ F(ThrowWasmStackOverflow, 0, 1) \
+ F(WasmExceptionGetElement, 2, 1) \
+ F(WasmExceptionSetElement, 3, 1) \
+ F(WasmExceptionGetTag, 1, 1) \
+ F(WasmGrowMemory, 2, 1) \
+ F(WasmRunInterpreter, 2, 1) \
+ F(WasmStackGuard, 0, 1) \
+ F(WasmThrowCreate, 2, 1) \
+ F(WasmThrowTypeError, 0, 1) \
F(WasmCompileLazy, 2, 1)
-#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
- F(DebugBreakOnBytecode, 1, 2) \
+#define FOR_EACH_INTRINSIC_RETURN_PAIR_IMPL(F, I) \
+ F(DebugBreakOnBytecode, 1, 2) \
F(LoadLookupSlotForCall, 1, 2)
// Most intrinsics are implemented in the runtime/ directory, but ICs are
// implemented in ic.cc for now.
-#define FOR_EACH_INTRINSIC_IC(F) \
+#define FOR_EACH_INTRINSIC_IC(F, I) \
F(ElementsTransitionAndStoreIC_Miss, 6, 1) \
F(KeyedLoadIC_Miss, 4, 1) \
F(KeyedStoreIC_Miss, 5, 1) \
@@ -599,44 +564,55 @@ namespace internal {
F(CloneObjectIC_Miss, 4, 1) \
F(CloneObjectIC_Slow, 2, 1)
+#define FOR_EACH_INTRINSIC_RETURN_OBJECT_IMPL(F, I) \
+ FOR_EACH_INTRINSIC_ARRAY(F, I) \
+ FOR_EACH_INTRINSIC_ATOMICS(F, I) \
+ FOR_EACH_INTRINSIC_BIGINT(F, I) \
+ FOR_EACH_INTRINSIC_CLASSES(F, I) \
+ FOR_EACH_INTRINSIC_COLLECTIONS(F, I) \
+ FOR_EACH_INTRINSIC_COMPILER(F, I) \
+ FOR_EACH_INTRINSIC_DATE(F, I) \
+ FOR_EACH_INTRINSIC_DEBUG(F, I) \
+ FOR_EACH_INTRINSIC_FORIN(F, I) \
+ FOR_EACH_INTRINSIC_FUNCTION(F, I) \
+ FOR_EACH_INTRINSIC_GENERATOR(F, I) \
+ FOR_EACH_INTRINSIC_IC(F, I) \
+ FOR_EACH_INTRINSIC_INTERNAL(F, I) \
+ FOR_EACH_INTRINSIC_INTERPRETER(F, I) \
+ FOR_EACH_INTRINSIC_INTL(F, I) \
+ FOR_EACH_INTRINSIC_LITERALS(F, I) \
+ FOR_EACH_INTRINSIC_MODULE(F, I) \
+ FOR_EACH_INTRINSIC_NUMBERS(F, I) \
+ FOR_EACH_INTRINSIC_OBJECT(F, I) \
+ FOR_EACH_INTRINSIC_OPERATORS(F, I) \
+ FOR_EACH_INTRINSIC_PROMISE(F, I) \
+ FOR_EACH_INTRINSIC_PROXY(F, I) \
+ FOR_EACH_INTRINSIC_REGEXP(F, I) \
+ FOR_EACH_INTRINSIC_SCOPES(F, I) \
+ FOR_EACH_INTRINSIC_STRINGS(F, I) \
+ FOR_EACH_INTRINSIC_SYMBOL(F, I) \
+ FOR_EACH_INTRINSIC_TEST(F, I) \
+ FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
+ FOR_EACH_INTRINSIC_WASM(F, I)
+
+// Defines the list of all intrinsics, coming in 2 flavors, either returning an
+// object or a pair.
+#define FOR_EACH_INTRINSIC_IMPL(F, I) \
+ FOR_EACH_INTRINSIC_RETURN_PAIR_IMPL(F, I) \
+ FOR_EACH_INTRINSIC_RETURN_OBJECT_IMPL(F, I)
+
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
- FOR_EACH_INTRINSIC_ARRAY(F) \
- FOR_EACH_INTRINSIC_ATOMICS(F) \
- FOR_EACH_INTRINSIC_BIGINT(F) \
- FOR_EACH_INTRINSIC_CLASSES(F) \
- FOR_EACH_INTRINSIC_COLLECTIONS(F) \
- FOR_EACH_INTRINSIC_COMPILER(F) \
- FOR_EACH_INTRINSIC_DATE(F) \
- FOR_EACH_INTRINSIC_DEBUG(F) \
- FOR_EACH_INTRINSIC_FORIN(F) \
- FOR_EACH_INTRINSIC_FUNCTION(F) \
- FOR_EACH_INTRINSIC_GENERATOR(F) \
- FOR_EACH_INTRINSIC_IC(F) \
- FOR_EACH_INTRINSIC_INTERNAL(F) \
- FOR_EACH_INTRINSIC_INTERPRETER(F) \
- FOR_EACH_INTRINSIC_INTL(F) \
- FOR_EACH_INTRINSIC_LITERALS(F) \
- FOR_EACH_INTRINSIC_MATHS(F) \
- FOR_EACH_INTRINSIC_MODULE(F) \
- FOR_EACH_INTRINSIC_NUMBERS(F) \
- FOR_EACH_INTRINSIC_OBJECT(F) \
- FOR_EACH_INTRINSIC_OPERATORS(F) \
- FOR_EACH_INTRINSIC_PROMISE(F) \
- FOR_EACH_INTRINSIC_PROXY(F) \
- FOR_EACH_INTRINSIC_REGEXP(F) \
- FOR_EACH_INTRINSIC_SCOPES(F) \
- FOR_EACH_INTRINSIC_STRINGS(F) \
- FOR_EACH_INTRINSIC_SYMBOL(F) \
- FOR_EACH_INTRINSIC_TEST(F) \
- FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
- FOR_EACH_INTRINSIC_WASM(F)
-
-// FOR_EACH_INTRINSIC defines the list of all intrinsics, coming in 2 flavors,
-// either returning an object or a pair.
-#define FOR_EACH_INTRINSIC(F) \
- FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
- FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
+ FOR_EACH_INTRINSIC_RETURN_OBJECT_IMPL(F, F)
+
+#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
+ FOR_EACH_INTRINSIC_RETURN_PAIR_IMPL(F, F)
+
+// The list of all intrinsics, including those that have inline versions, but
+// not the inline versions themselves.
+#define FOR_EACH_INTRINSIC(F) FOR_EACH_INTRINSIC_IMPL(F, F)
+// The list of all inline intrinsics only.
+#define FOR_EACH_INLINE_INTRINSIC(I) FOR_EACH_INTRINSIC_IMPL(NOTHING, I)
#define F(name, nargs, ressize) \
Object* Runtime_##name(int args_length, Object** args_object, \
@@ -652,12 +628,17 @@ class Runtime : public AllStatic {
enum FunctionId : int32_t {
#define F(name, nargs, ressize) k##name,
#define I(name, nargs, ressize) kInline##name,
- FOR_EACH_INTRINSIC(F) FOR_EACH_INTRINSIC(I)
+ FOR_EACH_INTRINSIC(F) FOR_EACH_INLINE_INTRINSIC(I)
#undef I
#undef F
kNumFunctions,
};
+ static constexpr int kNumInlineFunctions =
+#define COUNT(...) +1
+ FOR_EACH_INLINE_INTRINSIC(COUNT);
+#undef COUNT
+
enum IntrinsicType { RUNTIME, INLINE };
// Intrinsic function descriptor.
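
The runtime.h rewrite threads two callbacks through every intrinsic list (F for runtime-only entries, I for entries that also get an inline flavor) and counts the inline entries by expanding each one to "+1". A standalone sketch of both tricks:

    // One X-macro list, two callbacks: F entries are runtime-only, I entries
    // additionally get an inline flavor.
    #define MY_INTRINSICS(F, I) \
      F(RuntimeOnly, 1, 1)      \
      I(AlsoInline, 2, 1)

    #define NOTHING(...)
    #define COUNT(...) +1

    // Passing COUNT for I and NOTHING for F counts only the inline entries:
    // the expansion is "0 +1", a constant expression.
    static constexpr int kNumInline = 0 MY_INTRINSICS(NOTHING, COUNT);  // == 1

    #undef COUNT
    #undef NOTHING
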
@@ -680,6 +661,11 @@ class Runtime : public AllStatic {
static const int kNotFound = -1;
+ // Checks whether the runtime function with the given {id} depends on the
+ // "current context", i.e. because it does scoped lookups, or whether it's
+ // fine to just pass any context within the same "native context".
+ static bool NeedsExactContext(FunctionId id);
+
// Checks whether the runtime function with the given {id} never returns
  // to its caller normally, i.e. whether it'll always raise an exception.
// More specifically: The C++ implementation returns the Heap::exception
@@ -704,7 +690,8 @@ class Runtime : public AllStatic {
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SetObjectProperty(
Isolate* isolate, Handle<Object> object, Handle<Object> key,
- Handle<Object> value, LanguageMode language_mode);
+ Handle<Object> value, LanguageMode language_mode,
+ StoreOrigin store_origin);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetObjectProperty(
Isolate* isolate, Handle<Object> object, Handle<Object> key,
@@ -739,7 +726,7 @@ class RuntimeState {
}
private:
- RuntimeState() {}
+ RuntimeState() = default;
#ifndef V8_INTL_SUPPORT
unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
@@ -781,6 +768,8 @@ enum class OptimizationStatus {
kTopmostFrameIsTurboFanned = 1 << 11,
};
+Smi* SmiLexicographicCompare(Smi* x_value, Smi* y_value);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 6f1cbcd2f9..f25d79ab5a 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -51,6 +51,7 @@
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/s390/assembler-s390-inl.h"
+#include "src/string-constants.h"
namespace v8 {
namespace internal {
@@ -180,13 +181,14 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Bit 45 - Distinct Operands for instructions like ARK, SRK, etc.
// As such, we require only 1 double word
int64_t facilities[3] = {0L};
+ int16_t reg0;
// LHI sets up GPR0
// STFLE is specified as .insn, as opcode is not recognized.
  // We register that the instructions kill r0 (LHI) and the CC (STFLE).
asm volatile(
- "lhi 0,2\n"
+ "lhi %%r0,2\n"
".insn s,0xb2b00000,%0\n"
- : "=Q"(facilities)
+ : "=Q"(facilities), "=r"(reg0)
:
: "cc", "r0");
@@ -315,6 +317,13 @@ Operand Operand::EmbeddedNumber(double value) {
return result;
}
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(str);
+ return result;
+}
+
MemOperand::MemOperand(Register rn, int32_t offset)
: baseRegister(rn), indexRegister(r0), offset_(offset) {}
@@ -322,24 +331,33 @@ MemOperand::MemOperand(Register rx, Register rb, int32_t offset)
: baseRegister(rb), indexRegister(rx), offset_(offset) {}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
Address pc = reinterpret_cast<Address>(buffer_ + request.offset());
switch (request.kind()) {
- case HeapObjectRequest::kHeapNumber:
+ case HeapObjectRequest::kHeapNumber: {
object =
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
- set_target_address_at(pc, kNullAddress,
- reinterpret_cast<Address>(object.location()),
+ set_target_address_at(pc, kNullAddress, object.address(),
SKIP_ICACHE_FLUSH);
break;
- case HeapObjectRequest::kCodeStub:
+ }
+ case HeapObjectRequest::kCodeStub: {
request.code_stub()->set_isolate(isolate);
SixByteInstr instr =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
int index = instr & 0xFFFFFFFF;
UpdateCodeTarget(index, request.code_stub()->GetCode());
break;
+ }
+ case HeapObjectRequest::kStringConstant: {
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ set_target_address_at(pc, kNullAddress,
+ str->AllocateStringConstant(isolate).address());
+ break;
+ }
}
}
}
@@ -794,13 +812,7 @@ void Assembler::dp(uintptr_t data) {
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- if (options().disable_reloc_info_for_patching) return;
- if (RelocInfo::IsNone(rmode) ||
- // Don't record external references unless the heap will be serialized.
- (RelocInfo::IsOnlyForSerializer(rmode) &&
- !options().record_reloc_info_for_serialization && !emit_debug_code())) {
- return;
- }
+ if (!ShouldRecordRelocInfo(rmode)) return;
DeferredRelocInfo rinfo(pc_offset(), rmode, data);
relocations_.push_back(rinfo);
}
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index 9e0a9ab32f..8e494543f8 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -346,7 +346,7 @@ C_REGISTERS(DECLARE_C_REGISTER)
// Class Operand represents a shifter operand in data processing instructions
// defining immediate numbers and masks
-class Operand BASE_EMBEDDED {
+class Operand {
public:
// immediate
V8_INLINE explicit Operand(intptr_t immediate,
@@ -368,6 +368,7 @@ class Operand BASE_EMBEDDED {
V8_INLINE explicit Operand(Register rm);
static Operand EmbeddedNumber(double value); // Smi or HeapNumber
+ static Operand EmbeddedStringConstant(const StringConstantBase* str);
// Return true if this is a register operand.
V8_INLINE bool is_reg() const { return rm_.is_valid(); }
@@ -424,7 +425,7 @@ typedef int32_t Disp;
// 1) a base register + 16 bit unsigned displacement
// 2) a base register + index register + 16 bit unsigned displacement
// 3) a base register + index register + 20 bit signed displacement
-class MemOperand BASE_EMBEDDED {
+class MemOperand {
public:
explicit MemOperand(Register rx, Disp offset = 0);
explicit MemOperand(Register rx, Register rb, Disp offset = 0);
@@ -1663,7 +1664,7 @@ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
friend class EnsureSpace;
};
-class EnsureSpace BASE_EMBEDDED {
+class EnsureSpace {
public:
explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
};
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index c06a3f636a..9a8111ffcf 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -134,7 +134,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
IsolateAddressId::kPendingExceptionAddress, isolate())));
__ StoreP(r2, MemOperand(ip));
- __ LoadRoot(r2, Heap::kExceptionRootIndex);
+ __ LoadRoot(r2, RootIndex::kException);
__ b(&exit, Label::kNear);
// Invoke: Link this frame into the handler chain.
@@ -475,7 +475,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ Move(r7, ExternalReference::scheduled_exception_address(isolate));
__ LoadP(r7, MemOperand(r7));
- __ CompareRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r7, RootIndex::kTheHoleValue);
__ bne(&promote_scheduled_exception, Label::kNear);
__ b(r14);
@@ -523,13 +523,13 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
// new target
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// call data
__ push(call_data);
Register scratch = call_data;
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
// return value
__ push(scratch);
// return value default
@@ -609,7 +609,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Push data from AccessorInfo.
__ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ push(scratch);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(isolate()));
__ Push(scratch, holder);
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index cae0cf60e6..00342955e6 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -7,7 +7,6 @@
#include <memory>
#include "src/codegen.h"
-#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/s390/simulator-s390.h"
@@ -16,16 +15,17 @@ namespace internal {
#define __ masm.
-UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
return nullptr;
#else
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
__ MovFromFloatParameter(d0);
__ sqdbr(d0, d0);
@@ -33,13 +33,14 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
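
Note: dropping the Isolate* parameter means the sqrt stub can now be generated before any isolate exists. A minimal usage sketch, assuming UnaryMathFunction is the usual double (*)(double) typedef:

    UnaryMathFunction fast_sqrt = CreateSqrtFunction();
    if (fast_sqrt != nullptr) {    // nullptr when built for the simulator
      double r = fast_sqrt(2.0);   // runs the generated sqdbr instruction
    }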
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 4b30bc0547..dee5452ea2 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -87,9 +87,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments (on the stack, not including receiver)
// r3 : the target to call
- // r4 : arguments list (FixedArray)
// r6 : arguments list length (untagged)
- Register registers[] = {r3, r2, r4, r6};
+ // r4 : arguments list (FixedArray)
+ Register registers[] = {r3, r2, r6, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -124,9 +124,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific(
// r2 : number of arguments (on the stack, not including receiver)
// r3 : the target to call
// r5 : the new target
- // r4 : arguments list (FixedArray)
// r6 : arguments list length (untagged)
- Register registers[] = {r3, r5, r2, r4, r6};
+ // r4 : arguments list (FixedArray)
+ Register registers[] = {r3, r5, r2, r6, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -192,7 +192,7 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // JSFunction
@@ -236,10 +236,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // argument count (not including receiver)
- r5, // new target
+ r6, // address of the first argument
r3, // constructor to call
+ r5, // new target
r4, // allocation site feedback if available, undefined otherwise
- r6 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
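
Note: the register shuffles above are not arbitrary; they track the platform-independent parameter order, in which the untagged arguments-list length now precedes the arguments list itself. Restated from the reordered register arrays above (the descriptor parameter names are assumptions from interface-descriptors.h):

    // CallVarargsDescriptor:
    //   kTarget, kActualArgumentsCount, kArgumentsLength, kArgumentsList
    // ConstructVarargsDescriptor:
    //   kTarget, kNewTarget, kActualArgumentsCount, kArgumentsLength,
    //   kArgumentsList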
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 3ddcd9fd9b..1a047e3eba 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -123,14 +123,14 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- Heap::kBuiltinsConstantsTableRootIndex));
+ RootIndex::kBuiltinsConstantsTable));
const uint32_t offset =
FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
- LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
LoadP(destination, MemOperand(destination, offset), r1);
}
@@ -429,7 +429,7 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
AddP(location, location, Operand(stack_offset));
}
-void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition) {
LoadP(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), r0);
}
@@ -514,8 +514,6 @@ void TurboAssembler::CallRecordWriteStub(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register isolate_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
@@ -527,7 +525,6 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter);
Pop(object_parameter);
- Move(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -1388,7 +1385,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
- LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ LoadRoot(r5, RootIndex::kUndefinedValue);
}
Label done;
@@ -1515,7 +1512,7 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
CmpP(type_reg, Operand(type));
}
-void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
CmpP(obj, MemOperand(kRootRegister, RootRegisterOffset(index)));
}
@@ -1839,7 +1836,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
if (emit_debug_code()) {
Label done_checking;
AssertNotSmi(object);
- CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ CompareRoot(object, RootIndex::kUndefinedValue);
beq(&done_checking, Label::kNear);
LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
@@ -2882,6 +2879,12 @@ void TurboAssembler::LoadAndSub32(Register dst, Register src,
laa(dst, dst, opnd);
}
+void TurboAssembler::LoadAndSub64(Register dst, Register src,
+ const MemOperand& opnd) {
+ lcgr(dst, src);
+ laag(dst, dst, opnd);
+}
+
//----------------------------------------------------------------------------
// Subtract Logical Instructions
//----------------------------------------------------------------------------
@@ -3372,6 +3375,12 @@ void TurboAssembler::CmpAndSwap(Register old_val, Register new_val,
}
}
+void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val,
+ const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+ csg(old_val, new_val, opnd);
+}
+
//-----------------------------------------------------------------------------
// Compare Logical Helpers
//-----------------------------------------------------------------------------
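
Note: LAAG only adds, so the new LoadAndSub64 synthesizes an atomic subtract from a negation plus a load-and-add. Step-by-step effect, assuming standard z/Architecture semantics for LCGR and LAAG:

    // lcgr(dst, src)       : dst   = -src     (64-bit load complement)
    // laag(dst, dst, opnd) : tmp   = *opnd    (atomic load-and-add)
    //                        *opnd = tmp + dst
    //                        dst   = tmp
    // Net effect: *opnd -= src atomically; dst receives the old memory value.
    // CmpAndSwap64, by contrast, maps directly onto a single CSG instruction.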
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 54aecf5896..98f8bb6e03 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -52,11 +52,6 @@ inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
return MemOperand(object, index, offset - kHeapObjectTag);
}
-// Generate a MemOperand for loading a field from Root register
-inline MemOperand RootMemOperand(Heap::RootListIndex index) {
- return MemOperand(kRootRegister, index << kPointerSizeLog2);
-}
-
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
@@ -153,6 +148,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
+ TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
+ : TurboAssemblerBase(options, buffer, buffer_size) {}
+
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
@@ -255,11 +253,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register exclusion3 = no_reg);
// Load an object from the root table.
- void LoadRoot(Register destination, Heap::RootListIndex index) override {
+ void LoadRoot(Register destination, RootIndex index) override {
LoadRoot(destination, index, al);
}
- void LoadRoot(Register destination, Heap::RootListIndex index,
- Condition cond);
+ void LoadRoot(Register destination, RootIndex index, Condition cond);
//--------------------------------------------------------------------------
// S390 Macro Assemblers for Instructions
//--------------------------------------------------------------------------
@@ -328,6 +325,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SubP(Register dst, const MemOperand& opnd);
void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
void LoadAndSub32(Register dst, Register src, const MemOperand& opnd);
+ void LoadAndSub64(Register dst, Register src, const MemOperand& opnd);
// Subtract Logical (Register - Mem)
void SubLogical(Register dst, const MemOperand& opnd);
@@ -395,6 +393,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Cmp32(Register dst, const MemOperand& opnd);
void CmpP(Register dst, const MemOperand& opnd);
void CmpAndSwap(Register old_val, Register new_val, const MemOperand& opnd);
+ void CmpAndSwap64(Register old_val, Register new_val, const MemOperand& opnd);
// Compare Logical
void CmpLogical32(Register src1, Register src2);
@@ -1027,10 +1026,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
+ MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
+ : TurboAssembler(options, buffer, size) {}
+
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
+
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
@@ -1082,8 +1085,8 @@ class MacroAssembler : public TurboAssembler {
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
- void CompareRoot(Register obj, Heap::RootListIndex index);
- void PushRoot(Heap::RootListIndex index) {
+ void CompareRoot(Register obj, RootIndex index);
+ void PushRoot(RootIndex index) {
LoadRoot(r0, index);
Push(r0);
}
@@ -1096,14 +1099,13 @@ class MacroAssembler : public TurboAssembler {
void JumpToInstructionStream(Address entry);
// Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
CompareRoot(with, index);
beq(if_equal);
}
// Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(Register with, Heap::RootListIndex index,
- Label* if_not_equal) {
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
CompareRoot(with, index);
bne(if_not_equal);
}
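
Note: the new options-only constructors allow assembling without an isolate, which is what the CreateSqrtFunction change above relies on. A minimal sketch:

    byte buffer[256];
    MacroAssembler masm(AssemblerOptions{}, buffer, sizeof(buffer));
    // ... emit instructions ...
    CodeDesc desc;
    masm.GetCode(nullptr, &desc);  // nullptr isolate: no Code object created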
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 0fec28b69e..e6761ca610 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -1369,6 +1369,7 @@ void Simulator::EvalTableInit() {
EvalTable[SRLG] = &Simulator::Evaluate_SRLG;
EvalTable[SLLG] = &Simulator::Evaluate_SLLG;
EvalTable[CSY] = &Simulator::Evaluate_CSY;
+ EvalTable[CSG] = &Simulator::Evaluate_CSG;
EvalTable[RLLG] = &Simulator::Evaluate_RLLG;
EvalTable[RLL] = &Simulator::Evaluate_RLL;
EvalTable[STMG] = &Simulator::Evaluate_STMG;
@@ -8778,9 +8779,26 @@ EVALUATE(CSY) {
}
EVALUATE(CSG) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(CSG);
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ int32_t offset = d2;
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t target_addr = static_cast<intptr_t>(b2_val) + offset;
+
+ int64_t r1_val = get_register(r1);
+ int64_t r3_val = get_register(r3);
+
+ DCHECK_EQ(target_addr & 0x3, 0);
+ bool is_success = __atomic_compare_exchange_n(
+ reinterpret_cast<int64_t*>(target_addr), &r1_val, r3_val, true,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ if (!is_success) {
+ set_register(r1, r1_val);
+ condition_reg_ = 0x4;
+ } else {
+ condition_reg_ = 0x8;
+ }
+ return length;
}
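
Note: the condition-code contract implemented by the CSG emulation above, restated for clarity:

    // cc = 0x8 : operands compared equal, swap performed
    // cc = 0x4 : operands unequal, swap not performed; r1 is reloaded with
    //            the current memory value (__atomic_compare_exchange_n
    //            already stored it back into r1_val on failure)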
EVALUATE(RLLG) {
@@ -9153,28 +9171,38 @@ EVALUATE(STOCG) {
return 0;
}
+#define ATOMIC_LOAD_AND_UPDATE_WORD64(op) \
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2); \
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2); \
+ intptr_t addr = static_cast<intptr_t>(b2_val) + d2; \
+ int64_t r3_val = get_register(r3); \
+ DCHECK_EQ(addr & 0x3, 0); \
+ int64_t r1_val = \
+ op(reinterpret_cast<int64_t*>(addr), r3_val, __ATOMIC_SEQ_CST); \
+ set_register(r1, r1_val);
+
EVALUATE(LANG) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LANG);
+ ATOMIC_LOAD_AND_UPDATE_WORD64(__atomic_fetch_and);
+ return length;
}
EVALUATE(LAOG) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LAOG);
+ ATOMIC_LOAD_AND_UPDATE_WORD64(__atomic_fetch_or);
+ return length;
}
EVALUATE(LAXG) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LAXG);
+ ATOMIC_LOAD_AND_UPDATE_WORD64(__atomic_fetch_xor);
+ return length;
}
EVALUATE(LAAG) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LAAG);
+ ATOMIC_LOAD_AND_UPDATE_WORD64(__atomic_fetch_add);
+ return length;
}
EVALUATE(LAALG) {
@@ -9183,6 +9211,8 @@ EVALUATE(LAALG) {
return 0;
}
+#undef ATOMIC_LOAD_AND_UPDATE_WORD64
+
EVALUATE(LOC) {
UNIMPLEMENTED();
USE(instr);
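
Note: for reference, ATOMIC_LOAD_AND_UPDATE_WORD64 expands mechanically; for LAAG the handler body above is equivalent to:

    DCHECK_OPCODE(LAAG);
    DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
    int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
    intptr_t addr = static_cast<intptr_t>(b2_val) + d2;
    int64_t r3_val = get_register(r3);
    DCHECK_EQ(addr & 0x3, 0);
    int64_t r1_val = __atomic_fetch_add(reinterpret_cast<int64_t*>(addr),
                                        r3_val, __ATOMIC_SEQ_CST);
    set_register(r1, r1_val);  // r1 receives the pre-update memory value
    return length;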
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index e85a27fcb3..475b4a80b1 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -17,7 +17,7 @@ namespace internal {
class Register;
-class SafepointEntry BASE_EMBEDDED {
+class SafepointEntry {
public:
SafepointEntry() : info_(0), bits_(nullptr), trampoline_pc_(-1) {}
@@ -87,8 +87,7 @@ class SafepointEntry BASE_EMBEDDED {
int trampoline_pc_;
};
-
-class SafepointTable BASE_EMBEDDED {
+class SafepointTable {
public:
explicit SafepointTable(Code* code);
explicit SafepointTable(Address instruction_start,
@@ -171,8 +170,7 @@ class SafepointTable BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(SafepointTable);
};
-
-class Safepoint BASE_EMBEDDED {
+class Safepoint {
public:
typedef enum {
kSimple = 0,
@@ -201,8 +199,7 @@ class Safepoint BASE_EMBEDDED {
friend class SafepointTableBuilder;
};
-
-class SafepointTableBuilder BASE_EMBEDDED {
+class SafepointTableBuilder {
public:
explicit SafepointTableBuilder(Zone* zone)
: deoptimization_info_(zone),
diff --git a/deps/v8/src/setup-isolate-deserialize.cc b/deps/v8/src/setup-isolate-deserialize.cc
index a99d735af6..fdfaa7e188 100644
--- a/deps/v8/src/setup-isolate-deserialize.cc
+++ b/deps/v8/src/setup-isolate-deserialize.cc
@@ -17,19 +17,6 @@ void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
// No actual work to be done; builtins will be deserialized from the snapshot.
}
-void SetupIsolateDelegate::SetupInterpreter(
- interpreter::Interpreter* interpreter) {
-#if defined(V8_USE_SNAPSHOT) && !defined(V8_USE_SNAPSHOT_WITH_UNWINDING_INFO)
- if (FLAG_perf_prof_unwinding_info) {
- StdoutStream{}
- << "Warning: The --perf-prof-unwinding-info flag can be passed at "
- "mksnapshot time to get better results."
- << std::endl;
- }
-#endif
- CHECK(interpreter->IsDispatchTableInitialized());
-}
-
bool SetupIsolateDelegate::SetupHeap(Heap* heap) {
CHECK(!create_heap_objects_);
// No actual work to be done; heap will be deserialized from the snapshot.
diff --git a/deps/v8/src/setup-isolate-full.cc b/deps/v8/src/setup-isolate-full.cc
index c3a367986c..c902f06b30 100644
--- a/deps/v8/src/setup-isolate-full.cc
+++ b/deps/v8/src/setup-isolate-full.cc
@@ -7,7 +7,6 @@
#include "src/base/logging.h"
#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter.h"
-#include "src/interpreter/setup-interpreter.h"
#include "src/isolate.h"
namespace v8 {
@@ -21,15 +20,6 @@ void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
}
}
-void SetupIsolateDelegate::SetupInterpreter(
- interpreter::Interpreter* interpreter) {
- if (create_heap_objects_) {
- interpreter::SetupInterpreter::InstallBytecodeHandlers(interpreter);
- } else {
- CHECK(interpreter->IsDispatchTableInitialized());
- }
-}
-
bool SetupIsolateDelegate::SetupHeap(Heap* heap) {
if (create_heap_objects_) {
return SetupHeapInternal(heap);
diff --git a/deps/v8/src/setup-isolate.h b/deps/v8/src/setup-isolate.h
index 2003caeac9..61dedd6fe8 100644
--- a/deps/v8/src/setup-isolate.h
+++ b/deps/v8/src/setup-isolate.h
@@ -34,12 +34,10 @@ class SetupIsolateDelegate {
public:
explicit SetupIsolateDelegate(bool create_heap_objects)
: create_heap_objects_(create_heap_objects) {}
- virtual ~SetupIsolateDelegate() {}
+ virtual ~SetupIsolateDelegate() = default;
virtual void SetupBuiltins(Isolate* isolate);
- virtual void SetupInterpreter(interpreter::Interpreter* interpreter);
-
virtual bool SetupHeap(Heap* heap);
protected:
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.cc b/deps/v8/src/snapshot/builtin-deserializer-allocator.cc
index 4e3d546fa0..80300c9f1d 100644
--- a/deps/v8/src/snapshot/builtin-deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/builtin-deserializer-allocator.cc
@@ -19,16 +19,6 @@ BuiltinDeserializerAllocator::BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer)
: deserializer_(deserializer) {}
-BuiltinDeserializerAllocator::~BuiltinDeserializerAllocator() {
- delete handler_allocations_;
-}
-
-namespace {
-int HandlerAllocationIndex(int code_object_id) {
- return code_object_id - BuiltinSnapshotUtils::kFirstHandlerIndex;
-}
-} // namespace
-
Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
int size) {
const int code_object_id = deserializer()->CurrentCodeObjectId();
@@ -39,30 +29,14 @@ Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
RegisterCodeObjectAllocation(code_object_id);
#endif
- if (BSU::IsBuiltinIndex(code_object_id)) {
- Object* obj = isolate()->builtins()->builtin(code_object_id);
- DCHECK(Internals::HasHeapObjectTag(obj));
- return HeapObject::cast(obj)->address();
- } else if (BSU::IsHandlerIndex(code_object_id)) {
- if (handler_allocation_ != kNullAddress) {
- // Lazy deserialization.
- DCHECK_NULL(handler_allocations_);
- return handler_allocation_;
- } else {
- // Eager deserialization.
- DCHECK_EQ(kNullAddress, handler_allocation_);
- DCHECK_NOT_NULL(handler_allocations_);
- int index = HandlerAllocationIndex(code_object_id);
- DCHECK_NE(kNullAddress, handler_allocations_->at(index));
- return handler_allocations_->at(index);
- }
- }
-
- UNREACHABLE();
+ DCHECK(Builtins::IsBuiltinId(code_object_id));
+ Object* obj = isolate()->builtins()->builtin(code_object_id);
+ DCHECK(Internals::HasHeapObjectTag(obj));
+ return HeapObject::cast(obj)->address();
}
Heap::Reservation
-BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
+BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
Heap::Reservation result;
// Reservations for builtins.
@@ -77,7 +51,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
result.push_back({builtin_size, kNullAddress, kNullAddress});
}
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
if (i == Builtins::kDeserializeLazy) continue;
// Skip lazy builtins. These will be replaced by the DeserializeLazy code
@@ -91,28 +65,6 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
result.push_back({builtin_size, kNullAddress, kNullAddress});
}
- // Reservations for bytecode handlers.
-
- BSU::ForEachBytecode(
- [=, &result](Bytecode bytecode, OperandScale operand_scale) {
- if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
- // Bytecodes without a handler don't require a reservation.
- return;
- } else if (FLAG_lazy_handler_deserialization &&
- deserializer()->IsLazyDeserializationEnabled() &&
- Bytecodes::IsLazy(bytecode)) {
- // Skip lazy handlers. These will be replaced by the DeserializeLazy
- // code object in InitializeFromReservations and thus require no
- // reserved space.
- return;
- }
-
- const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
- uint32_t handler_size = deserializer()->ExtractCodeObjectSize(index);
- DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
- result.push_back({handler_size, kNullAddress, kNullAddress});
- });
-
return result;
}
@@ -130,26 +82,6 @@ void BuiltinDeserializerAllocator::InitializeBuiltinFromReservation(
#endif
}
-void BuiltinDeserializerAllocator::InitializeHandlerFromReservation(
- const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale) {
- DCHECK_EQ(deserializer()->ExtractCodeObjectSize(
- BSU::BytecodeToIndex(bytecode, operand_scale)),
- chunk.size);
- DCHECK_EQ(chunk.size, chunk.end - chunk.start);
-
- SkipList::Update(chunk.start, chunk.size);
-
- DCHECK_NOT_NULL(handler_allocations_);
- const int index =
- HandlerAllocationIndex(BSU::BytecodeToIndex(bytecode, operand_scale));
- handler_allocations_->at(index) = chunk.start;
-
-#ifdef DEBUG
- RegisterCodeObjectReservation(BSU::BytecodeToIndex(bytecode, operand_scale));
-#endif
-}
-
void BuiltinDeserializerAllocator::InitializeFromReservations(
const Heap::Reservation& reservation) {
DCHECK(!AllowHeapAllocation::IsAllowed());
@@ -168,41 +100,18 @@ void BuiltinDeserializerAllocator::InitializeFromReservations(
reservation_index++;
}
- Code* deserialize_lazy = builtins->builtin(Builtins::kDeserializeLazy);
-
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
if (i == Builtins::kDeserializeLazy) continue;
if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
- builtins->set_builtin(i, deserialize_lazy);
+ builtins->set_builtin(
+ i, builtins->builtin(builtins->LazyDeserializerForBuiltin(i)));
} else {
InitializeBuiltinFromReservation(reservation[reservation_index], i);
reservation_index++;
}
}
- // Initialize interpreter bytecode handler reservations.
-
- DCHECK_NULL(handler_allocations_);
- handler_allocations_ = new std::vector<Address>(BSU::kNumberOfHandlers);
-
- BSU::ForEachBytecode(
- [=, &reservation_index](Bytecode bytecode, OperandScale operand_scale) {
- if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
- // Bytecodes without a handler don't have a reservation.
- return;
- } else if (FLAG_lazy_handler_deserialization &&
- deserializer()->IsLazyDeserializationEnabled() &&
- Bytecodes::IsLazy(bytecode)) {
- // Likewise, bytecodes with lazy handlers don't either.
- return;
- }
-
- InitializeHandlerFromReservation(reservation[reservation_index],
- bytecode, operand_scale);
- reservation_index++;
- });
-
DCHECK_EQ(reservation.size(), reservation_index);
}
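
Note: Builtins::LazyDeserializerForBuiltin is not part of this diff; its assumed contract, consistent with the IsLazyDeserializer() checks introduced below:

    // LazyDeserializerForBuiltin(id) -> index of the DeserializeLazy* stub
    // that can materialize builtin `id`. Ordinary builtins presumably map to
    // kDeserializeLazy; bytecode handlers (now regular builtins) would map to
    // width-specific lazy-deserializer variants.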
@@ -211,9 +120,9 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(isolate()->builtins()->is_initialized());
DCHECK(Builtins::IsBuiltinId(builtin_id));
- DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
- DCHECK_EQ(Builtins::kDeserializeLazy,
- isolate()->builtins()->builtin(builtin_id)->builtin_index());
+ DCHECK(!Builtins::IsLazyDeserializer(builtin_id));
+ DCHECK(Builtins::IsLazyDeserializer(
+ isolate()->builtins()->builtin(builtin_id)->builtin_index()));
const uint32_t builtin_size =
deserializer()->ExtractCodeObjectSize(builtin_id);
@@ -236,28 +145,6 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
#endif
}
-void BuiltinDeserializerAllocator::ReserveForHandler(
- Bytecode bytecode, OperandScale operand_scale) {
- DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
-
- const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
- const uint32_t handler_size =
- deserializer()->ExtractCodeObjectSize(code_object_id);
- DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
-
- handler_allocation_ =
- isolate()->factory()->NewCodeForDeserialization(handler_size)->address();
-
-// Note: After this point and until deserialization finishes, heap allocation
-// is disallowed. We currently can't safely assert this since we'd need to
-// pass the DisallowHeapAllocation scope out of this function.
-
-#ifdef DEBUG
- RegisterCodeObjectReservation(code_object_id);
-#endif
-}
-
#ifdef DEBUG
void BuiltinDeserializerAllocator::RegisterCodeObjectReservation(
int code_object_id) {
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.h b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
index 65c5872d7a..b606eb2749 100644
--- a/deps/v8/src/snapshot/builtin-deserializer-allocator.h
+++ b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
@@ -30,8 +30,6 @@ class BuiltinDeserializerAllocator final {
BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer);
- ~BuiltinDeserializerAllocator();
-
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
@@ -42,13 +40,10 @@ class BuiltinDeserializerAllocator final {
// deserialization) in order to avoid having to patch builtin references
// later on. See also the kBuiltin case in deserializer.cc.
//
- // There are three ways that we use to reserve / allocate space. In all
- // cases, required objects are requested from the GC prior to
- // deserialization. 1. pre-allocated builtin code objects are written into
- // the builtins table (this is to make deserialization of builtin references
- // easier). Pre-allocated handler code objects are 2. stored in the
- // {handler_allocations_} vector (at eager-deserialization time) and 3.
- // stored in {handler_allocation_} (at lazy-deserialization time).
+ // Space is reserved / allocated in a single way: required objects are
+ // requested from the GC prior to deserialization. Pre-allocated builtin
+ // code objects are written into the builtins table (this makes
+ // deserialization of builtin references easier).
//
// Allocate simply returns the pre-allocated object prepared by
// InitializeFromReservations.
@@ -83,23 +78,19 @@ class BuiltinDeserializerAllocator final {
// Builtin deserialization does not bake reservations into the snapshot, hence
// this is a nop.
- void DecodeReservation(std::vector<SerializedData::Reservation> res) {}
+ void DecodeReservation(const std::vector<SerializedData::Reservation>& res) {}
// These methods are used to pre-allocate builtin objects prior to
// deserialization.
// TODO(jgruber): Refactor reservation/allocation logic in deserializers to
// make this less messy.
- Heap::Reservation CreateReservationsForEagerBuiltinsAndHandlers();
+ Heap::Reservation CreateReservationsForEagerBuiltins();
void InitializeFromReservations(const Heap::Reservation& reservation);
// Creates reservations and initializes the builtins table in preparation for
// lazily deserializing a single builtin.
void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
- // Pre-allocates a code object preparation for lazily deserializing a single
- // handler.
- void ReserveForHandler(Bytecode bytecode, OperandScale operand_scale);
-
#ifdef DEBUG
bool ReservationsAreFullyUsed() const;
#endif
@@ -113,11 +104,6 @@ class BuiltinDeserializerAllocator final {
void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
int builtin_id);
- // As above, but for interpreter bytecode handlers.
- void InitializeHandlerFromReservation(
- const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale);
-
#ifdef DEBUG
void RegisterCodeObjectReservation(int code_object_id);
void RegisterCodeObjectAllocation(int code_object_id);
@@ -130,13 +116,6 @@ class BuiltinDeserializerAllocator final {
// construction since that makes vtable-based checks fail.
Deserializer<BuiltinDeserializerAllocator>* const deserializer_;
- // Stores allocated space for bytecode handlers during eager deserialization.
- std::vector<Address>* handler_allocations_ = nullptr;
-
- // Stores the allocated space for a single handler during lazy
- // deserialization.
- Address handler_allocation_ = kNullAddress;
-
bool next_reference_is_weak_ = false;
DISALLOW_COPY_AND_ASSIGN(BuiltinDeserializerAllocator)
diff --git a/deps/v8/src/snapshot/builtin-deserializer.cc b/deps/v8/src/snapshot/builtin-deserializer.cc
index 0e32844ba0..136b74b26e 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.cc
+++ b/deps/v8/src/snapshot/builtin-deserializer.cc
@@ -42,24 +42,24 @@ BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
const BuiltinSnapshotData* data)
: Deserializer(data, false) {
code_offsets_ = data->BuiltinOffsets();
- DCHECK_EQ(BSU::kNumberOfCodeObjects, code_offsets_.length());
+ DCHECK_EQ(Builtins::builtin_count, code_offsets_.length());
DCHECK(std::is_sorted(code_offsets_.begin(), code_offsets_.end()));
Initialize(isolate);
}
-void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
+void BuiltinDeserializer::DeserializeEagerBuiltins() {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK_EQ(0, source()->position());
// Deserialize builtins.
Builtins* builtins = isolate()->builtins();
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
// Do nothing. These builtins have been replaced by DeserializeLazy in
// InitializeFromReservations.
- DCHECK_EQ(builtins->builtin(Builtins::kDeserializeLazy),
+ DCHECK_EQ(builtins->builtin(builtins->LazyDeserializerForBuiltin(i)),
builtins->builtin(i));
} else {
builtins->set_builtin(i, DeserializeBuiltinRaw(i));
@@ -67,7 +67,7 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
}
#ifdef DEBUG
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
Object* o = builtins->builtin(i);
DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
}
@@ -77,7 +77,7 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
if (FLAG_print_builtin_code) {
// We can't print builtins during deserialization because they may refer
// to not yet deserialized builtins.
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
if (!IsLazyDeserializationEnabled() || !Builtins::IsLazy(i)) {
Code* code = builtins->builtin(i);
const char* name = Builtins::name(i);
@@ -86,38 +86,6 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
}
}
#endif
-
- // Deserialize bytecode handlers.
-
- Interpreter* interpreter = isolate()->interpreter();
- DCHECK(!isolate()->interpreter()->IsDispatchTableInitialized());
-
- BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
- // Bytecodes without a dedicated handler are patched up in a second pass.
- if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
-
- // If lazy-deserialization is enabled and the current bytecode is lazy,
- // we write the generic LazyDeserialization handler into the dispatch table
- // and deserialize later upon first use.
- Code* code = (FLAG_lazy_handler_deserialization &&
- IsLazyDeserializationEnabled() && Bytecodes::IsLazy(bytecode))
- ? GetDeserializeLazyHandler(operand_scale)
- : DeserializeHandlerRaw(bytecode, operand_scale);
-
- interpreter->SetBytecodeHandler(bytecode, operand_scale, code);
- });
-
- // Patch up holes in the dispatch table.
-
- Code* illegal_handler = interpreter->GetBytecodeHandler(
- Bytecode::kIllegal, OperandScale::kSingle);
-
- BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
- if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
- interpreter->SetBytecodeHandler(bytecode, operand_scale, illegal_handler);
- });
-
- DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
}
Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
@@ -135,13 +103,6 @@ Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
return code;
}
-Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
- OperandScale operand_scale) {
- allocator()->ReserveForHandler(bytecode, operand_scale);
- DisallowHeapAllocation no_gc;
- return DeserializeHandlerRaw(bytecode, operand_scale);
-}
-
Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Builtins::IsBuiltinId(builtin_id));
@@ -162,8 +123,19 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
Assembler::FlushICache(code->raw_instruction_start(),
code->raw_instruction_size());
- PROFILE(isolate(), CodeCreateEvent(CodeEventListener::BUILTIN_TAG,
- AbstractCode::cast(code),
+ CodeEventListener::LogEventsAndTags code_tag;
+ switch (code->kind()) {
+ case AbstractCode::BUILTIN:
+ code_tag = CodeEventListener::BUILTIN_TAG;
+ break;
+ case AbstractCode::BYTECODE_HANDLER:
+ code_tag = CodeEventListener::BYTECODE_HANDLER_TAG;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ PROFILE(isolate(), CodeCreateEvent(code_tag, AbstractCode::cast(code),
Builtins::name(builtin_id)));
LOG_CODE_EVENT(isolate(),
CodeLinePosInfoRecordEvent(
@@ -172,42 +144,8 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
return code;
}
-Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
- OperandScale operand_scale) {
- DCHECK(!AllowHeapAllocation::IsAllowed());
- DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
-
- const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
- DeserializingCodeObjectScope scope(this, code_object_id);
-
- const int initial_position = source()->position();
- source()->set_position(code_offsets_[code_object_id]);
-
- Object* o = ReadDataSingle();
- DCHECK(o->IsCode() && Code::cast(o)->kind() == Code::BYTECODE_HANDLER);
-
- // Rewind.
- source()->set_position(initial_position);
-
- // Flush the instruction cache.
- Code* code = Code::cast(o);
- Assembler::FlushICache(code->raw_instruction_start(),
- code->raw_instruction_size());
-
- std::string name = Bytecodes::ToString(bytecode, operand_scale);
- PROFILE(isolate(), CodeCreateEvent(CodeEventListener::HANDLER_TAG,
- AbstractCode::cast(code), name.c_str()));
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- code->PrintBuiltinCode(isolate(), name.c_str());
- }
-#endif // ENABLE_DISASSEMBLER
-
- return code;
-}
-
uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
- DCHECK_LT(code_object_id, BSU::kNumberOfCodeObjects);
+ DCHECK_LT(code_object_id, Builtins::builtin_count);
const int initial_position = source()->position();
@@ -225,20 +163,5 @@ uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
return result;
}
-Code* BuiltinDeserializer::GetDeserializeLazyHandler(
- interpreter::OperandScale operand_scale) const {
- STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
- switch (operand_scale) {
- case OperandScale::kSingle:
- return Code::cast(isolate()->heap()->deserialize_lazy_handler());
- case OperandScale::kDouble:
- return Code::cast(isolate()->heap()->deserialize_lazy_handler_wide());
- case OperandScale::kQuadruple:
- return Code::cast(
- isolate()->heap()->deserialize_lazy_handler_extra_wide());
- }
- UNREACHABLE();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-deserializer.h b/deps/v8/src/snapshot/builtin-deserializer.h
index 1ae49686b8..e77598db68 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.h
+++ b/deps/v8/src/snapshot/builtin-deserializer.h
@@ -7,7 +7,6 @@
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-deserializer-allocator.h"
-#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/deserializer.h"
namespace v8 {
@@ -32,25 +31,17 @@ class BuiltinDeserializer final
//
// After this, the instruction cache must be flushed by the caller (we don't
// do it ourselves since the startup serializer batch-flushes all code pages).
- void DeserializeEagerBuiltinsAndHandlers();
+ void DeserializeEagerBuiltins();
// Deserializes the single given builtin. This is used whenever a builtin is
// lazily deserialized at runtime.
Code* DeserializeBuiltin(int builtin_id);
- // Deserializes the single given handler. This is used whenever a handler is
- // lazily deserialized at runtime.
- Code* DeserializeHandler(Bytecode bytecode, OperandScale operand_scale);
-
private:
// Deserializes the single given builtin. Assumes that reservations have
// already been allocated.
Code* DeserializeBuiltinRaw(int builtin_id);
- // Deserializes the single given bytecode handler. Assumes that reservations
- // have already been allocated.
- Code* DeserializeHandlerRaw(Bytecode bytecode, OperandScale operand_scale);
-
// Extracts the size of builtin Code objects (baked into the snapshot).
uint32_t ExtractCodeObjectSize(int builtin_id);
diff --git a/deps/v8/src/snapshot/builtin-serializer.cc b/deps/v8/src/snapshot/builtin-serializer.cc
index 0109a85b6b..6c71606b2e 100644
--- a/deps/v8/src/snapshot/builtin-serializer.cc
+++ b/deps/v8/src/snapshot/builtin-serializer.cc
@@ -26,42 +26,24 @@ BuiltinSerializer::~BuiltinSerializer() {
void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
// Serialize builtins.
- STATIC_ASSERT(0 == BSU::kFirstBuiltinIndex);
-
- for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code* code = isolate()->builtins()->builtin(i);
+ DCHECK_IMPLIES(Builtins::IsLazyDeserializer(code),
+ Builtins::IsLazyDeserializer(i));
SetBuiltinOffset(i, sink_.Position());
- SerializeBuiltin(isolate()->builtins()->builtin(i));
+ SerializeBuiltin(code);
}
- // Serialize bytecode handlers.
-
- STATIC_ASSERT(BSU::kNumberOfBuiltins == BSU::kFirstHandlerIndex);
-
- BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
- SetHandlerOffset(bytecode, operand_scale, sink_.Position());
- if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
-
- SerializeHandler(
- isolate()->interpreter()->GetBytecodeHandler(bytecode, operand_scale));
- });
-
- STATIC_ASSERT(BSU::kFirstHandlerIndex + BSU::kNumberOfHandlers ==
- BSU::kNumberOfCodeObjects);
-
- // The DeserializeLazy handlers are serialized by the StartupSerializer
- // during strong root iteration.
-
- DCHECK(isolate()->heap()->deserialize_lazy_handler()->IsCode());
- DCHECK(isolate()->heap()->deserialize_lazy_handler_wide()->IsCode());
- DCHECK(isolate()->heap()->deserialize_lazy_handler_extra_wide()->IsCode());
+ // Append the offset table. During deserialization, the offset table is
+ // extracted by BuiltinSnapshotData.
+ const byte* data = reinterpret_cast<const byte*>(&code_offsets_[0]);
+ int data_length = static_cast<int>(sizeof(code_offsets_));
// Pad with kNop since GetInt() might read too far.
- Pad();
+ Pad(data_length);
// Append the offset table. During deserialization, the offset table is
// extracted by BuiltinSnapshotData.
- const byte* data = reinterpret_cast<const byte*>(&code_offsets_[0]);
- int data_length = static_cast<int>(sizeof(code_offsets_));
sink_.PutRaw(data, data_length, "BuiltinOffsets");
}
@@ -83,20 +65,13 @@ void BuiltinSerializer::SerializeBuiltin(Code* code) {
object_serializer.Serialize();
}
-void BuiltinSerializer::SerializeHandler(Code* code) {
- DCHECK(ObjectIsBytecodeHandler(code));
- ObjectSerializer object_serializer(this, code, &sink_, kPlain,
- kStartOfObject);
- object_serializer.Serialize();
-}
-
void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!o->IsSmi());
// Roots can simply be serialized as root references.
- int root_index = root_index_map()->Lookup(o);
- if (root_index != RootIndexMap::kInvalidRootIndex) {
+ RootIndex root_index;
+ if (root_index_map()->Lookup(o, &root_index)) {
DCHECK(startup_serializer_->root_has_been_serialized(root_index));
PutRoot(root_index, o, how_to_code, where_to_point, skip);
return;
@@ -115,8 +90,8 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
// * Strings: CSA_ASSERTs in debug builds, various other string constants.
// * HeapNumbers: Embedded constants.
// TODO(6624): Jump targets should never trigger content serialization, it
- // should always result in a reference instead. Reloc infos and handler
- // tables should not end up in the partial snapshot cache.
+ // should always result in a reference instead. Reloc infos and handler tables
+ // should not end up in the partial snapshot cache.
FlushSkip(skip);
@@ -128,17 +103,8 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
void BuiltinSerializer::SetBuiltinOffset(int builtin_id, uint32_t offset) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
- DCHECK(BSU::IsBuiltinIndex(builtin_id));
code_offsets_[builtin_id] = offset;
}
-void BuiltinSerializer::SetHandlerOffset(Bytecode bytecode,
- OperandScale operand_scale,
- uint32_t offset) {
- const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
- DCHECK(BSU::IsHandlerIndex(index));
- code_offsets_[index] = offset;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-serializer.h b/deps/v8/src/snapshot/builtin-serializer.h
index abc8be74e5..132aa0894b 100644
--- a/deps/v8/src/snapshot/builtin-serializer.h
+++ b/deps/v8/src/snapshot/builtin-serializer.h
@@ -5,9 +5,9 @@
#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
+#include "src/builtins/builtins.h"
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-serializer-allocator.h"
-#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/serializer.h"
namespace v8 {
@@ -15,12 +15,10 @@ namespace internal {
class StartupSerializer;
-// Responsible for serializing builtin and bytecode handler objects during
-// startup snapshot creation into a dedicated area of the snapshot.
+// Responsible for serializing builtin objects during startup snapshot creation
+// into a dedicated area of the snapshot.
// See snapshot.h for documentation of the snapshot layout.
class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
- using BSU = BuiltinSnapshotUtils;
-
public:
BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
~BuiltinSerializer() override;
@@ -32,7 +30,6 @@ class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
Object** end) override;
void SerializeBuiltin(Code* code);
- void SerializeHandler(Code* code);
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
@@ -47,14 +44,11 @@ class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
// Stores the starting offset, within the serialized data, of each code
// object. This is later packed into the builtin snapshot, and used by the
- // builtin deserializer to deserialize individual builtins and bytecode
- // handlers.
+ // builtin deserializer to deserialize individual builtins.
//
// Indices [kFirstBuiltinIndex, kFirstBuiltinIndex + kNumberOfBuiltins[:
// Builtin offsets.
- // Indices [kFirstHandlerIndex, kFirstHandlerIndex + kNumberOfHandlers[:
- // Bytecode handler offsets.
- uint32_t code_offsets_[BuiltinSnapshotUtils::kNumberOfCodeObjects];
+ uint32_t code_offsets_[Builtins::builtin_count];
DISALLOW_COPY_AND_ASSIGN(BuiltinSerializer);
};
diff --git a/deps/v8/src/snapshot/builtin-snapshot-utils.cc b/deps/v8/src/snapshot/builtin-snapshot-utils.cc
deleted file mode 100644
index e32a857c0b..0000000000
--- a/deps/v8/src/snapshot/builtin-snapshot-utils.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/builtin-snapshot-utils.h"
-
-namespace v8 {
-namespace internal {
-
-// static
-bool BuiltinSnapshotUtils::IsBuiltinIndex(int maybe_index) {
- return (kFirstBuiltinIndex <= maybe_index &&
- maybe_index < kFirstBuiltinIndex + kNumberOfBuiltins);
-}
-
-// static
-bool BuiltinSnapshotUtils::IsHandlerIndex(int maybe_index) {
- return (kFirstHandlerIndex <= maybe_index &&
- maybe_index < kFirstHandlerIndex + kNumberOfHandlers);
-}
-
-// static
-int BuiltinSnapshotUtils::BytecodeToIndex(Bytecode bytecode,
- OperandScale operand_scale) {
- int index =
- BuiltinSnapshotUtils::kNumberOfBuiltins + static_cast<int>(bytecode);
- switch (operand_scale) { // clang-format off
- case OperandScale::kSingle: return index;
- case OperandScale::kDouble: return index + Bytecodes::kBytecodeCount;
- case OperandScale::kQuadruple: return index + 2 * Bytecodes::kBytecodeCount;
- } // clang-format on
- UNREACHABLE();
-}
-
-// static
-std::pair<interpreter::Bytecode, interpreter::OperandScale>
-BuiltinSnapshotUtils::BytecodeFromIndex(int index) {
- DCHECK(IsHandlerIndex(index));
-
- const int x = index - BuiltinSnapshotUtils::kNumberOfBuiltins;
- Bytecode bytecode = Bytecodes::FromByte(x % Bytecodes::kBytecodeCount);
- switch (x / Bytecodes::kBytecodeCount) { // clang-format off
- case 0: return {bytecode, OperandScale::kSingle};
- case 1: return {bytecode, OperandScale::kDouble};
- case 2: return {bytecode, OperandScale::kQuadruple};
- default: UNREACHABLE();
- } // clang-format on
-}
-
-// static
-void BuiltinSnapshotUtils::ForEachBytecode(
- std::function<void(Bytecode, OperandScale)> f) {
- static const OperandScale kOperandScales[] = {
-#define VALUE(Name, _) OperandScale::k##Name,
- OPERAND_SCALE_LIST(VALUE)
-#undef VALUE
- };
-
- for (OperandScale operand_scale : kOperandScales) {
- for (int i = 0; i < Bytecodes::kBytecodeCount; i++) {
- f(Bytecodes::FromByte(i), operand_scale);
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-snapshot-utils.h b/deps/v8/src/snapshot/builtin-snapshot-utils.h
deleted file mode 100644
index 587b4a35b0..0000000000
--- a/deps/v8/src/snapshot/builtin-snapshot-utils.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
-#define V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
-
-#include <functional>
-
-#include "src/interpreter/interpreter.h"
-
-namespace v8 {
-namespace internal {
-
-// Constants and utility methods used by builtin and bytecode handler
-// (de)serialization.
-class BuiltinSnapshotUtils : public AllStatic {
- using Bytecode = interpreter::Bytecode;
- using BytecodeOperands = interpreter::BytecodeOperands;
- using Bytecodes = interpreter::Bytecodes;
- using Interpreter = interpreter::Interpreter;
- using OperandScale = interpreter::OperandScale;
-
- public:
- static const int kFirstBuiltinIndex = 0;
- static const int kNumberOfBuiltins = Builtins::builtin_count;
-
- static const int kFirstHandlerIndex = kFirstBuiltinIndex + kNumberOfBuiltins;
- static const int kNumberOfHandlers =
- Bytecodes::kBytecodeCount * BytecodeOperands::kOperandScaleCount;
-
- // The number of code objects in the builtin snapshot.
- // TODO(jgruber): This could be reduced by a bit since not every
- // {bytecode, operand_scale} combination has an associated handler
- // (see Bytecodes::BytecodeHasHandler).
- static const int kNumberOfCodeObjects = kNumberOfBuiltins + kNumberOfHandlers;
-
- // Indexes into the offsets vector contained in snapshot.
- // See e.g. BuiltinSerializer::code_offsets_.
- static bool IsBuiltinIndex(int maybe_index);
- static bool IsHandlerIndex(int maybe_index);
- static int BytecodeToIndex(Bytecode bytecode, OperandScale operand_scale);
-
- // Converts an index back into the {bytecode,operand_scale} tuple. This is the
- // inverse operation of BytecodeToIndex().
- static std::pair<Bytecode, OperandScale> BytecodeFromIndex(int index);
-
- // Iteration over all {bytecode,operand_scale} pairs. Implemented here since
- // (de)serialization depends on the iteration order.
- static void ForEachBytecode(std::function<void(Bytecode, OperandScale)> f);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 5db7cae94b..b463ca2047 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -4,8 +4,6 @@
#include "src/snapshot/code-serializer.h"
-#include <memory>
-
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
@@ -126,8 +124,8 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map()->Lookup(obj);
- if (root_index != RootIndexMap::kInvalidRootIndex) {
+ RootIndex root_index;
+ if (root_index_map()->Lookup(obj, &root_index)) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
}
@@ -336,44 +334,6 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
return scope.CloseAndEscape(result);
}
-class Checksum {
- public:
- explicit Checksum(Vector<const byte> payload) {
-#ifdef MEMORY_SANITIZER
- // Computing the checksum includes padding bytes for objects like strings.
- // Mark every object as initialized in the code serializer.
- MSAN_MEMORY_IS_INITIALIZED(payload.start(), payload.length());
-#endif // MEMORY_SANITIZER
- // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
- uintptr_t a = 1;
- uintptr_t b = 0;
- const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
- DCHECK(IsAligned(payload.length(), kIntptrSize));
- const uintptr_t* end = cur + payload.length() / kIntptrSize;
- while (cur < end) {
- // Unsigned overflow expected and intended.
- a += *cur++;
- b += a;
- }
-#if V8_HOST_ARCH_64_BIT
- a ^= a >> 32;
- b ^= b >> 32;
-#endif // V8_HOST_ARCH_64_BIT
- a_ = static_cast<uint32_t>(a);
- b_ = static_cast<uint32_t>(b);
- }
-
- bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
-
- uint32_t a() const { return a_; }
- uint32_t b() const { return b_; }
-
- private:
- uint32_t a_;
- uint32_t b_;
-
- DISALLOW_COPY_AND_ASSIGN(Checksum);
-};
SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs) {
@@ -390,10 +350,14 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
uint32_t size =
padded_payload_offset + static_cast<uint32_t>(payload->size());
+ DCHECK(IsAligned(size, kPointerAlignment));
// Allocate backing store and create result data.
AllocateData(size);
+ // Zero out the pre-payload data; part of it is used only for padding.
+ memset(data_, 0, padded_payload_offset);
+
// Set header values.
SetMagicNumber(cs->isolate());
SetHeaderValue(kVersionHashOffset, Version::Hash());
@@ -418,16 +382,13 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
CopyBytes(data_ + kHeaderSize + reservation_size,
reinterpret_cast<const byte*>(stub_keys->data()), stub_keys_size);
- // Zero out any padding before the payload.
- memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
-
// Copy serialized data.
CopyBytes(data_ + padded_payload_offset, payload->data(),
static_cast<size_t>(payload->size()));
- Checksum checksum(DataWithoutHeader());
- SetHeaderValue(kChecksum1Offset, checksum.a());
- SetHeaderValue(kChecksum2Offset, checksum.b());
+ Checksum checksum(ChecksummedContent());
+ SetHeaderValue(kChecksumPartAOffset, checksum.a());
+ SetHeaderValue(kChecksumPartBOffset, checksum.b());
}
SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
@@ -440,8 +401,8 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
- uint32_t c1 = GetHeaderValue(kChecksum1Offset);
- uint32_t c2 = GetHeaderValue(kChecksum2Offset);
+ uint32_t c1 = GetHeaderValue(kChecksumPartAOffset);
+ uint32_t c2 = GetHeaderValue(kChecksumPartBOffset);
if (version_hash != Version::Hash()) return VERSION_MISMATCH;
if (source_hash != expected_source_hash) return SOURCE_MISMATCH;
if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
@@ -454,7 +415,7 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
GetHeaderValue(kNumReservationsOffset) * kInt32Size +
GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size);
if (payload_length > max_payload_length) return LENGTH_MISMATCH;
- if (!Checksum(DataWithoutHeader()).Check(c1, c2)) return CHECKSUM_MISMATCH;
+ if (!Checksum(ChecksummedContent()).Check(c1, c2)) return CHECKSUM_MISMATCH;
return CHECK_SUCCESS;
}
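
Note: the Checksum class is deleted from this file but still instantiated below, so it has presumably moved to a shared header. Its interface, as exercised by the remaining call sites:

    // Producing side (SerializedCodeData constructor above):
    Checksum checksum(ChecksummedContent());
    SetHeaderValue(kChecksumPartAOffset, checksum.a());
    SetHeaderValue(kChecksumPartBOffset, checksum.b());

    // Consuming side (SanityCheck above): recompute and compare.
    if (!Checksum(ChecksummedContent()).Check(c1, c2)) return CHECKSUM_MISMATCH;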
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index d1f19ef081..d9b4be9a34 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -110,8 +110,8 @@ class SerializedCodeData : public SerializedData {
// [6] number of code stub keys
// [7] number of reservation size entries
// [8] payload length
- // [9] payload checksum part 1
- // [10] payload checksum part 2
+ // [9] payload checksum part A
+ // [10] payload checksum part B
// ... reservations
// ... code stub keys
// ... serialized payload
@@ -124,9 +124,12 @@ class SerializedCodeData : public SerializedData {
kNumReservationsOffset + kUInt32Size;
static const uint32_t kPayloadLengthOffset =
kNumCodeStubKeysOffset + kUInt32Size;
- static const uint32_t kChecksum1Offset = kPayloadLengthOffset + kUInt32Size;
- static const uint32_t kChecksum2Offset = kChecksum1Offset + kUInt32Size;
- static const uint32_t kUnalignedHeaderSize = kChecksum2Offset + kUInt32Size;
+ static const uint32_t kChecksumPartAOffset =
+ kPayloadLengthOffset + kUInt32Size;
+ static const uint32_t kChecksumPartBOffset =
+ kChecksumPartAOffset + kUInt32Size;
+ static const uint32_t kUnalignedHeaderSize =
+ kChecksumPartBOffset + kUInt32Size;
static const uint32_t kHeaderSize = POINTER_SIZE_ALIGN(kUnalignedHeaderSize);
// Used when consuming.
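The header above is a fixed run of uint32 slots, and kHeaderSize rounds the unaligned total up to pointer alignment so the payload that follows starts on a pointer boundary. POINTER_SIZE_ALIGN is the usual round-up idiom; a minimal sketch, with kPointerSize as an illustrative stand-in for the target pointer width:

    #include <cstdint>

    // Round value up to the next multiple of kPointerSize (a power of two),
    // as POINTER_SIZE_ALIGN does. kPointerSize here is illustrative.
    constexpr uint32_t kPointerSize = 8;

    constexpr uint32_t PointerSizeAlign(uint32_t value) {
      return (value + kPointerSize - 1) & ~(kPointerSize - 1);
    }

    // Example: an 11-slot (44-byte) header rounds up to 48 bytes.
    static_assert(PointerSizeAlign(44) == 48, "44 rounds up to 48");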
@@ -155,7 +158,7 @@ class SerializedCodeData : public SerializedData {
SerializedCodeData(const byte* data, int size)
: SerializedData(const_cast<byte*>(data), size) {}
- Vector<const byte> DataWithoutHeader() const {
+ Vector<const byte> ChecksummedContent() const {
return Vector<const byte>(data_ + kHeaderSize, size_ - kHeaderSize);
}
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.cc b/deps/v8/src/snapshot/default-deserializer-allocator.cc
index 610b87c771..f3afc4d498 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.cc
@@ -121,7 +121,7 @@ HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
}
void DefaultDeserializerAllocator::DecodeReservation(
- std::vector<SerializedData::Reservation> res) {
+ const std::vector<SerializedData::Reservation>& res) {
DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
int current_space = FIRST_SPACE;
for (auto& r : res) {
@@ -167,8 +167,7 @@ bool DefaultDeserializerAllocator::ReserveSpace(
}
Heap::Reservation builtin_reservations =
- builtin_deserializer->allocator()
- ->CreateReservationsForEagerBuiltinsAndHandlers();
+ builtin_deserializer->allocator()->CreateReservationsForEagerBuiltins();
DCHECK(!builtin_reservations.empty());
for (const auto& c : builtin_reservations) {
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.h b/deps/v8/src/snapshot/default-deserializer-allocator.h
index e6a5ba3fdc..4a5758cc5a 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.h
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.h
@@ -58,7 +58,7 @@ class DefaultDeserializerAllocator final {
// ------- Reservation Methods -------
// Methods related to memory reservations (prior to deserialization).
- void DecodeReservation(std::vector<SerializedData::Reservation> res);
+ void DecodeReservation(const std::vector<SerializedData::Reservation>& res);
bool ReserveSpace();
// Atomically reserves space for the two given deserializers. Guarantees
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 3ed360e14a..bc5805fb52 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -69,8 +69,7 @@ template <class AllocatorT>
void Deserializer<AllocatorT>::VisitRootPointers(Root root,
const char* description,
Object** start, Object** end) {
- // Builtins and bytecode handlers are deserialized in a separate pass by the
- // BuiltinDeserializer.
+ // Builtins are deserialized in a separate pass by the BuiltinDeserializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
// The space must be new space. Any other space would cause ReadChunk to try
@@ -179,18 +178,11 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
}
if (obj->IsAllocationSite()) {
- // Allocation sites are present in the snapshot, and must be linked into
- // a list at deserialization time.
- AllocationSite* site = AllocationSite::cast(obj);
- // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
- // as a (weak) root. If this root is relocated correctly, this becomes
- // unnecessary.
- if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
- site->set_weak_next(ReadOnlyRoots(isolate_).undefined_value());
- } else {
- site->set_weak_next(isolate_->heap()->allocation_sites_list());
- }
- isolate_->heap()->set_allocation_sites_list(site);
+ // We should link new allocation sites, but we can't do this immediately
+ // because |AllocationSite::HasWeakNext()| internally accesses
+ // |Heap::roots_|, which may not have been initialized yet. So defer this to
+ // |ObjectDeserializer::CommitPostProcessedObjects()|.
+ new_allocation_sites_.push_back(AllocationSite::cast(obj));
} else if (obj->IsCode()) {
// We flush all code pages after deserializing the startup snapshot. In that
// case, we only need to remember code objects in the large object space.
@@ -209,7 +201,7 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
} else if (obj->IsExternalString()) {
if (obj->map() == ReadOnlyRoots(isolate_).native_source_string_map()) {
ExternalOneByteString* string = ExternalOneByteString::cast(obj);
- DCHECK(string->is_short());
+ DCHECK(string->is_uncached());
string->SetResource(
isolate_, NativesExternalStringResource::DecodeForDeserialization(
string->resource()));
@@ -225,8 +217,8 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
isolate_->heap()->RegisterExternalString(String::cast(obj));
} else if (obj->IsJSTypedArray()) {
JSTypedArray* typed_array = JSTypedArray::cast(obj);
- CHECK(typed_array->byte_offset()->IsSmi());
- int32_t byte_offset = NumberToInt32(typed_array->byte_offset());
+ CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
+ int32_t byte_offset = static_cast<int32_t>(typed_array->byte_offset());
if (byte_offset > 0) {
FixedTypedArrayBase* elements =
FixedTypedArrayBase::cast(typed_array->elements());
@@ -370,11 +362,7 @@ Object* Deserializer<AllocatorT>::ReadDataSingle() {
Address current_object = kNullAddress;
CHECK(ReadData(start, end, source_space, current_object));
- HeapObject* heap_object;
- bool success = o->ToStrongHeapObject(&heap_object);
- DCHECK(success);
- USE(success);
- return heap_object;
+ return o->GetHeapObjectAssumeStrong();
}
static void NoExternalReferencesCallback() {
@@ -684,7 +672,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
SIXTEEN_CASES(kRootArrayConstants)
SIXTEEN_CASES(kRootArrayConstants + 16) {
int id = data & kRootArrayConstantsMask;
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
+ RootIndex root_index = static_cast<RootIndex>(id);
MaybeObject* object =
MaybeObject::FromObject(isolate->heap()->root(root_index));
DCHECK(!Heap::InNewSpace(object));
@@ -818,7 +806,7 @@ MaybeObject** Deserializer<AllocatorT>::ReadDataCase(
new_object = GetBackReferencedObject(data & kSpaceMask);
} else if (where == kRootArray) {
int id = source_.GetInt();
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
+ RootIndex root_index = static_cast<RootIndex>(id);
new_object = isolate->heap()->root(root_index);
emit_write_barrier = Heap::InNewSpace(new_object);
hot_objects_.Add(HeapObject::cast(new_object));
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index f13bc03fd4..8340a93538 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -15,6 +15,7 @@
namespace v8 {
namespace internal {
+class AllocationSite;
class HeapObject;
class Object;
@@ -71,6 +72,9 @@ class Deserializer : public SerializerDeserializer {
Isolate* isolate() const { return isolate_; }
SnapshotByteSource* source() { return &source_; }
+ const std::vector<AllocationSite*>& new_allocation_sites() const {
+ return new_allocation_sites_;
+ }
const std::vector<Code*>& new_code_objects() const {
return new_code_objects_;
}
@@ -148,6 +152,7 @@ class Deserializer : public SerializerDeserializer {
ExternalReferenceTable* external_reference_table_;
+ std::vector<AllocationSite*> new_allocation_sites_;
std::vector<Code*> new_code_objects_;
std::vector<AccessorInfo*> accessor_infos_;
std::vector<CallHandlerInfo*> call_handler_infos_;
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index a2303613d6..09db077694 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -165,10 +165,10 @@ class SnapshotWriter {
// present in the binary.
// For now, the straight-forward solution seems to be to just emit a pure
// .byte stream on OSX.
- WriteBinaryContentsAsByteDirective(fp, blob->data(), blob->size());
+ WriteBinaryContentsAsInlineAssembly(fp, blob->data(), blob->size());
#else
- WriteBinaryContentsAsByteDirective(fp, blob->data(),
- i::EmbeddedData::RawDataOffset());
+ WriteBinaryContentsAsInlineAssembly(fp, blob->data(),
+ i::EmbeddedData::RawDataOffset());
WriteBuiltins(fp, blob, embedded_variant);
#endif
fprintf(fp, "extern \"C\" const uint8_t v8_%s_embedded_blob_[];\n",
@@ -197,7 +197,7 @@ class SnapshotWriter {
embedded_variant, i::Builtins::name(i));
}
- WriteBinaryContentsAsByteDirective(
+ WriteBinaryContentsAsInlineAssembly(
fp,
reinterpret_cast<const uint8_t*>(blob->InstructionStartOfBuiltin(i)),
blob->PaddedInstructionSizeOfBuiltin(i));
@@ -205,34 +205,77 @@ class SnapshotWriter {
fprintf(fp, "\n");
}
- static void WriteBinaryContentsAsByteDirective(FILE* fp, const uint8_t* data,
- uint32_t size) {
- static const int kTextWidth = 80;
- int current_line_length = 0;
- int printed_chars;
+ static int WriteOcta(FILE* fp, int current_line_length, const uint8_t* data) {
+ const uint64_t* quad_ptr1 = reinterpret_cast<const uint64_t*>(data);
+ const uint64_t* quad_ptr2 = reinterpret_cast<const uint64_t*>(data + 8);
- fprintf(fp, "__asm__(\n");
- for (uint32_t i = 0; i < size; i++) {
- if (current_line_length == 0) {
- printed_chars = fprintf(fp, "%s", " \".byte ");
- DCHECK_LT(0, printed_chars);
- current_line_length += printed_chars;
- } else {
- printed_chars = fprintf(fp, ",");
- DCHECK_EQ(1, printed_chars);
- current_line_length += printed_chars;
- }
+#ifdef V8_TARGET_BIG_ENDIAN
+ uint64_t part1 = *quad_ptr1;
+ uint64_t part2 = *quad_ptr2;
+#else
+ uint64_t part1 = *quad_ptr2;
+ uint64_t part2 = *quad_ptr1;
+#endif // V8_TARGET_BIG_ENDIAN
+
+ if (part1 != 0) {
+ current_line_length +=
+ fprintf(fp, "0x%" PRIx64 "%016" PRIx64, part1, part2);
+ } else {
+ current_line_length += fprintf(fp, "0x%" PRIx64, part2);
+ }
+ return current_line_length;
+ }
- printed_chars = fprintf(fp, "0x%02x", data[i]);
+ static int WriteDirectiveOrSeparator(FILE* fp, int current_line_length,
+ const char* directive) {
+ int printed_chars;
+ if (current_line_length == 0) {
+ printed_chars = fprintf(fp, " \"%s ", directive);
DCHECK_LT(0, printed_chars);
- current_line_length += printed_chars;
+ } else {
+ printed_chars = fprintf(fp, ",");
+ DCHECK_EQ(1, printed_chars);
+ }
+ return current_line_length + printed_chars;
+ }
- if (current_line_length + strlen(",0xFF\\n\"") > kTextWidth) {
- fprintf(fp, "\\n\"\n");
- current_line_length = 0;
- }
+ static int WriteLineEndIfNeeded(FILE* fp, int current_line_length,
+ int write_size) {
+ static const int kTextWidth = 80;
+ // Check if adding ',0xFF...FF\n"' would force a line wrap. This uses the
+ // maximum width of the value (write_size * 2 hex digits) rather than the
+ // actual number of characters written, so it is more conservative than
+ // strictly needed.
+ if (current_line_length + strlen(",0x\\n\"") + write_size * 2 >
+ kTextWidth) {
+ fprintf(fp, "\\n\"\n");
+ return 0;
+ } else {
+ return current_line_length;
}
+ }
+
+ static void WriteBinaryContentsAsInlineAssembly(FILE* fp, const uint8_t* data,
+ uint32_t size) {
+ int current_line_length = 0;
+ fprintf(fp, "__asm__(\n");
+ uint32_t i = 0;
+ const uint32_t size_of_octa = 16;
+ for (; i <= size - size_of_octa; i += size_of_octa) {
+ current_line_length =
+ WriteDirectiveOrSeparator(fp, current_line_length, ".octa");
+ current_line_length = WriteOcta(fp, current_line_length, data + i);
+ current_line_length =
+ WriteLineEndIfNeeded(fp, current_line_length, size_of_octa);
+ }
+ if (current_line_length != 0) fprintf(fp, "\\n\"\n");
+ current_line_length = 0;
+ for (; i < size; i++) {
+ current_line_length =
+ WriteDirectiveOrSeparator(fp, current_line_length, ".byte");
+ current_line_length += fprintf(fp, "0x%x", data[i]);
+ current_line_length = WriteLineEndIfNeeded(fp, current_line_length, 1);
+ }
if (current_line_length != 0) fprintf(fp, "\\n\"\n");
fprintf(fp, ");\n");
}
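Emitting 16 bytes per .octa directive instead of one byte per .byte directive makes the generated embedded-blob source far smaller and faster to assemble. On a little-endian host, the generated file would look roughly like this (illustrative bytes, not real blob contents):

    __asm__(
     ".octa 0xf0e0d0c0b0a09080706050403020100\n"
     ".byte 0x10,0x11,0x12\n"
    );

WriteOcta swaps the two 64-bit halves before printing so that the 128-bit .octa constant lays the bytes back down in their original memory order on a little-endian target; the .byte loop then mops up any tail shorter than 16 bytes.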
@@ -307,7 +350,7 @@ bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
}
v8::StartupData CreateSnapshotDataBlob(v8::SnapshotCreator* snapshot_creator,
- const char* script_source = NULL) {
+ const char* script_source = nullptr) {
// Create a new isolate and a new context from scratch, optionally run
// a script to embed, and serialize to create a snapshot blob.
v8::StartupData result = {nullptr, 0};
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index aabc5bf1e0..8935c0ef89 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -90,6 +90,21 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
MaybeObjectHandle::Weak(script));
heap->SetRootScriptList(*list);
}
+
+ // Allocation sites are present in the snapshot, and must be linked into
+ // a list at deserialization time.
+ for (AllocationSite* site : new_allocation_sites()) {
+ if (!site->HasWeakNext()) continue;
+ // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
+ // as a (weak) root. If this root is relocated correctly, this becomes
+ // unnecessary.
+ if (heap->allocation_sites_list() == Smi::kZero) {
+ site->set_weak_next(ReadOnlyRoots(heap).undefined_value());
+ } else {
+ site->set_weak_next(heap->allocation_sites_list());
+ }
+ heap->set_allocation_sites_list(site);
+ }
}
} // namespace internal
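CommitPostProcessedObjects() restores the heap's allocation-sites list by prepending each deserialized site, the usual discipline for an intrusive singly linked list. In miniature, with illustrative types rather than V8's classes:

    // Toy model of the linking above: each new node is pushed onto the
    // front of an intrusive singly linked list.
    struct Site {
      Site* weak_next = nullptr;
    };

    struct Heap {
      Site* allocation_sites_list = nullptr;  // like heap->allocation_sites_list()
      void Prepend(Site* site) {
        site->weak_next = allocation_sites_list;  // old head becomes the tail
        allocation_sites_list = site;             // new site becomes the head
      }
    };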
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index d127aa5f0a..1f3cbc5521 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -6,6 +6,7 @@
#include "src/snapshot/startup-serializer.h"
#include "src/api-inl.h"
+#include "src/math-random.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -40,8 +41,7 @@ void PartialSerializer::Serialize(Context** o, bool include_global_proxy) {
ReadOnlyRoots(isolate()).undefined_value());
DCHECK(!context_->global_object()->IsUndefined());
// Reset math random cache to get fresh random numbers.
- context_->set_math_random_index(Smi::kZero);
- context_->set_math_random_cache(ReadOnlyRoots(isolate()).undefined_value());
+ MathRandom::ResetContext(context_);
VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
reinterpret_cast<Object**>(o));
@@ -59,8 +59,8 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map()->Lookup(obj);
- if (root_index != RootIndexMap::kInvalidRootIndex) {
+ RootIndex root_index;
+ if (root_index_map()->Lookup(obj, &root_index)) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
}
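This call site reflects a broader API change: RootIndexMap::Lookup now returns a bool and writes the index through an out-parameter instead of returning an int with a kInvalidRootIndex sentinel, which suits the new RootIndex enum class since an enum class has no spare value to act as a sentinel. The shape of the new API in isolation (a plain map standing in for V8's RootIndexMap):

    #include <cstdint>
    #include <unordered_map>

    enum class RootIndex : uint16_t { kFreeSpaceMap, kArgumentsMarker };

    class RootIndexMap {
     public:
      void Insert(const void* obj, RootIndex index) { map_[obj] = index; }

      // bool + out-parameter: no sentinel enumerator required.
      bool Lookup(const void* obj, RootIndex* out) const {
        auto it = map_.find(obj);
        if (it == map_.end()) return false;
        *out = it->second;
        return true;
      }

     private:
      std::unordered_map<const void*, RootIndex> map_;
    };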
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 34a6b64676..8f547243d6 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/external-reference-table.h"
#include "src/globals.h"
+#include "src/msan.h"
#include "src/snapshot/references.h"
#include "src/v8memory.h"
#include "src/visitors.h"
@@ -39,7 +40,7 @@ class ExternalReferenceEncoder {
};
explicit ExternalReferenceEncoder(Isolate* isolate);
- ~ExternalReferenceEncoder();
+ ~ExternalReferenceEncoder(); // NOLINT (modernize-use-equals-default)
Value Encode(Address key);
Maybe<Value> TryEncode(Address key);
@@ -350,6 +351,45 @@ class SerializedData {
DISALLOW_COPY_AND_ASSIGN(SerializedData);
};
+class Checksum {
+ public:
+ explicit Checksum(Vector<const byte> payload) {
+#ifdef MEMORY_SANITIZER
+ // The serialized payload can contain uninitialized padding bytes (e.g.
+ // trailing padding of objects such as strings). Mark the whole region as
+ // initialized so MSan does not flag the checksum reads below.
+ MSAN_MEMORY_IS_INITIALIZED(payload.start(), payload.length());
+#endif // MEMORY_SANITIZER
+ // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
+ uintptr_t a = 1;
+ uintptr_t b = 0;
+ const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
+ DCHECK(IsAligned(payload.length(), kIntptrSize));
+ const uintptr_t* end = cur + payload.length() / kIntptrSize;
+ while (cur < end) {
+ // Unsigned overflow expected and intended.
+ a += *cur++;
+ b += a;
+ }
+#if V8_HOST_ARCH_64_BIT
+ a ^= a >> 32;
+ b ^= b >> 32;
+#endif // V8_HOST_ARCH_64_BIT
+ a_ = static_cast<uint32_t>(a);
+ b_ = static_cast<uint32_t>(b);
+ }
+
+ bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
+
+ uint32_t a() const { return a_; }
+ uint32_t b() const { return b_; }
+
+ private:
+ uint32_t a_;
+ uint32_t b_;
+
+ DISALLOW_COPY_AND_ASSIGN(Checksum);
+};
+
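The class above is a word-wise variant of Fletcher's checksum: a is a running sum of the input words and b is a running sum of a, so b is sensitive to word order as well as content. The same scheme in a self-contained sketch (simplified, not the exact V8 code):

    #include <cstddef>
    #include <cstdint>

    // Word-wise Fletcher checksum, folding the 64-bit sums to 32 bits.
    void Fletcher(const uint64_t* words, size_t count,
                  uint32_t* a_out, uint32_t* b_out) {
      uint64_t a = 1, b = 0;
      for (size_t i = 0; i < count; i++) {
        a += words[i];  // unsigned overflow wraps, as intended
        b += a;         // the second sum makes the result order-sensitive
      }
      *a_out = static_cast<uint32_t>(a ^ (a >> 32));
      *b_out = static_cast<uint32_t>(b ^ (b >> 32));
    }

Swapping two input words leaves a unchanged but alters b, which is why both halves are stored in the header and checked together.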
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 56d87b8916..a8b911a191 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -111,8 +111,7 @@ template <class AllocatorT>
void Serializer<AllocatorT>::VisitRootPointers(Root root,
const char* description,
Object** start, Object** end) {
- // Builtins and bytecode handlers are serialized in a separate pass by the
- // BuiltinSerializer.
+ // Builtins are serialized in a separate pass by the BuiltinSerializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
for (Object** current = start; current < end; current++) {
@@ -233,16 +232,15 @@ bool Serializer<AllocatorT>::SerializeBuiltinReference(
template <class AllocatorT>
bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) const {
if (!obj->IsCode()) return false;
- Code* code = Code::cast(obj);
- if (isolate()->heap()->IsDeserializeLazyHandler(code)) return false;
- return (code->kind() == Code::BYTECODE_HANDLER);
+ return (Code::cast(obj)->kind() == Code::BYTECODE_HANDLER);
}
template <class AllocatorT>
void Serializer<AllocatorT>::PutRoot(
- int root_index, HeapObject* object,
+ RootIndex root, HeapObject* object,
SerializerDeserializer::HowToCode how_to_code,
SerializerDeserializer::WhereToPoint where_to_point, int skip) {
+ int root_index = static_cast<int>(root);
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
object->ShortPrint();
@@ -251,7 +249,7 @@ void Serializer<AllocatorT>::PutRoot(
// Assert that the first 32 root array items are a conscious choice. They are
// chosen so that the most common ones can be encoded more efficiently.
- STATIC_ASSERT(Heap::kArgumentsMarkerRootIndex ==
+ STATIC_ASSERT(static_cast<int>(RootIndex::kArgumentsMarker) ==
kNumberOfRootArrayConstants - 1);
if (how_to_code == kPlain && where_to_point == kStartOfObject &&
@@ -330,14 +328,14 @@ void Serializer<AllocatorT>::PutNextChunk(int space) {
}
template <class AllocatorT>
-void Serializer<AllocatorT>::Pad() {
+void Serializer<AllocatorT>::Pad(int padding_offset) {
// The non-branching GetInt will read up to 3 bytes too far, so we need
// to pad the snapshot to make sure we don't read over the end.
for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
sink_.Put(kNop, "Padding");
}
// Pad up to pointer size for checksum.
- while (!IsAligned(sink_.Position(), kPointerAlignment)) {
+ while (!IsAligned(sink_.Position() + padding_offset, kPointerAlignment)) {
sink_.Put(kNop, "Padding");
}
}
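With the new parameter, alignment is computed relative to where the sink's contents will eventually sit in the enclosing blob rather than relative to position zero. The alignment loop in standalone form (a sketch; kNop and the alignment constant are illustrative stand-ins):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr uint8_t kNop = 0;
    constexpr size_t kPointerAlignment = sizeof(void*);

    // Pad so that (position + padding_offset) is pointer-aligned, mirroring
    // the loop in Serializer::Pad(int padding_offset) above.
    void PadForPlacement(std::vector<uint8_t>* sink, size_t padding_offset) {
      while ((sink->size() + padding_offset) % kPointerAlignment != 0) {
        sink->push_back(kNop);
      }
    }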
@@ -436,10 +434,10 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSTypedArray() {
if (!typed_array->is_on_heap()) {
// Explicitly serialize the backing store now.
JSArrayBuffer* buffer = JSArrayBuffer::cast(typed_array->buffer());
- CHECK(buffer->byte_length()->IsSmi());
- CHECK(typed_array->byte_offset()->IsSmi());
- int32_t byte_length = NumberToInt32(buffer->byte_length());
- int32_t byte_offset = NumberToInt32(typed_array->byte_offset());
+ CHECK_LE(buffer->byte_length(), Smi::kMaxValue);
+ CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
+ int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
+ int32_t byte_offset = static_cast<int32_t>(typed_array->byte_offset());
// We need to calculate the backing store from the external pointer
// because the ArrayBuffer may already have been serialized.
@@ -469,9 +467,8 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
JSArrayBuffer* buffer = JSArrayBuffer::cast(object_);
void* backing_store = buffer->backing_store();
// We cannot store byte_length larger than Smi range in the snapshot.
- // Attempt to make sure that NumberToInt32 produces something sensible.
- CHECK(buffer->byte_length()->IsSmi());
- int32_t byte_length = NumberToInt32(buffer->byte_length());
+ CHECK_LE(buffer->byte_length(), Smi::kMaxValue);
+ int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
// The embedder-allocated backing store only exists for the off-heap case.
if (backing_store != nullptr) {
@@ -505,7 +502,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
}
} else {
ExternalOneByteString* string = ExternalOneByteString::cast(object_);
- DCHECK(string->is_short());
+ DCHECK(string->is_uncached());
const NativesExternalStringResource* resource =
reinterpret_cast<const NativesExternalStringResource*>(
string->resource());
@@ -581,7 +578,8 @@ class UnlinkWeakNextScope {
public:
explicit UnlinkWeakNextScope(Heap* heap, HeapObject* object)
: object_(nullptr) {
- if (object->IsAllocationSite()) {
+ if (object->IsAllocationSite() &&
+ AllocationSite::cast(object)->HasWeakNext()) {
object_ = object;
next_ = AllocationSite::cast(object)->weak_next();
AllocationSite::cast(object)->set_weak_next(
@@ -729,8 +727,7 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
HeapObject* host, MaybeObject** start, MaybeObject** end) {
MaybeObject** current = start;
while (current < end) {
- while (current < end &&
- ((*current)->IsSmi() || (*current)->IsClearedWeakHeapObject())) {
+ while (current < end && ((*current)->IsSmi() || (*current)->IsCleared())) {
current++;
}
if (current < end) {
@@ -738,12 +735,14 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
}
HeapObject* current_contents;
HeapObjectReferenceType reference_type;
- while (current < end && (*current)->ToStrongOrWeakHeapObject(
- &current_contents, &reference_type)) {
- int root_index = serializer_->root_index_map()->Lookup(current_contents);
+ while (current < end &&
+ (*current)->GetHeapObject(&current_contents, &reference_type)) {
+ RootIndex root_index;
// Repeats are not subject to the write barrier so we can only use
// immortal immovable root members. They are never in new space.
- if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
+ if (current != start &&
+ serializer_->root_index_map()->Lookup(current_contents,
+ &root_index) &&
Heap::RootIsImmortalImmovable(root_index) &&
*current == current[-1]) {
DCHECK_EQ(reference_type, HeapObjectReferenceType::STRONG);
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 9427cb6c78..5a08e4299e 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -172,8 +172,8 @@ class Serializer : public SerializerDeserializer {
Object** end) override;
void SerializeRootObject(Object* object);
- void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
- int skip);
+ void PutRoot(RootIndex root_index, HeapObject* object, HowToCode how,
+ WhereToPoint where, int skip);
void PutSmi(Smi* smi);
void PutBackReference(HeapObject* object, SerializerReference reference);
void PutAttachedReference(SerializerReference reference,
@@ -210,7 +210,8 @@ class Serializer : public SerializerDeserializer {
}
// GetInt reads 4 bytes at once, requiring padding at the end.
- void Pad();
+ // Use padding_offset to account for data that will be placed after the
+ // padding: the sink is padded until position + padding_offset is
+ // pointer-aligned.
+ void Pad(int padding_offset = 0);
// We may not need the code address map for logging for every instance
// of the serializer. Initialize it on demand.
@@ -284,6 +285,7 @@ class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
serializer_->PushStack(obj);
#endif // DEBUG
}
+ // NOLINTNEXTLINE (modernize-use-equals-default)
~ObjectSerializer() override {
#ifdef DEBUG
serializer_->PopStack();
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 31f378792b..95baef0cc0 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -44,6 +44,7 @@ bool Snapshot::Initialize(Isolate* isolate) {
const v8::StartupData* blob = isolate->snapshot_blob();
CheckVersion(blob);
+ CHECK(VerifyChecksum(blob));
Vector<const byte> startup_data = ExtractStartupData(blob);
SnapshotData startup_snapshot_data(startup_data);
Vector<const byte> builtin_data = ExtractBuiltinData(blob);
@@ -136,13 +137,17 @@ void Snapshot::EnsureAllBuiltinsAreDeserialized(Isolate* isolate) {
DCHECK_NE(Builtins::kDeserializeLazy, i);
Code* code = builtins->builtin(i);
- if (code->builtin_index() == Builtins::kDeserializeLazy) {
+ if (code->builtin_index() == Builtins::LazyDeserializerForBuiltin(i)) {
code = Snapshot::DeserializeBuiltin(isolate, i);
}
DCHECK_EQ(i, code->builtin_index());
DCHECK_EQ(code, builtins->builtin(i));
}
+
+ // Re-initialize the dispatch table now that the bytecode handlers have
+ // been deserialized.
+ isolate->interpreter()->InitializeDispatchTable();
}
// static
@@ -168,42 +173,6 @@ Code* Snapshot::EnsureBuiltinIsDeserialized(Isolate* isolate,
return code;
}
-// static
-Code* Snapshot::DeserializeHandler(Isolate* isolate,
- interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale) {
- if (FLAG_trace_lazy_deserialization) {
- PrintF("Lazy-deserializing handler %s\n",
- interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str());
- }
-
- base::ElapsedTimer timer;
- if (FLAG_profile_deserialization) timer.Start();
-
- const v8::StartupData* blob = isolate->snapshot_blob();
- Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
- BuiltinSnapshotData builtin_snapshot_data(builtin_data);
-
- CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
- BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
- Code* code = builtin_deserializer.DeserializeHandler(bytecode, operand_scale);
-
- if (FLAG_profile_deserialization) {
- double ms = timer.Elapsed().InMillisecondsF();
- int bytes = code->Size();
- PrintF("[Deserializing handler %s (%d bytes) took %0.3f ms]\n",
- interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str(),
- bytes, ms);
- }
-
- if (isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling()) {
- isolate->logger()->LogBytecodeHandler(bytecode, operand_scale, code);
- }
-
- return code;
-}
-
void ProfileDeserialization(
const SnapshotData* startup_snapshot, const SnapshotData* builtin_snapshot,
const std::vector<SnapshotData*>& context_snapshots) {
@@ -234,15 +203,22 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
uint32_t num_contexts = static_cast<uint32_t>(context_snapshots.size());
uint32_t startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
uint32_t total_length = startup_snapshot_offset;
+ DCHECK(IsAligned(total_length, kPointerAlignment));
total_length += static_cast<uint32_t>(startup_snapshot->RawData().length());
+ DCHECK(IsAligned(total_length, kPointerAlignment));
total_length += static_cast<uint32_t>(builtin_snapshot->RawData().length());
+ DCHECK(IsAligned(total_length, kPointerAlignment));
for (const auto context_snapshot : context_snapshots) {
total_length += static_cast<uint32_t>(context_snapshot->RawData().length());
+ DCHECK(IsAligned(total_length, kPointerAlignment));
}
ProfileDeserialization(startup_snapshot, builtin_snapshot, context_snapshots);
char* data = new char[total_length];
+ // Zero out pre-payload data. Part of that is only used for padding.
+ memset(data, 0, StartupSnapshotOffset(num_contexts));
+
SetHeaderValue(data, kNumberOfContextsOffset, num_contexts);
SetHeaderValue(data, kRehashabilityOffset, can_be_rehashed ? 1 : 0);
@@ -292,8 +268,13 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
payload_offset += payload_length;
}
- v8::StartupData result = {data, static_cast<int>(total_length)};
DCHECK_EQ(total_length, payload_offset);
+ v8::StartupData result = {data, static_cast<int>(total_length)};
+
+ Checksum checksum(ChecksummedContent(&result));
+ SetHeaderValue(data, kChecksumPartAOffset, checksum.a());
+ SetHeaderValue(data, kChecksumPartBOffset, checksum.b());
+
return result;
}
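Note the ordering: the blob is fully assembled first, and the two checksum words are stamped into the already-zeroed header slots as the very last step. Because the checksummed region begins after those slots, stamping cannot invalidate the checksum. A compact restatement of that flow (toy checksum and assumed offsets, not V8's):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    constexpr size_t kChecksumSlotOffset = 8;  // assumed header slot position
    constexpr size_t kChecksumStart = 16;      // assumed start of covered region

    uint32_t Sum32(const uint8_t* p, size_t n) {  // toy stand-in checksum
      uint32_t s = 0;
      for (size_t i = 0; i < n; i++) s += p[i];
      return s;
    }

    void StampChecksum(std::vector<uint8_t>* blob) {
      uint32_t c = Sum32(blob->data() + kChecksumStart,
                         blob->size() - kChecksumStart);
      std::memcpy(blob->data() + kChecksumSlotOffset, &c, sizeof(c));
    }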
@@ -308,9 +289,11 @@ bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code* code) {
case Builtins::TFS:
break;
- // Bytecode handlers will only ever be used by the interpreter and so there
- // will never be a need to use trampolines with them.
+ // Bytecode handlers (and their lazy deserializers) will only ever be used
+ // by the interpreter and so there will never be a need to use trampolines
+ // with them.
case Builtins::BCH:
+ case Builtins::DLH:
case Builtins::API:
case Builtins::ASM:
// TODO(jgruber): Extend checks to remaining kinds.
@@ -511,6 +494,19 @@ uint32_t Snapshot::ExtractNumContexts(const v8::StartupData* data) {
return num_contexts;
}
+bool Snapshot::VerifyChecksum(const v8::StartupData* data) {
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
+ uint32_t expected_a = GetHeaderValue(data, kChecksumPartAOffset);
+ uint32_t expected_b = GetHeaderValue(data, kChecksumPartBOffset);
+ Checksum checksum(ChecksummedContent(data));
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Verifying snapshot checksum took %0.3f ms]\n", ms);
+ }
+ return checksum.Check(expected_a, expected_b);
+}
+
void EmbeddedData::PrintStatistics() const {
DCHECK(FLAG_serialization_statistics);
@@ -644,12 +640,18 @@ SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
// Calculate sizes.
uint32_t reservation_size =
static_cast<uint32_t>(reservations.size()) * kUInt32Size;
+ uint32_t payload_offset = kHeaderSize + reservation_size;
+ uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
uint32_t size =
- kHeaderSize + reservation_size + static_cast<uint32_t>(payload->size());
+ padded_payload_offset + static_cast<uint32_t>(payload->size());
+ DCHECK(IsAligned(size, kPointerAlignment));
// Allocate backing store and create result data.
AllocateData(size);
+ // Zero out pre-payload data. Part of that is only used for padding.
+ memset(data_, 0, padded_payload_offset);
+
// Set header values.
SetMagicNumber(serializer->isolate());
SetHeaderValue(kNumReservationsOffset, static_cast<int>(reservations.size()));
@@ -660,7 +662,7 @@ SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
reservation_size);
// Copy serialized data.
- CopyBytes(data_ + kHeaderSize + reservation_size, payload->data(),
+ CopyBytes(data_ + padded_payload_offset, payload->data(),
static_cast<size_t>(payload->size()));
}
@@ -679,7 +681,9 @@ std::vector<SerializedData::Reservation> SnapshotData::Reservations() const {
Vector<const byte> SnapshotData::Payload() const {
uint32_t reservations_size =
GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
- const byte* payload = data_ + kHeaderSize + reservations_size;
+ uint32_t padded_payload_offset =
+ POINTER_SIZE_ALIGN(kHeaderSize + reservations_size);
+ const byte* payload = data_ + padded_payload_offset;
uint32_t length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + length);
return Vector<const byte>(payload, length);
@@ -689,30 +693,22 @@ BuiltinSnapshotData::BuiltinSnapshotData(const BuiltinSerializer* serializer)
: SnapshotData(serializer) {}
Vector<const byte> BuiltinSnapshotData::Payload() const {
- uint32_t reservations_size =
- GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
- const byte* payload = data_ + kHeaderSize + reservations_size;
- const int builtin_offsets_size =
- BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
- uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
- DCHECK_EQ(data_ + size_, payload + payload_length);
- DCHECK_GT(payload_length, builtin_offsets_size);
- return Vector<const byte>(payload, payload_length - builtin_offsets_size);
+ Vector<const byte> payload = SnapshotData::Payload();
+ const int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
+ DCHECK_EQ(data_ + size_, payload.start() + payload.size());
+ DCHECK_GT(payload.size(), builtin_offsets_size);
+ return Vector<const byte>(payload.start(),
+ payload.size() - builtin_offsets_size);
}
Vector<const uint32_t> BuiltinSnapshotData::BuiltinOffsets() const {
- uint32_t reservations_size =
- GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
- const byte* payload = data_ + kHeaderSize + reservations_size;
- const int builtin_offsets_size =
- BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
- uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
- DCHECK_EQ(data_ + size_, payload + payload_length);
- DCHECK_GT(payload_length, builtin_offsets_size);
+ Vector<const byte> payload = SnapshotData::Payload();
+ const int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
+ DCHECK_EQ(data_ + size_, payload.start() + payload.size());
+ DCHECK_GT(payload.size(), builtin_offsets_size);
const uint32_t* data = reinterpret_cast<const uint32_t*>(
- payload + payload_length - builtin_offsets_size);
- return Vector<const uint32_t>(data,
- BuiltinSnapshotUtils::kNumberOfCodeObjects);
+ payload.start() + payload.size() - builtin_offsets_size);
+ return Vector<const uint32_t>(data, Builtins::builtin_count);
}
} // namespace internal
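Both accessors now derive from SnapshotData::Payload(): the builtin payload is the common payload minus a table of Builtins::builtin_count uint32 offsets stored at its very end. Splitting such a trailing table off a byte buffer, in miniature (kBuiltinCount is an illustrative stand-in):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    constexpr size_t kBuiltinCount = 4;  // stands in for Builtins::builtin_count

    // The last kBuiltinCount * 4 bytes of the payload are the offset table;
    // everything before them is the serialized code itself.
    void SplitPayload(const std::vector<uint8_t>& payload,
                      std::vector<uint8_t>* code,
                      std::vector<uint32_t>* offsets) {
      const size_t table_bytes = kBuiltinCount * sizeof(uint32_t);
      code->assign(payload.begin(), payload.end() - table_bytes);
      offsets->resize(kBuiltinCount);
      std::memcpy(offsets->data(),
                  payload.data() + payload.size() - table_bytes, table_bytes);
    }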
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 584f86a760..8cf86526a3 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -27,7 +27,7 @@ class SnapshotByteSource final {
explicit SnapshotByteSource(Vector<const byte> payload)
: data_(payload.start()), length_(payload.length()), position_(0) {}
- ~SnapshotByteSource() {}
+ ~SnapshotByteSource() = default;
bool HasMore() { return position_ < length_; }
@@ -82,10 +82,10 @@ class SnapshotByteSource final {
*/
class SnapshotByteSink {
public:
- SnapshotByteSink() {}
+ SnapshotByteSink() = default;
explicit SnapshotByteSink(int initial_size) : data_(initial_size) {}
- ~SnapshotByteSink() {}
+ ~SnapshotByteSink() = default;
void Put(byte b, const char* description) { data_.push_back(b); }
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index b973ebb356..9edc12c1ce 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -175,12 +175,6 @@ class Snapshot : public AllStatic {
static Code* EnsureBuiltinIsDeserialized(Isolate* isolate,
Handle<SharedFunctionInfo> shared);
- // Deserializes a single given handler code object. Intended to be called at
- // runtime after the isolate has been fully initialized.
- static Code* DeserializeHandler(Isolate* isolate,
- interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale);
-
// ---------------- Helper methods ----------------
static bool HasContextSnapshot(Isolate* isolate, size_t index);
@@ -189,6 +183,8 @@ class Snapshot : public AllStatic {
// To be implemented by the snapshot source.
static const v8::StartupData* DefaultSnapshotBlob();
+ static bool VerifyChecksum(const v8::StartupData* data);
+
// ---------------- Serialization ----------------
static v8::StartupData CreateSnapshotBlob(
@@ -224,10 +220,12 @@ class Snapshot : public AllStatic {
// Snapshot blob layout:
// [0] number of contexts N
// [1] rehashability
- // [2] (128 bytes) version string
- // [3] offset to builtins
- // [4] offset to context 0
- // [5] offset to context 1
+ // [2] checksum part A
+ // [3] checksum part B
+ // [4] (128 bytes) version string
+ // [5] offset to builtins
+ // [6] offset to context 0
+ // [7] offset to context 1
// ...
// ... offset to context N - 1
// ... startup snapshot data
@@ -239,16 +237,28 @@ class Snapshot : public AllStatic {
// TODO(yangguo): generalize rehashing, and remove this flag.
static const uint32_t kRehashabilityOffset =
kNumberOfContextsOffset + kUInt32Size;
- static const uint32_t kVersionStringOffset =
+ static const uint32_t kChecksumPartAOffset =
kRehashabilityOffset + kUInt32Size;
+ static const uint32_t kChecksumPartBOffset =
+ kChecksumPartAOffset + kUInt32Size;
+ static const uint32_t kVersionStringOffset =
+ kChecksumPartBOffset + kUInt32Size;
static const uint32_t kVersionStringLength = 64;
static const uint32_t kBuiltinOffsetOffset =
kVersionStringOffset + kVersionStringLength;
static const uint32_t kFirstContextOffsetOffset =
kBuiltinOffsetOffset + kUInt32Size;
+ static Vector<const byte> ChecksummedContent(const v8::StartupData* data) {
+ const uint32_t kChecksumStart = kVersionStringOffset;
+ return Vector<const byte>(
+ reinterpret_cast<const byte*>(data->data + kChecksumStart),
+ data->raw_size - kChecksumStart);
+ }
+
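ChecksummedContent() starts at the version string on purpose: the first four header words (context count, rehashability, and the two checksum slots themselves) are excluded so that writing the checksum cannot change what it covers. Checking the offsets implied by the definitions above, assuming the header starts at offset 0 and kUInt32Size == 4 (both defined outside this hunk):

    constexpr uint32_t kUInt32Size = 4;
    constexpr uint32_t kNumberOfContextsOffset = 0;  // assumed
    constexpr uint32_t kRehashabilityOffset =
        kNumberOfContextsOffset + kUInt32Size;               // 4
    constexpr uint32_t kChecksumPartAOffset =
        kRehashabilityOffset + kUInt32Size;                  // 8
    constexpr uint32_t kChecksumPartBOffset =
        kChecksumPartAOffset + kUInt32Size;                  // 12
    constexpr uint32_t kVersionStringOffset =
        kChecksumPartBOffset + kUInt32Size;                  // 16
    static_assert(kVersionStringOffset == 16,
                  "the checksum covers everything from byte 16 on");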
static uint32_t StartupSnapshotOffset(int num_contexts) {
- return kFirstContextOffsetOffset + num_contexts * kInt32Size;
+ return POINTER_SIZE_ALIGN(kFirstContextOffsetOffset +
+ num_contexts * kInt32Size);
}
static uint32_t ContextSnapshotOffsetOffset(int index) {
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 8fbb073703..e9c23bb907 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -37,7 +37,8 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
{
DisallowHeapAllocation no_gc;
isolate->heap()->IterateSmiRoots(this);
- isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ isolate->heap()->IterateStrongRoots(this,
+ VISIT_ONLY_STRONG_FOR_SERIALIZATION);
isolate->heap()->RepairFreeListsAfterDeserialization();
isolate->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
DeserializeDeferredObjects();
@@ -46,7 +47,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
// Deserialize eager builtins from the builtin snapshot. Note that deferred
// objects must have been deserialized prior to this.
- builtin_deserializer.DeserializeEagerBuiltinsAndHandlers();
+ builtin_deserializer.DeserializeEagerBuiltins();
// Flush the instruction cache for the entire code-space. Must happen after
// builtins deserialization.
@@ -64,7 +65,6 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
// Issue code events for newly deserialized code objects.
LOG_CODE_EVENT(isolate, LogCodeObjects());
- LOG_CODE_EVENT(isolate, LogBytecodeHandlers());
LOG_CODE_EVENT(isolate, LogCompiledFunctions());
isolate->builtins()->MarkInitialized();
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 9ad6cda5d1..146d413de8 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -34,10 +34,10 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map()->Lookup(obj);
+ RootIndex root_index;
// We can only encode roots as such if it has already been serialized.
// That applies to root indices below the wave front.
- if (root_index != RootIndexMap::kInvalidRootIndex) {
+ if (root_index_map()->Lookup(obj, &root_index)) {
if (root_has_been_serialized(root_index)) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
@@ -136,7 +136,7 @@ void StartupSerializer::VisitRootPointers(Root root, const char* description,
// referenced using kRootArray bytecodes.
for (Object** current = start; current < end; current++) {
SerializeRootObject(*current);
- int root_index = static_cast<int>(current - start);
+ size_t root_index = static_cast<size_t>(current - start);
root_has_been_serialized_.set(root_index);
}
} else {
@@ -152,9 +152,9 @@ void StartupSerializer::CheckRehashability(HeapObject* obj) {
}
bool StartupSerializer::MustBeDeferred(HeapObject* object) {
- if (root_has_been_serialized_.test(Heap::kFreeSpaceMapRootIndex) &&
- root_has_been_serialized_.test(Heap::kOnePointerFillerMapRootIndex) &&
- root_has_been_serialized_.test(Heap::kTwoPointerFillerMapRootIndex)) {
+ if (root_has_been_serialized(RootIndex::kFreeSpaceMap) &&
+ root_has_been_serialized(RootIndex::kOnePointerFillerMap) &&
+ root_has_been_serialized(RootIndex::kTwoPointerFillerMap)) {
// All required root objects are serialized, so any aligned objects can
// be saved without problems.
return false;
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index cf334d10b2..0b2065c3d0 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -28,8 +28,8 @@ class StartupSerializer : public Serializer<> {
int PartialSnapshotCacheIndex(HeapObject* o);
bool can_be_rehashed() const { return can_be_rehashed_; }
- bool root_has_been_serialized(int root_index) const {
- return root_has_been_serialized_.test(root_index);
+ bool root_has_been_serialized(RootIndex root_index) const {
+ return root_has_been_serialized_.test(static_cast<size_t>(root_index));
}
private:
@@ -69,7 +69,7 @@ class StartupSerializer : public Serializer<> {
void CheckRehashability(HeapObject* obj);
- std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
+ std::bitset<RootsTable::kEntriesCount> root_has_been_serialized_;
PartialCacheIndexMap partial_cache_index_map_;
std::vector<AccessorInfo*> accessor_infos_;
std::vector<CallHandlerInfo*> call_handler_infos_;
@@ -83,8 +83,8 @@ class StartupSerializer : public Serializer<> {
class SerializedHandleChecker : public RootVisitor {
public:
SerializedHandleChecker(Isolate* isolate, std::vector<Context*>* contexts);
- virtual void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end);
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
bool CheckGlobalAndEternalHandles();
private:
diff --git a/deps/v8/src/splay-tree.h b/deps/v8/src/splay-tree.h
index e16575419f..454b409fbb 100644
--- a/deps/v8/src/splay-tree.h
+++ b/deps/v8/src/splay-tree.h
@@ -128,7 +128,7 @@ class SplayTree {
// A locator provides access to a node in the tree without actually
// exposing the node.
- class Locator BASE_EMBEDDED {
+ class Locator {
public:
explicit Locator(Node* node) : node_(node) { }
Locator() : node_(nullptr) {}
@@ -159,8 +159,8 @@ class SplayTree {
// Removes root_ node.
void RemoveRootNode(const Key& key);
- template<class Callback>
- class NodeToPairAdaptor BASE_EMBEDDED {
+ template <class Callback>
+ class NodeToPairAdaptor {
public:
explicit NodeToPairAdaptor(Callback* callback)
: callback_(callback) { }
@@ -174,9 +174,9 @@ class SplayTree {
DISALLOW_COPY_AND_ASSIGN(NodeToPairAdaptor);
};
- class NodeDeleter BASE_EMBEDDED {
+ class NodeDeleter {
public:
- NodeDeleter() { }
+ NodeDeleter() = default;
void Call(Node* node) { AllocationPolicy::Delete(node); }
private:
diff --git a/deps/v8/src/string-constants.cc b/deps/v8/src/string-constants.cc
new file mode 100644
index 0000000000..26a5d2045f
--- /dev/null
+++ b/deps/v8/src/string-constants.cc
@@ -0,0 +1,186 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/string-constants.h"
+
+#include "src/base/functional.h"
+#include "src/dtoa.h"
+#include "src/objects.h"
+#include "src/objects/string-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<String> StringConstantBase::AllocateStringConstant(
+ Isolate* isolate) const {
+ if (!flattened_.is_null()) {
+ return flattened_;
+ }
+
+ Handle<String> result;
+ switch (kind()) {
+ case StringConstantKind::kStringLiteral: {
+ result = static_cast<const StringLiteral*>(this)->str();
+ break;
+ }
+ case StringConstantKind::kNumberToStringConstant: {
+ auto num_constant = static_cast<const NumberToStringConstant*>(this);
+ Handle<Object> num_obj =
+ isolate->factory()->NewNumber(num_constant->num());
+ result = isolate->factory()->NumberToString(num_obj);
+ break;
+ }
+ case StringConstantKind::kStringCons: {
+ Handle<String> lhs =
+ static_cast<const StringCons*>(this)->lhs()->AllocateStringConstant(
+ isolate);
+ Handle<String> rhs =
+ static_cast<const StringCons*>(this)->rhs()->AllocateStringConstant(
+ isolate);
+ result = isolate->factory()->NewConsString(lhs, rhs).ToHandleChecked();
+ break;
+ }
+ }
+
+ // TODO(mslekova): Normally we'd want to flatten the string here,
+ // but doing so can OOM for very long strings.
+ Memoize(result);
+ return flattened_;
+}
+
+bool StringConstantBase::operator==(const StringConstantBase& other) const {
+ if (kind() != other.kind()) return false;
+
+ switch (kind()) {
+ case StringConstantKind::kStringLiteral: {
+ return static_cast<const StringLiteral*>(this) ==
+ static_cast<const StringLiteral*>(&other);
+ }
+ case StringConstantKind::kNumberToStringConstant: {
+ return static_cast<const NumberToStringConstant*>(this) ==
+ static_cast<const NumberToStringConstant*>(&other);
+ }
+ case StringConstantKind::kStringCons: {
+ return static_cast<const StringCons*>(this) ==
+ static_cast<const StringCons*>(&other);
+ }
+ }
+ UNREACHABLE();
+}
+
+size_t hash_value(StringConstantBase const& base) {
+ switch (base.kind()) {
+ case StringConstantKind::kStringLiteral: {
+ return hash_value(*static_cast<const StringLiteral*>(&base));
+ }
+ case StringConstantKind::kNumberToStringConstant: {
+ return hash_value(*static_cast<const NumberToStringConstant*>(&base));
+ }
+ case StringConstantKind::kStringCons: {
+ return hash_value(*static_cast<const StringCons*>(&base));
+ }
+ }
+ UNREACHABLE();
+}
+
+bool operator==(StringLiteral const& lhs, StringLiteral const& rhs) {
+ return lhs.str().address() == rhs.str().address();
+}
+
+bool operator!=(StringLiteral const& lhs, StringLiteral const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(StringLiteral const& p) {
+ return base::hash_combine(p.str().address());
+}
+
+std::ostream& operator<<(std::ostream& os, StringLiteral const& p) {
+ return os << Brief(*p.str());
+}
+
+bool operator==(NumberToStringConstant const& lhs,
+ NumberToStringConstant const& rhs) {
+ return lhs.num() == rhs.num();
+}
+
+bool operator!=(NumberToStringConstant const& lhs,
+ NumberToStringConstant const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(NumberToStringConstant const& p) {
+ return base::hash_combine(p.num());
+}
+
+std::ostream& operator<<(std::ostream& os, NumberToStringConstant const& p) {
+ return os << p.num();
+}
+
+bool operator==(StringCons const& lhs, StringCons const& rhs) {
+ // TODO(mslekova): Consider whether this can be expressed more readably.
+ return *(lhs.lhs()) == *(rhs.lhs()) && *(lhs.rhs()) == *(rhs.rhs());
+}
+
+bool operator!=(StringCons const& lhs, StringCons const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(StringCons const& p) {
+ return base::hash_combine(*(p.lhs()), *(p.rhs()));
+}
+
+std::ostream& operator<<(std::ostream& os, const StringConstantBase* base) {
+ os << "DelayedStringConstant: ";
+ switch (base->kind()) {
+ case StringConstantKind::kStringLiteral: {
+ os << *static_cast<const StringLiteral*>(base);
+ break;
+ }
+ case StringConstantKind::kNumberToStringConstant: {
+ os << *static_cast<const NumberToStringConstant*>(base);
+ break;
+ }
+ case StringConstantKind::kStringCons: {
+ os << *static_cast<const StringCons*>(base);
+ break;
+ }
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, StringCons const& p) {
+ return os << p.lhs() << ", " << p.rhs();
+}
+
+size_t StringConstantBase::GetMaxStringConstantLength() const {
+ switch (kind()) {
+ case StringConstantKind::kStringLiteral: {
+ return static_cast<const StringLiteral*>(this)
+ ->GetMaxStringConstantLength();
+ }
+ case StringConstantKind::kNumberToStringConstant: {
+ return static_cast<const NumberToStringConstant*>(this)
+ ->GetMaxStringConstantLength();
+ }
+ case StringConstantKind::kStringCons: {
+ return static_cast<const StringCons*>(this)->GetMaxStringConstantLength();
+ }
+ }
+ UNREACHABLE();
+}
+
+size_t StringLiteral::GetMaxStringConstantLength() const { return length_; }
+
+size_t NumberToStringConstant::GetMaxStringConstantLength() const {
+ return kBase10MaximalLength + 1;
+}
+
+size_t StringCons::GetMaxStringConstantLength() const {
+ return lhs()->GetMaxStringConstantLength() +
+ rhs()->GetMaxStringConstantLength();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/string-constants.h b/deps/v8/src/string-constants.h
new file mode 100644
index 0000000000..b7134849db
--- /dev/null
+++ b/deps/v8/src/string-constants.h
@@ -0,0 +1,115 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STRING_CONSTANTS_H_
+#define V8_STRING_CONSTANTS_H_
+
+#include "src/objects/string.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+enum class StringConstantKind {
+ kStringLiteral,
+ kNumberToStringConstant,
+ kStringCons
+};
+
+class StringConstantBase : public ZoneObject {
+ public:
+ explicit StringConstantBase(StringConstantKind kind) : kind_(kind) {}
+
+ StringConstantKind kind() const { return kind_; }
+ Handle<String> AllocateStringConstant(Isolate* isolate) const;
+
+ size_t GetMaxStringConstantLength() const;
+
+ bool operator==(const StringConstantBase& other) const;
+
+ private:
+ void Memoize(Handle<String> flattened) const { flattened_ = flattened; }
+
+ StringConstantKind kind_;
+ mutable Handle<String> flattened_ = Handle<String>::null();
+};
+
+size_t hash_value(StringConstantBase const& base);
+
+class StringLiteral final : public StringConstantBase {
+ public:
+ explicit StringLiteral(Handle<String> str, size_t length)
+ : StringConstantBase(StringConstantKind::kStringLiteral),
+ str_(str),
+ length_(length) {}
+
+ Handle<String> str() const { return str_; }
+
+ size_t GetMaxStringConstantLength() const;
+
+ private:
+ Handle<String> str_;
+ size_t length_; // We store this separately to avoid accessing the heap.
+};
+
+bool operator==(StringLiteral const& lhs, StringLiteral const& rhs);
+bool operator!=(StringLiteral const& lhs, StringLiteral const& rhs);
+
+size_t hash_value(StringLiteral const& parameters);
+
+std::ostream& operator<<(std::ostream& os, StringLiteral const& parameters);
+
+class NumberToStringConstant final : public StringConstantBase {
+ public:
+ explicit NumberToStringConstant(double num)
+ : StringConstantBase(StringConstantKind::kNumberToStringConstant),
+ num_(num) {}
+
+ double num() const { return num_; }
+
+ size_t GetMaxStringConstantLength() const;
+
+ private:
+ double num_;
+};
+
+bool operator==(NumberToStringConstant const& lhs,
+ NumberToStringConstant const& rhs);
+bool operator!=(NumberToStringConstant const& lhs,
+ NumberToStringConstant const& rhs);
+
+size_t hash_value(NumberToStringConstant const& parameters);
+
+std::ostream& operator<<(std::ostream& os,
+ NumberToStringConstant const& parameters);
+
+class StringCons final : public StringConstantBase {
+ public:
+ explicit StringCons(const StringConstantBase* lhs,
+ const StringConstantBase* rhs)
+ : StringConstantBase(StringConstantKind::kStringCons),
+ lhs_(lhs),
+ rhs_(rhs) {}
+
+ const StringConstantBase* lhs() const { return lhs_; }
+ const StringConstantBase* rhs() const { return rhs_; }
+
+ size_t GetMaxStringConstantLength() const;
+
+ private:
+ const StringConstantBase* lhs_;
+ const StringConstantBase* rhs_;
+};
+
+bool operator==(StringCons const& lhs, StringCons const& rhs);
+bool operator!=(StringCons const& lhs, StringCons const& rhs);
+
+size_t hash_value(StringCons const& parameters);
+
+std::ostream& operator<<(std::ostream& os, StringCons const& parameters);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_STRING_CONSTANTS_H_
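StringConstantBase lets the compiler describe a string value symbolically and defer the heap allocation; the first call to AllocateStringConstant() materializes the string and memoizes it in the mutable flattened_ handle, so later calls are free. The caching pattern in standalone form (illustrative types, not V8's Handle machinery):

    #include <memory>
    #include <string>

    // Compute-once, cache-forever, as AllocateStringConstant() does with its
    // mutable flattened_ member.
    class LazyConcat {
     public:
      LazyConcat(std::string lhs, std::string rhs)
          : lhs_(std::move(lhs)), rhs_(std::move(rhs)) {}

      const std::string& Materialize() const {
        if (!flattened_) {
          // First request: do the potentially expensive work.
          flattened_ = std::make_unique<std::string>(lhs_ + rhs_);
        }
        return *flattened_;
      }

     private:
      std::string lhs_, rhs_;
      mutable std::unique_ptr<std::string> flattened_;  // memoized result
    };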
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index aa4edffab4..6c4da47508 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -17,7 +17,7 @@ class ByteArray;
class StringAllocator {
public:
- virtual ~StringAllocator() { }
+ virtual ~StringAllocator() = default;
// Allocate a number of bytes.
virtual char* allocate(unsigned bytes) = 0;
// Allocate a larger number of bytes and copy the old buffer to the new one.
@@ -31,7 +31,7 @@ class StringAllocator {
// Normal allocator uses new[] and delete[].
class HeapStringAllocator final : public StringAllocator {
public:
- ~HeapStringAllocator() { DeleteArray(space_); }
+ ~HeapStringAllocator() override { DeleteArray(space_); }
char* allocate(unsigned bytes) override;
char* grow(unsigned* bytes) override;
@@ -44,7 +44,8 @@ class FixedStringAllocator final : public StringAllocator {
public:
FixedStringAllocator(char* buffer, unsigned length)
: buffer_(buffer), length_(length) {}
- ~FixedStringAllocator() override{};
+ ~FixedStringAllocator() override = default;
+
char* allocate(unsigned bytes) override;
char* grow(unsigned* bytes) override;
diff --git a/deps/v8/src/torque-assembler.h b/deps/v8/src/torque-assembler.h
new file mode 100644
index 0000000000..3d7cf361c4
--- /dev/null
+++ b/deps/v8/src/torque-assembler.h
@@ -0,0 +1,58 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_ASSEMBLER_H_
+#define V8_TORQUE_ASSEMBLER_H_
+
+#include <deque>
+#include <vector>
+
+#include "src/code-stub-assembler.h"
+
+#include "src/base/optional.h"
+
+namespace v8 {
+namespace internal {
+
+class TorqueAssembler : public CodeStubAssembler {
+ public:
+ using CodeStubAssembler::CodeStubAssembler;
+
+ protected:
+ template <class... Ts>
+ using PLabel = compiler::CodeAssemblerParameterizedLabel<Ts...>;
+
+ template <class T>
+ TNode<T> Uninitialized() {
+ return {};
+ }
+
+ template <class... T, class... Args>
+ void Goto(PLabel<T...>* label, Args... args) {
+ label->AddInputs(args...);
+ CodeStubAssembler::Goto(label->plain_label());
+ }
+ using CodeStubAssembler::Goto;
+ template <class... T>
+ void Bind(PLabel<T...>* label, TNode<T>*... phis) {
+ Bind(label->plain_label());
+ label->CreatePhis(phis...);
+ }
+ void Bind(Label* label) { CodeAssembler::Bind(label); }
+ using CodeStubAssembler::Bind;
+ template <class... T, class... Args>
+ void Branch(TNode<BoolT> condition, PLabel<T...>* if_true,
+ PLabel<T...>* if_false, Args... args) {
+ if_true->AddInputs(args...);
+ if_false->AddInputs(args...);
+ CodeStubAssembler::Branch(condition, if_true->plain_label(),
+ if_false->plain_label());
+ }
+ using CodeStubAssembler::Branch;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_ASSEMBLER_H_
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index d9bb71a663..6bd4d79096 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -30,7 +30,9 @@ namespace torque {
V(ElementAccessExpression) \
V(AssignmentExpression) \
V(IncrementDecrementExpression) \
- V(AssumeTypeImpossibleExpression)
+ V(AssumeTypeImpossibleExpression) \
+ V(StatementExpression) \
+ V(TryLabelExpression)
#define AST_TYPE_EXPRESSION_NODE_KIND_LIST(V) \
V(BasicTypeExpression) \
@@ -52,7 +54,6 @@ namespace torque {
V(TailCallStatement) \
V(VarDeclarationStatement) \
V(GotoStatement) \
- V(TryLabelStatement)
#define AST_DECLARATION_NODE_KIND_LIST(V) \
V(TypeDeclaration) \
@@ -90,7 +91,7 @@ struct AstNode {
};
AstNode(Kind kind, SourcePosition pos) : kind(kind), pos(pos) {}
- virtual ~AstNode() {}
+ virtual ~AstNode() = default;
const Kind kind;
SourcePosition pos;
@@ -171,7 +172,7 @@ struct DefaultModuleDeclaration : ModuleDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(DefaultModuleDeclaration)
DefaultModuleDeclaration(SourcePosition pos,
std::vector<Declaration*> declarations)
- : ModuleDeclaration(kKind, pos, declarations) {}
+ : ModuleDeclaration(kKind, pos, std::move(declarations)) {}
bool IsDefault() const override { return true; }
};
@@ -179,7 +180,8 @@ struct ExplicitModuleDeclaration : ModuleDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExplicitModuleDeclaration)
ExplicitModuleDeclaration(SourcePosition pos, std::string name,
std::vector<Declaration*> declarations)
- : ModuleDeclaration(kKind, pos, declarations), name(std::move(name)) {}
+ : ModuleDeclaration(kKind, pos, std::move(declarations)),
+ name(std::move(name)) {}
bool IsDefault() const override { return false; }
std::string name;
};
@@ -228,7 +230,7 @@ struct CallExpression : Expression {
callee(pos, std::move(callee), std::move(generic_arguments)),
is_operator(is_operator),
arguments(std::move(arguments)),
- labels(labels) {}
+ labels(std::move(labels)) {}
IdentifierExpression callee;
bool is_operator;
std::vector<Expression*> arguments;
@@ -240,7 +242,7 @@ struct StructExpression : Expression {
StructExpression(SourcePosition pos, std::string name,
std::vector<Expression*> expressions)
: Expression(kKind, pos),
- name(name),
+ name(std::move(name)),
expressions(std::move(expressions)) {}
std::string name;
std::vector<Expression*> expressions;
@@ -362,7 +364,9 @@ struct ParameterList {
struct BasicTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(BasicTypeExpression)
BasicTypeExpression(SourcePosition pos, bool is_constexpr, std::string name)
- : TypeExpression(kKind, pos), is_constexpr(is_constexpr), name(name) {}
+ : TypeExpression(kKind, pos),
+ is_constexpr(is_constexpr),
+ name(std::move(name)) {}
bool is_constexpr;
std::string name;
};
@@ -373,7 +377,7 @@ struct FunctionTypeExpression : TypeExpression {
std::vector<TypeExpression*> parameters,
TypeExpression* return_type)
: TypeExpression(kKind, pos),
- parameters(parameters),
+ parameters(std::move(parameters)),
return_type(return_type) {}
std::vector<TypeExpression*> parameters;
TypeExpression* return_type;
@@ -550,15 +554,22 @@ struct LabelBlock : AstNode {
Statement* body;
};
-struct TryLabelStatement : Statement {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(TryLabelStatement)
- TryLabelStatement(SourcePosition pos, Statement* try_block,
- std::vector<LabelBlock*> label_blocks)
- : Statement(kKind, pos),
- try_block(try_block),
- label_blocks(std::move(label_blocks)) {}
- Statement* try_block;
- std::vector<LabelBlock*> label_blocks;
+struct StatementExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(StatementExpression)
+ StatementExpression(SourcePosition pos, Statement* statement)
+ : Expression(kKind, pos), statement(statement) {}
+ Statement* statement;
+};
+
+struct TryLabelExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(TryLabelExpression)
+ TryLabelExpression(SourcePosition pos, Expression* try_expression,
+ LabelBlock* label_block)
+ : Expression(kKind, pos),
+ try_expression(try_expression),
+ label_block(label_block) {}
+ Expression* try_expression;
+ LabelBlock* label_block;
};
struct BlockStatement : Statement {
@@ -634,7 +645,8 @@ struct MacroDeclaration : CallableNode {
base::Optional<std::string> op, ParameterList parameters,
TypeExpression* return_type,
const LabelAndTypesVector& labels)
- : CallableNode(kind, pos, name, parameters, return_type, labels),
+ : CallableNode(kind, pos, std::move(name), std::move(parameters),
+ return_type, labels),
op(std::move(op)) {}
base::Optional<std::string> op;
};
@@ -646,8 +658,8 @@ struct ExternalMacroDeclaration : MacroDeclaration {
ParameterList parameters,
TypeExpression* return_type,
const LabelAndTypesVector& labels)
- : MacroDeclaration(kKind, pos, name, op, parameters, return_type,
- labels) {}
+ : MacroDeclaration(kKind, pos, std::move(name), std::move(op),
+ std::move(parameters), return_type, labels) {}
};
struct TorqueMacroDeclaration : MacroDeclaration {
@@ -656,15 +668,16 @@ struct TorqueMacroDeclaration : MacroDeclaration {
base::Optional<std::string> op,
ParameterList parameters, TypeExpression* return_type,
const LabelAndTypesVector& labels)
- : MacroDeclaration(kKind, pos, name, op, parameters, return_type,
- labels) {}
+ : MacroDeclaration(kKind, pos, std::move(name), std::move(op),
+ std::move(parameters), return_type, labels) {}
};
struct BuiltinDeclaration : CallableNode {
BuiltinDeclaration(AstNode::Kind kind, SourcePosition pos,
bool javascript_linkage, std::string name,
ParameterList parameters, TypeExpression* return_type)
- : CallableNode(kind, pos, name, parameters, return_type, {}),
+ : CallableNode(kind, pos, std::move(name), std::move(parameters),
+ return_type, {}),
javascript_linkage(javascript_linkage) {}
bool javascript_linkage;
};
@@ -674,8 +687,8 @@ struct ExternalBuiltinDeclaration : BuiltinDeclaration {
ExternalBuiltinDeclaration(SourcePosition pos, bool javascript_linkage,
std::string name, ParameterList parameters,
TypeExpression* return_type)
- : BuiltinDeclaration(kKind, pos, javascript_linkage, name, parameters,
- return_type) {}
+ : BuiltinDeclaration(kKind, pos, javascript_linkage, std::move(name),
+ std::move(parameters), return_type) {}
};
struct TorqueBuiltinDeclaration : BuiltinDeclaration {
@@ -683,8 +696,8 @@ struct TorqueBuiltinDeclaration : BuiltinDeclaration {
TorqueBuiltinDeclaration(SourcePosition pos, bool javascript_linkage,
std::string name, ParameterList parameters,
TypeExpression* return_type)
- : BuiltinDeclaration(kKind, pos, javascript_linkage, name, parameters,
- return_type) {}
+ : BuiltinDeclaration(kKind, pos, javascript_linkage, std::move(name),
+ std::move(parameters), return_type) {}
};
struct ExternalRuntimeDeclaration : CallableNode {
@@ -741,8 +754,9 @@ struct SpecializationDeclaration : Declaration {
: Declaration(kKind, pos),
name(std::move(name)),
external(false),
- generic_parameters(generic_parameters),
- signature(new CallableNodeSignature{parameters, return_type, labels}),
+ generic_parameters(std::move(generic_parameters)),
+ signature(new CallableNodeSignature{std::move(parameters), return_type,
+ std::move(labels)}),
body(b) {}
std::string name;
bool external;
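
The ast.h changes fold try..label into the expression grammar: a TryLabelExpression carries exactly one label block, and StatementExpression lets a statement body sit in expression position. A hedged sketch of how a try statement with two handlers could be lowered into the nested form (MakeTryLabel is illustrative; the parser actually allocates nodes through the AST's registration helpers rather than bare new):

Expression* MakeTryLabel(SourcePosition pos, Statement* try_block,
                         LabelBlock* handler1, LabelBlock* handler2) {
  // Wrap the statement for expression position, then peel off one handler
  // per TryLabelExpression, innermost first.
  Expression* inner = new TryLabelExpression(
      pos, new StatementExpression(pos, try_block), handler1);
  return new TryLabelExpression(pos, inner, handler2);
}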
diff --git a/deps/v8/src/torque/cfg.cc b/deps/v8/src/torque/cfg.cc
new file mode 100644
index 0000000000..1489d9f6af
--- /dev/null
+++ b/deps/v8/src/torque/cfg.cc
@@ -0,0 +1,134 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/cfg.h"
+
+#include "src/torque/type-oracle.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+void Block::SetInputTypes(const Stack<const Type*>& input_types) {
+ if (!input_types_) {
+ input_types_ = input_types;
+ } else if (*input_types_ != input_types) {
+ std::stringstream error;
+ error << "incompatible types at branch:\n";
+ for (intptr_t i = std::max(input_types_->Size(), input_types.Size()) - 1;
+ i >= 0; --i) {
+ base::Optional<const Type*> left;
+ base::Optional<const Type*> right;
+ if (static_cast<size_t>(i) < input_types.Size()) {
+ left = input_types.Peek(BottomOffset{static_cast<size_t>(i)});
+ }
+ if (static_cast<size_t>(i) < input_types_->Size()) {
+ right = input_types_->Peek(BottomOffset{static_cast<size_t>(i)});
+ }
+ if (left && right && *left == *right) {
+ error << **left << "\n";
+ } else {
+ if (left) {
+ error << **left;
+ } else {
+ error << "/*missing*/";
+ }
+ error << " => ";
+ if (right) {
+ error << **right;
+ } else {
+ error << "/*missing*/";
+ }
+ error << "\n";
+ }
+ }
+ ReportError(error.str());
+ }
+}
+
+void CfgAssembler::Bind(Block* block) {
+ DCHECK(current_block_->IsComplete());
+ DCHECK(block->instructions().empty());
+ DCHECK(block->HasInputTypes());
+ current_block_ = block;
+ current_stack_ = block->InputTypes();
+ cfg_.PlaceBlock(block);
+}
+
+void CfgAssembler::Goto(Block* block) {
+ if (block->HasInputTypes()) {
+ DropTo(block->InputTypes().AboveTop());
+ }
+ Emit(GotoInstruction{block});
+}
+
+StackRange CfgAssembler::Goto(Block* block, size_t preserved_slots) {
+ DCHECK(block->HasInputTypes());
+ DCHECK_GE(CurrentStack().Size(), block->InputTypes().Size());
+ Emit(DeleteRangeInstruction{
+ StackRange{block->InputTypes().AboveTop() - preserved_slots,
+ CurrentStack().AboveTop() - preserved_slots}});
+ StackRange preserved_slot_range = TopRange(preserved_slots);
+ Emit(GotoInstruction{block});
+ return preserved_slot_range;
+}
+
+void CfgAssembler::Branch(Block* if_true, Block* if_false) {
+ Emit(BranchInstruction{if_true, if_false});
+}
+
+// Delete the specified range of slots, moving upper slots to fill the gap.
+void CfgAssembler::DeleteRange(StackRange range) {
+ DCHECK_LE(range.end(), current_stack_.AboveTop());
+ if (range.Size() == 0) return;
+ Emit(DeleteRangeInstruction{range});
+}
+
+void CfgAssembler::DropTo(BottomOffset new_level) {
+ DeleteRange(StackRange{new_level, CurrentStack().AboveTop()});
+}
+
+StackRange CfgAssembler::Peek(StackRange range,
+ base::Optional<const Type*> type) {
+ std::vector<const Type*> lowered_types;
+ if (type) {
+ lowered_types = LowerType(*type);
+ DCHECK_EQ(lowered_types.size(), range.Size());
+ }
+ for (size_t i = 0; i < range.Size(); ++i) {
+ Emit(PeekInstruction{
+ range.begin() + i,
+ type ? lowered_types[i] : base::Optional<const Type*>{}});
+ }
+ return TopRange(range.Size());
+}
+
+void CfgAssembler::Poke(StackRange destination, StackRange origin,
+ base::Optional<const Type*> type) {
+ DCHECK_EQ(destination.Size(), origin.Size());
+ DCHECK_LE(destination.end(), origin.begin());
+ DCHECK_EQ(origin.end(), CurrentStack().AboveTop());
+ std::vector<const Type*> lowered_types;
+ if (type) {
+ lowered_types = LowerType(*type);
+ DCHECK_EQ(lowered_types.size(), origin.Size());
+ }
+ for (intptr_t i = origin.Size() - 1; i >= 0; --i) {
+ Emit(PokeInstruction{
+ destination.begin() + i,
+ type ? lowered_types[i] : base::Optional<const Type*>{}});
+ }
+}
+
+void CfgAssembler::Print(std::string s) {
+ Emit(PrintConstantStringInstruction{std::move(s)});
+}
+
+void CfgAssembler::Unreachable() { Emit(DebugBreakInstruction{true}); }
+
+void CfgAssembler::DebugBreak() { Emit(DebugBreakInstruction{false}); }
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
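
Taken together, cfg.cc gives Torque a small typed stack-machine IR. A hedged sketch of a visitor driving CfgAssembler to build a diamond, assuming (per the comment in cfg.h) that BranchInstruction consumes a BoolT slot from the top of the stack when emitted:

void BuildDiamond(CfgAssembler& assembler) {
  Stack<const Type*> merged = assembler.CurrentStack();
  merged.Pop();  // the branch consumes the condition on top
  Block* if_true = assembler.NewBlock(merged);
  Block* if_false = assembler.NewBlock(merged, /*is_deferred=*/true);
  Block* done = assembler.NewBlock(merged);
  assembler.Branch(if_true, if_false);
  assembler.Bind(if_true);
  // ... emit instructions for the true arm ...
  assembler.Goto(done);
  assembler.Bind(if_false);
  // ... emit instructions for the false arm ...
  assembler.Goto(done);
  assembler.Bind(done);
}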
diff --git a/deps/v8/src/torque/cfg.h b/deps/v8/src/torque/cfg.h
new file mode 100644
index 0000000000..6fca593505
--- /dev/null
+++ b/deps/v8/src/torque/cfg.h
@@ -0,0 +1,149 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_CFG_H_
+#define V8_TORQUE_CFG_H_
+
+#include <list>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+#include "src/torque/ast.h"
+#include "src/torque/instructions.h"
+#include "src/torque/source-positions.h"
+#include "src/torque/types.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class Block {
+ public:
+ explicit Block(size_t id, base::Optional<Stack<const Type*>> input_types,
+ bool is_deferred)
+ : input_types_(std::move(input_types)),
+ id_(id),
+ is_deferred_(is_deferred) {}
+ void Add(Instruction instruction) {
+ DCHECK(!IsComplete());
+ instructions_.push_back(std::move(instruction));
+ }
+
+ bool HasInputTypes() const { return input_types_ != base::nullopt; }
+ const Stack<const Type*>& InputTypes() const { return *input_types_; }
+ void SetInputTypes(const Stack<const Type*>& input_types);
+
+ const std::vector<Instruction>& instructions() const { return instructions_; }
+ bool IsComplete() const {
+ return !instructions_.empty() && instructions_.back()->IsBlockTerminator();
+ }
+ size_t id() const { return id_; }
+ bool IsDeferred() const { return is_deferred_; }
+
+ private:
+ std::vector<Instruction> instructions_;
+ base::Optional<Stack<const Type*>> input_types_;
+ const size_t id_;
+ bool is_deferred_;
+};
+
+class ControlFlowGraph {
+ public:
+ explicit ControlFlowGraph(Stack<const Type*> input_types) {
+ start_ = NewBlock(std::move(input_types), false);
+ PlaceBlock(start_);
+ }
+
+ Block* NewBlock(base::Optional<Stack<const Type*>> input_types,
+ bool is_deferred) {
+ blocks_.emplace_back(next_block_id_++, std::move(input_types), is_deferred);
+ return &blocks_.back();
+ }
+ void PlaceBlock(Block* block) { placed_blocks_.push_back(block); }
+ Block* start() const { return start_; }
+ base::Optional<Block*> end() const { return end_; }
+ void set_end(Block* end) { end_ = end; }
+ void SetReturnType(const Type* t) {
+ if (!return_type_) {
+ return_type_ = t;
+ return;
+ }
+ if (t != *return_type_) {
+ ReportError("expected return type ", **return_type_, " instead of ", *t);
+ }
+ }
+ const std::vector<Block*>& blocks() const { return placed_blocks_; }
+
+ private:
+ std::list<Block> blocks_;
+ Block* start_;
+ std::vector<Block*> placed_blocks_;
+ base::Optional<Block*> end_;
+ base::Optional<const Type*> return_type_;
+ size_t next_block_id_ = 0;
+};
+
+class CfgAssembler {
+ public:
+ explicit CfgAssembler(Stack<const Type*> input_types)
+ : current_stack_(std::move(input_types)), cfg_(current_stack_) {}
+
+ const ControlFlowGraph& Result() {
+ if (!CurrentBlockIsComplete()) {
+ cfg_.set_end(current_block_);
+ }
+ return cfg_;
+ }
+
+ Block* NewBlock(
+ base::Optional<Stack<const Type*>> input_types = base::nullopt,
+ bool is_deferred = false) {
+ return cfg_.NewBlock(std::move(input_types), is_deferred);
+ }
+
+ bool CurrentBlockIsComplete() const { return current_block_->IsComplete(); }
+
+ void Emit(Instruction instruction) {
+ instruction.TypeInstruction(&current_stack_, &cfg_);
+ current_block_->Add(std::move(instruction));
+ }
+
+ const Stack<const Type*>& CurrentStack() const { return current_stack_; }
+
+ StackRange TopRange(size_t slot_count) const {
+ return CurrentStack().TopRange(slot_count);
+ }
+
+ void Bind(Block* block);
+ void Goto(Block* block);
+ // Goto block while keeping {preserved_slots} many slots on the top and
+ // deleting the additional slots below these to match the input type of the
+ // target block.
+ // Returns the StackRange of the preserved slots in the target block.
+ StackRange Goto(Block* block, size_t preserved_slots);
+ // The condition must be of type bool and on top of the stack. It is
+ // removed from the stack before branching.
+ void Branch(Block* if_true, Block* if_false);
+ // Delete the specified range of slots, moving upper slots to fill the gap.
+ void DeleteRange(StackRange range);
+ void DropTo(BottomOffset new_level);
+ StackRange Peek(StackRange range, base::Optional<const Type*> type);
+ void Poke(StackRange destination, StackRange origin,
+ base::Optional<const Type*> type);
+ void Print(std::string s);
+ void Unreachable();
+ void DebugBreak();
+
+ private:
+ Stack<const Type*> current_stack_;
+ ControlFlowGraph cfg_;
+ Block* current_block_ = cfg_.start();
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_CFG_H_
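
Peek, Poke and DeleteRange are how the assembler shuffles lowered slots without naming them. A hedged sketch, assuming a non-empty current stack whose bottom slot holds a value of a type t that lowers to a single slot:

void CopyBottomToTopAndBack(CfgAssembler& assembler, const Type* t) {
  StackRange bottom{BottomOffset{0}, BottomOffset{1}};
  StackRange copy = assembler.Peek(bottom, t);  // push a copy of slot 0
  assembler.Poke(bottom, copy, t);              // pop it back into slot 0
}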
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
new file mode 100644
index 0000000000..902b1b7f4a
--- /dev/null
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -0,0 +1,487 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/csa-generator.h"
+
+#include "src/torque/type-oracle.h"
+#include "src/torque/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+base::Optional<Stack<std::string>> CSAGenerator::EmitGraph(
+ Stack<std::string> parameters) {
+ for (Block* block : cfg_.blocks()) {
+ out_ << " PLabel<";
+ PrintCommaSeparatedList(out_, block->InputTypes(), [](const Type* t) {
+ return t->GetGeneratedTNodeTypeName();
+ });
+ out_ << "> " << BlockName(block) << "(this, compiler::CodeAssemblerLabel::"
+ << (block->IsDeferred() ? "kDeferred" : "kNonDeferred") << ");\n";
+ }
+
+ EmitInstruction(GotoInstruction{cfg_.start()}, &parameters);
+ for (Block* block : cfg_.blocks()) {
+ if (cfg_.end() && *cfg_.end() == block) continue;
+ out_ << "\n if (" << BlockName(block) << ".is_used()) {\n";
+ EmitBlock(block);
+ out_ << " }\n";
+ }
+ if (cfg_.end()) {
+ out_ << "\n";
+ return EmitBlock(*cfg_.end());
+ }
+ return base::nullopt;
+}
+
+Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
+ Stack<std::string> stack;
+ for (const Type* t : block->InputTypes()) {
+ stack.Push(FreshNodeName());
+ out_ << " TNode<" << t->GetGeneratedTNodeTypeName() << "> "
+ << stack.Top() << ";\n";
+ }
+ out_ << " Bind(&" << BlockName(block);
+ for (const std::string& name : stack) {
+ out_ << ", &" << name;
+ }
+ out_ << ");\n";
+ for (const Instruction& instruction : block->instructions()) {
+ EmitInstruction(instruction, &stack);
+ }
+ return stack;
+}
+
+void CSAGenerator::EmitInstruction(const Instruction& instruction,
+ Stack<std::string>* stack) {
+ switch (instruction.kind()) {
+#define ENUM_ITEM(T) \
+ case InstructionKind::k##T: \
+ return EmitInstruction(instruction.Cast<T>(), stack);
+ TORQUE_INSTRUCTION_LIST(ENUM_ITEM)
+#undef ENUM_ITEM
+ }
+}
+
+void CSAGenerator::EmitInstruction(const PeekInstruction& instruction,
+ Stack<std::string>* stack) {
+ stack->Push(stack->Peek(instruction.slot));
+}
+
+void CSAGenerator::EmitInstruction(const PokeInstruction& instruction,
+ Stack<std::string>* stack) {
+ stack->Poke(instruction.slot, stack->Top());
+ stack->Pop();
+}
+
+void CSAGenerator::EmitInstruction(const DeleteRangeInstruction& instruction,
+ Stack<std::string>* stack) {
+ stack->DeleteRange(instruction.range);
+}
+
+void CSAGenerator::EmitInstruction(
+ const PushUninitializedInstruction& instruction,
+ Stack<std::string>* stack) {
+ // TODO(tebbi): This can trigger an error in CSA if it is used. Instead, we
+ // should prevent usage of uninitialized values in the type system. This
+ // requires "if constexpr" to be evaluated at Torque time.
+ stack->Push("Uninitialized<" + instruction.type->GetGeneratedTNodeTypeName() +
+ ">()");
+}
+
+void CSAGenerator::EmitInstruction(
+ const PushCodePointerInstruction& instruction, Stack<std::string>* stack) {
+ stack->Push(
+ "UncheckedCast<Code>(HeapConstant(Builtins::CallableFor(isolate(), "
+ "Builtins::k" +
+ instruction.external_name + ").code()))");
+}
+
+void CSAGenerator::EmitInstruction(const ModuleConstantInstruction& instruction,
+ Stack<std::string>* stack) {
+ const Type* type = instruction.constant->type();
+ std::vector<std::string> results;
+ for (const Type* lowered : LowerType(type)) {
+ results.push_back(FreshNodeName());
+ stack->Push(results.back());
+ out_ << " TNode<" << lowered->GetGeneratedTNodeTypeName() << "> "
+ << stack->Top() << ";\n";
+ out_ << " USE(" << stack->Top() << ");\n";
+ }
+ out_ << " ";
+ if (type->IsStructType()) {
+ out_ << "std::tie(";
+ PrintCommaSeparatedList(out_, results);
+ out_ << ") = ";
+ } else if (results.size() == 1) {
+ out_ << results[0] << " = ";
+ }
+ out_ << instruction.constant->constant_name() << "()";
+ if (type->IsStructType()) {
+ out_ << ".Flatten();\n";
+ } else {
+ out_ << ";\n";
+ }
+}
+
+void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::vector<std::string> constexpr_arguments =
+ instruction.constexpr_arguments;
+ std::vector<std::string> args;
+ TypeVector parameter_types =
+ instruction.macro->signature().parameter_types.types;
+ for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
+ const Type* type = *it;
+ VisitResult arg;
+ if (type->IsConstexpr()) {
+ args.push_back(std::move(constexpr_arguments.back()));
+ constexpr_arguments.pop_back();
+ } else {
+ std::stringstream s;
+ size_t slot_count = LoweredSlotCount(type);
+ VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
+ EmitCSAValue(arg, *stack, s);
+ args.push_back(s.str());
+ stack->PopMany(slot_count);
+ }
+ }
+ std::reverse(args.begin(), args.end());
+
+ const Type* return_type = instruction.macro->signature().return_type;
+ std::vector<std::string> results;
+ for (const Type* type : LowerType(return_type)) {
+ results.push_back(FreshNodeName());
+ stack->Push(results.back());
+ out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> "
+ << stack->Top() << ";\n";
+ out_ << " USE(" << stack->Top() << ");\n";
+ }
+ out_ << " ";
+ if (return_type->IsStructType()) {
+ out_ << "std::tie(";
+ PrintCommaSeparatedList(out_, results);
+ out_ << ") = ";
+ } else {
+ if (results.size() == 1) {
+ out_ << results[0] << " = UncheckedCast<"
+ << return_type->GetGeneratedTNodeTypeName() << ">(";
+ }
+ }
+ out_ << instruction.macro->name() << "(";
+ PrintCommaSeparatedList(out_, args);
+ if (return_type->IsStructType()) {
+ out_ << ").Flatten();\n";
+ } else {
+ if (results.size() == 1) out_ << ")";
+ out_ << ");\n";
+ }
+}
+
+void CSAGenerator::EmitInstruction(
+ const CallCsaMacroAndBranchInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::vector<std::string> constexpr_arguments =
+ instruction.constexpr_arguments;
+ std::vector<std::string> args;
+ TypeVector parameter_types =
+ instruction.macro->signature().parameter_types.types;
+ for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
+ const Type* type = *it;
+ VisitResult arg;
+ if (type->IsConstexpr()) {
+ args.push_back(std::move(constexpr_arguments.back()));
+ constexpr_arguments.pop_back();
+ } else {
+ std::stringstream s;
+ size_t slot_count = LoweredSlotCount(type);
+ VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
+ EmitCSAValue(arg, *stack, s);
+ args.push_back(s.str());
+ stack->PopMany(slot_count);
+ }
+ }
+ std::reverse(args.begin(), args.end());
+
+ std::vector<std::string> results;
+ const Type* return_type = instruction.macro->signature().return_type;
+ if (return_type != TypeOracle::GetNeverType()) {
+ for (const Type* type :
+ LowerType(instruction.macro->signature().return_type)) {
+ results.push_back(FreshNodeName());
+ out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> "
+ << results.back() << ";\n";
+ out_ << " USE(" << results.back() << ");\n";
+ }
+ }
+
+ std::vector<std::string> label_names;
+ std::vector<std::vector<std::string>> var_names;
+ const LabelDeclarationVector& labels = instruction.macro->signature().labels;
+ DCHECK_EQ(labels.size(), instruction.label_blocks.size());
+ for (size_t i = 0; i < labels.size(); ++i) {
+ TypeVector label_parameters = labels[i].types;
+ label_names.push_back("label" + std::to_string(i));
+ var_names.push_back({});
+ for (size_t j = 0; j < label_parameters.size(); ++j) {
+ var_names[i].push_back("result_" + std::to_string(i) + "_" +
+ std::to_string(j));
+ out_ << " TVariable<"
+ << label_parameters[j]->GetGeneratedTNodeTypeName() << "> "
+ << var_names[i][j] << "(this);\n";
+ }
+ out_ << " Label " << label_names[i] << "(this);\n";
+ }
+
+ out_ << " ";
+ if (results.size() == 1) {
+ out_ << results[0] << " = ";
+ } else if (results.size() > 1) {
+ out_ << "std::tie(";
+ PrintCommaSeparatedList(out_, results);
+ out_ << ") = ";
+ }
+ out_ << instruction.macro->name() << "(";
+ PrintCommaSeparatedList(out_, args);
+ bool first = args.empty();
+ for (size_t i = 0; i < label_names.size(); ++i) {
+ if (!first) out_ << ", ";
+ out_ << "&" << label_names[i];
+ first = false;
+ for (size_t j = 0; j < var_names[i].size(); ++j) {
+ out_ << ", &" << var_names[i][j];
+ }
+ }
+ out_ << ");\n";
+ if (instruction.return_continuation) {
+ out_ << " Goto(&" << BlockName(*instruction.return_continuation);
+ for (const std::string& value : *stack) {
+ out_ << ", " << value;
+ }
+ for (const std::string& result : results) {
+ out_ << ", " << result;
+ }
+ out_ << ");\n";
+ }
+ for (size_t i = 0; i < label_names.size(); ++i) {
+ out_ << " if (" << label_names[i] << ".is_used()) {\n";
+ out_ << " Bind(&" << label_names[i] << ");\n";
+ out_ << " Goto(&" << BlockName(instruction.label_blocks[i]);
+ for (const std::string& value : *stack) {
+ out_ << ", " << value;
+ }
+ for (const std::string& var : var_names[i]) {
+ out_ << ", " << var << ".value()";
+ }
+ out_ << ");\n";
+
+ out_ << " }\n";
+ }
+}
+
+void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::vector<std::string> arguments = stack->PopMany(instruction.argc);
+ std::vector<const Type*> result_types =
+ LowerType(instruction.builtin->signature().return_type);
+ if (instruction.is_tailcall) {
+ out_ << " TailCallBuiltin(Builtins::k" << instruction.builtin->name()
+ << ", ";
+ PrintCommaSeparatedList(out_, arguments);
+ out_ << ");\n";
+ } else {
+ if (result_types.size() == 1) {
+ std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
+ stack->Push(FreshNodeName());
+ out_ << " TNode<" << generated_type << "> " << stack->Top() << " = ";
+ if (generated_type != "Object") out_ << "CAST(";
+ out_ << "CallBuiltin(Builtins::k" << instruction.builtin->name() << ", ";
+ PrintCommaSeparatedList(out_, arguments);
+ if (generated_type != "Object") out_ << ")";
+ out_ << ");\n";
+ out_ << " USE(" << stack->Top() << ");\n";
+ } else {
+ DCHECK_EQ(0, result_types.size());
+ // TODO(tebbi): Actually, builtins have to return a value, so we should
+ // not have to handle this case.
+ out_ << " CallBuiltin(Builtins::k" << instruction.builtin->name()
+ << ", ";
+ PrintCommaSeparatedList(out_, arguments);
+ out_ << ");\n";
+ }
+ }
+}
+
+void CSAGenerator::EmitInstruction(
+ const CallBuiltinPointerInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::vector<std::string> function_and_arguments =
+ stack->PopMany(1 + instruction.argc);
+ std::vector<const Type*> result_types =
+ LowerType(instruction.example_builtin->signature().return_type);
+ if (result_types.size() != 1) {
+ ReportError("builtins must have exactly one result");
+ }
+ if (instruction.is_tailcall) {
+ out_ << " Tail (Builtins::CallableFor(isolate(), Builtins::k"
+ << instruction.example_builtin->name() << ").descriptor(), ";
+ PrintCommaSeparatedList(out_, function_and_arguments);
+ out_ << ");\n";
+ } else {
+ stack->Push(FreshNodeName());
+ std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
+ out_ << " TNode<" << generated_type << "> " << stack->Top() << " = ";
+ if (generated_type != "Object") out_ << "CAST(";
+ out_ << "CallStub(Builtins::CallableFor(isolate(), Builtins::k"
+ << instruction.example_builtin->name() << ").descriptor(), ";
+ PrintCommaSeparatedList(out_, function_and_arguments);
+ out_ << ")";
+ if (generated_type != "Object") out_ << ")";
+ out_ << "; \n";
+ out_ << " USE(" << stack->Top() << ");\n";
+ }
+}
+
+void CSAGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::vector<std::string> arguments = stack->PopMany(instruction.argc);
+ std::vector<const Type*> result_types =
+ LowerType(instruction.runtime_function->signature().return_type);
+ if (result_types.size() > 1) {
+ ReportError("runtime function must have at most one result");
+ }
+ if (instruction.is_tailcall) {
+ out_ << " TailCallRuntime(Runtime::k"
+ << instruction.runtime_function->name() << ", ";
+ PrintCommaSeparatedList(out_, arguments);
+ out_ << ");\n";
+ } else {
+ if (result_types.size() == 1) {
+ stack->Push(FreshNodeName());
+ out_ << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName()
+ << "> " << stack->Top() << " = CAST(CallRuntime(Runtime::k"
+ << instruction.runtime_function->name() << ", ";
+ PrintCommaSeparatedList(out_, arguments);
+ out_ << "));\n";
+ out_ << " USE(" << stack->Top() << ");\n";
+ } else {
+ DCHECK_EQ(0, result_types.size());
+ // TODO(tebbi): Actually, runtime functions have to return a value, so we
+ // should not have to handle this case.
+ out_ << " CallRuntime(Runtime::k"
+ << instruction.runtime_function->name() << ", ";
+ PrintCommaSeparatedList(out_, arguments);
+ out_ << ");\n";
+ }
+ }
+}
+
+void CSAGenerator::EmitInstruction(const BranchInstruction& instruction,
+ Stack<std::string>* stack) {
+ out_ << " Branch(" << stack->Pop() << ", &"
+ << BlockName(instruction.if_true) << ", &"
+ << BlockName(instruction.if_false);
+ for (const std::string& value : *stack) {
+ out_ << ", " << value;
+ }
+ out_ << ");\n";
+}
+
+void CSAGenerator::EmitInstruction(
+ const ConstexprBranchInstruction& instruction, Stack<std::string>* stack) {
+ out_ << " if (" << instruction.condition << ") {\n";
+ out_ << " Goto(&" << BlockName(instruction.if_true);
+ for (const std::string& value : *stack) {
+ out_ << ", " << value;
+ }
+ out_ << ");\n";
+ out_ << " } else {\n";
+ out_ << " Goto(&" << BlockName(instruction.if_false);
+ for (const std::string& value : *stack) {
+ out_ << ", " << value;
+ }
+ out_ << ");\n";
+
+ out_ << " }\n";
+}
+
+void CSAGenerator::EmitInstruction(const GotoInstruction& instruction,
+ Stack<std::string>* stack) {
+ out_ << " Goto(&" << BlockName(instruction.destination);
+ for (const std::string& value : *stack) {
+ out_ << ", " << value;
+ }
+ out_ << ");\n";
+}
+
+void CSAGenerator::EmitInstruction(const GotoExternalInstruction& instruction,
+ Stack<std::string>* stack) {
+ for (auto it = instruction.variable_names.rbegin();
+ it != instruction.variable_names.rend(); ++it) {
+ out_ << " *" << *it << " = " << stack->Pop() << ";\n";
+ }
+ out_ << " Goto(" << instruction.destination << ");\n";
+}
+
+void CSAGenerator::EmitInstruction(const ReturnInstruction& instruction,
+ Stack<std::string>* stack) {
+ if (*linkage_ == Builtin::kVarArgsJavaScript) {
+ out_ << " " << ARGUMENTS_VARIABLE_STRING << "->PopAndReturn(";
+ } else {
+ out_ << " Return(";
+ }
+ out_ << stack->Pop() << ");\n";
+}
+
+void CSAGenerator::EmitInstruction(
+ const PrintConstantStringInstruction& instruction,
+ Stack<std::string>* stack) {
+ out_ << " Print(" << StringLiteralQuote(instruction.message) << ");\n";
+}
+
+void CSAGenerator::EmitInstruction(const DebugBreakInstruction& instruction,
+ Stack<std::string>* stack) {
+ if (instruction.never_continues) {
+ out_ << " Unreachable();\n";
+ } else {
+ out_ << " DebugBreak();\n";
+ }
+}
+
+void CSAGenerator::EmitInstruction(const UnsafeCastInstruction& instruction,
+ Stack<std::string>* stack) {
+ stack->Poke(stack->AboveTop() - 1,
+ "UncheckedCast<" +
+ instruction.destination_type->GetGeneratedTNodeTypeName() +
+ ">(" + stack->Top() + ")");
+}
+
+// static
+void CSAGenerator::EmitCSAValue(VisitResult result,
+ const Stack<std::string>& values,
+ std::ostream& out) {
+ if (!result.IsOnStack()) {
+ out << result.constexpr_value();
+ } else if (auto* struct_type = StructType::DynamicCast(result.type())) {
+ out << struct_type->name() << "{";
+ bool first = true;
+ for (auto& field : struct_type->fields()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+ EmitCSAValue(ProjectStructField(result, field.name), values, out);
+ }
+ out << "}";
+ } else {
+ DCHECK_EQ(1, result.stack_range().Size());
+ out << "TNode<" << result.type()->GetGeneratedTNodeTypeName() << ">{"
+ << values.Peek(result.stack_range().begin()) << "}";
+ }
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
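
For orientation, the C++ this generator emits for a trivial graph (one parameter flowing into a single block that returns) looks roughly like the following; tmp/block naming follows FreshNodeName() and BlockName() above, parameter0 stands in for whatever name the caller pushed, and indentation is elided:

PLabel<Object> block0(this, compiler::CodeAssemblerLabel::kNonDeferred);
Goto(&block0, parameter0);
if (block0.is_used()) {
  TNode<Object> tmp0;
  Bind(&block0, &tmp0);
  Return(tmp0);
}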
diff --git a/deps/v8/src/torque/csa-generator.h b/deps/v8/src/torque/csa-generator.h
new file mode 100644
index 0000000000..78fccebd6d
--- /dev/null
+++ b/deps/v8/src/torque/csa-generator.h
@@ -0,0 +1,53 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_CSA_GENERATOR_H_
+#define V8_TORQUE_CSA_GENERATOR_H_
+
+#include <iostream>
+
+#include "src/torque/cfg.h"
+#include "src/torque/declarable.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class CSAGenerator {
+ public:
+ CSAGenerator(const ControlFlowGraph& cfg, std::ostream& out,
+ base::Optional<Builtin::Kind> linkage = base::nullopt)
+ : cfg_(cfg), out_(out), linkage_(linkage) {}
+ base::Optional<Stack<std::string>> EmitGraph(Stack<std::string> parameters);
+
+ static constexpr const char* ARGUMENTS_VARIABLE_STRING = "arguments";
+
+ static void EmitCSAValue(VisitResult result, const Stack<std::string>& values,
+ std::ostream& out);
+
+ private:
+ const ControlFlowGraph& cfg_;
+ std::ostream& out_;
+ size_t fresh_id_ = 0;
+ base::Optional<Builtin::Kind> linkage_;
+
+ std::string FreshNodeName() { return "tmp" + std::to_string(fresh_id_++); }
+ std::string BlockName(const Block* block) {
+ return "block" + std::to_string(block->id());
+ }
+
+ Stack<std::string> EmitBlock(const Block* block);
+ void EmitInstruction(const Instruction& instruction,
+ Stack<std::string>* stack);
+#define EMIT_INSTRUCTION_DECLARATION(T) \
+ void EmitInstruction(const T& instruction, Stack<std::string>* stack);
+ TORQUE_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
+#undef EMIT_INSTRUCTION_DECLARATION
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_CSA_GENERATOR_H_
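
A hedged sketch of the call protocol this header implies, with cfg and parameter_names assumed in scope (the real caller, which also decides the builtin linkage, is in implementation-visitor.cc further down):

std::stringstream csa_code;
CSAGenerator csa_generator(cfg, csa_code);  // no linkage: macro-style code
base::Optional<Stack<std::string>> return_values =
    csa_generator.EmitGraph(parameter_names);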
diff --git a/deps/v8/src/torque/declarable.cc b/deps/v8/src/torque/declarable.cc
index bf0b9a9d7a..6768da5474 100644
--- a/deps/v8/src/torque/declarable.cc
+++ b/deps/v8/src/torque/declarable.cc
@@ -34,18 +34,6 @@ std::ostream& operator<<(std::ostream& os, const RuntimeFunction& b) {
return os;
}
-std::string Variable::RValue() const {
- if (!IsDefined()) {
- ReportError("Reading uninitialized variable.");
- }
- if (type()->IsStructType()) {
- return value();
- }
- std::string result = "(*" + value() + ")";
- if (!IsConst()) result += ".value()";
- return result;
-}
-
void PrintLabel(std::ostream& os, const Label& l, bool with_names) {
os << l.name();
if (l.GetParameterCount() != 0) {
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index 315ff8f636..1d173062bd 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -18,13 +18,14 @@ namespace v8 {
namespace internal {
namespace torque {
+class Block;
+class Generic;
class Scope;
class ScopeChain;
-class Generic;
class Declarable {
public:
- virtual ~Declarable() {}
+ virtual ~Declarable() = default;
enum Kind {
kVariable,
kParameter,
@@ -88,13 +89,17 @@ class Declarable {
class Value : public Declarable {
public:
+ DECLARE_DECLARABLE_BOILERPLATE(Value, value);
const std::string& name() const { return name_; }
virtual bool IsConst() const { return true; }
- virtual std::string value() const = 0;
- virtual std::string RValue() const { return value(); }
- DECLARE_DECLARABLE_BOILERPLATE(Value, value);
+ VisitResult value() const { return *value_; }
const Type* type() const { return type_; }
+ void set_value(VisitResult value) {
+ DCHECK(!value_);
+ value_ = value;
+ }
+
protected:
Value(Kind kind, const Type* type, const std::string& name)
: Declarable(kind), type_(type), name_(name) {}
@@ -102,40 +107,44 @@ class Value : public Declarable {
private:
const Type* type_;
std::string name_;
+ base::Optional<VisitResult> value_;
};
class Parameter : public Value {
public:
DECLARE_DECLARABLE_BOILERPLATE(Parameter, parameter);
- std::string value() const override { return var_name_; }
+
+ const std::string& external_name() const { return external_name_; }
private:
friend class Declarations;
- Parameter(const std::string& name, const Type* type,
- const std::string& var_name)
- : Value(Declarable::kParameter, type, name), var_name_(var_name) {}
+ Parameter(const std::string& name, std::string external_name,
+ const Type* type)
+ : Value(Declarable::kParameter, type, name),
+ external_name_(external_name) {}
- std::string var_name_;
+ std::string external_name_;
};
class ModuleConstant : public Value {
public:
DECLARE_DECLARABLE_BOILERPLATE(ModuleConstant, constant);
- std::string value() const override { UNREACHABLE(); }
- std::string RValue() const override { return name() + "()"; }
+
+ const std::string& constant_name() const { return constant_name_; }
private:
friend class Declarations;
- explicit ModuleConstant(const std::string& name, const Type* type)
- : Value(Declarable::kModuleConstant, type, name) {}
+ explicit ModuleConstant(std::string constant_name, const Type* type)
+ : Value(Declarable::kModuleConstant, type, constant_name),
+ constant_name_(std::move(constant_name)) {}
+
+ std::string constant_name_;
};
class Variable : public Value {
public:
DECLARE_DECLARABLE_BOILERPLATE(Variable, variable);
bool IsConst() const override { return const_; }
- std::string value() const override { return value_; }
- std::string RValue() const override;
void Define() {
if (defined_ && IsConst()) {
ReportError("Cannot re-define a const-bound variable.");
@@ -146,10 +155,8 @@ class Variable : public Value {
private:
friend class Declarations;
- Variable(const std::string& name, const std::string& value, const Type* type,
- bool is_const)
+ Variable(std::string name, const Type* type, bool is_const)
: Value(Declarable::kVariable, type, name),
- value_(value),
defined_(false),
const_(is_const) {
DCHECK_IMPLIES(type->IsConstexpr(), IsConst());
@@ -163,8 +170,20 @@ class Variable : public Value {
class Label : public Declarable {
public:
void AddVariable(Variable* var) { parameters_.push_back(var); }
- std::string name() const { return name_; }
- std::string generated() const { return generated_; }
+ Block* block() const { return *block_; }
+ void set_block(Block* block) {
+ DCHECK(!block_);
+ block_ = block;
+ }
+ const std::string& external_label_name() const {
+ return *external_label_name_;
+ }
+ const std::string& name() const { return name_; }
+ void set_external_label_name(std::string external_label_name) {
+ DCHECK(!block_);
+ DCHECK(!external_label_name_);
+ external_label_name_ = std::move(external_label_name);
+ }
Variable* GetParameter(size_t i) const { return parameters_[i]; }
size_t GetParameterCount() const { return parameters_.size(); }
const std::vector<Variable*>& GetParameters() const { return parameters_; }
@@ -172,34 +191,35 @@ class Label : public Declarable {
DECLARE_DECLARABLE_BOILERPLATE(Label, label);
void MarkUsed() { used_ = true; }
bool IsUsed() const { return used_; }
+ bool IsDeferred() const { return deferred_; }
private:
friend class Declarations;
- explicit Label(const std::string& name)
+ explicit Label(std::string name, bool deferred = false)
: Declarable(Declarable::kLabel),
- name_(name),
- generated_("label_" + name + "_" + std::to_string(next_id_++)),
- used_(false) {}
+ name_(std::move(name)),
+ used_(false),
+ deferred_(deferred) {}
std::string name_;
- std::string generated_;
+ base::Optional<Block*> block_;
+ base::Optional<std::string> external_label_name_;
std::vector<Variable*> parameters_;
static size_t next_id_;
bool used_;
+ bool deferred_;
};
class ExternConstant : public Value {
public:
DECLARE_DECLARABLE_BOILERPLATE(ExternConstant, constant);
- std::string value() const override { return value_; }
private:
friend class Declarations;
- explicit ExternConstant(const std::string& name, const Type* type,
- const std::string& value)
- : Value(Declarable::kExternConstant, type, name), value_(value) {}
-
- std::string value_;
+ explicit ExternConstant(std::string name, const Type* type, std::string value)
+ : Value(Declarable::kExternConstant, type, std::move(name)) {
+ set_value(VisitResult(type, std::move(value)));
+ }
};
class Callable : public Declarable {
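
The net effect of the declarable.h changes: a Value no longer hands out strings of generated C++; it carries a VisitResult that is bound exactly once. A hedged sketch of the new protocol (names are illustrative; CreateVariable appears in declarations.h below, and assembler() is assumed to be the current CfgAssembler):

Variable* var = declarations()->CreateVariable("x", type, /*is_const=*/false);
var->set_value(VisitResult(type, assembler().TopRange(slot_count)));
VisitResult bound = var->value();  // dereferences the once-set optional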
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index 2f3a1fa869..abc207c049 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -174,22 +174,10 @@ void DeclarationVisitor::Visit(TorqueMacroDeclaration* decl,
CurrentCallableActivator activator(global_context_, macro, decl);
DeclareSignature(signature);
- Variable* return_variable = nullptr;
- if (!signature.return_type->IsVoidOrNever()) {
- return_variable =
- DeclareVariable(kReturnValueVariable, signature.return_type,
- signature.return_type->IsConstexpr());
- }
- PushControlSplit();
if (body != nullptr) {
Visit(body);
}
- auto changed_vars = PopControlSplit();
- if (return_variable) changed_vars.insert(return_variable);
- global_context_.AddControlSplitChangedVariables(
- decl, declarations()->GetCurrentSpecializationTypeNamesVector(),
- changed_vars);
}
void DeclarationVisitor::Visit(ConstDeclaration* decl) {
@@ -273,28 +261,13 @@ void DeclarationVisitor::Visit(ReturnStatement* stmt) {
Variable* DeclarationVisitor::DeclareVariable(const std::string& name,
const Type* type, bool is_const) {
Variable* result = declarations()->DeclareVariable(name, type, is_const);
- if (type->IsStructType()) {
- const StructType* struct_type = StructType::cast(type);
- for (auto& field : struct_type->fields()) {
- std::string field_var_name = name + "." + field.name;
- DeclareVariable(field_var_name, field.type, is_const);
- }
- }
return result;
}
Parameter* DeclarationVisitor::DeclareParameter(const std::string& name,
const Type* type) {
- Parameter* result = declarations()->DeclareParameter(
+ return declarations()->DeclareParameter(
name, GetParameterVariableFromName(name), type);
- if (type->IsStructType()) {
- const StructType* struct_type = StructType::cast(type);
- for (auto& field : struct_type->fields()) {
- std::string field_var_name = name + "." + field.name;
- DeclareParameter(field_var_name, field.type);
- }
- }
- return result;
}
void DeclarationVisitor::Visit(VarDeclarationStatement* stmt) {
@@ -372,7 +345,9 @@ void DeclarationVisitor::Visit(LogicalAndExpression* expr) {
Visit(expr->right);
}
-void DeclarationVisitor::DeclareExpressionForBranch(Expression* node) {
+void DeclarationVisitor::DeclareExpressionForBranch(
+ Expression* node, base::Optional<Statement*> true_statement,
+ base::Optional<Statement*> false_statement) {
Declarations::NodeScopeActivator scope(declarations(), node);
// Conditional expressions can either explicitly return a bit
// type, or they can be backed by macros that don't return but
@@ -380,46 +355,27 @@ void DeclarationVisitor::DeclareExpressionForBranch(Expression* node) {
// visiting the conditional expression, those label-based
// macro conditionals will be able to find them through normal
// label lookups.
- declarations()->DeclareLabel(kTrueLabelName);
- declarations()->DeclareLabel(kFalseLabelName);
+ declarations()->DeclareLabel(kTrueLabelName, true_statement);
+ declarations()->DeclareLabel(kFalseLabelName, false_statement);
Visit(node);
}
void DeclarationVisitor::Visit(ConditionalExpression* expr) {
DeclareExpressionForBranch(expr->condition);
- PushControlSplit();
Visit(expr->if_true);
Visit(expr->if_false);
- auto changed_vars = PopControlSplit();
- global_context_.AddControlSplitChangedVariables(
- expr, declarations()->GetCurrentSpecializationTypeNamesVector(),
- changed_vars);
}
void DeclarationVisitor::Visit(IfStatement* stmt) {
- if (!stmt->is_constexpr) {
- PushControlSplit();
- }
- DeclareExpressionForBranch(stmt->condition);
+ DeclareExpressionForBranch(stmt->condition, stmt->if_true, stmt->if_false);
Visit(stmt->if_true);
if (stmt->if_false) Visit(*stmt->if_false);
- if (!stmt->is_constexpr) {
- auto changed_vars = PopControlSplit();
- global_context_.AddControlSplitChangedVariables(
- stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
- changed_vars);
- }
}
void DeclarationVisitor::Visit(WhileStatement* stmt) {
Declarations::NodeScopeActivator scope(declarations(), stmt);
DeclareExpressionForBranch(stmt->condition);
- PushControlSplit();
Visit(stmt->body);
- auto changed_vars = PopControlSplit();
- global_context_.AddControlSplitChangedVariables(
- stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
- changed_vars);
}
void DeclarationVisitor::Visit(ForOfLoopStatement* stmt) {
@@ -429,18 +385,12 @@ void DeclarationVisitor::Visit(ForOfLoopStatement* stmt) {
Visit(stmt->iterable);
if (stmt->begin) Visit(*stmt->begin);
if (stmt->end) Visit(*stmt->end);
- PushControlSplit();
Visit(stmt->body);
- auto changed_vars = PopControlSplit();
- global_context_.AddControlSplitChangedVariables(
- stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
- changed_vars);
}
void DeclarationVisitor::Visit(ForLoopStatement* stmt) {
Declarations::NodeScopeActivator scope(declarations(), stmt);
if (stmt->var_declaration) Visit(*stmt->var_declaration);
- PushControlSplit();
// Same as DeclareExpressionForBranch, but without the extra scope.
// If no test expression is present we can not use it for the scope.
@@ -450,22 +400,20 @@ void DeclarationVisitor::Visit(ForLoopStatement* stmt) {
Visit(stmt->body);
if (stmt->action) Visit(*stmt->action);
- auto changed_vars = PopControlSplit();
- global_context_.AddControlSplitChangedVariables(
- stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
- changed_vars);
}
-void DeclarationVisitor::Visit(TryLabelStatement* stmt) {
- // Activate a new scope to declare handler labels, they should not be
- // visible outside the label block.
+void DeclarationVisitor::Visit(TryLabelExpression* stmt) {
+ // Activate a new scope to declare the handler's label parameters; they
+ // should not be visible outside the label block.
{
Declarations::NodeScopeActivator scope(declarations(), stmt);
- // Declare labels
- for (LabelBlock* block : stmt->label_blocks) {
+ // Declare label
+ {
+ LabelBlock* block = stmt->label_block;
CurrentSourcePosition::Scope scope(block->pos);
- Label* shared_label = declarations()->DeclareLabel(block->label);
+ Label* shared_label =
+ declarations()->DeclareLabel(block->label, block->body);
{
Declarations::NodeScopeActivator scope(declarations(), block->body);
if (block->parameters.has_varargs) {
@@ -475,7 +423,7 @@ void DeclarationVisitor::Visit(TryLabelStatement* stmt) {
}
size_t i = 0;
- for (auto p : block->parameters.names) {
+ for (const auto& p : block->parameters.names) {
const Type* type =
declarations()->GetType(block->parameters.types[i]);
if (type->IsConstexpr()) {
@@ -485,18 +433,16 @@ void DeclarationVisitor::Visit(TryLabelStatement* stmt) {
shared_label->AddVariable(DeclareVariable(p, type, false));
++i;
}
- }
- if (global_context_.verbose()) {
- std::cout << " declaring label " << block->label << "\n";
+ if (global_context_.verbose()) {
+ std::cout << " declaring label " << block->label << "\n";
+ }
}
}
- Visit(stmt->try_block);
+ Visit(stmt->try_expression);
}
- for (LabelBlock* block : stmt->label_blocks) {
- Visit(block->body);
- }
+ Visit(stmt->label_block->body);
}
void DeclarationVisitor::GenerateHeader(std::string& file_name) {
@@ -531,7 +477,7 @@ void DeclarationVisitor::GenerateHeader(std::string& file_name) {
}
if (declareParameters) {
int index = 0;
- for (auto parameter : builtin->parameter_names()) {
+ for (const auto& parameter : builtin->parameter_names()) {
if (index >= firstParameterIndex) {
new_contents_stream << ", k" << CamelifyString(parameter);
}
@@ -569,6 +515,10 @@ void DeclarationVisitor::Visit(IdentifierExpression* expr) {
}
}
+void DeclarationVisitor::Visit(StatementExpression* expr) {
+ Visit(expr->statement);
+}
+
void DeclarationVisitor::Visit(CallExpression* expr) {
Visit(&expr->callee);
for (Expression* arg : expr->arguments) Visit(arg);
@@ -589,37 +539,9 @@ void DeclarationVisitor::Visit(TypeDeclaration* decl) {
}
}
-void DeclarationVisitor::MarkLocationModified(Expression* location) {
- if (IdentifierExpression* id = IdentifierExpression::cast(location)) {
- const Value* value = declarations()->LookupValue(id->name);
- if (value->IsVariable()) {
- const Variable* variable = Variable::cast(value);
- bool was_live = MarkVariableModified(variable);
- if (was_live && global_context_.verbose()) {
- std::cout << *variable << " was modified in control split at "
- << PositionAsString(id->pos) << "\n";
- }
- }
- }
-}
-
-bool DeclarationVisitor::MarkVariableModified(const Variable* variable) {
- auto e = live_and_changed_variables_.rend();
- auto c = live_and_changed_variables_.rbegin();
- bool was_live_in_preceeding_split = false;
- while (c != e) {
- if (c->live.find(variable) != c->live.end()) {
- c->changed.insert(variable);
- was_live_in_preceeding_split = true;
- }
- c++;
- }
- return was_live_in_preceeding_split;
-}
-
void DeclarationVisitor::DeclareSignature(const Signature& signature) {
auto type_iterator = signature.parameter_types.types.begin();
- for (auto name : signature.parameter_names) {
+ for (const auto& name : signature.parameter_names) {
const Type* t(*type_iterator++);
if (name.size() != 0) {
DeclareParameter(name, t);
@@ -628,6 +550,7 @@ void DeclarationVisitor::DeclareSignature(const Signature& signature) {
for (auto& label : signature.labels) {
auto label_params = label.types;
Label* new_label = declarations()->DeclareLabel(label.name);
+ new_label->set_external_label_name("label_" + label.name);
size_t i = 0;
for (auto var_type : label_params) {
if (var_type->IsConstexpr()) {
@@ -635,7 +558,8 @@ void DeclarationVisitor::DeclareSignature(const Signature& signature) {
}
std::string var_name = label.name + std::to_string(i++);
- new_label->AddVariable(DeclareVariable(var_name, var_type, false));
+ new_label->AddVariable(
+ declarations()->CreateVariable(var_name, var_type, false));
}
}
}
diff --git a/deps/v8/src/torque/declaration-visitor.h b/deps/v8/src/torque/declaration-visitor.h
index e1d5439018..d8a9698c6f 100644
--- a/deps/v8/src/torque/declaration-visitor.h
+++ b/deps/v8/src/torque/declaration-visitor.h
@@ -114,7 +114,9 @@ class DeclarationVisitor : public FileVisitor {
void Visit(LogicalOrExpression* expr);
void Visit(LogicalAndExpression* expr);
- void DeclareExpressionForBranch(Expression* node);
+ void DeclareExpressionForBranch(
+ Expression* node, base::Optional<Statement*> true_statement = {},
+ base::Optional<Statement*> false_statement = {});
void Visit(ConditionalExpression* expr);
void Visit(IfStatement* stmt);
@@ -122,7 +124,6 @@ class DeclarationVisitor : public FileVisitor {
void Visit(ForOfLoopStatement* stmt);
void Visit(AssignmentExpression* expr) {
- MarkLocationModified(expr->location);
Visit(expr->location);
Visit(expr->value);
}
@@ -133,39 +134,20 @@ class DeclarationVisitor : public FileVisitor {
void Visit(ForLoopStatement* stmt);
void Visit(IncrementDecrementExpression* expr) {
- MarkLocationModified(expr->location);
Visit(expr->location);
}
void Visit(AssumeTypeImpossibleExpression* expr) { Visit(expr->expression); }
- void Visit(TryLabelStatement* stmt);
+ void Visit(TryLabelExpression* stmt);
+ void Visit(StatementExpression* stmt);
void GenerateHeader(std::string& file_name);
private:
- struct LiveAndChanged {
- std::set<const Variable*> live;
- std::set<const Variable*> changed;
- };
-
- void PushControlSplit() {
- LiveAndChanged live_and_changed;
- live_and_changed.live = declarations()->GetLiveVariables();
- live_and_changed_variables_.push_back(live_and_changed);
- }
-
Variable* DeclareVariable(const std::string& name, const Type* type,
bool is_const);
Parameter* DeclareParameter(const std::string& name, const Type* type);
- std::set<const Variable*> PopControlSplit() {
- auto result = live_and_changed_variables_.back().changed;
- live_and_changed_variables_.pop_back();
- return result;
- }
-
- void MarkLocationModified(Expression* location);
- bool MarkVariableModified(const Variable* variable);
void DeclareSignature(const Signature& signature);
void DeclareSpecializedTypes(const SpecializationKey& key);
@@ -175,7 +157,6 @@ class DeclarationVisitor : public FileVisitor {
Declarations::ModuleScopeActivator scope_;
std::vector<Builtin*> torque_builtins_;
- std::vector<LiveAndChanged> live_and_changed_variables_;
};
} // namespace torque
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index 9b2964d210..f001b98355 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -233,9 +233,15 @@ void Declarations::DeclareStruct(Module* module, const std::string& name,
DeclareType(name, new_type);
}
-Label* Declarations::DeclareLabel(const std::string& name) {
+Label* Declarations::DeclareLabel(const std::string& name,
+ base::Optional<Statement*> statement) {
CheckAlreadyDeclared(name, "label");
- Label* result = new Label(name);
+ bool deferred = false;
+ if (statement) {
+ BlockStatement* block = BlockStatement::DynamicCast(*statement);
+ deferred = block && block->deferred;
+ }
+ Label* result = new Label(name, deferred);
Declare(name, std::unique_ptr<Declarable>(result));
return result;
}
@@ -298,38 +304,31 @@ RuntimeFunction* Declarations::DeclareRuntimeFunction(
return result;
}
+Variable* Declarations::CreateVariable(const std::string& var, const Type* type,
+ bool is_const) {
+ return RegisterDeclarable(
+ std::unique_ptr<Variable>(new Variable(var, type, is_const)));
+}
+
Variable* Declarations::DeclareVariable(const std::string& var,
const Type* type, bool is_const) {
- std::string name(var + "_" +
- std::to_string(GetNextUniqueDeclarationNumber()));
- std::replace(name.begin(), name.end(), '.', '_');
CheckAlreadyDeclared(var, "variable");
- Variable* result = new Variable(var, name, type, is_const);
+ Variable* result = new Variable(var, type, is_const);
Declare(var, std::unique_ptr<Declarable>(result));
return result;
}
Parameter* Declarations::DeclareParameter(const std::string& name,
- const std::string& var_name,
+ std::string external_name,
const Type* type) {
CheckAlreadyDeclared(name, "parameter");
- Parameter* result = new Parameter(name, type, var_name);
- Declare(name, std::unique_ptr<Declarable>(result));
- return result;
-}
-
-Label* Declarations::DeclarePrivateLabel(const std::string& raw_name) {
- std::string name =
- raw_name + "_" + std::to_string(GetNextUniqueDeclarationNumber());
- CheckAlreadyDeclared(name, "label");
- Label* result = new Label(name);
+ Parameter* result = new Parameter(name, std::move(external_name), type);
Declare(name, std::unique_ptr<Declarable>(result));
return result;
}
void Declarations::DeclareExternConstant(const std::string& name,
- const Type* type,
- const std::string& value) {
+ const Type* type, std::string value) {
CheckAlreadyDeclared(name, "constant, parameter or arguments");
ExternConstant* result = new ExternConstant(name, type, value);
Declare(name, std::unique_ptr<Declarable>(result));
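
CreateVariable vs. DeclareVariable encodes a deliberate split: label parameters are registered as declarables but not bound to a name in the current scope, while ordinary locals still are. A hedged sketch mirroring DeclareSignature in declaration-visitor.cc:

Label* label = declarations()->DeclareLabel("Found");
label->set_external_label_name("label_Found");
// Reachable only through the label, not by name lookup:
label->AddVariable(declarations()->CreateVariable("Found0", type, false));
// By contrast, a local the body can refer to by name:
Variable* local = declarations()->DeclareVariable("x", type, false);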
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index 880b3e75a6..5a45e5dbda 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
namespace torque {
-static constexpr const char* const kFromConstexprMacroName = "from_constexpr";
+static constexpr const char* const kFromConstexprMacroName = "FromConstexpr";
static constexpr const char* kTrueLabelName = "_True";
static constexpr const char* kFalseLabelName = "_False";
@@ -79,7 +79,8 @@ class Declarations {
void DeclareStruct(Module* module, const std::string& name,
const std::vector<NameAndType>& fields);
- Label* DeclareLabel(const std::string& name);
+ Label* DeclareLabel(const std::string& name,
+ base::Optional<Statement*> statement = {});
Macro* DeclareMacro(const std::string& name, const Signature& signature,
base::Optional<std::string> op = {});
@@ -90,17 +91,16 @@ class Declarations {
RuntimeFunction* DeclareRuntimeFunction(const std::string& name,
const Signature& signature);
+ Variable* CreateVariable(const std::string& var, const Type* type,
+ bool is_const);
Variable* DeclareVariable(const std::string& var, const Type* type,
bool is_const);
Parameter* DeclareParameter(const std::string& name,
- const std::string& mangled_name,
- const Type* type);
-
- Label* DeclarePrivateLabel(const std::string& name);
+ std::string external_name, const Type* type);
void DeclareExternConstant(const std::string& name, const Type* type,
- const std::string& value);
+ std::string value);
ModuleConstant* DeclareModuleConstant(const std::string& name,
const Type* type);
@@ -219,7 +219,7 @@ class Declarations::ScopedGenericScopeChainSnapshot {
ScopedGenericScopeChainSnapshot(Declarations* declarations,
const SpecializationKey& key)
: restorer_(declarations->generic_declaration_scopes_[key.first]) {}
- ~ScopedGenericScopeChainSnapshot() {}
+ ~ScopedGenericScopeChainSnapshot() = default;
private:
ScopeChain::ScopedSnapshotRestorer restorer_;
diff --git a/deps/v8/src/torque/file-visitor.cc b/deps/v8/src/torque/file-visitor.cc
index c2e5aa7924..865b7b456d 100644
--- a/deps/v8/src/torque/file-visitor.cc
+++ b/deps/v8/src/torque/file-visitor.cc
@@ -13,7 +13,7 @@ namespace torque {
Signature FileVisitor::MakeSignature(const CallableNodeSignature* signature) {
LabelDeclarationVector definition_vector;
- for (auto label : signature->labels) {
+ for (const auto& label : signature->labels) {
LabelDeclaration def = {label.name, GetTypeVector(label.types)};
definition_vector.push_back(def);
}
diff --git a/deps/v8/src/torque/file-visitor.h b/deps/v8/src/torque/file-visitor.h
index d306392446..7d79e9acba 100644
--- a/deps/v8/src/torque/file-visitor.h
+++ b/deps/v8/src/torque/file-visitor.h
@@ -51,10 +51,6 @@ class FileVisitor {
};
protected:
- static constexpr const char* kReturnValueVariable = "_return";
- static constexpr const char* kDoneLabelName = "_done";
- static constexpr const char* kForIndexValueVariable = "_for_index";
-
Module* CurrentModule() const { return module_; }
friend class ScopedModuleActivator;
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index 33573f7175..cd20c332f3 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -65,34 +65,12 @@ class GlobalContext {
void SetVerbose() { verbose_ = true; }
bool verbose() const { return verbose_; }
- void AddControlSplitChangedVariables(const AstNode* node,
- const TypeVector& specialization_types,
- const std::set<const Variable*>& vars) {
- auto key = std::make_pair(node, specialization_types);
- control_split_changed_variables_[key] = vars;
- }
-
- const std::set<const Variable*>& GetControlSplitChangedVariables(
- const AstNode* node, const TypeVector& specialization_types) {
- auto key = std::make_pair(node, specialization_types);
- assert(control_split_changed_variables_.find(key) !=
- control_split_changed_variables_.end());
- return control_split_changed_variables_.find(key)->second;
- }
-
- void MarkVariableChanged(const AstNode* node,
- const TypeVector& specialization_types,
- Variable* var) {
- auto key = std::make_pair(node, specialization_types);
- control_split_changed_variables_[key].insert(var);
- }
-
friend class CurrentCallableActivator;
friend class BreakContinueActivator;
Callable* GetCurrentCallable() const { return current_callable_; }
- Label* GetCurrentBreak() const { return break_continue_stack_.back().first; }
- Label* GetCurrentContinue() const {
+ Block* GetCurrentBreak() const { return break_continue_stack_.back().first; }
+ Block* GetCurrentContinue() const {
return break_continue_stack_.back().second;
}
@@ -104,11 +82,9 @@ class GlobalContext {
int next_label_number_;
Declarations declarations_;
Callable* current_callable_;
- std::vector<std::pair<Label*, Label*>> break_continue_stack_;
+ std::vector<std::pair<Block*, Block*>> break_continue_stack_;
std::map<std::string, std::unique_ptr<Module>> modules_;
Module* default_module_;
- std::map<std::pair<const AstNode*, TypeVector>, std::set<const Variable*>>
- control_split_changed_variables_;
Ast ast_;
};
@@ -132,10 +108,10 @@ class CurrentCallableActivator {
class BreakContinueActivator {
public:
- BreakContinueActivator(GlobalContext& context, Label* break_label,
- Label* continue_label)
+ BreakContinueActivator(GlobalContext& context, Block* break_block,
+ Block* continue_block)
: context_(context) {
- context_.break_continue_stack_.push_back({break_label, continue_label});
+ context_.break_continue_stack_.push_back({break_block, continue_block});
}
~BreakContinueActivator() { context_.break_continue_stack_.pop_back(); }
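The break/continue bookkeeping now stores CFG blocks instead of labels, but the RAII shape is unchanged: an activator pushes the current loop's targets in its constructor and pops them in its destructor, so nested loops shadow outer ones automatically. A self-contained sketch of that pattern (Block and Context here are simplified stand-ins):

#include <cassert>
#include <utility>
#include <vector>

struct Block {};  // stand-in for a CFG basic block

class Context {
 public:
  Block* CurrentBreak() const {
    assert(!stack_.empty());
    return stack_.back().first;
  }
  Block* CurrentContinue() const {
    assert(!stack_.empty());
    return stack_.back().second;
  }

  // Pushes the loop's break/continue blocks on construction, pops on
  // destruction, mirroring BreakContinueActivator above.
  class BreakContinueActivator {
   public:
    BreakContinueActivator(Context& context, Block* break_block,
                           Block* continue_block)
        : context_(context) {
      context_.stack_.push_back({break_block, continue_block});
    }
    ~BreakContinueActivator() { context_.stack_.pop_back(); }

   private:
    Context& context_;
  };

 private:
  std::vector<std::pair<Block*, Block*>> stack_;
};

int main() {
  Context ctx;
  Block outer_exit, outer_header, inner_exit, inner_header;
  Context::BreakContinueActivator outer(ctx, &outer_exit, &outer_header);
  {
    Context::BreakContinueActivator inner(ctx, &inner_exit, &inner_header);
    assert(ctx.CurrentBreak() == &inner_exit);  // inner loop shadows outer
  }
  assert(ctx.CurrentBreak() == &outer_exit);    // restored on scope exit
}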
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 5044d914db..89c0c70416 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -4,6 +4,7 @@
#include <algorithm>
+#include "src/torque/csa-generator.h"
#include "src/torque/implementation-visitor.h"
#include "src/torque/parameter-difference.h"
@@ -26,19 +27,20 @@ VisitResult ImplementationVisitor::Visit(Expression* expr) {
const Type* ImplementationVisitor::Visit(Statement* stmt) {
CurrentSourcePosition::Scope scope(stmt->pos);
- GenerateIndent();
- source_out() << "// " << CurrentPositionAsString() << "\n";
+ const Type* result;
switch (stmt->kind) {
-#define ENUM_ITEM(name) \
- case AstNode::Kind::k##name: \
- return Visit(name::cast(stmt));
+#define ENUM_ITEM(name) \
+ case AstNode::Kind::k##name: \
+ result = Visit(name::cast(stmt)); \
+ break;
AST_STATEMENT_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
default:
- UNIMPLEMENTED();
+ UNREACHABLE();
}
- UNREACHABLE();
- return nullptr;
+ DCHECK_EQ(result == TypeOracle::GetNeverType(),
+ assembler().CurrentBlockIsComplete());
+ return result;
}
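The statement dispatch above relies on the X-macro idiom: AST_STATEMENT_NODE_KIND_LIST expands ENUM_ITEM once per node kind, and switching from return-per-case to break-per-case makes room for the shared DCHECK after the switch. A toy version of the idiom with two hypothetical kinds:

#include <iostream>

// The node-kind list is defined once and expanded both for the enum and for
// the switch, so adding a kind updates every dispatch site.
#define NODE_KIND_LIST(V) \
  V(ReturnStatement)      \
  V(GotoStatement)

enum class Kind {
#define ENUM_ITEM(name) k##name,
  NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
};

int VisitReturnStatement() { return 1; }
int VisitGotoStatement() { return 2; }

int Visit(Kind kind) {
  int result;
  switch (kind) {
#define ENUM_ITEM(name)     \
  case Kind::k##name:       \
    result = Visit##name(); \
    break;
    NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
  }
  // Breaking out of the switch (rather than returning from each case) leaves
  // room for a shared post-dispatch check, as the patch adds with DCHECK_EQ.
  return result;
}

int main() { std::cout << Visit(Kind::kGotoStatement) << "\n"; }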
void ImplementationVisitor::Visit(Declaration* decl) {
@@ -72,12 +74,13 @@ void ImplementationVisitor::BeginModuleFile(Module* module) {
std::ostream& header = module->header_stream();
if (module->IsDefault()) {
- source << "#include \"src/code-stub-assembler.h\"";
+ source << "#include \"src/torque-assembler.h\"";
} else {
source << "#include \"src/builtins/builtins-" +
DashifyString(module->name()) + "-gen.h\"";
}
source << "\n";
+ source << "#include \"src/objects/arguments.h\"\n";
source << "#include \"src/builtins/builtins-utils-gen.h\"\n";
source << "#include \"src/builtins/builtins.h\"\n";
source << "#include \"src/code-factory.h\"\n";
@@ -103,7 +106,7 @@ void ImplementationVisitor::BeginModuleFile(Module* module) {
header << "#ifndef " << headerDefine << "\n";
header << "#define " << headerDefine << "\n\n";
if (module->IsDefault()) {
- header << "#include \"src/code-stub-assembler.h\"";
+ header << "#include \"src/torque-assembler.h\"";
} else {
header << "#include \"src/builtins/builtins-" +
DashifyString(module->name()) + "-gen.h\"\n";
@@ -176,12 +179,20 @@ void ImplementationVisitor::Visit(ConstDeclaration* decl) {
DCHECK(!signature.return_type->IsVoidOrNever());
+ assembler_ = CfgAssembler(Stack<const Type*>{});
+
VisitResult expression_result = Visit(decl->expression);
VisitResult return_result =
GenerateImplicitConvert(signature.return_type, expression_result);
- GenerateIndent();
- source_out() << "return " << return_result.RValue() << ";\n";
+ CSAGenerator csa_generator{assembler().Result(), source_out()};
+ Stack<std::string> values = *csa_generator.EmitGraph(Stack<std::string>{});
+
+ assembler_ = base::nullopt;
+
+ source_out() << "return ";
+ CSAGenerator::EmitCSAValue(return_result, values, source_out());
+ source_out() << ";\n";
source_out() << "}\n\n";
}
@@ -193,13 +204,41 @@ void ImplementationVisitor::Visit(StructDeclaration* decl) {
header_out() << " " << field.type->GetGeneratedTypeName();
header_out() << " " << field.name << ";\n";
}
- header_out() << " } "
- << ";\n";
+ header_out() << "\n std::tuple<";
+ bool first = true;
+ for (const Type* type : LowerType(struct_type)) {
+ if (!first) {
+ header_out() << ", ";
+ }
+ first = false;
+ header_out() << type->GetGeneratedTypeName();
+ }
+ header_out() << "> Flatten() const {\n"
+ << " return std::tuple_cat(";
+ first = true;
+ for (auto& field : struct_type->fields()) {
+ if (!first) {
+ header_out() << ", ";
+ }
+ first = false;
+ if (field.type->IsStructType()) {
+ header_out() << field.name << ".Flatten()";
+ } else {
+ header_out() << "std::make_tuple(" << field.name << ")";
+ }
+ }
+ header_out() << ");\n";
+ header_out() << " }\n";
+ header_out() << " };\n";
}
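The generated Flatten() turns a struct into a std::tuple of its lowered components: scalar fields are wrapped with std::make_tuple, nested struct fields contribute their own Flatten(), and std::tuple_cat splices the pieces into one flat tuple matching the lowered stack layout. A hand-written example of the shape the generator emits (Inner and Outer are illustrative names, not generated ones):

#include <string>
#include <tuple>

struct Inner {
  int a;
  int b;
  std::tuple<int, int> Flatten() const {
    return std::tuple_cat(std::make_tuple(a), std::make_tuple(b));
  }
};

struct Outer {
  std::string tag;
  Inner inner;  // nested struct: contributes its own Flatten()
  std::tuple<std::string, int, int> Flatten() const {
    return std::tuple_cat(std::make_tuple(tag), inner.Flatten());
  }
};

int main() {
  Outer o{"x", {1, 2}};
  auto flat = o.Flatten();
  static_assert(std::tuple_size<decltype(flat)>::value == 3, "three slots");
  return std::get<1>(flat) == 1 ? 0 : 1;
}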
void ImplementationVisitor::Visit(TorqueMacroDeclaration* decl,
const Signature& sig, Statement* body) {
Signature signature = MakeSignature(decl->signature.get());
+ const Type* return_type = signature.return_type;
+ bool can_return = return_type != TypeOracle::GetNeverType();
+ bool has_return_value =
+ can_return && return_type != TypeOracle::GetVoidType();
std::string name = GetGeneratedCallableName(
decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
const TypeVector& list = signature.types();
@@ -216,15 +255,43 @@ void ImplementationVisitor::Visit(TorqueMacroDeclaration* decl,
source_out(), GetDSLAssemblerName(CurrentModule()) + "::", macro);
source_out() << " {\n";
- const Variable* result_var = nullptr;
- if (macro->HasReturnValue()) {
- result_var =
- GeneratePredeclaredVariableDeclaration(kReturnValueVariable, {});
+ Stack<std::string> lowered_parameters;
+ Stack<const Type*> lowered_parameter_types;
+
+ for (const std::string& name : macro->parameter_names()) {
+ Parameter* parameter = Parameter::cast(declarations()->LookupValue(name));
+ const Type* type = parameter->type();
+ if (type->IsConstexpr()) {
+ parameter->set_value(
+ VisitResult(parameter->type(), parameter->external_name()));
+ } else {
+ LowerParameter(type, parameter->external_name(), &lowered_parameters);
+ StackRange range = lowered_parameter_types.PushMany(LowerType(type));
+ parameter->set_value(VisitResult(type, range));
+ }
+ }
+
+ DCHECK_EQ(lowered_parameters.Size(), lowered_parameter_types.Size());
+ assembler_ = CfgAssembler(lowered_parameter_types);
+
+ for (const LabelDeclaration& label_info : sig.labels) {
+ Label* label = declarations()->LookupLabel(label_info.name);
+ Stack<const Type*> label_input_stack;
+ for (Variable* v : label->GetParameters()) {
+ label_input_stack.PushMany(LowerType(v->type()));
+ }
+ CreateBlockForLabel(label, label_input_stack);
}
+
Label* macro_end = declarations()->DeclareLabel("macro_end");
- GenerateLabelDefinition(macro_end, decl);
+ if (can_return) {
+ Stack<const Type*> result_stack;
+ CreateBlockForLabel(macro_end,
+ Stack<const Type*>{LowerType(signature.return_type)});
+ }
const Type* result = Visit(body);
+
if (result->IsNever()) {
if (!macro->signature().return_type->IsNever() && !macro->HasReturns()) {
std::stringstream s;
@@ -246,23 +313,54 @@ void ImplementationVisitor::Visit(TorqueMacroDeclaration* decl,
ReportError(s.str());
}
}
- if (macro->HasReturns()) {
- if (!result->IsNever()) {
- GenerateLabelGoto(macro_end);
+ if (!result->IsNever()) {
+ GenerateLabelGoto(macro_end);
+ }
+
+ for (const LabelDeclaration& label_info : sig.labels) {
+ Label* label = declarations()->LookupLabel(label_info.name);
+ GenerateLabelBind(label);
+ std::vector<std::string> label_parameter_variables;
+ for (size_t i = 0; i < label->GetParameterCount(); ++i) {
+ label_parameter_variables.push_back(
+ ExternalLabelParameterName(label, i));
}
+ assembler().Emit(GotoExternalInstruction{label->external_label_name(),
+ label_parameter_variables});
+ }
+
+ if (macro->HasReturns() || !result->IsNever()) {
GenerateLabelBind(macro_end);
}
- if (result_var != nullptr) {
- GenerateIndent();
- source_out() << "return "
- << RValueFlattenStructs(
- VisitResult(result_var->type(), result_var))
- << ";\n";
+
+ CSAGenerator csa_generator{assembler().Result(), source_out()};
+ base::Optional<Stack<std::string>> values =
+ csa_generator.EmitGraph(lowered_parameters);
+
+ assembler_ = base::nullopt;
+
+ if (has_return_value) {
+ source_out() << " return ";
+ CSAGenerator::EmitCSAValue(GetAndClearReturnValue(), *values,
+ source_out());
+ source_out() << ";\n";
}
source_out() << "}\n\n";
}
}
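Macro bodies are now compiled in two phases: the visitor records instructions into a control-flow graph via the CfgAssembler, and only afterwards does a CSAGenerator walk the finished graph and print CSA C++. A schematic sketch of that split with heavily simplified stand-in classes (the real instruction set and block structure are much richer):

#include <iostream>
#include <string>
#include <vector>

struct Instruction { std::string opcode; };

// Phase one: the visitor emits abstract instructions into a graph.
class GraphAssembler {
 public:
  void Emit(Instruction i) { graph_.push_back(std::move(i)); }
  const std::vector<Instruction>& Result() const { return graph_; }

 private:
  std::vector<Instruction> graph_;
};

// Phase two: a generator turns the finished graph into output text.
class Generator {
 public:
  explicit Generator(const std::vector<Instruction>& graph) : graph_(graph) {}
  void EmitGraph(std::ostream& out) {
    for (const Instruction& i : graph_) out << "  " << i.opcode << ";\n";
  }

 private:
  const std::vector<Instruction>& graph_;
};

int main() {
  GraphAssembler assembler;  // phase 1: build the graph
  assembler.Emit({"Push(1)"});
  assembler.Emit({"Return"});
  Generator generator(assembler.Result());  // phase 2: print the graph
  generator.EmitGraph(std::cout);
}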
+namespace {
+std::string AddParameter(Value* parameter, size_t i,
+ Stack<std::string>* parameters,
+ Stack<const Type*>* parameter_types) {
+ std::string name = "parameter" + std::to_string(i);
+ parameters->Push(name);
+ StackRange range = parameter_types->PushMany(LowerType(parameter->type()));
+ parameter->set_value(VisitResult(parameter->type(), range));
+ return name;
+}
+} // namespace
+
void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
const Signature& signature, Statement* body) {
std::string name = GetGeneratedCallableName(
@@ -272,15 +370,17 @@ void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
Builtin* builtin = declarations()->LookupBuiltin(name);
CurrentCallableActivator activator(global_context_, builtin, decl);
+ Stack<const Type*> parameter_types;
+ Stack<std::string> parameters;
+
// Context
- const Value* val =
+ Value* val =
declarations()->LookupValue(decl->signature->parameters.names[0]);
- GenerateIndent();
- source_out() << "TNode<Context> " << val->value()
+ std::string parameter0 = AddParameter(val, 0, &parameters, &parameter_types);
+ source_out() << " TNode<Context> " << parameter0
<< " = UncheckedCast<Context>(Parameter("
<< "Descriptor::kContext));\n";
- GenerateIndent();
- source_out() << "USE(" << val->value() << ");\n";
+ source_out() << " USE(" << parameter0 << ");\n";
size_t first = 1;
if (builtin->IsVarArgsJavaScript()) {
@@ -288,41 +388,82 @@ void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
ExternConstant* arguments =
ExternConstant::cast(declarations()->LookupValue(
decl->signature->parameters.arguments_variable));
- std::string arguments_name = arguments->value();
- GenerateIndent();
+ std::string arguments_name = arguments->value().constexpr_value();
source_out()
- << "Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
- GenerateIndent();
- source_out() << "CodeStubArguments arguments_impl(this, "
+ << " Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
+ source_out() << " CodeStubArguments arguments_impl(this, "
"ChangeInt32ToIntPtr(argc));\n";
- const Value* receiver =
+ Value* receiver =
declarations()->LookupValue(decl->signature->parameters.names[1]);
- GenerateIndent();
- source_out() << "TNode<Object> " << receiver->value()
+ std::string parameter1 =
+ AddParameter(receiver, 1, &parameters, &parameter_types);
+
+ source_out() << " TNode<Object> " << parameter1
<< " = arguments_impl.GetReceiver();\n";
- GenerateIndent();
- source_out() << "auto arguments = &arguments_impl;\n";
- GenerateIndent();
+ source_out() << "auto " << CSAGenerator::ARGUMENTS_VARIABLE_STRING
+ << " = &arguments_impl;\n";
source_out() << "USE(arguments);\n";
- GenerateIndent();
- source_out() << "USE(" << receiver->value() << ");\n";
+ source_out() << "USE(" << parameter1 << ");\n";
first = 2;
}
- GenerateParameterList(decl->signature->parameters.names, first);
- Visit(body);
+ for (size_t i = 0; i < decl->signature->parameters.names.size(); ++i) {
+ if (i < first) continue;
+ const std::string& parameter_name = decl->signature->parameters.names[i];
+ Value* parameter = declarations()->LookupValue(parameter_name);
+ std::string var = AddParameter(parameter, i, &parameters, &parameter_types);
+ source_out() << " " << parameter->type()->GetGeneratedTypeName() << " "
+ << var << " = "
+ << "UncheckedCast<"
+ << parameter->type()->GetGeneratedTNodeTypeName()
+ << ">(Parameter(Descriptor::k"
+ << CamelifyString(parameter_name) << "));\n";
+ source_out() << " USE(" << var << ");\n";
+ }
+
+ assembler_ = CfgAssembler(parameter_types);
+ const Type* body_result = Visit(body);
+ if (body_result != TypeOracle::GetNeverType()) {
+ ReportError("control reaches end of builtin, expected return of a value");
+ }
+ CSAGenerator csa_generator{assembler().Result(), source_out(),
+ builtin->kind()};
+ csa_generator.EmitGraph(parameters);
+ assembler_ = base::nullopt;
source_out() << "}\n\n";
}
const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
+ base::Optional<const Type*> type;
+ if (stmt->type) type = declarations()->GetType(*stmt->type);
base::Optional<VisitResult> init_result;
if (stmt->initializer) {
+ StackScope scope(this);
init_result = Visit(*stmt->initializer);
+ if (type) {
+ init_result = GenerateImplicitConvert(*type, *init_result);
+ }
+ init_result = scope.Yield(*init_result);
+ } else {
+ DCHECK(type.has_value());
+ if ((*type)->IsConstexpr()) {
+ ReportError("constexpr variables need an initializer");
+ }
+ TypeVector lowered_types = LowerType(*type);
+ for (const Type* type : lowered_types) {
+ assembler().Emit(PushUninitializedInstruction{type});
+ }
+ init_result =
+ VisitResult(*type, assembler().TopRange(lowered_types.size()));
}
- base::Optional<const Type*> type;
- if (stmt->type) type = declarations()->GetType(*stmt->type);
- GenerateVariableDeclaration(stmt, stmt->name, stmt->const_qualified, type,
- init_result);
+ Variable* var;
+ if (stmt->const_qualified) {
+ var = declarations()->DeclareVariable(stmt->name, init_result->type(),
+ stmt->const_qualified);
+ } else {
+ var = Variable::cast(declarations()->LookupValue(stmt->name));
+ }
+ var->set_value(*init_result);
return TypeOracle::GetVoidType();
}
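A variable declared without an initializer now reserves one uninitialized stack slot per lowered component of its type; struct types lower recursively into the concatenation of their field lowerings. A minimal model of that LowerType behavior (Type here is a simplified stand-in):

#include <string>
#include <vector>

struct Type {
  std::string name;
  std::vector<const Type*> components;  // empty for a primitive type
};

// A struct-like type lowers to the concatenation of its components'
// lowerings; a primitive lowers to itself.
void Lower(const Type* type, std::vector<const Type*>* out) {
  if (type->components.empty()) {
    out->push_back(type);
    return;
  }
  for (const Type* c : type->components) Lower(c, out);
}

int main() {
  Type smi{"Smi", {}};
  Type object{"Object", {}};
  Type pair{"Pair", {&smi, &object}};
  Type nested{"Nested", {&pair, &smi}};

  std::vector<const Type*> slots;
  Lower(&nested, &slots);
  // Declaring an uninitialized variable of type Nested would push three
  // uninitialized stack slots, one per lowered component.
  return slots.size() == 3 ? 0 : 1;
}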
@@ -331,72 +472,61 @@ const Type* ImplementationVisitor::Visit(TailCallStatement* stmt) {
}
VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
- std::string f1 = NewTempVariable();
- std::string f2 = NewTempVariable();
-
- // The code for both paths of the conditional need to be generated first in
- // lambdas before evaluating the conditional expression because the common
- // type of the result of both the true and false of the condition needs to be
- // known when declaring the variable to hold the result of the conditional.
- VisitResult left, right;
- GenerateIndent();
- source_out() << "auto " << f1 << " = [=]() ";
- {
- ScopedIndent indent(this, false);
- source_out() << "\n";
- left = Visit(expr->if_true);
- GenerateIndent();
- source_out() << "return " << RValueFlattenStructs(left) << ";\n";
- }
- source_out() << ";\n";
- GenerateIndent();
- source_out() << "auto " << f2 << " = [=]() ";
+ Label* true_label;
+ Label* false_label;
+ Block* done_block = assembler().NewBlock();
+ Block* true_conversion_block = assembler().NewBlock();
{
- ScopedIndent indent(this, false);
- source_out() << "\n";
- right = Visit(expr->if_false);
- GenerateIndent();
- source_out() << "return " << RValueFlattenStructs(right) << ";\n";
+ Declarations::NodeScopeActivator scope(declarations(), expr->condition);
+
+ true_label = declarations()->LookupLabel(kTrueLabelName);
+ CreateBlockForLabel(true_label, assembler().CurrentStack());
+ false_label = declarations()->LookupLabel(kFalseLabelName);
+ CreateBlockForLabel(false_label, assembler().CurrentStack());
+ done_block = assembler().NewBlock();
+
+ {
+ StackScope condition_scope(this);
+ VisitResult condition_result = Visit(expr->condition);
+ if (!condition_result.type()->IsNever()) {
+ condition_result = condition_scope.Yield(GenerateImplicitConvert(
+ TypeOracle::GetBoolType(), condition_result));
+ assembler().Branch(true_label->block(), false_label->block());
+ }
+ }
}
- source_out() << ";\n";
- const Type* common_type = GetCommonType(left.type(), right.type());
- std::string result_var = NewTempVariable();
- Variable* result =
- GenerateVariableDeclaration(expr, result_var, false, common_type);
+ VisitResult left;
+ VisitResult right;
{
- ScopedIndent indent(this);
- Declarations::NodeScopeActivator scope(declarations(), expr->condition);
-
- Label* true_label = declarations()->LookupLabel(kTrueLabelName);
- GenerateLabelDefinition(true_label);
- Label* false_label = declarations()->LookupLabel(kFalseLabelName);
- GenerateLabelDefinition(false_label);
- Label* done_label = declarations()->DeclarePrivateLabel(kDoneLabelName);
- GenerateLabelDefinition(done_label, expr);
+    // The code for both paths of the conditional needs to be generated
+    // before either result can be converted, because the conversion target
+    // is the common type of the true and false results, which is only known
+    // once both arms have been visited.
+ assembler().Bind(true_label->block());
+ StackScope left_scope(this);
+ left = Visit(expr->if_true);
+ assembler().Goto(true_conversion_block);
- VisitResult condition_result = Visit(expr->condition);
- if (!condition_result.type()->IsNever()) {
- condition_result =
- GenerateImplicitConvert(TypeOracle::GetBoolType(), condition_result);
- GenerateBranch(condition_result, true_label, false_label);
+ const Type* common_type;
+ {
+ assembler().Bind(false_label->block());
+ StackScope right_scope(this);
+ right = Visit(expr->if_false);
+ common_type = GetCommonType(left.type(), right.type());
+ right = right_scope.Yield(GenerateImplicitConvert(common_type, right));
+ assembler().Goto(done_block);
}
- GenerateLabelBind(true_label);
- GenerateIndent();
- VisitResult left_result = {right.type(), f1 + "()"};
- GenerateAssignToVariable(result, left_result);
- GenerateLabelGoto(done_label);
- GenerateLabelBind(false_label);
- GenerateIndent();
- VisitResult right_result = {right.type(), f2 + "()"};
- GenerateAssignToVariable(result, right_result);
- GenerateLabelGoto(done_label);
-
- GenerateLabelBind(done_label);
+ assembler().Bind(true_conversion_block);
+ left = left_scope.Yield(GenerateImplicitConvert(common_type, left));
+ assembler().Goto(done_block);
}
- return VisitResult(common_type, result);
+
+ assembler().Bind(done_block);
+ CHECK_EQ(left, right);
+ return left;
}
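The ternary now lowers to an explicit block diamond: branch on the condition, visit each arm into its own block, and merge at a done block, with the true arm routed through a separate conversion block that is bound only after the false arm has been visited, since the common type is unknown until then. A purely schematic rendering of the emitted shape (Block is just a labeled instruction list here):

#include <iostream>
#include <string>
#include <vector>

struct Block {
  std::string name;
  std::vector<std::string> code;
};

int main() {
  Block true_block{"true"}, false_block{"false"},
      true_conversion{"true_conversion"}, done{"done"};

  std::cout << "Branch(cond, true, false)\n";
  true_block.code = {"eval if_true", "Goto(true_conversion)"};
  // The false arm is visited to completion first so the common type of both
  // arms is known before the true arm's result is converted.
  false_block.code = {"eval if_false", "convert to common type", "Goto(done)"};
  true_conversion.code = {"convert to common type", "Goto(done)"};

  for (const Block* b : {&true_block, &false_block, &true_conversion, &done}) {
    std::cout << b->name << ":\n";
    for (const std::string& line : b->code) std::cout << "  " << line << "\n";
  }
}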
VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
@@ -404,32 +534,44 @@ VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
{
Declarations::NodeScopeActivator scope(declarations(), expr->left);
Label* false_label = declarations()->LookupLabel(kFalseLabelName);
- GenerateLabelDefinition(false_label);
+ CreateBlockForLabel(false_label, assembler().CurrentStack());
left_result = Visit(expr->left);
if (left_result.type()->IsBool()) {
Label* true_label = declarations()->LookupLabel(kTrueLabelName);
- GenerateIndent();
- source_out() << "GotoIf(" << RValueFlattenStructs(left_result) << ", "
- << true_label->generated() << ");\n";
+ assembler().Branch(true_label->block(), false_label->block());
+ assembler().Bind(false_label->block());
+ } else if (left_result.type()->IsNever()) {
+ assembler().Bind(false_label->block());
} else if (!left_result.type()->IsConstexprBool()) {
- GenerateLabelBind(false_label);
+ ReportError(
+ "expected type bool, constexpr bool, or never on left-hand side of "
+ "operator ||");
}
}
- VisitResult right_result = Visit(expr->right);
- if (right_result.type() != left_result.type()) {
- std::stringstream stream;
- stream << "types of left and right expression of logical OR don't match (\""
- << *left_result.type() << "\" vs. \"" << *right_result.type()
- << "\")";
- ReportError(stream.str());
- }
+
if (left_result.type()->IsConstexprBool()) {
- return VisitResult(left_result.type(),
- std::string("(") + RValueFlattenStructs(left_result) +
- " || " + RValueFlattenStructs(right_result) + ")");
- } else {
- return right_result;
+ VisitResult right_result = Visit(expr->right);
+ if (!right_result.type()->IsConstexprBool()) {
+ ReportError(
+ "expected type constexpr bool on right-hand side of operator "
+ "||");
+ }
+ return VisitResult(TypeOracle::GetConstexprBoolType(),
+ std::string("(") + left_result.constexpr_value() +
+ " || " + right_result.constexpr_value() + ")");
}
+
+ VisitResult right_result = Visit(expr->right);
+ if (right_result.type()->IsBool()) {
+ Label* true_label = declarations()->LookupLabel(kTrueLabelName);
+ Label* false_label = declarations()->LookupLabel(kFalseLabelName);
+ assembler().Branch(true_label->block(), false_label->block());
+ return VisitResult::NeverResult();
+ } else if (!right_result.type()->IsNever()) {
+ ReportError(
+ "expected type bool or never on right-hand side of operator ||");
+ }
+ return right_result;
}
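On the constexpr-bool path, operator || emits no branches at all; it splices both operands' constexpr values into a single C++ expression for the generated code. A sketch of exactly that string construction:

#include <iostream>
#include <string>

// Mirrors the constexpr case above: the result is the literal text
// "(lhs || rhs)", evaluated later by the C++ compiler, not by emitted CSA.
std::string ConstexprOr(const std::string& left, const std::string& right) {
  return "(" + left + " || " + right + ")";
}

int main() {
  std::cout << ConstexprOr("lhs_is_smi", "FLAG_fast_path") << "\n";
  // prints: (lhs_is_smi || FLAG_fast_path)
}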
VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
@@ -437,67 +579,75 @@ VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
{
Declarations::NodeScopeActivator scope(declarations(), expr->left);
Label* true_label = declarations()->LookupLabel(kTrueLabelName);
- GenerateLabelDefinition(true_label);
+ CreateBlockForLabel(true_label, assembler().CurrentStack());
left_result = Visit(expr->left);
if (left_result.type()->IsBool()) {
Label* false_label = declarations()->LookupLabel(kFalseLabelName);
- GenerateIndent();
- source_out() << "GotoIfNot(" << RValueFlattenStructs(left_result) << ", "
- << false_label->generated() << ");\n";
+ assembler().Branch(true_label->block(), false_label->block());
+ assembler().Bind(true_label->block());
+ } else if (left_result.type()->IsNever()) {
+ assembler().Bind(true_label->block());
} else if (!left_result.type()->IsConstexprBool()) {
- GenerateLabelBind(true_label);
+ ReportError(
+ "expected type bool, constexpr bool, or never on left-hand side of "
+ "operator &&");
}
}
- VisitResult right_result = Visit(expr->right);
- if (right_result.type() != left_result.type()) {
- std::stringstream stream;
- stream
- << "types of left and right expression of logical AND don't match (\""
- << *left_result.type() << "\" vs. \"" << *right_result.type() << "\")";
- ReportError(stream.str());
- }
+
if (left_result.type()->IsConstexprBool()) {
- return VisitResult(left_result.type(),
- std::string("(") + RValueFlattenStructs(left_result) +
- " && " + RValueFlattenStructs(right_result) + ")");
- } else {
- return right_result;
+ VisitResult right_result = Visit(expr->right);
+ if (!right_result.type()->IsConstexprBool()) {
+ ReportError(
+ "expected type constexpr bool on right-hand side of operator "
+ "&&");
+ }
+ return VisitResult(TypeOracle::GetConstexprBoolType(),
+ std::string("(") + left_result.constexpr_value() +
+ " && " + right_result.constexpr_value() + ")");
+ }
+
+ VisitResult right_result = Visit(expr->right);
+ if (right_result.type()->IsBool()) {
+ Label* true_label = declarations()->LookupLabel(kTrueLabelName);
+ Label* false_label = declarations()->LookupLabel(kFalseLabelName);
+ assembler().Branch(true_label->block(), false_label->block());
+ return VisitResult::NeverResult();
+ } else if (!right_result.type()->IsNever()) {
+ ReportError(
+ "expected type bool or never on right-hand side of operator &&");
}
+ return right_result;
}
VisitResult ImplementationVisitor::Visit(IncrementDecrementExpression* expr) {
- VisitResult value_copy;
- auto location_ref = GetLocationReference(expr->location);
- VisitResult current_value =
- GenerateFetchFromLocation(expr->location, location_ref);
- if (expr->postfix) {
- value_copy = GenerateCopy(current_value);
- }
+ StackScope scope(this);
+ LocationReference location_ref = GetLocationReference(expr->location);
+ VisitResult current_value = GenerateFetchFromLocation(location_ref);
VisitResult one = {TypeOracle::GetConstInt31Type(), "1"};
Arguments args;
args.parameters = {current_value, one};
VisitResult assignment_value = GenerateCall(
expr->op == IncrementDecrementOperator::kIncrement ? "+" : "-", args);
- GenerateAssignToLocation(expr->location, location_ref, assignment_value);
- return expr->postfix ? value_copy : assignment_value;
+ GenerateAssignToLocation(location_ref, assignment_value);
+ return scope.Yield(expr->postfix ? current_value : assignment_value);
}
VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
+ StackScope scope(this);
LocationReference location_ref = GetLocationReference(expr->location);
VisitResult assignment_value;
if (expr->op) {
- VisitResult location_value =
- GenerateFetchFromLocation(expr->location, location_ref);
+ VisitResult location_value = GenerateFetchFromLocation(location_ref);
assignment_value = Visit(expr->value);
Arguments args;
args.parameters = {location_value, assignment_value};
assignment_value = GenerateCall(*expr->op, args);
- GenerateAssignToLocation(expr->location, location_ref, assignment_value);
+ GenerateAssignToLocation(location_ref, assignment_value);
} else {
assignment_value = Visit(expr->value);
- GenerateAssignToLocation(expr->location, location_ref, assignment_value);
+ GenerateAssignToLocation(location_ref, assignment_value);
}
- return assignment_value;
+ return scope.Yield(assignment_value);
}
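Expression visitors now open a StackScope and Yield their result: the scope remembers the stack height on entry, and yielding drops every temporary pushed above that height while keeping the result. A simplified standalone version of the mechanism (EvalStack and int-valued slots are stand-ins):

#include <cassert>
#include <cstddef>
#include <vector>

class EvalStack {
 public:
  void Push(int v) { slots_.push_back(v); }
  size_t Size() const { return slots_.size(); }

  // Remembers the stack height on entry; Yield() pops all temporaries above
  // that height and re-pushes the single yielded result.
  class StackScope {
   public:
    explicit StackScope(EvalStack* stack)
        : stack_(stack), base_(stack->slots_.size()) {}
    int Yield(int result) {
      stack_->slots_.resize(base_);  // drop the temporaries
      stack_->slots_.push_back(result);
      return result;
    }

   private:
    EvalStack* stack_;
    size_t base_;
  };

 private:
  std::vector<int> slots_;
};

int main() {
  EvalStack stack;
  stack.Push(7);  // some live value below the expression
  {
    EvalStack::StackScope scope(&stack);
    stack.Push(1);   // temporary
    stack.Push(2);   // temporary
    scope.Yield(3);  // result replaces the temporaries
  }
  assert(stack.Size() == 2);  // live value + yielded result
}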
VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
@@ -513,9 +663,7 @@ VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
result_type = declarations()->LookupType(CONST_INT32_TYPE_STRING);
}
}
- std::string temp = GenerateNewTempVariable(result_type);
- source_out() << expr->number << ";\n";
- return VisitResult{result_type, temp};
+ return VisitResult{result_type, expr->number};
}
VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
@@ -525,16 +673,16 @@ VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
if (result_type->IsNever()) {
ReportError("unreachable code");
}
- return VisitResult{result_type, "UncheckedCast<" +
- result_type->GetGeneratedTNodeTypeName() +
- ">(" + result.RValue() + ")"};
+ CHECK_EQ(LowerType(result_type), TypeVector{result_type});
+ assembler().Emit(UnsafeCastInstruction{result_type});
+ result.SetType(result_type);
+ return result;
}
VisitResult ImplementationVisitor::Visit(StringLiteralExpression* expr) {
- std::string temp = GenerateNewTempVariable(TypeOracle::GetConstStringType());
- source_out() << "\"" << expr->literal.substr(1, expr->literal.size() - 2)
- << "\";\n";
- return VisitResult{TypeOracle::GetConstStringType(), temp};
+ return VisitResult{
+ TypeOracle::GetConstStringType(),
+ "\"" + expr->literal.substr(1, expr->literal.size() - 2) + "\""};
}
VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
@@ -546,13 +694,12 @@ VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
const Type* type = TypeOracle::GetFunctionPointerType(
builtin->signature().parameter_types.types,
builtin->signature().return_type);
- std::string code =
- "HeapConstant(Builtins::CallableFor(isolate(), Builtins::k" +
- builtin->name() + ").code())";
- return VisitResult(type, code);
+ assembler().Emit(PushCodePointerInstruction{builtin->name(), type});
+ return VisitResult(type, assembler().TopRange(1));
}
VisitResult ImplementationVisitor::Visit(IdentifierExpression* expr) {
+ StackScope scope(this);
std::string name = expr->name;
if (expr->generic_arguments.size() != 0) {
GenericList* generic_list = declarations()->LookupGeneric(expr->name);
@@ -567,10 +714,10 @@ VisitResult ImplementationVisitor::Visit(IdentifierExpression* expr) {
}
if (Builtin* builtin = Builtin::DynamicCast(declarations()->Lookup(name))) {
- return GetBuiltinCode(builtin);
+ return scope.Yield(GetBuiltinCode(builtin));
}
- return GenerateFetchFromLocation(expr, GetLocationReference(expr));
+ return scope.Yield(GenerateFetchFromLocation(GetLocationReference(expr)));
}
const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
@@ -585,20 +732,21 @@ const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
}
size_t i = 0;
+ StackRange arguments = assembler().TopRange(0);
for (Expression* e : stmt->arguments) {
+ StackScope scope(this);
VisitResult result = Visit(e);
Variable* var = label->GetParameter(i++);
- GenerateAssignToVariable(var, result);
+ result = GenerateImplicitConvert(var->type(), result);
+ arguments.Extend(scope.Yield(result).stack_range());
}
- GenerateLabelGoto(label);
+ GenerateLabelGoto(label, arguments);
label->MarkUsed();
return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
- ScopedIndent indent(this);
-
bool has_else = stmt->if_false.has_value();
if (stmt->is_constexpr) {
@@ -611,23 +759,33 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
ReportError(stream.str());
}
+ Block* true_block = assembler().NewBlock();
+ Block* false_block = assembler().NewBlock();
+ Block* done_block = assembler().NewBlock();
+
+ assembler().Emit(ConstexprBranchInstruction{
+ expression_result.constexpr_value(), true_block, false_block});
+
+ assembler().Bind(true_block);
const Type* left_result;
- const Type* right_result = TypeOracle::GetVoidType();
{
- GenerateIndent();
- source_out() << "if ((" << RValueFlattenStructs(expression_result)
- << ")) ";
- ScopedIndent indent(this, false);
- source_out() << "\n";
+ StackScope stack_scope(this);
left_result = Visit(stmt->if_true);
}
+ if (left_result == TypeOracle::GetVoidType()) {
+ assembler().Goto(done_block);
+ }
+ assembler().Bind(false_block);
+ const Type* right_result = TypeOracle::GetVoidType();
if (has_else) {
- source_out() << " else ";
- ScopedIndent indent(this, false);
- source_out() << "\n";
+ StackScope stack_scope(this);
right_result = Visit(*stmt->if_false);
}
+ if (right_result == TypeOracle::GetVoidType()) {
+ assembler().Goto(done_block);
+ }
+
if (left_result->IsNever() != right_result->IsNever()) {
std::stringstream stream;
stream << "either both or neither branches in a constexpr if statement "
@@ -636,8 +794,9 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
ReportError(stream.str());
}
- source_out() << "\n";
-
+ if (left_result != TypeOracle::GetNeverType()) {
+ assembler().Bind(done_block);
+ }
return left_result;
} else {
Label* true_label = nullptr;
@@ -645,56 +804,54 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
{
Declarations::NodeScopeActivator scope(declarations(), &*stmt->condition);
true_label = declarations()->LookupLabel(kTrueLabelName);
- GenerateLabelDefinition(true_label);
+ CreateBlockForLabel(true_label, assembler().CurrentStack());
false_label = declarations()->LookupLabel(kFalseLabelName);
- GenerateLabelDefinition(false_label, !has_else ? stmt : nullptr);
+ CreateBlockForLabel(false_label, assembler().CurrentStack());
}
- Label* done_label = nullptr;
+ Block* done_block;
bool live = false;
if (has_else) {
- done_label = declarations()->DeclarePrivateLabel("if_done_label");
- GenerateLabelDefinition(done_label, stmt);
+ done_block = assembler().NewBlock();
} else {
- done_label = false_label;
+ done_block = false_label->block();
live = true;
}
std::vector<Statement*> blocks = {stmt->if_true};
std::vector<Label*> labels = {true_label, false_label};
if (has_else) blocks.push_back(*stmt->if_false);
- if (GenerateExpressionBranch(stmt->condition, labels, blocks, done_label)) {
+ if (GenerateExpressionBranch(stmt->condition, labels, blocks, done_block)) {
live = true;
}
if (live) {
- GenerateLabelBind(done_label);
+ assembler().Bind(done_block);
}
return live ? TypeOracle::GetVoidType() : TypeOracle::GetNeverType();
}
}
const Type* ImplementationVisitor::Visit(WhileStatement* stmt) {
- ScopedIndent indent(this);
-
Label* body_label = nullptr;
Label* exit_label = nullptr;
{
Declarations::NodeScopeActivator scope(declarations(), stmt->condition);
body_label = declarations()->LookupLabel(kTrueLabelName);
- GenerateLabelDefinition(body_label);
+ CreateBlockForLabel(body_label, assembler().CurrentStack());
exit_label = declarations()->LookupLabel(kFalseLabelName);
- GenerateLabelDefinition(exit_label);
+ CreateBlockForLabel(exit_label, assembler().CurrentStack());
}
- Label* header_label = declarations()->DeclarePrivateLabel("header");
- GenerateLabelDefinition(header_label, stmt);
- GenerateLabelGoto(header_label);
- GenerateLabelBind(header_label);
+ Block* header_block = assembler().NewBlock();
+ assembler().Goto(header_block);
+
+ assembler().Bind(header_block);
Declarations::NodeScopeActivator scope(declarations(), stmt->body);
- BreakContinueActivator activator(global_context_, exit_label, header_label);
+ BreakContinueActivator activator(global_context_, exit_label->block(),
+ header_block);
GenerateExpressionBranch(stmt->condition, {body_label, exit_label},
- {stmt->body}, header_label);
+ {stmt->body}, header_block);
GenerateLabelBind(exit_label);
return TypeOracle::GetVoidType();
@@ -702,7 +859,6 @@ const Type* ImplementationVisitor::Visit(WhileStatement* stmt) {
const Type* ImplementationVisitor::Visit(BlockStatement* block) {
Declarations::NodeScopeActivator scope(declarations(), block);
- ScopedIndent indent(this);
const Type* type = TypeOracle::GetVoidType();
for (Statement* s : block->statements) {
if (type->IsNever()) {
@@ -717,17 +873,14 @@ const Type* ImplementationVisitor::Visit(BlockStatement* block) {
const Type* ImplementationVisitor::Visit(DebugStatement* stmt) {
#if defined(DEBUG)
- GenerateIndent();
- source_out() << "Print(\""
- << "halting because of '" << stmt->reason << "' at "
- << PositionAsString(stmt->pos) << "\");\n";
+ assembler().Emit(PrintConstantStringInstruction{"halting because of '" +
+ stmt->reason + "' at " +
+ PositionAsString(stmt->pos)});
#endif
- GenerateIndent();
+ assembler().Emit(DebugBreakInstruction{stmt->never_continues});
if (stmt->never_continues) {
- source_out() << "Unreachable();\n";
return TypeOracle::GetNeverType();
} else {
- source_out() << "DebugBreak();\n";
return TypeOracle::GetVoidType();
}
}
@@ -769,9 +922,9 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
Label* false_label = nullptr;
Declarations::NodeScopeActivator scope(declarations(), stmt->expression);
true_label = declarations()->LookupLabel(kTrueLabelName);
- GenerateLabelDefinition(true_label);
+ CreateBlockForLabel(true_label, assembler().CurrentStack());
false_label = declarations()->LookupLabel(kFalseLabelName);
- GenerateLabelDefinition(false_label);
+ CreateBlockForLabel(false_label, assembler().CurrentStack());
VisitResult expression_result = Visit(stmt->expression);
if (expression_result.type() == TypeOracle::GetBoolType()) {
@@ -786,12 +939,10 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
}
GenerateLabelBind(false_label);
- GenerateIndent();
- source_out() << "Print(\""
- << "assert '" << FormatAssertSource(stmt->source)
- << "' failed at " << PositionAsString(stmt->pos) << "\");\n";
- GenerateIndent();
- source_out() << "Unreachable();\n";
+ assembler().Emit(PrintConstantStringInstruction{
+ "assert '" + FormatAssertSource(stmt->source) + "' failed at " +
+ PositionAsString(stmt->pos)});
+ assembler().Emit(DebugBreakInstruction{true});
GenerateLabelBind(true_label);
}
@@ -824,20 +975,16 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
VisitResult return_result = GenerateImplicitConvert(
current_callable->signature().return_type, expression_result);
if (current_callable->IsMacro()) {
- Variable* var =
- Variable::cast(declarations()->LookupValue(kReturnValueVariable));
- GenerateAssignToVariable(var, return_result);
- GenerateLabelGoto(end);
- } else if (current_callable->IsBuiltin()) {
- if (Builtin::cast(current_callable)->IsVarArgsJavaScript()) {
- GenerateIndent();
- source_out() << "arguments->PopAndReturn("
- << RValueFlattenStructs(return_result) << ");\n";
+ if (return_result.IsOnStack()) {
+ StackRange return_value_range =
+ GenerateLabelGoto(end, return_result.stack_range());
+ SetReturnValue(VisitResult(return_result.type(), return_value_range));
} else {
- GenerateIndent();
- source_out() << "Return(" << RValueFlattenStructs(return_result)
- << ");\n";
+ GenerateLabelGoto(end);
+ SetReturnValue(return_result);
}
+ } else if (current_callable->IsBuiltin()) {
+ assembler().Emit(ReturnInstruction{});
} else {
UNREACHABLE();
}
@@ -856,6 +1003,7 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
Declarations::NodeScopeActivator scope(declarations(), stmt);
+ StackScope stack_scope(this);
VisitResult expression_result = Visit(stmt->iterable);
VisitResult begin = stmt->begin
@@ -866,194 +1014,207 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
? Visit(*stmt->end)
: GenerateCall(".length", {{expression_result}, {}});
- Label* body_label = declarations()->DeclarePrivateLabel("body");
- GenerateLabelDefinition(body_label);
- Label* increment_label = declarations()->DeclarePrivateLabel("increment");
- GenerateLabelDefinition(increment_label);
- Label* exit_label = declarations()->DeclarePrivateLabel("exit");
- GenerateLabelDefinition(exit_label);
-
const Type* common_type = GetCommonType(begin.type(), end.type());
- Variable* index_var = GenerateVariableDeclaration(
- stmt, std::string(kForIndexValueVariable) + "_" + NewTempVariable(),
- false, common_type, begin);
+ VisitResult index = GenerateImplicitConvert(common_type, begin);
- VisitResult index_for_read = {index_var->type(), index_var};
+ Block* body_block = assembler().NewBlock();
+ Block* increment_block = assembler().NewBlock(assembler().CurrentStack());
+ Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
- Label* header_label = declarations()->DeclarePrivateLabel("header");
- GenerateLabelDefinition(header_label, stmt);
+ Block* header_block = assembler().NewBlock();
- GenerateLabelGoto(header_label);
+ assembler().Goto(header_block);
- GenerateLabelBind(header_label);
+ assembler().Bind(header_block);
- BreakContinueActivator activator(global_context_, exit_label,
- increment_label);
+ BreakContinueActivator activator(global_context_, exit_block,
+ increment_block);
- VisitResult result = GenerateCall("<", {{index_for_read, end}, {}});
- GenerateBranch(result, body_label, exit_label);
+ {
+ StackScope comparison_scope(this);
+ VisitResult result = GenerateCall("<", {{index, end}, {}});
+ if (result.type() != TypeOracle::GetBoolType()) {
+ ReportError("operator < with arguments(", *index.type(), ", ",
+ *end.type(),
+ ") used in for-of loop has to return type bool, but "
+ "returned type ",
+ *result.type());
+ }
+ comparison_scope.Yield(result);
+ }
+ assembler().Branch(body_block, exit_block);
- GenerateLabelBind(body_label);
- VisitResult element_result =
- GenerateCall("[]", {{expression_result, index_for_read}, {}});
- base::Optional<const Type*> declared_type;
- if (stmt->var_declaration->type)
- declared_type = declarations()->GetType(*stmt->var_declaration->type);
- GenerateVariableDeclaration(
- stmt->var_declaration, stmt->var_declaration->name,
- stmt->var_declaration->const_qualified, declared_type, element_result);
- Visit(stmt->body);
- GenerateLabelGoto(increment_label);
+ assembler().Bind(body_block);
+ {
+ StackScope body_scope(this);
- GenerateLabelBind(increment_label);
- Arguments increment_args;
- increment_args.parameters = {index_for_read,
- {TypeOracle::GetConstInt31Type(), "1"}};
- VisitResult increment_result = GenerateCall("+", increment_args);
+ VisitResult element_result;
+ {
+ StackScope element_scope(this);
+ VisitResult result = GenerateCall("[]", {{expression_result, index}, {}});
+ if (stmt->var_declaration->type) {
+ const Type* declared_type =
+ declarations()->GetType(*stmt->var_declaration->type);
+ result = GenerateImplicitConvert(declared_type, result);
+ }
+ element_result = element_scope.Yield(result);
+ }
+ Variable* element_var = Variable::cast(
+ declarations()->LookupValue(stmt->var_declaration->name));
+ element_var->set_value(element_result);
+ Visit(stmt->body);
+ }
+ assembler().Goto(increment_block);
+
+ assembler().Bind(increment_block);
+ {
+ Arguments increment_args;
+ increment_args.parameters = {index, {TypeOracle::GetConstInt31Type(), "1"}};
+ VisitResult increment_result = GenerateCall("+", increment_args);
- GenerateAssignToVariable(index_var, increment_result);
+ GenerateAssignToLocation(LocationReference::VariableAccess(index),
+ increment_result);
+ }
- GenerateLabelGoto(header_label);
+ assembler().Goto(header_block);
- GenerateLabelBind(exit_label);
+ assembler().Bind(exit_block);
return TypeOracle::GetVoidType();
}
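The for-of lowering above produces the classic four-block loop: the header tests the index against the end, the body reads the current element, the increment block advances the index, and the exit block leaves the loop, with continue targeting increment and break targeting exit. An executable model of that shape using goto in place of CFG blocks:

#include <iostream>
#include <vector>

int main() {
  std::vector<int> iterable = {10, 20, 30};
  int index = 0, end = static_cast<int>(iterable.size());

header:
  if (!(index < end)) goto exit_block;  // Branch(body, exit)
  // body: read element i; a `continue` would jump to the increment below,
  // a `break` would jump to exit_block.
  std::cout << iterable[index] << "\n";
  // increment:
  index = index + 1;
  goto header;

exit_block:
  return 0;
}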
-const Type* ImplementationVisitor::Visit(TryLabelStatement* stmt) {
- ScopedIndent indent(this);
- Label* try_done = declarations()->DeclarePrivateLabel("try_done");
- GenerateLabelDefinition(try_done);
- const Type* try_result = TypeOracle::GetNeverType();
- std::vector<Label*> labels;
+VisitResult ImplementationVisitor::Visit(TryLabelExpression* expr) {
+ Block* done_block = assembler().NewBlock();
+ VisitResult try_result;
+ Label* label = nullptr;
// Output labels for the goto handlers and for the merge after the try.
{
// Activate a new scope to see handler labels
- Declarations::NodeScopeActivator scope(declarations(), stmt);
- for (LabelBlock* block : stmt->label_blocks) {
- CurrentSourcePosition::Scope scope(block->pos);
- Label* label = declarations()->LookupLabel(block->label);
- labels.push_back(label);
- GenerateLabelDefinition(label);
- }
-
- size_t i = 0;
- for (auto label : labels) {
- Declarations::NodeScopeActivator scope(declarations(),
- stmt->label_blocks[i]->body);
- for (auto& v : label->GetParameters()) {
- GenerateVariableDeclaration(stmt, v->name(), false, v->type());
+ Declarations::NodeScopeActivator scope(declarations(), expr);
+ {
+ LabelBlock* block = expr->label_block;
+ CurrentSourcePosition::Scope source_position(block->pos);
+ label = declarations()->LookupLabel(block->label);
+
+ Declarations::NodeScopeActivator scope(declarations(), block->body);
+ Stack<const Type*> label_input_stack = assembler().CurrentStack();
+ for (Variable* v : label->GetParameters()) {
+ StackRange range = label_input_stack.PushMany(LowerType(v->type()));
+ v->set_value(VisitResult(v->type(), range));
v->Define();
}
- ++i;
+ CreateBlockForLabel(label, label_input_stack);
}
- Label* try_begin_label = declarations()->DeclarePrivateLabel("try_begin");
- GenerateLabelDefinition(try_begin_label);
- GenerateLabelGoto(try_begin_label);
-
// Visit try
- if (GenerateLabeledStatementBlocks({stmt->try_block},
- std::vector<Label*>({try_begin_label}),
- try_done)) {
- try_result = TypeOracle::GetVoidType();
+ {
+ StackScope stack_scope(this);
+ try_result = Visit(expr->try_expression);
+ if (try_result.type() != TypeOracle::GetNeverType()) {
+ try_result = stack_scope.Yield(try_result);
+ assembler().Goto(done_block);
+ }
}
}
- // Make sure that each label clause is actually used. It's not just a friendly
- // thing to do, it will cause problems downstream in the compiler if there are
- // bound labels that are never jumped to.
- auto label_iterator = stmt->label_blocks.begin();
- for (auto label : labels) {
- CurrentSourcePosition::Scope scope((*label_iterator)->pos);
- if (!label->IsUsed()) {
- std::stringstream s;
- s << "label ";
- s << (*label_iterator)->label;
- s << " has a handler block but is never referred to in try block";
- ReportError(s.str());
+ if (label->IsUsed()) {
+ // Visit and output the code for the label block. If the label block falls
+ // through, then the try must not return a value. Also, if the try doesn't
+ // fall through, but the label does, then overall the try-label block
+ // returns type void.
+ GenerateLabelBind(label);
+ const Type* label_result;
+ {
+ StackScope stack_scope(this);
+ label_result = Visit(expr->label_block->body);
+ }
+ if (!try_result.type()->IsVoidOrNever() && label_result->IsVoid()) {
+ ReportError(
+ "otherwise clauses cannot fall through in a non-void expression");
+ }
+ if (label_result != TypeOracle::GetNeverType()) {
+ assembler().Goto(done_block);
+ }
+ if (label_result->IsVoid() && try_result.type()->IsNever()) {
+ try_result =
+ VisitResult(TypeOracle::GetVoidType(), try_result.stack_range());
}
- label_iterator++;
- }
-
- // Visit and output the code for each catch block, one-by-one.
- std::vector<Statement*> bodies;
- for (LabelBlock* block : stmt->label_blocks) bodies.push_back(block->body);
- if (GenerateLabeledStatementBlocks(bodies, labels, try_done)) {
- try_result = TypeOracle::GetVoidType();
}
- if (!try_result->IsNever()) {
- GenerateLabelBind(try_done);
+ if (!try_result.type()->IsNever()) {
+ assembler().Bind(done_block);
}
return try_result;
}
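The result-type merge for try-label follows the comment above: a label block that falls through forces the overall type to void, and is an error when the try itself produced a value. A toy version of that merge with types modeled as strings:

#include <cassert>
#include <string>

std::string MergeTryLabelTypes(const std::string& try_type,
                               const std::string& label_type) {
  // Label falls through while the try returned a value: rejected.
  if (label_type == "void" && try_type != "void" && try_type != "never") {
    return "error: otherwise clause cannot fall through";
  }
  // Try never falls through but the label does: overall type is void.
  if (label_type == "void" && try_type == "never") return "void";
  return try_type;
}

int main() {
  assert(MergeTryLabelTypes("never", "void") == "void");
  assert(MergeTryLabelTypes("Smi", "never") == "Smi");
  assert(MergeTryLabelTypes("Smi", "void") ==
         "error: otherwise clause cannot fall through");
}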
+VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
+ return VisitResult{Visit(expr->statement), assembler().TopRange(0)};
+}
+
const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
- Label* break_label = global_context_.GetCurrentBreak();
- if (break_label == nullptr) {
+ Block* break_block = global_context_.GetCurrentBreak();
+ if (break_block == nullptr) {
ReportError("break used outside of loop");
}
- GenerateLabelGoto(break_label);
+ assembler().Goto(break_block);
return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
- Label* continue_label = global_context_.GetCurrentContinue();
- if (continue_label == nullptr) {
+ Block* continue_block = global_context_.GetCurrentContinue();
+ if (continue_block == nullptr) {
ReportError("continue used outside of loop");
}
- GenerateLabelGoto(continue_label);
+ assembler().Goto(continue_block);
return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
Declarations::NodeScopeActivator scope(declarations(), stmt);
+ StackScope stack_scope(this);
if (stmt->var_declaration) Visit(*stmt->var_declaration);
Label* body_label = declarations()->LookupLabel(kTrueLabelName);
- GenerateLabelDefinition(body_label);
+ CreateBlockForLabel(body_label, assembler().CurrentStack());
Label* exit_label = declarations()->LookupLabel(kFalseLabelName);
- GenerateLabelDefinition(exit_label);
+ CreateBlockForLabel(exit_label, assembler().CurrentStack());
- Label* header_label = declarations()->DeclarePrivateLabel("header");
- GenerateLabelDefinition(header_label, stmt);
- GenerateLabelGoto(header_label);
- GenerateLabelBind(header_label);
+ Block* header_block = assembler().NewBlock();
+ assembler().Goto(header_block);
+ assembler().Bind(header_block);
// The continue label is where "continue" statements jump to. If no action
// expression is provided, we jump directly to the header.
- Label* continue_label = header_label;
+ Block* continue_block = header_block;
// The action label is only needed when an action expression was provided.
- Label* action_label = nullptr;
+ Block* action_block = nullptr;
if (stmt->action) {
- action_label = declarations()->DeclarePrivateLabel("action");
- GenerateLabelDefinition(action_label);
+ action_block = assembler().NewBlock();
// The action expression needs to be executed on a continue.
- continue_label = action_label;
+ continue_block = action_block;
}
- BreakContinueActivator activator(global_context_, exit_label, continue_label);
+ BreakContinueActivator activator(global_context_, exit_label->block(),
+ continue_block);
std::vector<Label*> labels = {body_label, exit_label};
bool generate_action = true;
if (stmt->test) {
generate_action = GenerateExpressionBranch(*stmt->test, labels,
- {stmt->body}, continue_label);
+ {stmt->body}, continue_block);
} else {
GenerateLabelGoto(body_label);
generate_action =
- GenerateLabeledStatementBlocks({stmt->body}, labels, continue_label);
+ GenerateLabeledStatementBlocks({stmt->body}, labels, continue_block);
}
if (generate_action && stmt->action) {
- ScopedIndent indent(this);
- GenerateLabelBind(action_label);
+ assembler().Bind(action_block);
Visit(*stmt->action);
- GenerateLabelGoto(header_label);
+ assembler().Goto(header_block);
}
GenerateLabelBind(exit_label);
@@ -1075,7 +1236,7 @@ void ImplementationVisitor::GenerateImplementation(const std::string& dir,
std::string ImplementationVisitor::GetBaseAssemblerName(Module* module) {
if (module == global_context_.GetDefaultModule()) {
- return "CodeStubAssembler";
+ return "TorqueAssembler";
} else {
std::string assembler_name(CamelifyString(module->name()) +
"BuiltinsAssembler");
@@ -1089,12 +1250,6 @@ std::string ImplementationVisitor::GetDSLAssemblerName(Module* module) {
return assembler_name;
}
-void ImplementationVisitor::GenerateIndent() {
- for (size_t i = 0; i <= indent_; ++i) {
- source_out() << " ";
- }
-}
-
void ImplementationVisitor::GenerateMacroFunctionDeclaration(
std::ostream& o, const std::string& macro_prefix, Macro* macro) {
GenerateFunctionDeclaration(o, macro_prefix, macro->name(),
@@ -1128,11 +1283,12 @@ void ImplementationVisitor::GenerateFunctionDeclaration(
if (!first) {
o << ", ";
}
- const Value* parameter = declarations()->LookupValue(name);
+ const Parameter* parameter =
+ Parameter::cast(declarations()->LookupValue(name));
const Type* parameter_type = *type_iterator;
const std::string& generated_type_name =
parameter_type->GetGeneratedTypeName();
- o << generated_type_name << " " << parameter->value();
+ o << generated_type_name << " " << parameter->external_name();
type_iterator++;
first = false;
}
@@ -1142,13 +1298,15 @@ void ImplementationVisitor::GenerateFunctionDeclaration(
if (!first) {
o << ", ";
}
- o << "Label* " << label->generated();
+ o << "Label* " << label->external_label_name();
+ size_t i = 0;
for (Variable* var : label->GetParameters()) {
std::string generated_type_name("TVariable<");
generated_type_name += var->type()->GetGeneratedTNodeTypeName();
generated_type_name += ">*";
o << ", ";
- o << generated_type_name << " " << var->value();
+ o << generated_type_name << " " << ExternalLabelParameterName(label, i);
+ ++i;
}
}
@@ -1280,43 +1438,6 @@ Callable* ImplementationVisitor::LookupCall(
return result;
}
-void ImplementationVisitor::GetFlattenedStructsVars(
- const Variable* base, std::set<const Variable*>* vars) {
- const Type* type = base->type();
- if (base->IsConst()) return;
- if (type->IsStructType()) {
- const StructType* struct_type = StructType::cast(type);
- for (auto& field : struct_type->fields()) {
- std::string field_var_name = base->name() + "." + field.name;
- GetFlattenedStructsVars(
- Variable::cast(declarations()->LookupValue(field_var_name)), vars);
- }
- } else {
- vars->insert(base);
- }
-}
-
-void ImplementationVisitor::GenerateChangedVarsFromControlSplit(AstNode* node) {
- const std::set<const Variable*>& changed_vars =
- global_context_.GetControlSplitChangedVariables(
- node, declarations()->GetCurrentSpecializationTypeNamesVector());
- std::set<const Variable*> flattened_vars;
- for (auto v : changed_vars) {
- GetFlattenedStructsVars(v, &flattened_vars);
- }
- std::vector<const Variable*> flattened_vars_sorted(flattened_vars.begin(),
- flattened_vars.end());
- auto compare_variables = [](const Variable* a, const Variable* b) {
- return a->value() < b->value();
- };
- std::sort(flattened_vars_sorted.begin(), flattened_vars_sorted.end(),
- compare_variables);
- source_out() << "{";
- PrintCommaSeparatedList(source_out(), flattened_vars_sorted,
- [](const Variable* v) { return v->value(); });
- source_out() << "}";
-}
-
const Type* ImplementationVisitor::GetCommonType(const Type* left,
const Type* right) {
const Type* common_type;
@@ -1332,11 +1453,11 @@ const Type* ImplementationVisitor::GetCommonType(const Type* left,
}
VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
- std::string temp = GenerateNewTempVariable(to_copy.type());
- source_out() << RValueFlattenStructs(to_copy) << ";\n";
- GenerateIndent();
- source_out() << "USE(" << temp << ");\n";
- return VisitResult(to_copy.type(), temp);
+ if (to_copy.IsOnStack()) {
+ return VisitResult(to_copy.type(),
+ assembler().Peek(to_copy.stack_range(), to_copy.type()));
+ }
+ return to_copy;
}
VisitResult ImplementationVisitor::Visit(StructExpression* decl) {
@@ -1354,23 +1475,19 @@ VisitResult ImplementationVisitor::Visit(StructExpression* decl) {
<< ")";
ReportError(s.str());
}
- std::vector<VisitResult> expression_results;
- for (auto& field : struct_type->fields()) {
- VisitResult value = Visit(decl->expressions[expression_results.size()]);
+ StackRange stack_range = assembler().TopRange(0);
+ for (size_t i = 0; i < struct_type->fields().size(); ++i) {
+ const NameAndType& field = struct_type->fields()[i];
+ StackScope scope(this);
+ VisitResult value = Visit(decl->expressions[i]);
value = GenerateImplicitConvert(field.type, value);
- expression_results.push_back(value);
+ stack_range.Extend(scope.Yield(value).stack_range());
}
- std::string result_var_name = GenerateNewTempVariable(struct_type);
- source_out() << "{";
- PrintCommaSeparatedList(
- source_out(), expression_results,
- [&](const VisitResult& result) { return RValueFlattenStructs(result); });
- source_out() << "};\n";
- return VisitResult(struct_type, result_var_name);
+ return VisitResult(struct_type, stack_range);
}
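A struct literal is assembled by visiting each field inside its own StackScope and extending a single StackRange over the yielded slots, so the finished struct occupies one contiguous run on the stack. A minimal StackRange stand-in showing the Extend invariant:

#include <cassert>
#include <cstddef>

// A [begin, end) window of stack slots; extending requires the next range to
// start exactly where this one ends, i.e. fields land adjacently.
class StackRange {
 public:
  StackRange(size_t begin, size_t end) : begin_(begin), end_(end) {}
  void Extend(const StackRange& next) {
    assert(next.begin_ == end_);  // fields must be adjacent on the stack
    end_ = next.end_;
  }
  size_t Size() const { return end_ - begin_; }

 private:
  size_t begin_;
  size_t end_;
};

int main() {
  StackRange result(4, 4);     // empty range at the current stack top
  result.Extend({4, 5});       // first field lowered to one slot
  result.Extend({5, 7});       // second field lowered to two slots
  assert(result.Size() == 3);  // struct occupies three contiguous slots
}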
LocationReference ImplementationVisitor::GetLocationReference(
- LocationExpression* location) {
+ Expression* location) {
switch (location->kind) {
case AstNode::Kind::kIdentifierExpression:
return GetLocationReference(static_cast<IdentifierExpression*>(location));
@@ -1381,231 +1498,92 @@ LocationReference ImplementationVisitor::GetLocationReference(
return GetLocationReference(
static_cast<ElementAccessExpression*>(location));
default:
- UNREACHABLE();
+ return LocationReference::Temporary(Visit(location), "expression");
}
}
LocationReference ImplementationVisitor::GetLocationReference(
FieldAccessExpression* expr) {
- VisitResult result = Visit(expr->object);
- if (result.type()->IsStructType()) {
- if (result.declarable()) {
- return LocationReference(
- declarations()->LookupValue((*result.declarable())->name() + "." +
- expr->field),
- {}, {});
- }
+ LocationReference reference = GetLocationReference(expr->object);
+ if (reference.IsVariableAccess() &&
+ reference.variable().type()->IsStructType()) {
+ return LocationReference::VariableAccess(
+ ProjectStructField(reference.variable(), expr->field));
}
- return LocationReference(nullptr, result, {});
-}
-
-std::string ImplementationVisitor::RValueFlattenStructs(VisitResult result) {
- if (result.declarable()) {
- const Value* value = *result.declarable();
- const Type* type = value->type();
- if (const StructType* struct_type = StructType::DynamicCast(type)) {
- std::stringstream s;
- s << struct_type->name() << "{";
- PrintCommaSeparatedList(
- s, struct_type->fields(), [&](const NameAndType& field) {
- std::string field_declaration = value->name() + "." + field.name;
- Variable* field_variable =
- Variable::cast(declarations()->LookupValue(field_declaration));
- return RValueFlattenStructs(
- VisitResult(field_variable->type(), field_variable));
- });
- s << "}";
- return s.str();
- }
+ if (reference.IsTemporary() && reference.temporary().type()->IsStructType()) {
+ return LocationReference::Temporary(
+ ProjectStructField(reference.temporary(), expr->field),
+ reference.temporary_description());
}
- return result.RValue();
+ return LocationReference::FieldAccess(GenerateFetchFromLocation(reference),
+ expr->field);
}
-VisitResult ImplementationVisitor::GenerateFetchFromLocation(
- LocationExpression* location, LocationReference reference) {
- switch (location->kind) {
- case AstNode::Kind::kIdentifierExpression:
- return GenerateFetchFromLocation(
- static_cast<IdentifierExpression*>(location), reference);
- case AstNode::Kind::kFieldAccessExpression:
- return GenerateFetchFromLocation(
- static_cast<FieldAccessExpression*>(location), reference);
- case AstNode::Kind::kElementAccessExpression:
- return GenerateFetchFromLocation(
- static_cast<ElementAccessExpression*>(location), reference);
- default:
- UNREACHABLE();
- }
-}
-
-VisitResult ImplementationVisitor::GenerateFetchFromLocation(
- FieldAccessExpression* expr, LocationReference reference) {
- if (reference.value != nullptr) {
- return GenerateFetchFromLocation(reference);
- }
- const Type* type = reference.base.type();
- if (const StructType* struct_type = StructType::DynamicCast(type)) {
- return VisitResult(struct_type->GetFieldType(expr->field),
- reference.base.RValue() + "." + expr->field);
- } else {
- Arguments arguments;
- arguments.parameters = {reference.base};
- return GenerateCall(std::string(".") + expr->field, arguments);
- }
+LocationReference ImplementationVisitor::GetLocationReference(
+ ElementAccessExpression* expr) {
+ VisitResult array = Visit(expr->array);
+ VisitResult index = Visit(expr->index);
+ return LocationReference::ArrayAccess(array, index);
}
-void ImplementationVisitor::GenerateAssignToVariable(Variable* var,
- VisitResult value) {
- if (var->type()->IsStructType()) {
- if (value.type() != var->type()) {
- std::stringstream s;
- s << "incompatable assignment from type " << *value.type() << " to "
- << *var->type();
- ReportError(s.str());
- }
- const StructType* struct_type = StructType::cast(var->type());
- for (auto& field : struct_type->fields()) {
- std::string field_declaration = var->name() + "." + field.name;
- Variable* field_variable =
- Variable::cast(declarations()->LookupValue(field_declaration));
- if (value.declarable() && (*value.declarable())->IsVariable()) {
- Variable* source_field = Variable::cast(declarations()->LookupValue(
- Variable::cast((*value.declarable()))->name() + "." + field.name));
- GenerateAssignToVariable(
- field_variable, VisitResult{source_field->type(), source_field});
- } else {
- GenerateAssignToVariable(
- field_variable, VisitResult{field_variable->type(),
- value.RValue() + "." + field.name});
- }
+LocationReference ImplementationVisitor::GetLocationReference(
+ IdentifierExpression* expr) {
+ Value* value = declarations()->LookupValue(expr->name);
+ if (auto* constant = ModuleConstant::DynamicCast(value)) {
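+    // A constexpr module constant is referenced through its generated C++
+    // accessor; otherwise the constant's value is materialized on the stack.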
+ if (constant->type()->IsConstexpr()) {
+ return LocationReference::Temporary(
+ VisitResult(constant->type(), constant->constant_name() + "()"),
+ "module constant " + expr->name);
}
- } else {
- VisitResult casted_value = GenerateImplicitConvert(var->type(), value);
- GenerateIndent();
- VisitResult var_value = {var->type(), var};
- source_out() << var_value.LValue() << " = "
- << RValueFlattenStructs(casted_value) << ";\n";
+ assembler().Emit(ModuleConstantInstruction{constant});
+ StackRange stack_range =
+ assembler().TopRange(LoweredSlotCount(constant->type()));
+ return LocationReference::Temporary(
+ VisitResult(constant->type(), stack_range),
+ "module constant " + expr->name);
}
- var->Define();
-}
-
-void ImplementationVisitor::GenerateAssignToLocation(
- LocationExpression* location, const LocationReference& reference,
- VisitResult assignment_value) {
- if (reference.value != nullptr) {
- Value* value = reference.value;
- Variable* var = Variable::cast(value);
- if (var->IsConst()) {
- std::stringstream s;
- s << "\"" << var->name()
- << "\" is declared const (maybe implicitly) and cannot be assigned to";
- ReportError(s.str());
- }
- GenerateAssignToVariable(var, assignment_value);
- } else if (auto access = FieldAccessExpression::DynamicCast(location)) {
- GenerateCall(std::string(".") + access->field + "=",
- {{reference.base, assignment_value}, {}});
- } else {
- DCHECK_NOT_NULL(ElementAccessExpression::cast(location));
- GenerateCall("[]=",
- {{reference.base, reference.index, assignment_value}, {}});
+ if (value->IsConst()) {
+ return LocationReference::Temporary(value->value(),
+ "constant value " + expr->name);
}
+ DCHECK(value->IsVariable());
+ return LocationReference::VariableAccess(value->value());
}
-void ImplementationVisitor::GenerateVariableDeclaration(const Variable* var) {
- const Type* var_type = var->type();
- if (var_type->IsStructType()) {
- const StructType* struct_type = StructType::cast(var_type);
- for (auto& field : struct_type->fields()) {
- GenerateVariableDeclaration(Variable::cast(
- declarations()->LookupValue(var->name() + "." + field.name)));
- }
+VisitResult ImplementationVisitor::GenerateFetchFromLocation(
+ const LocationReference& reference) {
+ if (reference.IsTemporary()) {
+ return GenerateCopy(reference.temporary());
+ } else if (reference.IsVariableAccess()) {
+ return GenerateCopy(reference.variable());
} else {
- std::string value = var->value();
- GenerateIndent();
- if (var_type->IsConstexpr()) {
- source_out() << var_type->GetGeneratedTypeName();
- source_out() << " " << value << "_impl;\n";
- } else if (var->IsConst()) {
- source_out() << "TNode<" << var->type()->GetGeneratedTNodeTypeName();
- source_out() << "> " << var->value() << "_impl;\n";
- } else {
- source_out() << "TVARIABLE(";
- source_out() << var_type->GetGeneratedTNodeTypeName();
- source_out() << ", " << value << "_impl);\n";
- }
- GenerateIndent();
- source_out() << "auto " << value << " = &" << value << "_impl;\n";
- GenerateIndent();
- source_out() << "USE(" << value << ");\n";
- }
-}
-
-Variable* ImplementationVisitor::GeneratePredeclaredVariableDeclaration(
- const std::string& name,
- const base::Optional<VisitResult>& initialization) {
- Variable* variable = Variable::cast(declarations()->LookupValue(name));
- GenerateVariableDeclaration(variable);
- if (initialization) {
- GenerateAssignToVariable(variable, *initialization);
+ DCHECK(reference.IsCallAccess());
+ return GenerateCall(reference.eval_function(),
+ Arguments{reference.call_arguments(), {}});
}
- return variable;
}
-Variable* ImplementationVisitor::GenerateVariableDeclaration(
- AstNode* node, const std::string& name, bool is_const,
- const base::Optional<const Type*>& type,
- const base::Optional<VisitResult>& initialization) {
- Variable* variable = nullptr;
- if (declarations()->IsDeclaredInCurrentScope(name)) {
- variable = Variable::cast(declarations()->LookupValue(name));
+void ImplementationVisitor::GenerateAssignToLocation(
+ const LocationReference& reference, const VisitResult& assignment_value) {
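+  // Call accesses assign through the "...=" operator, variable accesses poke
+  // the converted value into the variable's stack range, and temporaries are
+  // unassignable.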
+ if (reference.IsCallAccess()) {
+ Arguments arguments{reference.call_arguments(), {}};
+ arguments.parameters.push_back(assignment_value);
+ GenerateCall(reference.assign_function(), arguments);
+ } else if (reference.IsVariableAccess()) {
+ VisitResult variable = reference.variable();
+ VisitResult converted_value =
+ GenerateImplicitConvert(variable.type(), assignment_value);
+ assembler().Poke(variable.stack_range(), converted_value.stack_range(),
+ variable.type());
} else {
- variable = declarations()->DeclareVariable(
- name, type ? *type : initialization->type(), is_const);
- if (!is_const) {
- // Because the variable is being defined during code generation, it must
- // be assumed that it changes along all control split paths because it's
- // no longer possible to run the control-flow anlaysis in the declaration
- // pass over the variable.
- global_context_.MarkVariableChanged(
- node, declarations()->GetCurrentSpecializationTypeNamesVector(),
- variable);
- }
- }
- GenerateVariableDeclaration(variable);
- if (initialization) {
- GenerateAssignToVariable(variable, *initialization);
- }
- return variable;
-}
-
-void ImplementationVisitor::GenerateParameter(
- const std::string& parameter_name) {
- const Value* val = declarations()->LookupValue(parameter_name);
- std::string var = val->value();
- GenerateIndent();
- source_out() << val->type()->GetGeneratedTypeName() << " " << var << " = ";
-
- source_out() << "UncheckedCast<" << val->type()->GetGeneratedTNodeTypeName()
- << ">(Parameter(Descriptor::k" << CamelifyString(parameter_name)
- << "));\n";
- GenerateIndent();
- source_out() << "USE(" << var << ");\n";
-}
-
-void ImplementationVisitor::GenerateParameterList(const NameVector& list,
- size_t first) {
- for (auto p : list) {
- if (first == 0) {
- GenerateParameter(p);
- } else {
- first--;
- }
+ DCHECK(reference.IsTemporary());
+ ReportError("cannot assign to ", reference.temporary_description());
}
}
VisitResult ImplementationVisitor::GeneratePointerCall(
Expression* callee, const Arguments& arguments, bool is_tailcall) {
+ StackScope scope(this);
TypeVector parameter_types(arguments.parameters.GetTypeVector());
VisitResult callee_result = Visit(callee);
if (!callee_result.type()->IsFunctionPointerType()) {
@@ -1637,28 +1615,13 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
ReportError(stream.str());
}
- std::vector<std::string> variables;
+ callee_result = GenerateCopy(callee_result);
+ StackRange arg_range = assembler().TopRange(0);
for (size_t current = 0; current < arguments.parameters.size(); ++current) {
const Type* to_type = type->parameter_types()[current];
- VisitResult result =
- GenerateImplicitConvert(to_type, arguments.parameters[current]);
- variables.push_back(RValueFlattenStructs(result));
- }
-
- std::string result_variable_name;
- bool no_result = type->return_type()->IsVoidOrNever() || is_tailcall;
- if (no_result) {
- GenerateIndent();
- } else {
- const Type* return_type = type->return_type();
- result_variable_name = GenerateNewTempVariable(return_type);
- if (return_type->IsStructType()) {
- source_out() << "(";
- } else {
- source_out() << "UncheckedCast<";
- source_out() << type->return_type()->GetGeneratedTNodeTypeName();
- source_out() << ">(";
- }
+ arg_range.Extend(
+ GenerateImplicitConvert(to_type, arguments.parameters[current])
+ .stack_range());
}
Builtin* example_builtin =
@@ -1669,27 +1632,14 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
ReportError(stream.str());
}
- if (is_tailcall) {
- source_out() << "TailCallStub(";
- } else {
- source_out() << "CallStub(";
- }
- source_out() << "Builtins::CallableFor(isolate(), Builtins::k"
- << example_builtin->name() << ").descriptor(), "
- << RValueFlattenStructs(callee_result) << ", ";
+ assembler().Emit(CallBuiltinPointerInstruction{is_tailcall, example_builtin,
+ arg_range.Size()});
- size_t total_parameters = 0;
- for (size_t i = 0; i < arguments.parameters.size(); ++i) {
- if (total_parameters++ != 0) {
- source_out() << ", ";
- }
- source_out() << variables[i];
- }
- if (!no_result) {
- source_out() << ")";
+ if (is_tailcall) {
+ return VisitResult::NeverResult();
}
- source_out() << ");\n";
- return VisitResult(type->return_type(), result_variable_name);
+ DCHECK_EQ(1, LoweredSlotCount(type->return_type()));
+ return scope.Yield(VisitResult(type->return_type(), assembler().TopRange(1)));
}
VisitResult ImplementationVisitor::GenerateCall(
@@ -1708,67 +1658,29 @@ VisitResult ImplementationVisitor::GenerateCall(
arguments.labels.push_back(false_label);
}
- const Type* result_type = callable->signature().return_type;
+ const Type* return_type = callable->signature().return_type;
- std::vector<std::string> variables;
+ std::vector<VisitResult> converted_arguments;
+ StackRange argument_range = assembler().TopRange(0);
+ std::vector<std::string> constexpr_arguments;
for (size_t current = 0; current < arguments.parameters.size(); ++current) {
const Type* to_type = (current >= callable->signature().types().size())
? TypeOracle::GetObjectType()
: callable->signature().types()[current];
- VisitResult result =
+ VisitResult converted =
GenerateImplicitConvert(to_type, arguments.parameters[current]);
- variables.push_back(RValueFlattenStructs(result));
- }
-
- std::string result_variable_name;
- if (result_type->IsVoidOrNever() || is_tailcall) {
- GenerateIndent();
- } else {
- result_variable_name = GenerateNewTempVariable(result_type);
- if (!result_type->IsConstexpr()) {
- if (result_type->IsStructType()) {
- source_out() << "(";
- } else {
- source_out() << "UncheckedCast<";
- source_out() << result_type->GetGeneratedTNodeTypeName();
- source_out() << ">(";
- }
- }
- }
- if (callable->IsBuiltin()) {
- if (is_tailcall) {
- source_out() << "TailCallBuiltin(Builtins::k" << callable->name() << ", ";
+ converted_arguments.push_back(converted);
+ if (converted.IsOnStack()) {
+ argument_range.Extend(converted.stack_range());
} else {
- source_out() << "CallBuiltin(Builtins::k" << callable->name() << ", ";
+ constexpr_arguments.push_back(converted.constexpr_value());
}
- } else if (callable->IsMacro()) {
- if (is_tailcall) {
- std::stringstream stream;
- stream << "can't tail call a macro";
- ReportError(stream.str());
- }
- source_out() << callable->name() << "(";
- } else if (callable->IsRuntimeFunction()) {
- if (is_tailcall) {
- source_out() << "TailCallRuntime(Runtime::k" << callable->name() << ", ";
- } else {
- source_out() << "CallRuntime(Runtime::k" << callable->name() << ", ";
- }
- } else {
- UNREACHABLE();
}
+
if (global_context_.verbose()) {
std::cout << "generating code for call to " << callable_name << "\n";
}
- size_t total_parameters = 0;
- for (size_t i = 0; i < arguments.parameters.size(); ++i) {
- if (total_parameters++ != 0) {
- source_out() << ", ";
- }
- source_out() << variables[i];
- }
-
size_t label_count = callable->signature().labels.size();
if (label_count != arguments.labels.size()) {
std::stringstream s;
@@ -1777,49 +1689,114 @@ VisitResult ImplementationVisitor::GenerateCall(
<< std::to_string(arguments.labels.size()) << ")";
ReportError(s.str());
}
- for (size_t i = 0; i < label_count; ++i) {
- if (total_parameters++ != 0) {
- source_out() << ", ";
+
+ if (auto* builtin = Builtin::DynamicCast(callable)) {
+ assembler().Emit(
+ CallBuiltinInstruction{is_tailcall, builtin, argument_range.Size()});
+ if (is_tailcall) {
+ return VisitResult::NeverResult();
+ } else {
+ size_t slot_count = LoweredSlotCount(return_type);
+ DCHECK_LE(slot_count, 1);
+ // TODO(tebbi): Actually, builtins have to return a value, so we should
+ // assert slot_count == 1 here.
+ return VisitResult(return_type, assembler().TopRange(slot_count));
}
- Label* label = arguments.labels[i];
- size_t callee_label_parameters =
- callable->signature().labels[i].types.size();
- if (label->GetParameterCount() != callee_label_parameters) {
- std::stringstream s;
- s << "label " << label->name()
- << " doesn't have the right number of parameters (found "
- << std::to_string(label->GetParameterCount()) << " expected "
- << std::to_string(callee_label_parameters) << ")";
- ReportError(s.str());
+ } else if (auto* macro = Macro::DynamicCast(callable)) {
+ if (is_tailcall) {
+ ReportError("can't tail call a macro");
}
- source_out() << label->generated();
- size_t j = 0;
- for (auto t : callable->signature().labels[i].types) {
- source_out() << ", ";
- Variable* variable = label->GetParameter(j);
- if (!(variable->type() == t)) {
- std::stringstream s;
- s << "mismatch of label parameters (expected " << *t << " got "
- << *label->GetParameter(j)->type() << " for parameter "
- << std::to_string(i + 1) << ")";
- ReportError(s.str());
+ if (return_type->IsConstexpr()) {
+ DCHECK_EQ(0, arguments.labels.size());
+ std::stringstream result;
+ result << "(" << macro->name() << "(";
+ bool first = true;
+ for (VisitResult arg : arguments.parameters) {
+ DCHECK(!arg.IsOnStack());
+ if (!first) {
+ result << ", ";
+ }
+ first = false;
+ result << arg.constexpr_value();
+ }
+ result << "))";
+ return VisitResult(return_type, result.str());
+ } else if (arguments.labels.empty() &&
+ return_type != TypeOracle::GetNeverType()) {
+ assembler().Emit(CallCsaMacroInstruction{macro, constexpr_arguments});
+ size_t return_slot_count = LoweredSlotCount(return_type);
+ return VisitResult(return_type, assembler().TopRange(return_slot_count));
+ } else {
+ base::Optional<Block*> return_continuation;
+ if (return_type != TypeOracle::GetNeverType()) {
+ return_continuation = assembler().NewBlock();
}
- j++;
- source_out() << variable->value();
- }
- label->MarkUsed();
- }
- if (global_context_.verbose()) {
- std::cout << "finished generating code for call to " << callable_name
- << "\n";
- }
- if (!result_type->IsVoidOrNever() && !is_tailcall &&
- !result_type->IsConstexpr()) {
- source_out() << ")";
+ std::vector<Block*> label_blocks;
+
+ for (size_t i = 0; i < label_count; ++i) {
+ label_blocks.push_back(assembler().NewBlock());
+ }
+
+ assembler().Emit(CallCsaMacroAndBranchInstruction{
+ macro, constexpr_arguments, return_continuation, label_blocks});
+
+ for (size_t i = 0; i < label_count; ++i) {
+ Label* label = arguments.labels[i];
+ size_t callee_label_parameters =
+ callable->signature().labels[i].types.size();
+ if (label->GetParameterCount() != callee_label_parameters) {
+ std::stringstream s;
+ s << "label " << label->name()
+ << " doesn't have the right number of parameters (found "
+ << std::to_string(label->GetParameterCount()) << " expected "
+ << std::to_string(callee_label_parameters) << ")";
+ ReportError(s.str());
+ }
+ assembler().Bind(label_blocks[i]);
+ assembler().Goto(
+ label->block(),
+ LowerParameterTypes(callable->signature().labels[i].types).size());
+
+ size_t j = 0;
+ for (auto t : callable->signature().labels[i].types) {
+ Variable* variable = label->GetParameter(j);
+ if (!(variable->type() == t)) {
+ std::stringstream s;
+ s << "mismatch of label parameters (expected " << *t << " got "
+ << *label->GetParameter(j)->type() << " for parameter "
+ << std::to_string(i + 1) << ")";
+ ReportError(s.str());
+ }
+ j++;
+ }
+ label->MarkUsed();
+ }
+
+ if (return_continuation) {
+ assembler().Bind(*return_continuation);
+ size_t return_slot_count = LoweredSlotCount(return_type);
+ return VisitResult(return_type,
+ assembler().TopRange(return_slot_count));
+ } else {
+ return VisitResult::NeverResult();
+ }
+ }
+ } else if (auto* runtime_function = RuntimeFunction::DynamicCast(callable)) {
+ assembler().Emit(CallRuntimeInstruction{is_tailcall, runtime_function,
+ argument_range.Size()});
+ if (is_tailcall) {
+ return VisitResult::NeverResult();
+ } else {
+ size_t slot_count = LoweredSlotCount(return_type);
+ DCHECK_LE(slot_count, 1);
+ // TODO(tebbi): Actually, runtime functions have to return a value, so
+ // we should assert slot_count == 1 here.
+ return VisitResult(return_type, assembler().TopRange(slot_count));
+ }
+ } else {
+ UNREACHABLE();
}
- source_out() << ");\n";
- return VisitResult(result_type, result_variable_name);
}
void ImplementationVisitor::Visit(StandardDeclaration* decl) {
@@ -1856,45 +1833,40 @@ void ImplementationVisitor::Visit(SpecializationDeclaration* decl) {
VisitResult ImplementationVisitor::Visit(CallExpression* expr,
bool is_tailcall) {
+ StackScope scope(this);
Arguments arguments;
std::string name = expr->callee.name;
- TypeVector specialization_types =
- GetTypeVector(expr->callee.generic_arguments);
- bool has_template_arguments = !specialization_types.empty();
- for (Expression* arg : expr->arguments)
- arguments.parameters.push_back(Visit(arg));
- arguments.labels = LabelsFromIdentifiers(expr->labels);
- VisitResult result;
- if (!has_template_arguments &&
- declarations()->Lookup(expr->callee.name)->IsValue()) {
- result = GeneratePointerCall(&expr->callee, arguments, is_tailcall);
+ TypeVector specialization_types =
+ GetTypeVector(expr->callee.generic_arguments);
+ bool has_template_arguments = !specialization_types.empty();
+ for (Expression* arg : expr->arguments)
+ arguments.parameters.push_back(Visit(arg));
+ arguments.labels = LabelsFromIdentifiers(expr->labels);
+ VisitResult result;
+ if (!has_template_arguments &&
+ declarations()->Lookup(expr->callee.name)->IsValue()) {
+ return scope.Yield(
+ GeneratePointerCall(&expr->callee, arguments, is_tailcall));
} else {
- result = GenerateCall(name, arguments, specialization_types, is_tailcall);
- }
- if (!result.type()->IsVoidOrNever()) {
- GenerateIndent();
- source_out() << "USE(" << RValueFlattenStructs(result) << ");\n";
- }
- if (is_tailcall) {
- result = {TypeOracle::GetNeverType(), ""};
+ return scope.Yield(
+ GenerateCall(name, arguments, specialization_types, is_tailcall));
}
- return result;
}
bool ImplementationVisitor::GenerateLabeledStatementBlocks(
const std::vector<Statement*>& blocks,
- const std::vector<Label*>& statement_labels, Label* merge_label) {
+ const std::vector<Label*>& statement_labels, Block* merge_block) {
bool live = false;
auto label_iterator = statement_labels.begin();
for (Statement* block : blocks) {
- GenerateIndent();
- source_out() << "if (" << (*label_iterator)->generated()
- << "->is_used())\n";
- ScopedIndent indent(this);
-
GenerateLabelBind(*label_iterator++);
- if (!Visit(block)->IsNever()) {
- GenerateLabelGoto(merge_label);
+ const Type* stmt_result;
+ {
+ StackScope stack_scope(this);
+ stmt_result = Visit(block);
+ }
+ if (stmt_result != TypeOracle::GetNeverType()) {
+ assembler().Goto(merge_block);
live = true;
}
}
@@ -1904,15 +1876,14 @@ bool ImplementationVisitor::GenerateLabeledStatementBlocks(
void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
Label* true_label,
Label* false_label) {
- GenerateIndent();
- source_out() << "Branch(" << RValueFlattenStructs(condition) << ", "
- << true_label->generated() << ", " << false_label->generated()
- << ");\n";
+ DCHECK_EQ(condition,
+ VisitResult(TypeOracle::GetBoolType(), assembler().TopRange(1)));
+ assembler().Branch(true_label->block(), false_label->block());
}
bool ImplementationVisitor::GenerateExpressionBranch(
Expression* expression, const std::vector<Label*>& statement_labels,
- const std::vector<Statement*>& statement_blocks, Label* merge_label) {
+ const std::vector<Statement*>& statement_blocks, Block* merge_block) {
// Activate a new scope to define True/False catch labels
Declarations::NodeScopeActivator scope(declarations(), expression);
@@ -1929,23 +1900,28 @@ bool ImplementationVisitor::GenerateExpressionBranch(
}
return GenerateLabeledStatementBlocks(statement_blocks, statement_labels,
- merge_label);
+ merge_block);
}
VisitResult ImplementationVisitor::GenerateImplicitConvert(
const Type* destination_type, VisitResult source) {
+ StackScope scope(this);
+ if (source.type() == TypeOracle::GetNeverType()) {
+ ReportError("it is not allowed to use a value of type never");
+ }
+
if (destination_type == source.type()) {
- return source;
+ return scope.Yield(GenerateCopy(source));
}
if (TypeOracle::IsImplicitlyConvertableFrom(destination_type,
source.type())) {
std::string name =
GetGeneratedCallableName(kFromConstexprMacroName, {destination_type});
- return GenerateCall(name, {{source}, {}}, {}, false);
+ return scope.Yield(GenerateCall(name, {{source}, {}}, {}, false));
} else if (IsAssignableFrom(destination_type, source.type())) {
source.SetType(destination_type);
- return source;
+ return scope.Yield(GenerateCopy(source));
} else {
std::stringstream s;
s << "cannot use expression of type " << *source.type()
@@ -1954,56 +1930,53 @@ VisitResult ImplementationVisitor::GenerateImplicitConvert(
}
}
-std::string ImplementationVisitor::NewTempVariable() {
- std::string name("t");
- name += std::to_string(next_temp_++);
- return name;
-}
-
-std::string ImplementationVisitor::GenerateNewTempVariable(const Type* type) {
- std::string temp = NewTempVariable();
- GenerateIndent();
- source_out() << type->GetGeneratedTypeName() << " " << temp << " = ";
- return temp;
-}
-
-void ImplementationVisitor::GenerateLabelDefinition(Label* label,
- AstNode* node) {
- std::string label_string = label->generated();
- std::string label_string_impl = label_string + "_impl";
- GenerateIndent();
- source_out() << "Label " + label_string_impl + "(this";
- if (node != nullptr) {
- source_out() << ", ";
- GenerateChangedVarsFromControlSplit(node);
- }
- source_out() << ");\n";
- GenerateIndent();
- source_out() << "Label* " + label_string + " = &" << label_string_impl
- << ";\n";
- GenerateIndent();
- source_out() << "USE(" << label_string << ");\n";
+void ImplementationVisitor::CreateBlockForLabel(Label* label,
+ Stack<const Type*> stack) {
+ label->set_block(assembler().NewBlock(std::move(stack), label->IsDeferred()));
}
void ImplementationVisitor::GenerateLabelBind(Label* label) {
- GenerateIndent();
- source_out() << "BIND(" << label->generated() << ");\n";
+ assembler().Bind(label->block());
}
-void ImplementationVisitor::GenerateLabelGoto(Label* label) {
- GenerateIndent();
- source_out() << "Goto(" << label->generated() << ");\n";
+StackRange ImplementationVisitor::GenerateLabelGoto(
+ Label* label, base::Optional<StackRange> arguments) {
+ return assembler().Goto(label->block(), arguments ? arguments->Size() : 0);
}
std::vector<Label*> ImplementationVisitor::LabelsFromIdentifiers(
const std::vector<std::string>& names) {
std::vector<Label*> result;
- for (auto name : names) {
+ result.reserve(names.size());
+ for (const auto& name : names) {
result.push_back(declarations()->LookupLabel(name));
}
return result;
}
+StackRange ImplementationVisitor::LowerParameter(
+ const Type* type, const std::string& parameter_name,
+ Stack<std::string>* lowered_parameters) {
+ if (type->IsStructType()) {
+ const StructType* struct_type = StructType::cast(type);
+ StackRange range = lowered_parameters->TopRange(0);
+ for (auto& field : struct_type->fields()) {
+ StackRange parameter_range = LowerParameter(
+ field.type, parameter_name + "." + field.name, lowered_parameters);
+ range.Extend(parameter_range);
+ }
+ return range;
+ } else {
+ lowered_parameters->Push(parameter_name);
+ return lowered_parameters->TopRange(1);
+ }
+}
+
+std::string ImplementationVisitor::ExternalLabelParameterName(Label* label,
+ size_t i) {
+ return label->external_label_name() + "_parameter_" + std::to_string(i);
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 43520239da..a7440251d7 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -9,6 +9,7 @@
#include "src/base/macros.h"
#include "src/torque/ast.h"
+#include "src/torque/cfg.h"
#include "src/torque/file-visitor.h"
#include "src/torque/global-context.h"
#include "src/torque/types.h"
@@ -18,18 +19,95 @@ namespace v8 {
namespace internal {
namespace torque {
-struct LocationReference {
- LocationReference(Value* value, VisitResult base, VisitResult index)
- : value(value), base(base), index(index) {}
- Value* value;
- VisitResult base;
- VisitResult index;
+// LocationReference is the representation of an l-value, i.e. a value that
+// might allow for assignment. For uniformity, this class can also represent
+// unassignable temporaries. Assignable values fall into two categories:
+// - stack ranges that represent mutable variables, including structs.
+// - field or element access expressions that generate operator calls.
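+// For example, an element access `a[i]` becomes
+// LocationReference::ArrayAccess(a, i), which reads through the "[]"
+// operator and writes through "[]=".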
+class LocationReference {
+ public:
+ // An assignable stack range.
+ static LocationReference VariableAccess(VisitResult variable) {
+ DCHECK(variable.IsOnStack());
+ LocationReference result;
+ result.variable_ = std::move(variable);
+ return result;
+ }
+ // An unassignable value. {description} is only used for error messages.
+ static LocationReference Temporary(VisitResult temporary,
+ std::string description) {
+ LocationReference result;
+ result.temporary_ = std::move(temporary);
+ result.temporary_description_ = std::move(description);
+ return result;
+ }
+ static LocationReference ArrayAccess(VisitResult base, VisitResult offset) {
+ LocationReference result;
+ result.eval_function_ = std::string{"[]"};
+ result.assign_function_ = std::string{"[]="};
+ result.call_arguments_ = {base, offset};
+ return result;
+ }
+ static LocationReference FieldAccess(VisitResult object,
+ std::string fieldname) {
+ LocationReference result;
+ result.eval_function_ = "." + fieldname;
+ result.assign_function_ = "." + fieldname + "=";
+ result.call_arguments_ = {object};
+ return result;
+ }
+
+ bool IsConst() const { return temporary_.has_value(); }
+
+ bool IsVariableAccess() const { return variable_.has_value(); }
+ const VisitResult& variable() const {
+ DCHECK(IsVariableAccess());
+ return *variable_;
+ }
+ bool IsTemporary() const { return temporary_.has_value(); }
+ const VisitResult& temporary() const {
+ DCHECK(IsTemporary());
+ return *temporary_;
+ }
+ // For error reporting.
+ const std::string& temporary_description() const {
+ DCHECK(IsTemporary());
+ return *temporary_description_;
+ }
+
+ bool IsCallAccess() const {
+ bool is_call_access = eval_function_.has_value();
+ DCHECK_EQ(is_call_access, assign_function_.has_value());
+ return is_call_access;
+ }
+ const VisitResultVector& call_arguments() const {
+ DCHECK(IsCallAccess());
+ return call_arguments_;
+ }
+ const std::string& eval_function() const {
+ DCHECK(IsCallAccess());
+ return *eval_function_;
+ }
+ const std::string& assign_function() const {
+ DCHECK(IsCallAccess());
+ return *assign_function_;
+ }
+
+ private:
+ base::Optional<VisitResult> variable_;
+ base::Optional<VisitResult> temporary_;
+ base::Optional<std::string> temporary_description_;
+ base::Optional<std::string> eval_function_;
+ base::Optional<std::string> assign_function_;
+ VisitResultVector call_arguments_;
+
+ LocationReference() = default;
};
class ImplementationVisitor : public FileVisitor {
public:
explicit ImplementationVisitor(GlobalContext& global_context)
- : FileVisitor(global_context), indent_(0), next_temp_(0) {}
+ : FileVisitor(global_context) {}
void Visit(Ast* ast) { Visit(ast->default_module()); }
@@ -39,44 +117,23 @@ class ImplementationVisitor : public FileVisitor {
VisitResult Visit(StructExpression* decl);
- LocationReference GetLocationReference(LocationExpression* location);
- LocationReference GetLocationReference(IdentifierExpression* expr) {
- return LocationReference(declarations()->LookupValue(expr->name), {}, {});
- }
+ LocationReference GetLocationReference(Expression* location);
+ LocationReference GetLocationReference(IdentifierExpression* expr);
LocationReference GetLocationReference(FieldAccessExpression* expr);
- LocationReference GetLocationReference(ElementAccessExpression* expr) {
- return LocationReference({}, Visit(expr->array), Visit(expr->index));
- }
-
- std::string RValueFlattenStructs(VisitResult result);
+ LocationReference GetLocationReference(ElementAccessExpression* expr);
- VisitResult GenerateFetchFromLocation(LocationReference reference) {
- const Value* value = reference.value;
- return VisitResult(value->type(), value);
- }
- VisitResult GenerateFetchFromLocation(LocationExpression* location,
- LocationReference reference);
- VisitResult GenerateFetchFromLocation(IdentifierExpression* expr,
- LocationReference reference) {
- return GenerateFetchFromLocation(reference);
- }
- VisitResult GenerateFetchFromLocation(FieldAccessExpression* expr,
- LocationReference reference);
- VisitResult GenerateFetchFromLocation(ElementAccessExpression* expr,
- LocationReference reference) {
- Arguments arguments;
- arguments.parameters = {reference.base, reference.index};
- return GenerateCall("[]", arguments);
- }
+ VisitResult GenerateFetchFromLocation(const LocationReference& reference);
VisitResult GetBuiltinCode(Builtin* builtin);
VisitResult Visit(IdentifierExpression* expr);
VisitResult Visit(FieldAccessExpression* expr) {
- return GenerateFetchFromLocation(expr, GetLocationReference(expr));
+ StackScope scope(this);
+ return scope.Yield(GenerateFetchFromLocation(GetLocationReference(expr)));
}
VisitResult Visit(ElementAccessExpression* expr) {
- return GenerateFetchFromLocation(expr, GetLocationReference(expr));
+ StackScope scope(this);
+ return scope.Yield(GenerateFetchFromLocation(GetLocationReference(expr)));
}
void Visit(ModuleDeclaration* decl);
@@ -120,8 +177,9 @@ class ImplementationVisitor : public FileVisitor {
VisitResult Visit(StringLiteralExpression* expr);
VisitResult Visit(NumberLiteralExpression* expr);
VisitResult Visit(AssumeTypeImpossibleExpression* expr);
+ VisitResult Visit(TryLabelExpression* expr);
+ VisitResult Visit(StatementExpression* expr);
- const Type* Visit(TryLabelStatement* stmt);
const Type* Visit(ReturnStatement* stmt);
const Type* Visit(GotoStatement* stmt);
const Type* Visit(IfStatement* stmt);
@@ -146,63 +204,70 @@ class ImplementationVisitor : public FileVisitor {
std::string GetDSLAssemblerName(Module* module);
- void GenerateIndent();
-
- class ScopedIndent {
+  // {StackScope} records the stack height at creation time and restores it
+  // on destruction by emitting a {DeleteRangeInstruction}, except for the
+  // slots protected by {StackScope::Yield}. Calling {Yield(v)} deletes all
+  // slots above the initial stack height except for the slots of {v}, moves
+  // them down to sit directly above the initial height, and marks them to
+  // survive destruction of the {StackScope}. A typical pattern is the
+  // following:
+ //
+ // VisitResult result;
+ // {
+ // StackScope stack_scope(this);
+ // // ... create temporary slots ...
+ // result = stack_scope.Yield(surviving_slots);
+ // }
+ class StackScope {
public:
- explicit ScopedIndent(ImplementationVisitor* visitor, bool new_lines = true)
- : new_lines_(new_lines), visitor_(visitor) {
- if (new_lines) visitor->GenerateIndent();
- visitor->source_out() << "{";
- if (new_lines) visitor->source_out() << "\n";
- visitor->indent_++;
+ explicit StackScope(ImplementationVisitor* visitor) : visitor_(visitor) {
+ base_ = visitor_->assembler().CurrentStack().AboveTop();
+ }
+ VisitResult Yield(VisitResult result) {
+ DCHECK(!yield_called_);
+ yield_called_ = true;
+ if (!result.IsOnStack()) {
+ if (!visitor_->assembler().CurrentBlockIsComplete()) {
+ visitor_->assembler().DropTo(base_);
+ }
+ return result;
+ }
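+    // Drop everything above the surviving slots, then delete the slots
+    // between the scope base and the surviving range so that the result
+    // ends up directly above the initial stack height.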
+ DCHECK_LE(base_, result.stack_range().begin());
+ DCHECK_LE(result.stack_range().end(),
+ visitor_->assembler().CurrentStack().AboveTop());
+ visitor_->assembler().DropTo(result.stack_range().end());
+ visitor_->assembler().DeleteRange(
+ StackRange{base_, result.stack_range().begin()});
+ base_ = visitor_->assembler().CurrentStack().AboveTop();
+ return VisitResult(result.type(), visitor_->assembler().TopRange(
+ result.stack_range().Size()));
}
- ~ScopedIndent() {
- visitor_->indent_--;
- visitor_->GenerateIndent();
- visitor_->source_out() << "}";
- if (new_lines_) visitor_->source_out() << "\n";
+
+ ~StackScope() {
+ if (yield_called_) {
+ DCHECK_IMPLIES(
+ !visitor_->assembler().CurrentBlockIsComplete(),
+ base_ == visitor_->assembler().CurrentStack().AboveTop());
+ } else if (!visitor_->assembler().CurrentBlockIsComplete()) {
+ visitor_->assembler().DropTo(base_);
+ }
}
private:
- bool new_lines_;
ImplementationVisitor* visitor_;
+ BottomOffset base_;
+ bool yield_called_ = false;
};
Callable* LookupCall(const std::string& name, const Arguments& arguments,
const TypeVector& specialization_types);
- bool GenerateChangedVarFromControlSplit(const Variable* v, bool first = true);
-
- void GetFlattenedStructsVars(const Variable* base,
- std::set<const Variable*>* vars);
-
- void GenerateChangedVarsFromControlSplit(AstNode* node);
-
const Type* GetCommonType(const Type* left, const Type* right);
VisitResult GenerateCopy(const VisitResult& to_copy);
- void GenerateAssignToVariable(Variable* var, VisitResult value);
-
- void GenerateAssignToLocation(LocationExpression* location,
- const LocationReference& reference,
- VisitResult assignment_value);
-
- void GenerateVariableDeclaration(const Variable* var);
-
- Variable* GeneratePredeclaredVariableDeclaration(
- const std::string& name,
- const base::Optional<VisitResult>& initialization);
-
- Variable* GenerateVariableDeclaration(
- AstNode* node, const std::string& name, bool is_const,
- const base::Optional<const Type*>& type,
- const base::Optional<VisitResult>& initialization = {});
-
- void GenerateParameter(const std::string& parameter_name);
-
- void GenerateParameterList(const NameVector& list, size_t first = 0);
+ void GenerateAssignToLocation(const LocationReference& reference,
+ const VisitResult& assignment_value);
VisitResult GenerateCall(const std::string& callable_name,
Arguments parameters,
@@ -213,7 +278,7 @@ class ImplementationVisitor : public FileVisitor {
bool GenerateLabeledStatementBlocks(
const std::vector<Statement*>& blocks,
- const std::vector<Label*>& statement_labels, Label* merge_label);
+ const std::vector<Label*>& statement_labels, Block* merge_block);
void GenerateBranch(const VisitResult& condition, Label* true_label,
Label* false_label);
@@ -221,7 +286,7 @@ class ImplementationVisitor : public FileVisitor {
bool GenerateExpressionBranch(Expression* expression,
const std::vector<Label*>& statement_labels,
const std::vector<Statement*>& statement_blocks,
- Label* merge_label);
+ Block* merge_block);
void GenerateMacroFunctionDeclaration(std::ostream& o,
const std::string& macro_prefix,
@@ -242,25 +307,40 @@ class ImplementationVisitor : public FileVisitor {
Visit(callable, MakeSignature(signature), body);
}
- std::string NewTempVariable();
-
- std::string GenerateNewTempVariable(const Type* type);
-
- void GenerateLabelDefinition(Label* label, AstNode* node = nullptr);
+ void CreateBlockForLabel(Label* label, Stack<const Type*> stack);
void GenerateLabelBind(Label* label);
- void GenerateLabelGoto(Label* label);
+ StackRange GenerateLabelGoto(Label* label,
+ base::Optional<StackRange> arguments = {});
std::vector<Label*> LabelsFromIdentifiers(
const std::vector<std::string>& names);
+ StackRange LowerParameter(const Type* type, const std::string& parameter_name,
+ Stack<std::string>* lowered_parameters);
+
+ std::string ExternalLabelParameterName(Label* label, size_t i);
+
std::ostream& source_out() { return module_->source_stream(); }
std::ostream& header_out() { return module_->header_stream(); }
- size_t indent_;
- int32_t next_temp_;
+ CfgAssembler& assembler() { return *assembler_; }
+
+ void SetReturnValue(VisitResult return_value) {
+ DCHECK_IMPLIES(return_value_, *return_value_ == return_value);
+ return_value_ = std::move(return_value);
+ }
+
+ VisitResult GetAndClearReturnValue() {
+ VisitResult return_value = *return_value_;
+ return_value_ = base::nullopt;
+ return return_value;
+ }
+
+ base::Optional<CfgAssembler> assembler_;
+ base::Optional<VisitResult> return_value_;
};
} // namespace torque
diff --git a/deps/v8/src/torque/instructions.cc b/deps/v8/src/torque/instructions.cc
new file mode 100644
index 0000000000..13dbd75a2e
--- /dev/null
+++ b/deps/v8/src/torque/instructions.cc
@@ -0,0 +1,204 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/instructions.h"
+#include "src/torque/cfg.h"
+#include "src/torque/type-oracle.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+#define TORQUE_INSTRUCTION_BOILERPLATE_DEFINITIONS(Name) \
+ const InstructionKind Name::kKind = InstructionKind::k##Name; \
+ std::unique_ptr<InstructionBase> Name::Clone() const { \
+ return std::unique_ptr<InstructionBase>(new Name(*this)); \
+ } \
+ void Name::Assign(const InstructionBase& other) { \
+ *this = static_cast<const Name&>(other); \
+ }
+TORQUE_INSTRUCTION_LIST(TORQUE_INSTRUCTION_BOILERPLATE_DEFINITIONS)
+#undef TORQUE_INSTRUCTION_BOILERPLATE_DEFINITIONS
+
+void PeekInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ const Type* type = stack->Peek(slot);
+ if (widened_type) {
+ if (!type->IsSubtypeOf(*widened_type)) {
+ ReportError("type ", type, " is not a subtype of ", *widened_type);
+ }
+ type = *widened_type;
+ }
+ stack->Push(type);
+}
+
+void PokeInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ const Type* type = stack->Top();
+ if (widened_type) {
+ if (!type->IsSubtypeOf(*widened_type)) {
+ ReportError("type ", type, " is not a subtype of ", *widened_type);
+ }
+ type = *widened_type;
+ }
+ stack->Poke(slot, type);
+ stack->Pop();
+}
+
+void DeleteRangeInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ stack->DeleteRange(range);
+}
+
+void PushUninitializedInstruction::TypeInstruction(
+ Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
+ stack->Push(type);
+}
+
+void PushCodePointerInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ stack->Push(type);
+}
+
+void ModuleConstantInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ stack->PushMany(LowerType(constant->type()));
+}
+
+void CallCsaMacroInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ std::vector<const Type*> parameter_types =
+ LowerParameterTypes(macro->signature().parameter_types);
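+  // Arguments were pushed left-to-right, so pop and check them against the
+  // lowered parameter types from right to left.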
+ for (intptr_t i = parameter_types.size() - 1; i >= 0; --i) {
+ const Type* arg_type = stack->Pop();
+ const Type* parameter_type = parameter_types.back();
+ parameter_types.pop_back();
+ if (arg_type != parameter_type) {
+ ReportError("parameter ", i, ": expected type ", *parameter_type,
+ " but found type ", *arg_type);
+ }
+ }
+ if (!parameter_types.empty()) ReportError("missing arguments");
+
+ stack->PushMany(LowerType(macro->signature().return_type));
+}
+
+void CallCsaMacroAndBranchInstruction::TypeInstruction(
+ Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
+ std::vector<const Type*> parameter_types =
+ LowerParameterTypes(macro->signature().parameter_types);
+ for (intptr_t i = parameter_types.size() - 1; i >= 0; --i) {
+ const Type* arg_type = stack->Pop();
+ const Type* parameter_type = parameter_types.back();
+ parameter_types.pop_back();
+ if (arg_type != parameter_type) {
+ ReportError("parameter ", i, ": expected type ", *parameter_type,
+ " but found type ", *arg_type);
+ }
+ }
+ if (!parameter_types.empty()) ReportError("missing arguments");
+
+ if (label_blocks.size() != macro->signature().labels.size()) {
+ ReportError("wrong number of labels");
+ }
+ for (size_t i = 0; i < label_blocks.size(); ++i) {
+ Stack<const Type*> continuation_stack = *stack;
+ continuation_stack.PushMany(
+ LowerParameterTypes(macro->signature().labels[i].types));
+ label_blocks[i]->SetInputTypes(std::move(continuation_stack));
+ }
+
+ if (macro->signature().return_type != TypeOracle::GetNeverType()) {
+ Stack<const Type*> return_stack = *stack;
+ return_stack.PushMany(LowerType(macro->signature().return_type));
+ if (return_continuation == base::nullopt) {
+ ReportError("missing return continuation.");
+ }
+ (*return_continuation)->SetInputTypes(return_stack);
+ } else {
+ if (return_continuation != base::nullopt) {
+ ReportError("unreachable return continuation.");
+ }
+ }
+}
+
+void CallBuiltinInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ std::vector<const Type*> argument_types = stack->PopMany(argc);
+ if (argument_types !=
+ LowerParameterTypes(builtin->signature().parameter_types)) {
+ ReportError("wrong argument types");
+ }
+ stack->PushMany(LowerType(builtin->signature().return_type));
+}
+
+void CallBuiltinPointerInstruction::TypeInstruction(
+ Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
+ std::vector<const Type*> argument_types = stack->PopMany(argc);
+ const FunctionPointerType* f = FunctionPointerType::DynamicCast(stack->Pop());
+ if (!f) ReportError("expected function pointer type");
+ if (argument_types != LowerParameterTypes(f->parameter_types())) {
+ ReportError("wrong argument types");
+ }
+ stack->PushMany(LowerType(f->return_type()));
+}
+
+void CallRuntimeInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ std::vector<const Type*> argument_types = stack->PopMany(argc);
+ if (argument_types !=
+ LowerParameterTypes(runtime_function->signature().parameter_types,
+ argc)) {
+ ReportError("wrong argument types");
+ }
+ stack->PushMany(LowerType(runtime_function->signature().return_type));
+}
+
+void BranchInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ const Type* condition_type = stack->Pop();
+ if (condition_type != TypeOracle::GetBoolType()) {
+ ReportError("condition has to have type bool");
+ }
+ if_true->SetInputTypes(*stack);
+ if_false->SetInputTypes(*stack);
+}
+
+void ConstexprBranchInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ if_true->SetInputTypes(*stack);
+ if_false->SetInputTypes(*stack);
+}
+
+void GotoInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ destination->SetInputTypes(*stack);
+}
+
+void GotoExternalInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ if (variable_names.size() != stack->Size()) {
+ ReportError("goto external label with wrong parameter count.");
+ }
+}
+
+void ReturnInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ cfg->SetReturnType(stack->Pop());
+}
+
+void PrintConstantStringInstruction::TypeInstruction(
+ Stack<const Type*>* stack, ControlFlowGraph* cfg) const {}
+
+void DebugBreakInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {}
+
+void UnsafeCastInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ stack->Poke(stack->AboveTop() - 1, destination_type);
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
new file mode 100644
index 0000000000..881074e827
--- /dev/null
+++ b/deps/v8/src/torque/instructions.h
@@ -0,0 +1,354 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_INSTRUCTIONS_H_
+#define V8_TORQUE_INSTRUCTIONS_H_
+
+#include <memory>
+
+#include "src/torque/ast.h"
+#include "src/torque/source-positions.h"
+#include "src/torque/types.h"
+#include "src/torque/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class Block;
+class Builtin;
+class ControlFlowGraph;
+class Macro;
+class ModuleConstant;
+class RuntimeFunction;
+
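+// X-macro list of all Torque CFG instructions, used to generate the
+// per-instruction boilerplate (kind tag, Clone, Assign, TypeInstruction).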
+#define TORQUE_INSTRUCTION_LIST(V) \
+ V(PeekInstruction) \
+ V(PokeInstruction) \
+ V(DeleteRangeInstruction) \
+ V(PushUninitializedInstruction) \
+ V(PushCodePointerInstruction) \
+ V(CallCsaMacroInstruction) \
+ V(ModuleConstantInstruction) \
+ V(CallCsaMacroAndBranchInstruction) \
+ V(CallBuiltinInstruction) \
+ V(CallRuntimeInstruction) \
+ V(CallBuiltinPointerInstruction) \
+ V(BranchInstruction) \
+ V(ConstexprBranchInstruction) \
+ V(GotoInstruction) \
+ V(GotoExternalInstruction) \
+ V(ReturnInstruction) \
+ V(PrintConstantStringInstruction) \
+ V(DebugBreakInstruction) \
+ V(UnsafeCastInstruction)
+
+#define TORQUE_INSTRUCTION_BOILERPLATE() \
+ static const InstructionKind kKind; \
+ std::unique_ptr<InstructionBase> Clone() const override; \
+ void Assign(const InstructionBase& other) override; \
+ void TypeInstruction(Stack<const Type*>* stack, ControlFlowGraph* cfg) \
+ const override;
+
+enum class InstructionKind {
+#define ENUM_ITEM(name) k##name,
+ TORQUE_INSTRUCTION_LIST(ENUM_ITEM)
+#undef ENUM_ITEM
+};
+
+struct InstructionBase {
+ InstructionBase() : pos(CurrentSourcePosition::Get()) {}
+ virtual std::unique_ptr<InstructionBase> Clone() const = 0;
+ virtual void Assign(const InstructionBase& other) = 0;
+ virtual ~InstructionBase() = default;
+
+ virtual void TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const = 0;
+ virtual bool IsBlockTerminator() const { return false; }
+ virtual void AppendSuccessorBlocks(std::vector<Block*>* block_list) const {}
+
+ SourcePosition pos;
+};
+
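+// Value-type wrapper around a heap-allocated InstructionBase; copying an
+// Instruction deep-clones the wrapped instruction.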
+class Instruction {
+ public:
+ template <class T>
+ Instruction(T instr) // NOLINT(runtime/explicit)
+ : kind_(T::kKind), instruction_(new T(std::move(instr))) {}
+
+ template <class T>
+ T& Cast() {
+ DCHECK(Is<T>());
+ return static_cast<T&>(*instruction_);
+ }
+
+ template <class T>
+ const T& Cast() const {
+ DCHECK(Is<T>());
+ return static_cast<const T&>(*instruction_);
+ }
+
+ template <class T>
+ bool Is() const {
+ return kind_ == T::kKind;
+ }
+
+ template <class T>
+ T* DynamicCast() {
+ if (Is<T>()) return &Cast<T>();
+ return nullptr;
+ }
+
+ template <class T>
+ const T* DynamicCast() const {
+ if (Is<T>()) return &Cast<T>();
+ return nullptr;
+ }
+
+ Instruction(const Instruction& other)
+ : kind_(other.kind_), instruction_(other.instruction_->Clone()) {}
+ Instruction& operator=(const Instruction& other) {
+ if (kind_ == other.kind_) {
+ instruction_->Assign(*other.instruction_);
+ } else {
+ kind_ = other.kind_;
+ instruction_ = other.instruction_->Clone();
+ }
+ return *this;
+ }
+
+ InstructionKind kind() const { return kind_; }
+ void TypeInstruction(Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
+ return instruction_->TypeInstruction(stack, cfg);
+ }
+
+ InstructionBase* operator->() { return instruction_.get(); }
+ const InstructionBase* operator->() const { return instruction_.get(); }
+
+ private:
+ InstructionKind kind_;
+ std::unique_ptr<InstructionBase> instruction_;
+};
+
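+// Copy the value at {slot} to the top of the stack, optionally checking that
+// it is a subtype of {widened_type} and widening it to that type.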
+struct PeekInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+
+ PeekInstruction(BottomOffset slot, base::Optional<const Type*> widened_type)
+ : slot(slot), widened_type(widened_type) {}
+
+ BottomOffset slot;
+ base::Optional<const Type*> widened_type;
+};
+
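+// Pop the top of the stack and store it into {slot}, optionally checking that
+// it is a subtype of {widened_type} and widening it to that type.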
+struct PokeInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+
+ PokeInstruction(BottomOffset slot, base::Optional<const Type*> widened_type)
+ : slot(slot), widened_type(widened_type) {}
+
+ BottomOffset slot;
+ base::Optional<const Type*> widened_type;
+};
+
+// Delete the given {range} of stack slots, shifting any slots above the
+// range downwards.
+struct DeleteRangeInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ explicit DeleteRangeInstruction(StackRange range) : range(range) {}
+
+ StackRange range;
+};
+
+struct PushUninitializedInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ explicit PushUninitializedInstruction(const Type* type) : type(type) {}
+
+ const Type* type;
+};
+
+struct PushCodePointerInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ PushCodePointerInstruction(std::string external_name, const Type* type)
+ : external_name(std::move(external_name)), type(type) {
+ DCHECK(type->IsFunctionPointerType());
+ }
+
+ std::string external_name;
+ const Type* type;
+};
+
+struct ModuleConstantInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ explicit ModuleConstantInstruction(ModuleConstant* constant)
+ : constant(constant) {}
+
+ ModuleConstant* constant;
+};
+
+struct CallCsaMacroInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ CallCsaMacroInstruction(Macro* macro,
+ std::vector<std::string> constexpr_arguments)
+ : macro(macro), constexpr_arguments(constexpr_arguments) {}
+
+ Macro* macro;
+ std::vector<std::string> constexpr_arguments;
+};
+
+struct CallCsaMacroAndBranchInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ CallCsaMacroAndBranchInstruction(Macro* macro,
+ std::vector<std::string> constexpr_arguments,
+ base::Optional<Block*> return_continuation,
+ std::vector<Block*> label_blocks)
+ : macro(macro),
+ constexpr_arguments(constexpr_arguments),
+ return_continuation(return_continuation),
+ label_blocks(label_blocks) {}
+ bool IsBlockTerminator() const override { return true; }
+ void AppendSuccessorBlocks(std::vector<Block*>* block_list) const override {
+ if (return_continuation) block_list->push_back(*return_continuation);
+ for (Block* block : label_blocks) block_list->push_back(block);
+ }
+
+ Macro* macro;
+ std::vector<std::string> constexpr_arguments;
+ base::Optional<Block*> return_continuation;
+ std::vector<Block*> label_blocks;
+};
+
+struct CallBuiltinInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ bool IsBlockTerminator() const override { return is_tailcall; }
+ CallBuiltinInstruction(bool is_tailcall, Builtin* builtin, size_t argc)
+ : is_tailcall(is_tailcall), builtin(builtin), argc(argc) {}
+
+ bool is_tailcall;
+ Builtin* builtin;
+ size_t argc;
+};
+
+struct CallBuiltinPointerInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ bool IsBlockTerminator() const override { return is_tailcall; }
+ CallBuiltinPointerInstruction(bool is_tailcall, Builtin* example_builtin,
+ size_t argc)
+ : is_tailcall(is_tailcall),
+ example_builtin(example_builtin),
+ argc(argc) {}
+
+ bool is_tailcall;
+ Builtin* example_builtin;
+ size_t argc;
+};
+
+struct CallRuntimeInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ bool IsBlockTerminator() const override { return is_tailcall; }
+
+ CallRuntimeInstruction(bool is_tailcall, RuntimeFunction* runtime_function,
+ size_t argc)
+ : is_tailcall(is_tailcall),
+ runtime_function(runtime_function),
+ argc(argc) {}
+
+ bool is_tailcall;
+ RuntimeFunction* runtime_function;
+ size_t argc;
+};
+
+struct BranchInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ bool IsBlockTerminator() const override { return true; }
+ void AppendSuccessorBlocks(std::vector<Block*>* block_list) const override {
+ block_list->push_back(if_true);
+ block_list->push_back(if_false);
+ }
+
+ BranchInstruction(Block* if_true, Block* if_false)
+ : if_true(if_true), if_false(if_false) {}
+
+ Block* if_true;
+ Block* if_false;
+};
+
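+// Two-way branch on a constexpr C++ condition (stored as a string); both
+// successors see the current stack unchanged.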
+struct ConstexprBranchInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ bool IsBlockTerminator() const override { return true; }
+ void AppendSuccessorBlocks(std::vector<Block*>* block_list) const override {
+ block_list->push_back(if_true);
+ block_list->push_back(if_false);
+ }
+
+ ConstexprBranchInstruction(std::string condition, Block* if_true,
+ Block* if_false)
+ : condition(condition), if_true(if_true), if_false(if_false) {}
+
+ std::string condition;
+ Block* if_true;
+ Block* if_false;
+};
+
+struct GotoInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ bool IsBlockTerminator() const override { return true; }
+ void AppendSuccessorBlocks(std::vector<Block*>* block_list) const override {
+ block_list->push_back(destination);
+ }
+
+ explicit GotoInstruction(Block* destination) : destination(destination) {}
+
+ Block* destination;
+};
+
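+// Jump to a label defined outside the generated CFG; {variable_names} must
+// match the remaining stack slots one-to-one.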
+struct GotoExternalInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ bool IsBlockTerminator() const override { return true; }
+
+ GotoExternalInstruction(std::string destination,
+ std::vector<std::string> variable_names)
+ : destination(std::move(destination)),
+ variable_names(std::move(variable_names)) {}
+
+ std::string destination;
+ std::vector<std::string> variable_names;
+};
+
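+// Pop the top stack slot and return it; its type becomes the return type
+// recorded on the control-flow graph.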
+struct ReturnInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ bool IsBlockTerminator() const override { return true; }
+};
+
+struct PrintConstantStringInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ explicit PrintConstantStringInstruction(std::string message) {
+ // The normal way to write this triggers a bug in Clang on Windows.
+ this->message = std::move(message);
+ }
+
+ std::string message;
+};
+
+struct DebugBreakInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ bool IsBlockTerminator() const override { return never_continues; }
+ explicit DebugBreakInstruction(bool never_continues)
+ : never_continues(never_continues) {}
+
+ bool never_continues;
+};
+
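+// Retype the top stack slot as {destination_type} without any runtime check.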
+struct UnsafeCastInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ explicit UnsafeCastInstruction(const Type* destination_type)
+ : destination_type(destination_type) {}
+
+ const Type* destination_type;
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_INSTRUCTIONS_H_
diff --git a/deps/v8/src/torque/source-positions.h b/deps/v8/src/torque/source-positions.h
index bd5aaa3ec3..ae07ff9bf2 100644
--- a/deps/v8/src/torque/source-positions.h
+++ b/deps/v8/src/torque/source-positions.h
@@ -29,7 +29,7 @@ DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition)
class SourceFileMap : public ContextualClass<SourceFileMap> {
public:
- SourceFileMap() {}
+ SourceFileMap() = default;
static const std::string& GetSource(SourceId source) {
return Get().sources_[source.id_];
}
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 92c3fa8815..91f9ef4ed2 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -183,14 +183,68 @@ T* MakeNode(Args... args) {
new T(CurrentSourcePosition::Get(), std::move(args)...)));
}
+void LintGenericParameters(const GenericParameters& parameters) {
+ for (const std::string& parameter : parameters) {
+ if (!IsUpperCamelCase(parameter)) {
+ NamingConventionError("Generic parameter", parameter, "UpperCamelCase");
+ }
+ }
+}
+
+void CheckNotDeferredStatement(Statement* statement) {
+ if (BlockStatement* block = BlockStatement::DynamicCast(statement)) {
+ if (block->deferred) {
+ LintError(
+ "cannot use deferred with a statement block here, it will have no "
+ "effect");
+ }
+ }
+}
+
+Expression* MakeCall(const std::string& callee, bool is_operator,
+ const std::vector<TypeExpression*>& generic_arguments,
+ const std::vector<Expression*>& arguments,
+ const std::vector<Statement*>& otherwise) {
+ std::vector<std::string> labels;
+
+ // All IdentifierExpressions are treated as label names and can be directly
+  // used as label identifiers. All other statements in a call's otherwise
+ // must create intermediate Labels for the otherwise's statement code.
+ size_t label_id = 0;
+ std::vector<LabelBlock*> temp_labels;
+ for (auto* statement : otherwise) {
+ if (auto* e = ExpressionStatement::DynamicCast(statement)) {
+ if (auto* id = IdentifierExpression::DynamicCast(e->expression)) {
+ if (id->generic_arguments.size() != 0) {
+ ReportError("An otherwise label cannot have generic parameters");
+ }
+ labels.push_back(id->name);
+ continue;
+ }
+ }
+ auto label_name = std::string("_label") + std::to_string(label_id++);
+ labels.push_back(label_name);
+ auto* label_block =
+ MakeNode<LabelBlock>(label_name, ParameterList::Empty(), statement);
+ temp_labels.push_back(label_block);
+ }
+
+ // Create nested try-label expression for all of the temporary Labels that
+ // were created.
+ Expression* result = MakeNode<CallExpression>(
+ callee, false, generic_arguments, arguments, labels);
+ for (auto* label : temp_labels) {
+ result = MakeNode<TryLabelExpression>(result, label);
+ }
+ return result;
+}
+
base::Optional<ParseResult> MakeCall(ParseResultIterator* child_results) {
auto callee = child_results->NextAs<std::string>();
auto generic_args = child_results->NextAs<TypeList>();
auto args = child_results->NextAs<std::vector<Expression*>>();
- auto labels = child_results->NextAs<std::vector<std::string>>();
- Expression* result =
- MakeNode<CallExpression>(callee, false, generic_args, args, labels);
- return ParseResult{result};
+ auto otherwise = child_results->NextAs<std::vector<Statement*>>();
+ return ParseResult{MakeCall(callee, false, generic_args, args, otherwise)};
}
base::Optional<ParseResult> MakeBinaryOperator(
@@ -198,20 +252,17 @@ base::Optional<ParseResult> MakeBinaryOperator(
auto left = child_results->NextAs<Expression*>();
auto op = child_results->NextAs<std::string>();
auto right = child_results->NextAs<Expression*>();
- Expression* result = MakeNode<CallExpression>(
- op, true, TypeList{}, std::vector<Expression*>{left, right},
- std::vector<std::string>{});
- return ParseResult{result};
+ return ParseResult{MakeCall(op, true, TypeList{},
+ std::vector<Expression*>{left, right},
+ std::vector<Statement*>{})};
}
base::Optional<ParseResult> MakeUnaryOperator(
ParseResultIterator* child_results) {
auto op = child_results->NextAs<std::string>();
auto e = child_results->NextAs<Expression*>();
- Expression* result = MakeNode<CallExpression>(op, true, TypeList{},
- std::vector<Expression*>{e},
- std::vector<std::string>{});
- return ParseResult{result};
+ return ParseResult{MakeCall(op, true, TypeList{}, std::vector<Expression*>{e},
+ std::vector<Statement*>{})};
}
template <bool has_varargs>
@@ -233,6 +284,10 @@ base::Optional<ParseResult> MakeParameterListFromNameAndTypeList(
}
ParameterList result;
for (NameAndTypeExpression& pair : params) {
+ if (!IsLowerCamelCase(pair.name)) {
+ NamingConventionError("Parameter", pair.name, "lowerCamelCase");
+ }
+
result.names.push_back(std::move(pair.name));
result.types.push_back(pair.type);
}
@@ -269,6 +324,8 @@ base::Optional<ParseResult> MakeExternalMacro(
auto operator_name = child_results->NextAs<base::Optional<std::string>>();
auto name = child_results->NextAs<std::string>();
auto generic_parameters = child_results->NextAs<GenericParameters>();
+ LintGenericParameters(generic_parameters);
+
auto args = child_results->NextAs<ParameterList>();
auto return_type = child_results->NextAs<TypeExpression*>();
auto labels = child_results->NextAs<LabelAndTypesVector>();
@@ -287,7 +344,13 @@ base::Optional<ParseResult> MakeTorqueMacroDeclaration(
ParseResultIterator* child_results) {
auto operator_name = child_results->NextAs<base::Optional<std::string>>();
auto name = child_results->NextAs<std::string>();
+ if (!IsUpperCamelCase(name)) {
+ NamingConventionError("Macro", name, "UpperCamelCase");
+ }
+
auto generic_parameters = child_results->NextAs<GenericParameters>();
+ LintGenericParameters(generic_parameters);
+
auto args = child_results->NextAs<ParameterList>();
auto return_type = child_results->NextAs<TypeExpression*>();
auto labels = child_results->NextAs<LabelAndTypesVector>();
@@ -308,7 +371,13 @@ base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
ParseResultIterator* child_results) {
auto javascript_linkage = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
+ if (!IsUpperCamelCase(name)) {
+ NamingConventionError("Builtin", name, "UpperCamelCase");
+ }
+
auto generic_parameters = child_results->NextAs<GenericParameters>();
+ LintGenericParameters(generic_parameters);
+
auto args = child_results->NextAs<ParameterList>();
auto return_type = child_results->NextAs<TypeExpression*>();
auto body = child_results->NextAs<base::Optional<Statement*>>();
@@ -327,6 +396,10 @@ base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
base::Optional<ParseResult> MakeConstDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<std::string>();
+ if (!IsValidModuleConstName(name)) {
+ NamingConventionError("Constant", name, "kUpperCamelCase");
+ }
+
auto type = child_results->NextAs<TypeExpression*>();
auto expression = child_results->NextAs<Expression*>();
Declaration* result =
@@ -355,6 +428,9 @@ base::Optional<ParseResult> MakeTypeAliasDeclaration(
base::Optional<ParseResult> MakeTypeDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<std::string>();
+ if (!IsValidTypeName(name)) {
+ NamingConventionError("Type", name, "UpperCamelCase");
+ }
auto extends = child_results->NextAs<base::Optional<std::string>>();
auto generates = child_results->NextAs<base::Optional<std::string>>();
auto constexpr_generates =
@@ -368,6 +444,9 @@ base::Optional<ParseResult> MakeTypeDeclaration(
base::Optional<ParseResult> MakeExplicitModuleDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<std::string>();
+ if (!IsSnakeCase(name)) {
+ NamingConventionError("Module", name, "snake_case");
+ }
auto declarations = child_results->NextAs<std::vector<Declaration*>>();
Declaration* result = MakeNode<ExplicitModuleDeclaration>(
std::move(name), std::move(declarations));
@@ -383,6 +462,7 @@ base::Optional<ParseResult> MakeSpecializationDeclaration(
auto return_type = child_results->NextAs<TypeExpression*>();
auto labels = child_results->NextAs<LabelAndTypesVector>();
auto body = child_results->NextAs<Statement*>();
+ CheckNotDeferredStatement(body);
Declaration* result = MakeNode<SpecializationDeclaration>(
std::move(name), std::move(generic_parameters), std::move(parameters),
return_type, std::move(labels), body);
@@ -403,6 +483,8 @@ base::Optional<ParseResult> MakeExternalBuiltin(
auto js_linkage = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
auto generic_parameters = child_results->NextAs<GenericParameters>();
+ LintGenericParameters(generic_parameters);
+
auto args = child_results->NextAs<ParameterList>();
auto return_type = child_results->NextAs<TypeExpression*>();
BuiltinDeclaration* builtin =
@@ -535,10 +617,11 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
}
BlockStatement* case_block;
if (i < cases.size() - 1) {
- value = MakeNode<CallExpression>(
- "cast", false, std::vector<TypeExpression*>{cases[i].type},
- std::vector<Expression*>{value},
- std::vector<std::string>{"_NextCase"});
+ value =
+ MakeCall("Cast", false, std::vector<TypeExpression*>{cases[i].type},
+ std::vector<Expression*>{value},
+ std::vector<Statement*>{MakeNode<ExpressionStatement>(
+ MakeNode<IdentifierExpression>("_NextCase"))});
case_block = MakeNode<BlockStatement>();
} else {
case_block = current_block;
@@ -550,9 +633,11 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
case_block->statements.push_back(cases[i].block);
if (i < cases.size() - 1) {
BlockStatement* next_block = MakeNode<BlockStatement>();
- current_block->statements.push_back(MakeNode<TryLabelStatement>(
- case_block, std::vector<LabelBlock*>{MakeNode<LabelBlock>(
- "_NextCase", ParameterList::Empty(), next_block)}));
+ current_block->statements.push_back(
+ MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
+ MakeNode<StatementExpression>(case_block),
+ MakeNode<LabelBlock>("_NextCase", ParameterList::Empty(),
+ next_block))));
current_block = next_block;
}
accumulated_types =
@@ -567,6 +652,7 @@ base::Optional<ParseResult> MakeTypeswitchCase(
auto name = child_results->NextAs<base::Optional<std::string>>();
auto type = child_results->NextAs<TypeExpression*>();
auto block = child_results->NextAs<Statement*>();
+ CheckNotDeferredStatement(block);
return ParseResult{TypeswitchCase{child_results->matched_input().pos,
std::move(name), type, block}};
}
@@ -576,6 +662,7 @@ base::Optional<ParseResult> MakeWhileStatement(
auto condition = child_results->NextAs<Expression*>();
auto body = child_results->NextAs<Statement*>();
Statement* result = MakeNode<WhileStatement>(condition, body);
+ CheckNotDeferredStatement(result);
return ParseResult{result};
}
@@ -599,6 +686,10 @@ base::Optional<ParseResult> MakeVarDeclarationStatement(
bool const_qualified = kind == "const";
if (!const_qualified) DCHECK_EQ("let", kind);
auto name = child_results->NextAs<std::string>();
+ if (!IsLowerCamelCase(name)) {
+ NamingConventionError("Variable", name, "lowerCamelCase");
+ }
+
auto type = child_results->NextAs<TypeExpression*>();
base::Optional<Expression*> initializer;
if (child_results->HasNext())
@@ -637,21 +728,27 @@ base::Optional<ParseResult> MakeBlockStatement(
return ParseResult{result};
}
-base::Optional<ParseResult> MakeTryLabelStatement(
+base::Optional<ParseResult> MakeTryLabelExpression(
ParseResultIterator* child_results) {
auto try_block = child_results->NextAs<Statement*>();
+ CheckNotDeferredStatement(try_block);
+ Statement* result = try_block;
auto label_blocks = child_results->NextAs<std::vector<LabelBlock*>>();
- Statement* result =
- MakeNode<TryLabelStatement>(try_block, std::move(label_blocks));
+ for (auto block : label_blocks) {
+ result = MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
+ MakeNode<StatementExpression>(result), block));
+ }
return ParseResult{result};
}
base::Optional<ParseResult> MakeForOfLoopStatement(
ParseResultIterator* child_results) {
auto var_decl = child_results->NextAs<Statement*>();
+ CheckNotDeferredStatement(var_decl);
auto iterable = child_results->NextAs<Expression*>();
auto range = child_results->NextAs<base::Optional<RangeExpression>>();
auto body = child_results->NextAs<Statement*>();
+ CheckNotDeferredStatement(body);
Statement* result =
MakeNode<ForOfLoopStatement>(var_decl, iterable, range, body);
return ParseResult{result};
@@ -663,12 +760,16 @@ base::Optional<ParseResult> MakeForLoopStatement(
auto test = child_results->NextAs<base::Optional<Expression*>>();
auto action = child_results->NextAs<base::Optional<Expression*>>();
auto body = child_results->NextAs<Statement*>();
+ CheckNotDeferredStatement(body);
Statement* result = MakeNode<ForLoopStatement>(var_decl, test, action, body);
return ParseResult{result};
}
base::Optional<ParseResult> MakeLabelBlock(ParseResultIterator* child_results) {
auto label = child_results->NextAs<std::string>();
+ if (!IsUpperCamelCase(label)) {
+ NamingConventionError("Label", label, "UpperCamelCase");
+ }
auto parameters = child_results->NextAs<ParameterList>();
auto body = child_results->NextAs<Statement*>();
LabelBlock* result =
@@ -798,6 +899,9 @@ base::Optional<ParseResult> MakeConditionalExpression(
base::Optional<ParseResult> MakeLabelAndTypes(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<std::string>();
+ if (!IsUpperCamelCase(name)) {
+ NamingConventionError("Label", name, "UpperCamelCase");
+ }
auto types = child_results->NextAs<std::vector<TypeExpression*>>();
return ParseResult{LabelAndTypes{std::move(name), std::move(types)}};
}
@@ -963,10 +1067,10 @@ struct TorqueGrammar : Grammar {
Sequence({Token("labels"),
NonemptyList<LabelAndTypes>(&labelParameter, Token(","))}))};
- // Result: std::vector<std::string>
- Symbol* optionalOtherwise{TryOrDefault<std::vector<std::string>>(
+ // Result: std::vector<Statement*>
+ Symbol* optionalOtherwise{TryOrDefault<std::vector<Statement*>>(
Sequence({Token("otherwise"),
- NonemptyList<std::string>(&identifier, Token(","))}))};
+ NonemptyList<Statement*>(&atomarStatement, Token(","))}))};
// Result: NameAndTypeExpression
Symbol nameAndType = {
@@ -987,7 +1091,7 @@ struct TorqueGrammar : Grammar {
MakeParameterListFromNameAndTypeList<true>)};
// Result: std::string
- Symbol* OneOf(std::vector<std::string> alternatives) {
+ Symbol* OneOf(const std::vector<std::string>& alternatives) {
Symbol* result = NewSymbol();
for (const std::string& s : alternatives) {
result->AddRule(Rule({Token(s)}, YieldMatchedInput));
@@ -1147,28 +1251,27 @@ struct TorqueGrammar : Grammar {
expression},
MakeVarDeclarationStatement)};
- // Disallow ambiguous dangling else by only allowing an {atomarStatement} as
- // a then-clause. Result: Statement*
+ // Result: Statement*
Symbol atomarStatement = {
- Rule({&block}),
- Rule({expression, Token(";")}, MakeExpressionStatement),
- Rule({Token("return"), Optional<Expression*>(expression), Token(";")},
+ Rule({expression}, MakeExpressionStatement),
+ Rule({Token("return"), Optional<Expression*>(expression)},
MakeReturnStatement),
- Rule({Token("tail"), &callExpression, Token(";")}, MakeTailCallStatement),
- Rule({Token("break"), Token(";")}, MakeBreakStatement),
- Rule({Token("continue"), Token(";")}, MakeContinueStatement),
+ Rule({Token("tail"), &callExpression}, MakeTailCallStatement),
+ Rule({Token("break")}, MakeBreakStatement),
+ Rule({Token("continue")}, MakeContinueStatement),
Rule({Token("goto"), &identifier,
- TryOrDefault<std::vector<Expression*>>(&argumentList), Token(";")},
+ TryOrDefault<std::vector<Expression*>>(&argumentList)},
MakeGotoStatement),
- Rule({OneOf({"debug", "unreachable"}), Token(";")}, MakeDebugStatement)};
+ Rule({OneOf({"debug", "unreachable"})}, MakeDebugStatement)};
// Result: Statement*
Symbol statement = {
- Rule({&atomarStatement}),
+ Rule({&block}),
+ Rule({&atomarStatement, Token(";")}),
Rule({&varDeclaration, Token(";")}),
Rule({&varDeclarationWithInitialization, Token(";")}),
Rule({Token("if"), CheckIf(Token("constexpr")), Token("("), expression,
- Token(")"), &atomarStatement,
+ Token(")"), &statement,
Optional<Statement*>(Sequence({Token("else"), &statement}))},
MakeIfStatement),
Rule(
@@ -1179,28 +1282,26 @@ struct TorqueGrammar : Grammar {
},
MakeTypeswitchStatement),
Rule({Token("try"), &block, NonemptyList<LabelBlock*>(&labelBlock)},
- MakeTryLabelStatement),
+ MakeTryLabelExpression),
Rule({OneOf({"assert", "check"}), Token("("), &expressionWithSource,
Token(")"), Token(";")},
MakeAssertStatement),
- Rule({Token("while"), Token("("), expression, Token(")"),
- &atomarStatement},
+ Rule({Token("while"), Token("("), expression, Token(")"), &statement},
MakeWhileStatement),
Rule({Token("for"), Token("("), &varDeclaration, Token("of"), expression,
- Optional<RangeExpression>(&rangeSpecifier), Token(")"),
- &atomarStatement},
+ Optional<RangeExpression>(&rangeSpecifier), Token(")"), &statement},
MakeForOfLoopStatement),
Rule({Token("for"), Token("("),
Optional<Statement*>(&varDeclarationWithInitialization), Token(";"),
Optional<Expression*>(expression), Token(";"),
- Optional<Expression*>(expression), Token(")"), &atomarStatement},
+ Optional<Expression*>(expression), Token(")"), &statement},
MakeForLoopStatement)};
// Result: TypeswitchCase
Symbol typeswitchCase = {
Rule({Token("case"), Token("("),
Optional<std::string>(Sequence({&identifier, Token(":")})), &type,
- Token(")"), &block},
+ Token(")"), Token(":"), &block},
MakeTypeswitchCase)};
// Result: base::Optional<Statement*>
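
Note: MakeCall turns every non-identifier otherwise clause into a fresh _labelN block and then wraps the call in one TryLabelExpression per temporary label, innermost first. A toy string model of that wrapping (plain strings instead of the real Expression*/LabelBlock* nodes) makes the resulting nesting order easy to see:

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy stand-ins: strings instead of AST nodes.
    std::string MakeCallSketch(const std::string& callee,
                               const std::vector<std::string>& temp_labels) {
      std::string result = callee + "(...)";
      // Same loop shape as MakeCall: each temporary label wraps the
      // expression built so far, so _label0 ends up innermost.
      for (const std::string& label : temp_labels) {
        result = "try-label(" + result + ", " + label + ")";
      }
      return result;
    }

    int main() {
      assert(MakeCallSketch("Foo", {"_label0", "_label1"}) ==
             "try-label(try-label(Foo(...), _label0), _label1)");
    }
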
diff --git a/deps/v8/src/torque/torque.cc b/deps/v8/src/torque/torque.cc
index 8fcd0be6eb..fd47251d59 100644
--- a/deps/v8/src/torque/torque.cc
+++ b/deps/v8/src/torque/torque.cc
@@ -26,6 +26,8 @@ int WrappedMain(int argc, const char** argv) {
CurrentSourceFile::Scope unknown_sourcefile_scope(
SourceFileMap::AddSource("<unknown>"));
CurrentAst::Scope ast_scope;
+ LintErrorStatus::Scope lint_error_status_scope;
+
for (int i = 1; i < argc; ++i) {
// Check for options
if (!strcmp("-o", argv[i])) {
@@ -76,6 +78,9 @@ int WrappedMain(int argc, const char** argv) {
visitor.GenerateImplementation(output_directory, module.second.get());
}
}
+
+ if (LintErrorStatus::HasLintErrors()) std::abort();
+
return 0;
}
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 261f085edb..4f009a8f32 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -255,7 +255,7 @@ bool Signature::HasSameTypesAs(const Signature& other) const {
return false;
}
size_t i = 0;
- for (auto l : labels) {
+ for (const auto& l : labels) {
if (l.types != other.labels[i++].types) {
return false;
}
@@ -293,28 +293,66 @@ bool operator<(const Type& a, const Type& b) {
return a.MangledName() < b.MangledName();
}
-VisitResult::VisitResult(const Type* type, const Value* declarable)
- : type_(type), value_(), declarable_(declarable) {}
-
-std::string VisitResult::LValue() const {
- return std::string("*") + (declarable_ ? (*declarable_)->value() : value_);
+VisitResult ProjectStructField(VisitResult structure,
+ const std::string& fieldname) {
+ DCHECK(structure.IsOnStack());
+ BottomOffset begin = structure.stack_range().begin();
+ const StructType* type = StructType::cast(structure.type());
+ for (auto& field : type->fields()) {
+ BottomOffset end = begin + LoweredSlotCount(field.type);
+ if (field.name == fieldname) {
+ return VisitResult(field.type, StackRange{begin, end});
+ }
+ begin = end;
+ }
+ UNREACHABLE();
}
-std::string VisitResult::RValue() const {
- std::string result;
- if (declarable()) {
- auto value = *declarable();
- if (value->IsVariable() && !Variable::cast(value)->IsDefined()) {
- std::stringstream s;
- s << "\"" << value->name() << "\" is used before it is defined";
- ReportError(s.str());
+namespace {
+void AppendLoweredTypes(const Type* type, std::vector<const Type*>* result) {
+ DCHECK_NE(type, TypeOracle::GetNeverType());
+ if (type->IsConstexpr()) return;
+ if (type == TypeOracle::GetVoidType()) return;
+ if (auto* s = StructType::DynamicCast(type)) {
+ for (const NameAndType& field : s->fields()) {
+ AppendLoweredTypes(field.type, result);
}
- result = value->RValue();
} else {
- result = value_;
+ result->push_back(type);
+ }
+}
+} // namespace
+
+TypeVector LowerType(const Type* type) {
+ TypeVector result;
+ AppendLoweredTypes(type, &result);
+ return result;
+}
+
+size_t LoweredSlotCount(const Type* type) { return LowerType(type).size(); }
+
+TypeVector LowerParameterTypes(const TypeVector& parameters) {
+ std::vector<const Type*> result;
+ for (const Type* t : parameters) {
+ AppendLoweredTypes(t, &result);
+ }
+ return result;
+}
+
+TypeVector LowerParameterTypes(const ParameterTypes& parameter_types,
+ size_t arg_count) {
+ std::vector<const Type*> result = LowerParameterTypes(parameter_types.types);
+ for (size_t i = parameter_types.types.size(); i < arg_count; ++i) {
+ DCHECK(parameter_types.var_args);
+ AppendLoweredTypes(TypeOracle::GetObjectType(), &result);
}
- return "implicit_cast<" + type()->GetGeneratedTypeName() + ">(" + result +
- ")";
+ return result;
+}
+
+VisitResult VisitResult::NeverResult() {
+ VisitResult result;
+ result.type_ = TypeOracle::GetNeverType();
+ return result;
}
} // namespace torque
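
Note: LowerType flattens a type into the machine-level slots it occupies on the stack: struct fields are expanded recursively, while constexpr and void types contribute no slots. A self-contained sketch of that recursion over a toy type model (not V8's Type hierarchy):

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy type model: a type is either a primitive name or a struct of fields.
    struct ToyType {
      std::string name;              // non-empty for primitives
      std::vector<ToyType> fields;   // non-empty for structs
    };

    void AppendLoweredSketch(const ToyType& type, std::vector<std::string>* out) {
      if (type.name == "constexpr" || type.name == "void") return;  // no slots
      if (!type.fields.empty()) {
        for (const ToyType& field : type.fields) AppendLoweredSketch(field, out);
      } else {
        out->push_back(type.name);
      }
    }

    int main() {
      // struct { a: intptr; b: struct { x: float64; y: bool } }
      ToyType inner{"", {{"float64", {}}, {"bool", {}}}};
      ToyType outer{"", {{"intptr", {}}, inner}};
      std::vector<std::string> slots;
      AppendLoweredSketch(outer, &slots);
      assert((slots == std::vector<std::string>{"intptr", "float64", "bool"}));
      // The analogue of LoweredSlotCount would report 3 for this type.
    }

This is also what ProjectStructField relies on: it walks the struct's fields, advancing a BottomOffset by each field's lowered slot count until it reaches the requested field's range.
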
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index 24acaea5c7..e94413e4c9 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -44,7 +44,7 @@ class TypeBase {
kUnionType,
kStructType
};
- virtual ~TypeBase() {}
+ virtual ~TypeBase() = default;
bool IsAbstractType() const { return kind() == Kind::kAbstractType; }
bool IsFunctionPointerType() const {
return kind() == Kind::kFunctionPointerType;
@@ -344,22 +344,35 @@ inline std::ostream& operator<<(std::ostream& os, const Type& t) {
class VisitResult {
public:
- VisitResult() {}
- VisitResult(const Type* type, const std::string& value)
- : type_(type), value_(value), declarable_{} {}
- VisitResult(const Type* type, const Value* declarable);
+ VisitResult() = default;
+ VisitResult(const Type* type, const std::string& constexpr_value)
+ : type_(type), constexpr_value_(constexpr_value) {
+ DCHECK(type->IsConstexpr());
+ }
+ static VisitResult NeverResult();
+ VisitResult(const Type* type, StackRange stack_range)
+ : type_(type), stack_range_(stack_range) {
+ DCHECK(!type->IsConstexpr());
+ }
const Type* type() const { return type_; }
- base::Optional<const Value*> declarable() const { return declarable_; }
- std::string LValue() const;
- std::string RValue() const;
+ const std::string& constexpr_value() const { return *constexpr_value_; }
+ const StackRange& stack_range() const { return *stack_range_; }
void SetType(const Type* new_type) { type_ = new_type; }
+ bool IsOnStack() const { return stack_range_ != base::nullopt; }
+ bool operator==(const VisitResult& other) const {
+ return type_ == other.type_ && constexpr_value_ == other.constexpr_value_ &&
+ stack_range_ == other.stack_range_;
+ }
private:
const Type* type_ = nullptr;
- std::string value_;
- base::Optional<const Value*> declarable_;
+ base::Optional<std::string> constexpr_value_;
+ base::Optional<StackRange> stack_range_;
};
+VisitResult ProjectStructField(VisitResult structure,
+ const std::string& fieldname);
+
class VisitResultVector : public std::vector<VisitResult> {
public:
VisitResultVector() : std::vector<VisitResult>() {}
@@ -420,6 +433,12 @@ bool IsAssignableFrom(const Type* to, const Type* from);
bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
const std::vector<Label*>& labels);
+TypeVector LowerType(const Type* type);
+size_t LoweredSlotCount(const Type* type);
+TypeVector LowerParameterTypes(const TypeVector& parameters);
+TypeVector LowerParameterTypes(const ParameterTypes& parameter_types,
+ size_t vararg_count = 0);
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index fb3f66ab02..b39ea288e0 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -7,6 +7,7 @@
#include <iostream>
#include <string>
+#include "src/base/logging.h"
#include "src/torque/ast.h"
#include "src/torque/utils.h"
@@ -76,9 +77,89 @@ std::string CurrentPositionAsString() {
return PositionAsString(CurrentSourcePosition::Get());
}
-[[noreturn]] void ReportError(const std::string& error) {
+DEFINE_CONTEXTUAL_VARIABLE(LintErrorStatus)
+
+[[noreturn]] void ReportErrorString(const std::string& error) {
std::cerr << CurrentPositionAsString() << ": Torque error: " << error << "\n";
- std::abort();
+ v8::base::OS::Abort();
+}
+
+void LintError(const std::string& error) {
+ LintErrorStatus::SetLintError();
+ std::cerr << CurrentPositionAsString() << ": Lint error: " << error << "\n";
+}
+
+void NamingConventionError(const std::string& type, const std::string& name,
+ const std::string& convention) {
+ std::stringstream sstream;
+ sstream << type << " \"" << name << "\" doesn't follow \"" << convention
+ << "\" naming convention.";
+ LintError(sstream.str());
+}
+
+namespace {
+
+bool ContainsUnderscore(const std::string& s) {
+ if (s.empty()) return false;
+ return s.find("_") != std::string::npos;
+}
+
+bool ContainsUpperCase(const std::string& s) {
+ if (s.empty()) return false;
+ return std::any_of(s.begin(), s.end(), [](char c) { return isupper(c); });
+}
+
+// Torque has some module constants that are used like language-level
+// keywords, e.g.: 'True', 'Undefined', etc.
+// These do not need to follow the default naming convention for constants.
+bool IsKeywordLikeName(const std::string& s) {
+ static const std::vector<std::string> keyword_like_constants{
+ "True", "False", "Hole", "Null", "Undefined"};
+
+ return std::find(keyword_like_constants.begin(), keyword_like_constants.end(),
+ s) != keyword_like_constants.end();
+}
+
+// Untagged/MachineTypes like 'int32', 'intptr' etc. follow an 'all-lowercase'
+// naming convention and are thus exempt from the normal type convention.
+bool IsMachineType(const std::string& s) {
+ static const std::vector<std::string> machine_types{
+ "void", "never", "int32", "uint32", "int64", "intptr",
+ "uintptr", "float32", "float64", "bool", "string", "int31"};
+
+ return std::find(machine_types.begin(), machine_types.end(), s) !=
+ machine_types.end();
+}
+
+} // namespace
+
+bool IsLowerCamelCase(const std::string& s) {
+ if (s.empty()) return false;
+ return islower(s[0]) && !ContainsUnderscore(s);
+}
+
+bool IsUpperCamelCase(const std::string& s) {
+ if (s.empty()) return false;
+ return isupper(s[0]) && !ContainsUnderscore(s);
+}
+
+bool IsSnakeCase(const std::string& s) {
+ if (s.empty()) return false;
+ return !ContainsUpperCase(s);
+}
+
+bool IsValidModuleConstName(const std::string& s) {
+ if (s.empty()) return false;
+ if (IsKeywordLikeName(s)) return true;
+
+ return s[0] == 'k' && IsUpperCamelCase(s.substr(1));
+}
+
+bool IsValidTypeName(const std::string& s) {
+ if (s.empty()) return false;
+ if (IsMachineType(s)) return true;
+
+ return IsUpperCamelCase(s);
}
std::string CamelifyString(const std::string& underscore_string) {
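
Note: the predicates above are deliberately loose: the camel-case checks only look at the first character and the absence of underscores, and IsSnakeCase only forbids upper-case letters. A few expectations that follow directly from the definitions, usable as a quick sanity test when built against torque/utils.cc in a V8 checkout:

    #include <cassert>
    #include "src/torque/utils.h"  // assumes a V8 checkout's include paths

    using namespace v8::internal::torque;

    int main() {
      assert(IsLowerCamelCase("fooBar"));
      assert(!IsLowerCamelCase("FooBar"));      // starts upper-case
      assert(!IsUpperCamelCase("Foo_Bar"));     // contains an underscore
      assert(IsSnakeCase("foo_bar"));
      assert(IsSnakeCase("foobar"));            // underscores are not required
      assert(IsValidModuleConstName("kFooBar"));
      assert(IsValidModuleConstName("True"));   // keyword-like constants pass
      assert(IsValidTypeName("int32"));         // machine types are exempt
    }
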
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index 0612048589..16e3b03ed4 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -10,6 +10,7 @@
#include <vector>
#include "src/base/functional.h"
+#include "src/torque/contextual.h"
namespace v8 {
namespace internal {
@@ -20,7 +21,37 @@ typedef std::vector<std::string> NameVector;
std::string StringLiteralUnquote(const std::string& s);
std::string StringLiteralQuote(const std::string& s);
-[[noreturn]] void ReportError(const std::string& error);
+class LintErrorStatus : public ContextualClass<LintErrorStatus> {
+ public:
+ LintErrorStatus() : has_lint_errors_(false) {}
+
+ static bool HasLintErrors() { return Get().has_lint_errors_; }
+ static void SetLintError() { Get().has_lint_errors_ = true; }
+
+ private:
+ bool has_lint_errors_;
+};
+
+void LintError(const std::string& error);
+
+// Prints a LintError with the format "{type} '{name}' doesn't follow
+// '{convention}' naming convention".
+void NamingConventionError(const std::string& type, const std::string& name,
+ const std::string& convention);
+
+bool IsLowerCamelCase(const std::string& s);
+bool IsUpperCamelCase(const std::string& s);
+bool IsSnakeCase(const std::string& s);
+bool IsValidModuleConstName(const std::string& s);
+bool IsValidTypeName(const std::string& s);
+
+[[noreturn]] void ReportErrorString(const std::string& error);
+template <class... Args>
+[[noreturn]] void ReportError(Args&&... args) {
+ std::stringstream s;
+ USE((s << std::forward<Args>(args))...);
+ ReportErrorString(s.str());
+}
std::string CamelifyString(const std::string& underscore_string);
std::string DashifyString(const std::string& underscore_string);
@@ -82,6 +113,149 @@ void PrintCommaSeparatedList(std::ostream& os, const T& list) {
}
}
+struct BottomOffset {
+ size_t offset;
+ BottomOffset& operator++() {
+ ++offset;
+ return *this;
+ }
+ BottomOffset operator+(size_t x) const { return BottomOffset{offset + x}; }
+ BottomOffset operator-(size_t x) const {
+ DCHECK_LE(x, offset);
+ return BottomOffset{offset - x};
+ }
+ bool operator<(const BottomOffset& other) const {
+ return offset < other.offset;
+ }
+ bool operator<=(const BottomOffset& other) const {
+ return offset <= other.offset;
+ }
+ bool operator==(const BottomOffset& other) const {
+ return offset == other.offset;
+ }
+ bool operator!=(const BottomOffset& other) const {
+ return offset != other.offset;
+ }
+};
+
+inline std::ostream& operator<<(std::ostream& out, BottomOffset from_bottom) {
+ return out << "BottomOffset{" << from_bottom.offset << "}";
+}
+
+// An iterator-style range of stack slots.
+class StackRange {
+ public:
+ StackRange(BottomOffset begin, BottomOffset end) : begin_(begin), end_(end) {
+ DCHECK_LE(begin_, end_);
+ }
+
+ bool operator==(const StackRange& other) const {
+ return begin_ == other.begin_ && end_ == other.end_;
+ }
+
+ void Extend(StackRange adjacent) {
+ DCHECK_EQ(end_, adjacent.begin_);
+ end_ = adjacent.end_;
+ }
+
+ size_t Size() const { return end_.offset - begin_.offset; }
+ BottomOffset begin() const { return begin_; }
+ BottomOffset end() const { return end_; }
+
+ private:
+ BottomOffset begin_;
+ BottomOffset end_;
+};
+
+template <class T>
+class Stack {
+ public:
+ using value_type = T;
+ Stack() = default;
+ Stack(std::initializer_list<T> initializer)
+ : Stack(std::vector<T>(initializer)) {}
+ explicit Stack(std::vector<T> v) : elements_(std::move(v)) {}
+ size_t Size() const { return elements_.size(); }
+ const T& Peek(BottomOffset from_bottom) const {
+ return elements_.at(from_bottom.offset);
+ }
+ void Poke(BottomOffset from_bottom, T x) {
+ elements_.at(from_bottom.offset) = std::move(x);
+ }
+ void Push(T x) { elements_.push_back(std::move(x)); }
+ StackRange TopRange(size_t slot_count) const {
+ DCHECK_GE(Size(), slot_count);
+ return StackRange{AboveTop() - slot_count, AboveTop()};
+ }
+ StackRange PushMany(const std::vector<T>& v) {
+ for (const T& x : v) {
+ Push(x);
+ }
+ return TopRange(v.size());
+ }
+ const T& Top() const { return Peek(AboveTop() - 1); }
+ T Pop() {
+ T result = std::move(elements_.back());
+ elements_.pop_back();
+ return result;
+ }
+ std::vector<T> PopMany(size_t count) {
+ DCHECK_GE(elements_.size(), count);
+ std::vector<T> result;
+ result.reserve(count);
+ for (auto it = elements_.end() - count; it != elements_.end(); ++it) {
+ result.push_back(std::move(*it));
+ }
+ elements_.resize(elements_.size() - count);
+ return result;
+ }
+ // The invalid offset above the top element. This is useful for StackRange.
+ BottomOffset AboveTop() const { return BottomOffset{Size()}; }
+ // Delete the slots in {range}, moving higher slots to fill the gap.
+ void DeleteRange(StackRange range) {
+ DCHECK_LE(range.end(), AboveTop());
+ for (BottomOffset i = range.begin();
+ i < std::min(range.end(), AboveTop() - range.Size()); ++i) {
+ elements_[i.offset] = std::move(elements_[i.offset + range.Size()]);
+ }
+ elements_.resize(elements_.size() - range.Size());
+ }
+
+ bool operator==(const Stack& other) const {
+ return elements_ == other.elements_;
+ }
+ bool operator!=(const Stack& other) const {
+ return elements_ != other.elements_;
+ }
+
+ T* begin() { return elements_.data(); }
+ T* end() { return begin() + elements_.size(); }
+ const T* begin() const { return elements_.data(); }
+ const T* end() const { return begin() + elements_.size(); }
+
+ private:
+ std::vector<T> elements_;
+};
+
+template <class T>
+T* CheckNotNull(T* x) {
+ CHECK_NOT_NULL(x);
+ return x;
+}
+
+class ToString {
+ public:
+ template <class T>
+ ToString& operator<<(T&& x) {
+ s_ << std::forward<T>(x);
+ return *this;
+ }
+ operator std::string() { return s_.str(); }
+
+ private:
+ std::stringstream s_;
+};
+
} // namespace torque
} // namespace internal
} // namespace v8
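
Note: a usage sketch for the new Stack/StackRange/BottomOffset helpers, grounded in the interface above (header-only, but assumes a V8 checkout for the DCHECK machinery):

    #include <cassert>
    #include "src/torque/utils.h"  // assumes a V8 checkout's include paths

    using namespace v8::internal::torque;

    int main() {
      Stack<int> stack({10, 20});
      StackRange pushed = stack.PushMany({30, 40, 50});  // occupies slots 2..4
      assert(pushed.begin() == BottomOffset{2});
      assert(pushed.end() == BottomOffset{5});           // one above the top
      assert(stack.Top() == 50);
      assert(stack.Peek(BottomOffset{0}) == 10);

      // Delete the middle range {20, 30}; higher slots move down to fill it.
      stack.DeleteRange(StackRange{BottomOffset{1}, BottomOffset{3}});
      assert((stack == Stack<int>({10, 40, 50})));
    }
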
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index 52b3bc3461..2f530f9279 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -50,7 +50,7 @@ enum CategoryGroupEnabledFlags {
trace_event_internal::TraceID::WithScope(scope, id)
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
- *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ TRACE_EVENT_API_LOAD_CATEGORY_GROUP_ENABLED() & \
(kEnabledForRecording_CategoryGroupEnabledFlags | \
kEnabledForEventCallback_CategoryGroupEnabledFlags)
@@ -127,6 +127,9 @@ enum CategoryGroupEnabledFlags {
#define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::Relaxed_Load(&(var))
#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
v8::base::Relaxed_Store(&(var), (value))
+#define TRACE_EVENT_API_LOAD_CATEGORY_GROUP_ENABLED() \
+ v8::base::Relaxed_Load(reinterpret_cast<const v8::base::Atomic8*>( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled)))
////////////////////////////////////////////////////////////////////////////////
@@ -175,7 +178,7 @@ enum CategoryGroupEnabledFlags {
v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId, \
v8::internal::tracing::kNoId, flags, ##__VA_ARGS__); \
} \
- } while (0)
+ } while (false)
// Implementation detail: internal macro to create static category and add begin
// event if the category is enabled. Also adds the end event when the scope
@@ -227,7 +230,7 @@ enum CategoryGroupEnabledFlags {
trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
v8::internal::tracing::kNoId, trace_event_flags, ##__VA_ARGS__); \
} \
- } while (0)
+ } while (false)
// Adds a trace event with a given timestamp.
#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \
@@ -240,7 +243,7 @@ enum CategoryGroupEnabledFlags {
v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId, \
v8::internal::tracing::kNoId, flags, timestamp, ##__VA_ARGS__); \
} \
- } while (0)
+ } while (false)
// Adds a trace event with a given id and timestamp.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
@@ -257,7 +260,7 @@ enum CategoryGroupEnabledFlags {
v8::internal::tracing::kNoId, trace_event_flags, timestamp, \
##__VA_ARGS__); \
} \
- } while (0)
+ } while (false)
// Adds a trace event with a given id, thread_id, and timestamp. This redirects
// to INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP as we presently do not care
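
Note: INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE now goes through TRACE_EVENT_API_LOAD_CATEGORY_GROUP_ENABLED, making the read of the enabled-flags byte an explicit relaxed atomic load instead of a plain dereference. A portable sketch of the same pattern with std::atomic (the flag values here are illustrative, not V8's actual constants):

    #include <atomic>
    #include <cstdint>

    // One enabled-flags byte per category group, written by the tracing
    // controller and read from hot call sites on many threads.
    static std::atomic<uint8_t> category_group_enabled{0};

    constexpr uint8_t kEnabledForRecording = 1 << 0;      // illustrative
    constexpr uint8_t kEnabledForEventCallback = 1 << 2;  // illustrative

    bool CategoryEnabledForRecordingMode() {
      // Relaxed suffices: callers only need a recent value of the byte,
      // not ordering with respect to other memory.
      uint8_t flags = category_group_enabled.load(std::memory_order_relaxed);
      return flags & (kEnabledForRecording | kEnabledForEventCallback);
    }
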
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index 86bcd66128..072e15318b 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -19,7 +19,7 @@ namespace internal {
TransitionArray* TransitionsAccessor::transitions() {
DCHECK_EQ(kFullTransitionArray, encoding());
- return TransitionArray::cast(raw_transitions_->ToStrongHeapObject());
+ return TransitionArray::cast(raw_transitions_->GetHeapObjectAssumeStrong());
}
CAST_ACCESSOR(TransitionArray)
@@ -31,7 +31,7 @@ bool TransitionArray::HasPrototypeTransitions() {
WeakFixedArray* TransitionArray::GetPrototypeTransitions() {
DCHECK(HasPrototypeTransitions()); // Callers must check first.
Object* prototype_transitions =
- Get(kPrototypeTransitionsIndex)->ToStrongHeapObject();
+ Get(kPrototypeTransitionsIndex)->GetHeapObjectAssumeStrong();
return WeakFixedArray::cast(prototype_transitions);
}
@@ -52,12 +52,13 @@ int TransitionArray::NumberOfPrototypeTransitions(
if (proto_transitions->length() == 0) return 0;
MaybeObject* raw =
proto_transitions->Get(kProtoTransitionNumberOfEntriesOffset);
- return Smi::ToInt(raw->ToSmi());
+ return Smi::ToInt(raw->cast<Smi>());
}
Name* TransitionArray::GetKey(int transition_number) {
DCHECK(transition_number < number_of_transitions());
- return Name::cast(Get(ToKeyIndex(transition_number))->ToStrongHeapObject());
+ return Name::cast(
+ Get(ToKeyIndex(transition_number))->GetHeapObjectAssumeStrong());
}
Name* TransitionsAccessor::GetKey(int transition_number) {
@@ -67,7 +68,7 @@ Name* TransitionsAccessor::GetKey(int transition_number) {
UNREACHABLE();
return nullptr;
case kWeakRef: {
- Map* map = Map::cast(raw_transitions_->ToWeakHeapObject());
+ Map* map = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
return GetSimpleTransitionKey(map);
}
case kFullTransitionArray:
@@ -100,7 +101,7 @@ PropertyDetails TransitionsAccessor::GetTargetDetails(Name* name, Map* target) {
// static
Map* TransitionsAccessor::GetTargetFromRaw(MaybeObject* raw) {
- return Map::cast(raw->ToWeakHeapObject());
+ return Map::cast(raw->GetHeapObjectAssumeWeak());
}
MaybeObject* TransitionArray::GetRawTarget(int transition_number) {
@@ -120,7 +121,7 @@ Map* TransitionsAccessor::GetTarget(int transition_number) {
UNREACHABLE();
return nullptr;
case kWeakRef:
- return Map::cast(raw_transitions_->ToWeakHeapObject());
+ return Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
case kFullTransitionArray:
return transitions()->GetTarget(transition_number);
}
@@ -129,7 +130,8 @@ Map* TransitionsAccessor::GetTarget(int transition_number) {
void TransitionArray::SetRawTarget(int transition_number, MaybeObject* value) {
DCHECK(transition_number < number_of_transitions());
- DCHECK(value->IsWeakHeapObject() && value->ToWeakHeapObject()->IsMap());
+ DCHECK(value->IsWeak());
+ DCHECK(value->GetHeapObjectAssumeWeak()->IsMap());
WeakFixedArray::Set(ToTargetIndex(transition_number), value);
}
@@ -137,7 +139,7 @@ bool TransitionArray::GetTargetIfExists(int transition_number, Isolate* isolate,
Map** target) {
MaybeObject* raw = GetRawTarget(transition_number);
HeapObject* heap_object;
- if (raw->ToStrongHeapObject(&heap_object) &&
+ if (raw->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsUndefined(isolate)) {
return false;
}
@@ -153,7 +155,7 @@ int TransitionArray::SearchName(Name* name, int* out_insertion_index) {
int TransitionArray::number_of_transitions() const {
if (length() < kFirstIndex) return 0;
- return Smi::ToInt(Get(kTransitionLengthIndex)->ToSmi());
+ return Smi::ToInt(Get(kTransitionLengthIndex)->cast<Smi>());
}
int TransitionArray::CompareKeys(Name* key1, uint32_t hash1, PropertyKind kind1,
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 2ca28d9321..6c55f53b03 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -14,12 +14,11 @@ namespace internal {
void TransitionsAccessor::Initialize() {
raw_transitions_ = map_->raw_transitions();
HeapObject* heap_object;
- if (raw_transitions_->IsSmi() ||
- raw_transitions_->IsClearedWeakHeapObject()) {
+ if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
encoding_ = kUninitialized;
- } else if (raw_transitions_->IsWeakHeapObject()) {
+ } else if (raw_transitions_->IsWeak()) {
encoding_ = kWeakRef;
- } else if (raw_transitions_->ToStrongHeapObject(&heap_object)) {
+ } else if (raw_transitions_->GetHeapObjectIfStrong(&heap_object)) {
if (heap_object->IsTransitionArray()) {
encoding_ = kFullTransitionArray;
} else {
@@ -37,7 +36,7 @@ void TransitionsAccessor::Initialize() {
Map* TransitionsAccessor::GetSimpleTransition() {
switch (encoding()) {
case kWeakRef:
- return Map::cast(raw_transitions_->ToWeakHeapObject());
+ return Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
default:
return nullptr;
}
@@ -46,7 +45,7 @@ Map* TransitionsAccessor::GetSimpleTransition() {
bool TransitionsAccessor::HasSimpleTransitionTo(Map* map) {
switch (encoding()) {
case kWeakRef:
- return raw_transitions_->ToWeakHeapObject() == map;
+ return raw_transitions_->GetHeapObjectAssumeWeak() == map;
case kPrototypeInfo:
case kUninitialized:
case kFullTransitionArray:
@@ -215,7 +214,7 @@ Map* TransitionsAccessor::SearchTransition(Name* name, PropertyKind kind,
case kUninitialized:
return nullptr;
case kWeakRef: {
- Map* map = Map::cast(raw_transitions_->ToWeakHeapObject());
+ Map* map = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
if (!IsMatchingMap(map, name, kind, attributes)) return nullptr;
return map;
}
@@ -268,7 +267,7 @@ Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
case kFullTransitionArray:
return Handle<String>::null();
case kWeakRef: {
- Map* target = Map::cast(raw_transitions_->ToWeakHeapObject());
+ Map* target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
PropertyDetails details = GetSimpleTargetDetails(target);
if (details.location() != kField) return Handle<String>::null();
DCHECK_EQ(kData, details.kind());
@@ -318,9 +317,9 @@ bool TransitionArray::CompactPrototypeTransitionArray(Isolate* isolate,
int new_number_of_transitions = 0;
for (int i = 0; i < number_of_transitions; i++) {
MaybeObject* target = array->Get(header + i);
- DCHECK(target->IsClearedWeakHeapObject() ||
- (target->IsWeakHeapObject() && target->ToWeakHeapObject()->IsMap()));
- if (!target->IsClearedWeakHeapObject()) {
+ DCHECK(target->IsCleared() ||
+ (target->IsWeak() && target->GetHeapObject()->IsMap()));
+ if (!target->IsCleared()) {
if (new_number_of_transitions != i) {
array->Set(header + new_number_of_transitions, target);
}
@@ -399,9 +398,10 @@ Handle<Map> TransitionsAccessor::GetPrototypeTransition(
for (int i = 0; i < length; i++) {
MaybeObject* target =
cache->Get(TransitionArray::kProtoTransitionHeaderSize + i);
- DCHECK(target->IsClearedWeakHeapObject() || target->IsWeakHeapObject());
- if (!target->IsClearedWeakHeapObject()) {
- Map* map = Map::cast(target->ToWeakHeapObject());
+ DCHECK(target->IsWeakOrCleared());
+ HeapObject* heap_object;
+ if (target->GetHeapObjectIfWeak(&heap_object)) {
+ Map* map = Map::cast(heap_object);
if (map->prototype() == *prototype) {
return handle(map, isolate_);
}
@@ -452,9 +452,9 @@ void TransitionsAccessor::ReplaceTransitions(MaybeObject* new_transitions) {
if (encoding() == kFullTransitionArray) {
TransitionArray* old_transitions = transitions();
#if DEBUG
- CheckNewTransitionsAreConsistent(old_transitions,
- new_transitions->ToStrongHeapObject());
- DCHECK(old_transitions != new_transitions->ToStrongHeapObject());
+ CheckNewTransitionsAreConsistent(
+ old_transitions, new_transitions->GetHeapObjectAssumeStrong());
+ DCHECK(old_transitions != new_transitions->GetHeapObjectAssumeStrong());
#endif
// Transition arrays are not shared. When one is replaced, it should not
// keep referenced objects alive, so we zap it.
@@ -499,7 +499,8 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
case kUninitialized:
break;
case kWeakRef: {
- Map* simple_target = Map::cast(raw_transitions_->ToWeakHeapObject());
+ Map* simple_target =
+ Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
TransitionsAccessor(isolate_, simple_target, no_gc)
.TraverseTransitionTreeInternal(callback, data, no_gc);
break;
@@ -511,12 +512,13 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
for (int i = 0; i < length; ++i) {
int index = TransitionArray::kProtoTransitionHeaderSize + i;
MaybeObject* target = proto_trans->Get(index);
- DCHECK(target->IsClearedWeakHeapObject() ||
- target->IsWeakHeapObject());
- if (target->IsClearedWeakHeapObject()) continue;
- TransitionsAccessor(isolate_, Map::cast(target->ToWeakHeapObject()),
- no_gc)
- .TraverseTransitionTreeInternal(callback, data, no_gc);
+ HeapObject* heap_object;
+ if (target->GetHeapObjectIfWeak(&heap_object)) {
+ TransitionsAccessor(isolate_, Map::cast(heap_object), no_gc)
+ .TraverseTransitionTreeInternal(callback, data, no_gc);
+ } else {
+ DCHECK(target->IsCleared());
+ }
}
}
for (int i = 0; i < transitions()->number_of_transitions(); ++i) {
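
Note: the renamed MaybeObject accessors used throughout these hunks (GetHeapObjectIfWeak, GetHeapObjectAssumeWeak, IsCleared, IsWeakOrCleared) encode a tri-state contract: a slot can hold a strong reference, a live weak reference, or a cleared weak reference, and callers must handle each explicitly. A standalone mock of the weak/cleared half of that contract (not V8's tagged-pointer implementation):

    #include <cassert>

    struct HeapObject {};

    // Minimal stand-in for MaybeObject's weak/cleared states.
    class WeakSlot {
     public:
      static WeakSlot Weak(HeapObject* o) { return WeakSlot{o}; }
      static WeakSlot Cleared() { return WeakSlot{nullptr}; }

      bool IsCleared() const { return object_ == nullptr; }
      bool GetHeapObjectIfWeak(HeapObject** out) const {
        if (IsCleared()) return false;
        *out = object_;
        return true;
      }

     private:
      explicit WeakSlot(HeapObject* o) : object_(o) {}
      HeapObject* object_;
    };

    int main() {
      HeapObject map;
      WeakSlot live = WeakSlot::Weak(&map);
      WeakSlot dead = WeakSlot::Cleared();

      HeapObject* obj = nullptr;
      if (live.GetHeapObjectIfWeak(&obj)) {
        assert(obj == &map);     // the traversal case in the diff
      }
      assert(dead.IsCleared());  // the DCHECK'd else-branch
    }
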
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index c25de9c1e9..0e9dbf248c 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -23,9 +23,9 @@ namespace trap_handler {
// TODO(eholk): Support trap handlers on other platforms.
#if V8_TARGET_ARCH_X64 && V8_OS_LINUX && !V8_OS_ANDROID
-#define V8_TRAP_HANDLER_SUPPORTED 1
+#define V8_TRAP_HANDLER_SUPPORTED true
#else
-#define V8_TRAP_HANDLER_SUPPORTED 0
+#define V8_TRAP_HANDLER_SUPPORTED false
#endif
struct ProtectedInstructionData {
@@ -100,12 +100,6 @@ inline void ClearThreadInWasm() {
}
}
-class ThreadInWasmScope {
- public:
- ThreadInWasmScope() { SetThreadInWasm(); }
- ~ThreadInWasmScope() { ClearThreadInWasm(); }
-};
-
bool RegisterDefaultTrapHandler();
V8_EXPORT_PRIVATE void RestoreOriginalSignalHandler();
diff --git a/deps/v8/src/turbo-assembler.cc b/deps/v8/src/turbo-assembler.cc
index d6134806fa..4bb09047bb 100644
--- a/deps/v8/src/turbo-assembler.cc
+++ b/deps/v8/src/turbo-assembler.cc
@@ -32,7 +32,7 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
// check if any of the fast paths can be applied.
int builtin_index;
- Heap::RootListIndex root_index;
+ RootIndex root_index;
if (isolate()->heap()->IsRootHandle(object, &root_index)) {
// Roots are loaded relative to the root register.
LoadRoot(destination, root_index);
@@ -84,8 +84,9 @@ void TurboAssemblerBase::IndirectLoadExternalReference(
}
// static
-int32_t TurboAssemblerBase::RootRegisterOffset(Heap::RootListIndex root_index) {
- return (root_index << kPointerSizeLog2) - kRootRegisterBias;
+int32_t TurboAssemblerBase::RootRegisterOffset(RootIndex root_index) {
+ return (static_cast<int32_t>(root_index) << kPointerSizeLog2) -
+ kRootRegisterBias;
}
// static
@@ -105,10 +106,8 @@ intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference(
// static
bool TurboAssemblerBase::IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference) {
- Address start = reinterpret_cast<Address>(isolate);
- Address end = isolate->heap()->root_register_addressable_end();
Address address = reference.address();
- return start <= address && address < end;
+ return isolate->root_register_addressable_region().contains(address);
}
// static
diff --git a/deps/v8/src/turbo-assembler.h b/deps/v8/src/turbo-assembler.h
index 44fbbca64c..70048962dd 100644
--- a/deps/v8/src/turbo-assembler.h
+++ b/deps/v8/src/turbo-assembler.h
@@ -16,7 +16,10 @@ namespace internal {
// platform-independent bits.
class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
public:
- Isolate* isolate() const { return isolate_; }
+ Isolate* isolate() const {
+ DCHECK(!options().v8_agnostic_code);
+ return isolate_;
+ }
Handle<HeapObject> CodeObject() const {
DCHECK(!code_object_.is_null());
@@ -49,9 +52,9 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
intptr_t offset) = 0;
virtual void LoadRootRelative(Register destination, int32_t offset) = 0;
- virtual void LoadRoot(Register destination, Heap::RootListIndex index) = 0;
+ virtual void LoadRoot(Register destination, RootIndex index) = 0;
- static int32_t RootRegisterOffset(Heap::RootListIndex root_index);
+ static int32_t RootRegisterOffset(RootIndex root_index);
static int32_t RootRegisterOffsetForExternalReferenceIndex(
int reference_index);
@@ -61,11 +64,16 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
Isolate* isolate, const ExternalReference& reference);
// An address is addressable through kRootRegister if it is located within
- // [isolate, roots_ + root_register_addressable_end_offset[.
+ // isolate->root_register_addressable_region().
static bool IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference);
protected:
+ TurboAssemblerBase(const AssemblerOptions& options, void* buffer,
+ int buffer_size)
+ : TurboAssemblerBase(nullptr, options.EnableV8AgnosticCode(), buffer,
+ buffer_size, CodeObjectRequired::kNo) {}
+
TurboAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object);
@@ -97,7 +105,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
// Avoids emitting calls to the {Builtins::kAbort} builtin when emitting debug
// code during the lifetime of this scope object. For disabling debug code
// entirely use the {DontEmitDebugCodeScope} instead.
-class HardAbortScope BASE_EMBEDDED {
+class HardAbortScope {
public:
explicit HardAbortScope(TurboAssemblerBase* assembler)
: assembler_(assembler), old_value_(assembler->should_abort_hard()) {
diff --git a/deps/v8/src/unicode-cache.h b/deps/v8/src/unicode-cache.h
index 8f4badae8f..ddc81b738c 100644
--- a/deps/v8/src/unicode-cache.h
+++ b/deps/v8/src/unicode-cache.h
@@ -16,7 +16,7 @@ namespace internal {
// Caching predicates used by scanners.
class UnicodeCache {
public:
- UnicodeCache() {}
+ UnicodeCache() = default;
typedef unibrow::Utf8Decoder<512> Utf8Decoder;
StaticResource<Utf8Decoder>* utf8_decoder() { return &utf8_decoder_; }
diff --git a/deps/v8/src/unicode-decoder.h b/deps/v8/src/unicode-decoder.h
index ab69d0d390..c87e192ad0 100644
--- a/deps/v8/src/unicode-decoder.h
+++ b/deps/v8/src/unicode-decoder.h
@@ -81,7 +81,7 @@ class V8_EXPORT_PRIVATE Utf8DecoderBase {
template <size_t kBufferSize>
class Utf8Decoder : public Utf8DecoderBase {
public:
- inline Utf8Decoder() {}
+ inline Utf8Decoder() = default;
explicit inline Utf8Decoder(const v8::internal::Vector<const char>& stream);
inline void Reset(const v8::internal::Vector<const char>& stream);
inline size_t WriteUtf16(
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index dddf22c4c6..68e69324f9 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -28,7 +28,7 @@ const int kMaxMappingSize = 4;
template <class T, int size = 256>
class Predicate {
public:
- inline Predicate() { }
+ inline Predicate() = default;
inline bool get(uchar c);
private:
@@ -68,7 +68,7 @@ class Predicate {
template <class T, int size = 256>
class Mapping {
public:
- inline Mapping() { }
+ inline Mapping() = default;
inline int get(uchar c, uchar n, uchar* result);
private:
friend class Test;
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 052664f87f..e799e9ad85 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -345,7 +345,7 @@ static void MemMoveWrapper(void* dest, const void* src, size_t size) {
static MemMoveFunction memmove_function = &MemMoveWrapper;
// Defined in codegen-ia32.cc.
-MemMoveFunction CreateMemMoveFunction(Isolate* isolate);
+MemMoveFunction CreateMemMoveFunction();
// Copy memory area to disjoint memory area.
void MemMove(void* dest, const void* src, size_t size) {
@@ -369,46 +369,40 @@ V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
MemCopyUint16Uint8Function memcopy_uint16_uint8_function =
&MemCopyUint16Uint8Wrapper;
// Defined in codegen-arm.cc.
-MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
- MemCopyUint8Function stub);
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
- Isolate* isolate, MemCopyUint16Uint8Function stub);
+ MemCopyUint16Uint8Function stub);
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
&MemCopyUint8Wrapper;
// Defined in codegen-mips.cc.
-MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
- MemCopyUint8Function stub);
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
#endif
static bool g_memcopy_functions_initialized = false;
-void init_memcopy_functions(Isolate* isolate) {
+void init_memcopy_functions() {
if (g_memcopy_functions_initialized) return;
g_memcopy_functions_initialized = true;
#if V8_TARGET_ARCH_IA32
- MemMoveFunction generated_memmove = CreateMemMoveFunction(isolate);
+ MemMoveFunction generated_memmove = CreateMemMoveFunction();
if (generated_memmove != nullptr) {
memmove_function = generated_memmove;
}
#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
- memcopy_uint8_function =
- CreateMemCopyUint8Function(isolate, &MemCopyUint8Wrapper);
+ memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
memcopy_uint16_uint8_function =
- CreateMemCopyUint16Uint8Function(isolate, &MemCopyUint16Uint8Wrapper);
+ CreateMemCopyUint16Uint8Function(&MemCopyUint16Uint8Wrapper);
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
- memcopy_uint8_function =
- CreateMemCopyUint8Function(isolate, &MemCopyUint8Wrapper);
+ memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
#endif
}
-
+// Returns false iff d is NaN, +0, or -0.
bool DoubleToBoolean(double d) {
- // NaN, +0, and -0 should return the false object
IeeeDoubleArchType u;
-
u.d = d;
if (u.bits.exp == 2047) {
// Detect NaN for IEEE double precision floating point.
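
Note: DoubleToBoolean's bit-level test implements the ToBoolean rule for numbers: only NaN, +0, and -0 are falsy. A portable behavioral sketch of the same predicate using <cmath> (not the bit-twiddling V8 actually uses):

    #include <cassert>
    #include <cmath>
    #include <limits>

    bool DoubleToBooleanSketch(double d) {
      // d == 0 is true for both +0.0 and -0.0; NaN compares unequal to 0.
      return !std::isnan(d) && d != 0;
    }

    int main() {
      assert(!DoubleToBooleanSketch(0.0));
      assert(!DoubleToBooleanSketch(-0.0));
      assert(!DoubleToBooleanSketch(std::numeric_limits<double>::quiet_NaN()));
      assert(DoubleToBooleanSketch(1e-308));  // subnormals are truthy
      assert(DoubleToBooleanSketch(std::numeric_limits<double>::infinity()));
    }
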
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index f4669524a7..90a1227a2b 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -1,3 +1,4 @@
+
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -56,6 +57,20 @@ inline bool CStringEquals(const char* s1, const char* s2) {
return (s1 == s2) || (s1 != nullptr && s2 != nullptr && strcmp(s1, s2) == 0);
}
+// Checks if value is in range [lower_limit, higher_limit] using a single
+// branch.
+template <typename T, typename U>
+inline bool IsInRange(T value, U lower_limit, U higher_limit) {
+ DCHECK_LE(lower_limit, higher_limit);
+ STATIC_ASSERT(sizeof(U) <= sizeof(T));
+ typedef typename std::make_unsigned<T>::type unsigned_T;
+ // Use static_cast to support enum classes.
+ return static_cast<unsigned_T>(static_cast<unsigned_T>(value) -
+ static_cast<unsigned_T>(lower_limit)) <=
+ static_cast<unsigned_T>(static_cast<unsigned_T>(higher_limit) -
+ static_cast<unsigned_T>(lower_limit));
+}
+
// X must be a power of 2. Returns the number of trailing zeros.
template <typename T,
typename = typename std::enable_if<std::is_integral<T>::value>::type>
@@ -152,13 +167,11 @@ inline bool IsAligned(T value, U alignment) {
return (value & (alignment - 1)) == 0;
}
-
-// Returns true if (addr + offset) is aligned.
+// Returns true if {addr + offset} is aligned.
inline bool IsAddressAligned(Address addr,
intptr_t alignment,
int offset = 0) {
- intptr_t offs = OffsetFrom(addr + offset);
- return IsAligned(offs, alignment);
+ return IsAligned(addr + offset, alignment);
}
@@ -516,7 +529,7 @@ inline uint32_t ComputeAddressHash(Address address) {
// Generated memcpy/memmove
// Initializes the codegen support that depends on CPU features.
-void init_memcopy_functions(Isolate* isolate);
+void init_memcopy_functions();
#if defined(V8_TARGET_ARCH_IA32)
// Limit below which the extra overhead of the MemCopy function is likely
@@ -1609,108 +1622,6 @@ static inline V ByteReverse(V value) {
}
}
-// Represents a linked list that threads through the nodes in the linked list.
-// Entries in the list are pointers to nodes. The nodes need to have a T**
-// next() method that returns the location where the next value is stored.
-template <typename T>
-class ThreadedList final {
- public:
- ThreadedList() : head_(nullptr), tail_(&head_) {}
- void Add(T* v) {
- DCHECK_NULL(*tail_);
- DCHECK_NULL(*v->next());
- *tail_ = v;
- tail_ = v->next();
- }
-
- void Clear() {
- head_ = nullptr;
- tail_ = &head_;
- }
-
- class Iterator final {
- public:
- Iterator& operator++() {
- entry_ = (*entry_)->next();
- return *this;
- }
- bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
- T* operator*() { return *entry_; }
- T* operator->() { return *entry_; }
- Iterator& operator=(T* entry) {
- T* next = *(*entry_)->next();
- *entry->next() = next;
- *entry_ = entry;
- return *this;
- }
-
- private:
- explicit Iterator(T** entry) : entry_(entry) {}
-
- T** entry_;
-
- friend class ThreadedList;
- };
-
- class ConstIterator final {
- public:
- ConstIterator& operator++() {
- entry_ = (*entry_)->next();
- return *this;
- }
- bool operator!=(const ConstIterator& other) {
- return entry_ != other.entry_;
- }
- const T* operator*() const { return *entry_; }
-
- private:
- explicit ConstIterator(T* const* entry) : entry_(entry) {}
-
- T* const* entry_;
-
- friend class ThreadedList;
- };
-
- Iterator begin() { return Iterator(&head_); }
- Iterator end() { return Iterator(tail_); }
-
- ConstIterator begin() const { return ConstIterator(&head_); }
- ConstIterator end() const { return ConstIterator(tail_); }
-
- void Rewind(Iterator reset_point) {
- tail_ = reset_point.entry_;
- *tail_ = nullptr;
- }
-
- void MoveTail(ThreadedList<T>* parent, Iterator location) {
- if (parent->end() != location) {
- DCHECK_NULL(*tail_);
- *tail_ = *location;
- tail_ = parent->tail_;
- parent->Rewind(location);
- }
- }
-
- bool is_empty() const { return head_ == nullptr; }
-
- // Slow. For testing purposes.
- int LengthForTest() {
- int result = 0;
- for (Iterator t = begin(); t != end(); ++t) ++result;
- return result;
- }
- T* AtForTest(int i) {
- Iterator t = begin();
- while (i-- > 0) ++t;
- return *t;
- }
-
- private:
- T* head_;
- T** tail_;
- DISALLOW_COPY_AND_ASSIGN(ThreadedList);
-};
-
V8_EXPORT_PRIVATE bool PassesFilter(Vector<const char> name,
Vector<const char> filter);
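
Note: the IsInRange helper added in this file compiles the two comparisons down to a single unsigned subtract-and-compare: after shifting both sides by lower_limit, value lies in [lower_limit, higher_limit] exactly when the unsigned difference value - lower_limit does not exceed higher_limit - lower_limit. Worked out for a character-class check (same trick, specialized to char for clarity):

    #include <cassert>
    #include <cstdint>

    bool IsAsciiLower(char c) {
      // 'a' <= c && c <= 'z' as one unsigned comparison: when c < 'a',
      // c - 'a' wraps around to a large unsigned value and fails the test.
      return static_cast<uint8_t>(c - 'a') <= static_cast<uint8_t>('z' - 'a');
    }

    int main() {
      assert(IsAsciiLower('a') && IsAsciiLower('m') && IsAsciiLower('z'));
      assert(!IsAsciiLower('A') && !IsAsciiLower('`') && !IsAsciiLower('{'));
    }
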
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 4d152d4d4e..98a807963c 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -4,6 +4,8 @@
#include "src/v8.h"
+#include <fstream>
+
#include "src/api.h"
#include "src/base/atomicops.h"
#include "src/base/once.h"
@@ -72,6 +74,12 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_semi_space_size = 1;
}
+ if (FLAG_trace_turbo) {
+ // Create an empty file shared by the process (e.g. the wasm engine).
+ std::ofstream(Isolate::GetTurboCfgFileName(nullptr).c_str(),
+ std::ios_base::trunc);
+ }
+
base::OS::Initialize(FLAG_hard_abort, FLAG_gc_fake_mmap);
if (FLAG_random_seed) SetRandomMmapSeed(FLAG_random_seed);
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index 7fde0c9ec4..ac32b7465e 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -59,7 +59,7 @@ class ThreadVisitor {
virtual void VisitThread(Isolate* isolate, ThreadLocalTop* top) = 0;
protected:
- virtual ~ThreadVisitor() {}
+ virtual ~ThreadVisitor() = default;
};
class ThreadManager {
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 0633d19a2a..3d80634c97 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -517,14 +517,12 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
- case WASM_MODULE_TYPE: {
- auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
- if (!FLAG_wasm_disable_structured_cloning || enabled_features.threads) {
+ case WASM_MODULE_TYPE:
+ if (!FLAG_wasm_disable_structured_cloning) {
// Only write WebAssembly modules if not disabled by a flag.
return WriteWasmModule(Handle<WasmModuleObject>::cast(receiver));
}
break;
- }
case WASM_MEMORY_TYPE: {
auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
if (enabled_features.threads) {
@@ -836,7 +834,7 @@ Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
ThrowDataCloneError(MessageTemplate::kDataCloneErrorNeuteredArrayBuffer);
return Nothing<bool>();
}
- double byte_length = array_buffer->byte_length()->Number();
+ double byte_length = array_buffer->byte_length();
if (byte_length > std::numeric_limits<uint32_t>::max()) {
ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
return Nothing<bool>();
@@ -867,8 +865,8 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView* view) {
tag = ArrayBufferViewTag::kDataView;
}
WriteVarint(static_cast<uint8_t>(tag));
- WriteVarint(NumberToUint32(view->byte_offset()));
- WriteVarint(NumberToUint32(view->byte_length()));
+ WriteVarint(static_cast<uint32_t>(view->byte_offset()));
+ WriteVarint(static_cast<uint32_t>(view->byte_length()));
return ThrowIfOutOfMemory();
}
@@ -1149,6 +1147,7 @@ void ValueDeserializer::TransferArrayBuffer(
}
MaybeHandle<Object> ValueDeserializer::ReadObject() {
+ DisallowJavascriptExecution no_js(isolate_);
MaybeHandle<Object> result = ReadObjectInternal();
// ArrayBufferView is special in that it consumes the value before it, even
@@ -1275,7 +1274,6 @@ MaybeHandle<String> ValueDeserializer::ReadString() {
}
MaybeHandle<BigInt> ValueDeserializer::ReadBigInt() {
- if (!FLAG_harmony_bigint) return MaybeHandle<BigInt>();
uint32_t bitfield;
if (!ReadVarint<uint32_t>().To(&bitfield)) return MaybeHandle<BigInt>();
int bytelength = BigInt::DigitsByteLengthForBitfield(bitfield);
@@ -1473,6 +1471,11 @@ MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
// hole. Past version 11, undefined means undefined.
if (version_ < 11 && element->IsUndefined(isolate_)) continue;
+ // Make sure elements is still large enough.
+ if (i >= static_cast<uint32_t>(elements->length())) {
+ return MaybeHandle<JSArray>();
+ }
+
elements->set(i, *element);
}
@@ -1594,8 +1597,12 @@ MaybeHandle<JSMap> ValueDeserializer::ReadJSMap() {
}
Handle<Object> argv[2];
- if (!ReadObject().ToHandle(&argv[0]) || !ReadObject().ToHandle(&argv[1]) ||
- Execution::Call(isolate_, map_set, map, arraysize(argv), argv)
+ if (!ReadObject().ToHandle(&argv[0]) || !ReadObject().ToHandle(&argv[1])) {
+ return MaybeHandle<JSMap>();
+ }
+
+ AllowJavascriptExecution allow_js(isolate_);
+ if (Execution::Call(isolate_, map_set, map, arraysize(argv), argv)
.is_null()) {
return MaybeHandle<JSMap>();
}
@@ -1630,8 +1637,10 @@ MaybeHandle<JSSet> ValueDeserializer::ReadJSSet() {
}
Handle<Object> argv[1];
- if (!ReadObject().ToHandle(&argv[0]) ||
- Execution::Call(isolate_, set_add, set, arraysize(argv), argv)
+ if (!ReadObject().ToHandle(&argv[0])) return MaybeHandle<JSSet>();
+
+ AllowJavascriptExecution allow_js(isolate_);
+ if (Execution::Call(isolate_, set_add, set, arraysize(argv), argv)
.is_null()) {
return MaybeHandle<JSSet>();
}
@@ -1704,7 +1713,7 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadTransferredJSArrayBuffer() {
MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
Handle<JSArrayBuffer> buffer) {
- uint32_t buffer_byte_length = NumberToUint32(buffer->byte_length());
+ uint32_t buffer_byte_length = static_cast<uint32_t>(buffer->byte_length());
uint8_t tag = 0;
uint32_t byte_offset = 0;
uint32_t byte_length = 0;
@@ -1719,15 +1728,6 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
ExternalArrayType external_array_type = kExternalInt8Array;
unsigned element_size = 0;
- if (!FLAG_harmony_bigint) {
- // Refuse to construct BigInt64Arrays unless the flag is on.
- ArrayBufferViewTag cast_tag = static_cast<ArrayBufferViewTag>(tag);
- if (cast_tag == ArrayBufferViewTag::kBigInt64Array ||
- cast_tag == ArrayBufferViewTag::kBigUint64Array) {
- return MaybeHandle<JSArrayBufferView>();
- }
- }
-
switch (static_cast<ArrayBufferViewTag>(tag)) {
case ArrayBufferViewTag::kDataView: {
Handle<JSDataView> data_view =
@@ -1755,9 +1755,7 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
}
MaybeHandle<JSObject> ValueDeserializer::ReadWasmModuleTransfer() {
- auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
- if ((FLAG_wasm_disable_structured_cloning && !enabled_features.threads) ||
- expect_inline_wasm()) {
+ if (FLAG_wasm_disable_structured_cloning || expect_inline_wasm()) {
return MaybeHandle<JSObject>();
}
@@ -1779,9 +1777,7 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModuleTransfer() {
}
MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
- auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
- if ((FLAG_wasm_disable_structured_cloning && !enabled_features.threads) ||
- !expect_inline_wasm()) {
+ if (FLAG_wasm_disable_structured_cloning || !expect_inline_wasm()) {
return MaybeHandle<JSObject>();
}
@@ -1988,6 +1984,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
bool success;
LookupIterator it = LookupIterator::PropertyOrElement(
isolate_, object, key, &success, LookupIterator::OWN);
+ CHECK_EQ(LookupIterator::NOT_FOUND, it.state());
if (!success ||
JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
.is_null()) {
@@ -2022,6 +2019,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
bool success;
LookupIterator it = LookupIterator::PropertyOrElement(
isolate_, object, key, &success, LookupIterator::OWN);
+ CHECK_EQ(LookupIterator::NOT_FOUND, it.state());
if (!success ||
JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
.is_null()) {
@@ -2069,6 +2067,7 @@ static Maybe<bool> SetPropertiesFromKeyValuePairs(Isolate* isolate,
bool success;
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, key, &success, LookupIterator::OWN);
+ CHECK_EQ(LookupIterator::NOT_FOUND, it.state());
if (!success ||
JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
.is_null()) {
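The deserializer changes above put all of ReadObject under a DisallowJavascriptExecution scope and re-enable JS only around the Execution::Call invocations for Map.set / Set.add. A minimal sketch (assumed names and types, not the actual V8 classes) of that RAII scoping pattern:

#include <cassert>

struct ExecutionState { bool js_allowed = true; };

// RAII scope that forces js_allowed to |allowed| and restores the
// previous value on destruction.
class JsExecutionScope {
 public:
  JsExecutionScope(ExecutionState* s, bool allowed)
      : s_(s), prev_(s->js_allowed) {
    s_->js_allowed = allowed;
  }
  ~JsExecutionScope() { s_->js_allowed = prev_; }

 private:
  ExecutionState* s_;
  bool prev_;
};

int main() {
  ExecutionState state;
  JsExecutionScope no_js(&state, false);   // like DisallowJavascriptExecution
  assert(!state.js_allowed);
  {
    JsExecutionScope allow(&state, true);  // like AllowJavascriptExecution,
    assert(state.js_allowed);              // only around Map.set / Set.add
  }
  assert(!state.js_allowed);               // restored on scope exit
}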
diff --git a/deps/v8/src/vector-slot-pair.cc b/deps/v8/src/vector-slot-pair.cc
index e639a9037e..9a1d13c697 100644
--- a/deps/v8/src/vector-slot-pair.cc
+++ b/deps/v8/src/vector-slot-pair.cc
@@ -9,7 +9,7 @@
namespace v8 {
namespace internal {
-VectorSlotPair::VectorSlotPair() {}
+VectorSlotPair::VectorSlotPair() = default;
int VectorSlotPair::index() const {
return vector_.is_null() ? -1 : FeedbackVector::GetIndex(slot_);
@@ -17,22 +17,24 @@ int VectorSlotPair::index() const {
bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
return lhs.slot() == rhs.slot() &&
- lhs.vector().location() == rhs.vector().location();
+ lhs.vector().location() == rhs.vector().location() &&
+ lhs.ic_state() == rhs.ic_state();
}
bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
return !(lhs == rhs);
}
-std::ostream& operator<<(std::ostream& os, const VectorSlotPair& pair) {
- if (pair.IsValid()) {
- return os << "VectorSlotPair(" << pair.slot() << ")";
+std::ostream& operator<<(std::ostream& os, const VectorSlotPair& p) {
+ if (p.IsValid()) {
+ return os << "VectorSlotPair(" << p.slot() << ", "
+ << InlineCacheState2String(p.ic_state()) << ")";
}
return os << "VectorSlotPair(INVALID)";
}
size_t hash_value(VectorSlotPair const& p) {
- return base::hash_combine(p.slot(), p.vector().location());
+ return base::hash_combine(p.slot(), p.vector().location(), p.ic_state());
}
} // namespace internal
diff --git a/deps/v8/src/vector-slot-pair.h b/deps/v8/src/vector-slot-pair.h
index cd9434c630..cb99d06112 100644
--- a/deps/v8/src/vector-slot-pair.h
+++ b/deps/v8/src/vector-slot-pair.h
@@ -19,25 +19,29 @@ class FeedbackVector;
class V8_EXPORT_PRIVATE VectorSlotPair {
public:
VectorSlotPair();
- VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : vector_(vector), slot_(slot) {}
+ VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot,
+ InlineCacheState ic_state)
+ : vector_(vector), slot_(slot), ic_state_(ic_state) {}
bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
Handle<FeedbackVector> vector() const { return vector_; }
FeedbackSlot slot() const { return slot_; }
+ InlineCacheState ic_state() const { return ic_state_; }
int index() const;
private:
Handle<FeedbackVector> vector_;
FeedbackSlot slot_;
+ InlineCacheState ic_state_ = UNINITIALIZED;
};
bool operator==(VectorSlotPair const&, VectorSlotPair const&);
bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
-std::ostream& operator<<(std::ostream& os, const VectorSlotPair& pair);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ VectorSlotPair const&);
size_t hash_value(VectorSlotPair const&);
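The VectorSlotPair change above adds ic_state() to both operator== and hash_value. A minimal sketch (generic types, not the V8 code) of the invariant behind that: every field compared by operator== must also feed the hash, or equal values could land in different hash buckets.

#include <cstddef>
#include <functional>

struct Pair {
  int slot;
  int ic_state;
};

inline bool operator==(const Pair& a, const Pair& b) {
  return a.slot == b.slot && a.ic_state == b.ic_state;
}

inline size_t hash_value(const Pair& p) {
  size_t h = std::hash<int>{}(p.slot);
  // Boost-style hash_combine: fold in every field operator== compares.
  h ^= std::hash<int>{}(p.ic_state) + 0x9e3779b9 + (h << 6) + (h >> 2);
  return h;
}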
diff --git a/deps/v8/src/visitors.h b/deps/v8/src/visitors.h
index 9098d8b471..ebe58c3e75 100644
--- a/deps/v8/src/visitors.h
+++ b/deps/v8/src/visitors.h
@@ -54,9 +54,9 @@ enum class Root {
// Abstract base class for visiting, and optionally modifying, the
// pointers contained in roots. Used in GC and serialization/deserialization.
-class RootVisitor BASE_EMBEDDED {
+class RootVisitor {
public:
- virtual ~RootVisitor() {}
+ virtual ~RootVisitor() = default;
  // Visits a contiguous array of pointers in the half-open range
// [start, end). Any or all of the values may be modified on return.
@@ -82,9 +82,9 @@ class RelocIterator;
// Abstract base class for visiting, and optionally modifying, the
// pointers contained in Objects. Used in GC and serialization/deserialization.
-class ObjectVisitor BASE_EMBEDDED {
+class ObjectVisitor {
public:
- virtual ~ObjectVisitor() {}
+ virtual ~ObjectVisitor() = default;
  // Visits a contiguous array of pointers in the half-open range
// [start, end). Any or all of the values may be modified on return.
@@ -93,6 +93,15 @@ class ObjectVisitor BASE_EMBEDDED {
virtual void VisitPointers(HeapObject* host, MaybeObject** start,
MaybeObject** end) = 0;
+  // Custom weak pointers must be ignored by the GC but not by other
+  // visitors. They are used, e.g., for lists that are recreated after GC.
+  // The default implementation treats them as strong pointers. Visitors
+  // that want to ignore them must override this function with an empty
+  // implementation.
+ virtual void VisitCustomWeakPointers(HeapObject* host, Object** start,
+ Object** end) {
+ VisitPointers(host, start, end);
+ }
+
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(HeapObject* host, Object** p) {
VisitPointers(host, p, p + 1);
@@ -100,6 +109,9 @@ class ObjectVisitor BASE_EMBEDDED {
virtual void VisitPointer(HeapObject* host, MaybeObject** p) {
VisitPointers(host, p, p + 1);
}
+ virtual void VisitCustomWeakPointer(HeapObject* host, Object** p) {
+ VisitCustomWeakPointers(host, p, p + 1);
+ }
// To allow lazy clearing of inline caches the visitor has
// a rich interface for iterating over Code objects ...
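A minimal sketch (simplified signatures, not the V8 hierarchy) of how a GC-side visitor opts out of the custom weak pointers added above, while every other visitor inherits the strong-pointer default:

struct Object;
struct HeapObject;

class Visitor {
 public:
  virtual ~Visitor() = default;
  virtual void VisitPointers(HeapObject* host, Object** start,
                             Object** end) = 0;
  // Default: treat custom weak pointers exactly like strong pointers.
  virtual void VisitCustomWeakPointers(HeapObject* host, Object** start,
                                       Object** end) {
    VisitPointers(host, start, end);
  }
};

class MarkingVisitor final : public Visitor {
 public:
  void VisitPointers(HeapObject* host, Object** start,
                     Object** end) override {
    // ... mark each slot in [start, end) ...
  }
  // The GC ignores custom weak pointers: an empty override.
  void VisitCustomWeakPointers(HeapObject*, Object**, Object**) override {}
};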
diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h
index cf74c63870..a9bd08b6cd 100644
--- a/deps/v8/src/vm-state.h
+++ b/deps/v8/src/vm-state.h
@@ -17,7 +17,7 @@ namespace internal {
// VMState object leaves a state by popping the current state from the
// stack.
template <StateTag Tag>
-class VMState BASE_EMBEDDED {
+class VMState {
public:
explicit inline VMState(Isolate* isolate);
inline ~VMState();
@@ -27,8 +27,7 @@ class VMState BASE_EMBEDDED {
StateTag previous_tag_;
};
-
-class ExternalCallbackScope BASE_EMBEDDED {
+class ExternalCallbackScope {
public:
inline ExternalCallbackScope(Isolate* isolate, Address callback);
inline ~ExternalCallbackScope();
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 725bed590f..24c6d90ec6 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -161,6 +161,7 @@ UNIMPLEMENTED_FP_BINOP(f32_mul)
UNIMPLEMENTED_FP_BINOP(f32_div)
UNIMPLEMENTED_FP_BINOP(f32_min)
UNIMPLEMENTED_FP_BINOP(f32_max)
+UNIMPLEMENTED_FP_BINOP(f32_copysign)
UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
UNIMPLEMENTED_FP_UNOP(f32_ceil)
@@ -174,6 +175,7 @@ UNIMPLEMENTED_FP_BINOP(f64_mul)
UNIMPLEMENTED_FP_BINOP(f64_div)
UNIMPLEMENTED_FP_BINOP(f64_min)
UNIMPLEMENTED_FP_BINOP(f64_max)
+UNIMPLEMENTED_FP_BINOP(f64_copysign)
UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_ceil)
@@ -212,6 +214,10 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
+ BAILOUT("i32_shr");
+}
+
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
@@ -237,6 +243,11 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return false;
}
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister lhs,
+ int amount) {
+ BAILOUT("i64_shr");
+}
+
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
// This is a nop on arm.
}
@@ -248,6 +259,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ BAILOUT("emit_i32_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ BAILOUT("emit_i32_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i32");
+}
+
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index cdc2dc2a45..c73a60fd7d 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -391,11 +391,24 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
Register amount, LiftoffRegList pinned) { \
instruction(dst.W(), src.W(), amount.W()); \
}
+#define I32_SHIFTOP_I(name, instruction) \
+ I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(Register dst, Register src, int amount) { \
+ DCHECK(is_uint5(amount)); \
+ instruction(dst.W(), src.W(), amount); \
+ }
#define I64_SHIFTOP(name, instruction) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
Register amount, LiftoffRegList pinned) { \
instruction(dst.gp().X(), src.gp().X(), amount.X()); \
}
+#define I64_SHIFTOP_I(name, instruction) \
+ I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
+ int amount) { \
+ DCHECK(is_uint6(amount)); \
+ instruction(dst.gp().X(), src.gp().X(), amount); \
+ }
I32_BINOP(i32_add, Add)
I32_BINOP(i32_sub, Sub)
@@ -405,7 +418,7 @@ I32_BINOP(i32_or, Orr)
I32_BINOP(i32_xor, Eor)
I32_SHIFTOP(i32_shl, Lsl)
I32_SHIFTOP(i32_sar, Asr)
-I32_SHIFTOP(i32_shr, Lsr)
+I32_SHIFTOP_I(i32_shr, Lsr)
I64_BINOP(i64_add, Add)
I64_BINOP(i64_sub, Sub)
I64_BINOP(i64_mul, Mul)
@@ -414,7 +427,7 @@ I64_BINOP(i64_or, Orr)
I64_BINOP(i64_xor, Eor)
I64_SHIFTOP(i64_shl, Lsl)
I64_SHIFTOP(i64_sar, Asr)
-I64_SHIFTOP(i64_shr, Lsr)
+I64_SHIFTOP_I(i64_shr, Lsr)
FP32_BINOP(f32_add, Fadd)
FP32_BINOP(f32_sub, Fsub)
FP32_BINOP(f32_mul, Fmul)
@@ -450,7 +463,9 @@ FP64_UNOP(f64_sqrt, Fsqrt)
#undef FP64_UNOP
#undef FP64_UNOP_RETURN_TRUE
#undef I32_SHIFTOP
+#undef I32_SHIFTOP_I
#undef I64_SHIFTOP
+#undef I64_SHIFTOP_I
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
Clz(dst.W(), src.W());
@@ -611,6 +626,16 @@ void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
Sxtw(dst, src);
}
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("f32_copysign");
+}
+
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("f64_copysign");
+}
+
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
@@ -749,6 +774,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
}
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ sxtb(dst, src);
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ sxth(dst, src);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ sxtb(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ sxth(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ sxtw(dst.gp(), src.gp());
+}
+
void LiftoffAssembler::emit_jump(Label* label) { B(label); }
void LiftoffAssembler::emit_jump(Register target) { Br(target); }
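The sxtb/sxth/sxtw instructions used for the arm64 sign-extension ops above keep the low N bits and replicate bit N-1 into the upper bits. A sketch of the same semantics in plain C++ (the narrowing casts are two's-complement wraparound, guaranteed since C++20 and universal in practice before that):

#include <cstdint>

constexpr int32_t SignExtendI8(int32_t src) {   // like sxtb
  return static_cast<int8_t>(static_cast<uint8_t>(src));
}

constexpr int64_t SignExtendI32(int64_t src) {  // like sxtw
  return static_cast<int32_t>(static_cast<uint32_t>(src));
}

static_assert(SignExtendI8(0x80) == -128, "bit 7 is replicated");
static_assert(SignExtendI32(0xFFFFFFFF) == -1, "bit 31 is replicated");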
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 1fef62542a..3a0ace0d62 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -121,6 +121,11 @@ inline void SpillRegisters(LiftoffAssembler* assm, Regs... regs) {
}
}
+inline void SignExtendI32ToI64(Assembler* assm, LiftoffRegister reg) {
+ assm->mov(reg.high_gp(), reg.low_gp());
+ assm->sar(reg.high_gp(), 31);
+}
+
constexpr DoubleRegister kScratchDoubleReg = xmm7;
constexpr int kSubSpSize = 6; // 6 bytes for "sub esp, <imm32>"
@@ -247,8 +252,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI64Load8S:
movsx_b(dst.low_gp(), src_op);
- mov(dst.high_gp(), dst.low_gp());
- sar(dst.high_gp(), 31);
+ liftoff::SignExtendI32ToI64(this, dst);
break;
case LoadType::kI32Load16U:
movzx_w(dst.gp(), src_op);
@@ -262,8 +266,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI64Load16S:
movsx_w(dst.low_gp(), src_op);
- mov(dst.high_gp(), dst.low_gp());
- sar(dst.high_gp(), 31);
+ liftoff::SignExtendI32ToI64(this, dst);
break;
case LoadType::kI32Load:
mov(dst.gp(), src_op);
@@ -274,8 +277,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI64Load32S:
mov(dst.low_gp(), src_op);
- mov(dst.high_gp(), dst.low_gp());
- sar(dst.high_gp(), 31);
+ liftoff::SignExtendI32ToI64(this, dst);
break;
case LoadType::kI64Load: {
// Compute the operand for the load of the upper half.
@@ -664,6 +666,12 @@ void LiftoffAssembler::emit_i32_shr(Register dst, Register src, Register amount,
pinned);
}
+void LiftoffAssembler::emit_i32_shr(Register dst, Register src, int amount) {
+ if (dst != src) mov(dst, src);
+ DCHECK(is_uint5(amount));
+ shr(dst, amount);
+}
+
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
Label nonzero_input;
Label continuation;
@@ -879,6 +887,13 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::ShrPair_cl, pinned);
}
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
+ int amount) {
+ if (dst != src) Move(dst, src, kWasmI64);
+ DCHECK(is_uint6(amount));
+ ShrPair(dst.high_gp(), dst.low_gp(), amount);
+}
+
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
// This is a nop on ia32.
}
@@ -1012,6 +1027,20 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
liftoff::MinOrMax::kMax);
}
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ static constexpr int kF32SignBit = 1 << 31;
+ Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch2 =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ Movd(scratch, lhs); // move {lhs} into {scratch}.
+ and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
+ Movd(scratch2, rhs); // move {rhs} into {scratch2}.
+ and_(scratch2, Immediate(kF32SignBit)); // isolate sign bit in {scratch2}.
+ or_(scratch, scratch2); // combine {scratch2} into {scratch}.
+ Movd(dst, scratch); // move result into {dst}.
+}
+
void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
static constexpr uint32_t kSignBit = uint32_t{1} << 31;
if (dst == src) {
@@ -1121,6 +1150,24 @@ void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
liftoff::MinOrMax::kMin);
}
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ static constexpr int kF32SignBit = 1 << 31;
+ // On ia32, we cannot hold the whole f64 value in a gp register, so we just
+ // operate on the upper half (UH).
+ Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch2 =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+
+ Pextrd(scratch, lhs, 1); // move UH of {lhs} into {scratch}.
+ and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
+ Pextrd(scratch2, rhs, 1); // move UH of {rhs} into {scratch2}.
+ and_(scratch2, Immediate(kF32SignBit)); // isolate sign bit in {scratch2}.
+ or_(scratch, scratch2); // combine {scratch2} into {scratch}.
+ movsd(dst, lhs); // move {lhs} into {dst}.
+ Pinsrd(dst, scratch, 1); // insert {scratch} into UH of {dst}.
+}
+
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
liftoff::EmitFloatMinOrMax<double>(this, dst, lhs, rhs,
@@ -1266,7 +1313,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
case kExprI64SConvertI32:
if (dst.low_gp() != src.gp()) mov(dst.low_gp(), src.gp());
- mov(dst.high_gp(), src.gp());
+ if (dst.high_gp() != src.gp()) mov(dst.high_gp(), src.gp());
sar(dst.high_gp(), 31);
return true;
case kExprI64UConvertI32:
@@ -1318,6 +1365,32 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
}
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ movsx_b(dst, src);
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ movsx_w(dst, src);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ movsx_b(dst.low_gp(), src.low_gp());
+ liftoff::SignExtendI32ToI64(this, dst);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ movsx_w(dst.low_gp(), src.low_gp());
+ liftoff::SignExtendI32ToI64(this, dst);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.low_gp() != src.low_gp()) mov(dst.low_gp(), src.low_gp());
+ liftoff::SignExtendI32ToI64(this, dst);
+}
+
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
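The ia32 copysign code above works on the raw bit pattern: clear the sign bit of {lhs}, isolate the sign bit of {rhs}, and OR the two. A portable sketch (plain C++, not the emitted assembly) of the same computation:

#include <cstdint>
#include <cstring>

float F32CopySign(float lhs, float rhs) {
  constexpr uint32_t kSignBit = uint32_t{1} << 31;
  uint32_t l, r;
  std::memcpy(&l, &lhs, sizeof(l));  // like Movd(scratch, lhs)
  std::memcpy(&r, &rhs, sizeof(r));  // like Movd(scratch2, rhs)
  const uint32_t bits = (l & ~kSignBit) | (r & kSignBit);
  float result;
  std::memcpy(&result, &bits, sizeof(result));  // like Movd(dst, scratch)
  return result;
}

For f64 on ia32 only the upper 32 bits carry the sign, which is why the assembly above extracts and reinserts just that half (Pextrd/Pinsrd) instead of the whole value.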
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 1d604925cc..63cc7344b3 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -286,7 +286,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
auto& dst = stack_state[dst_idx];
auto& src = source.stack_state[src_idx];
// Just initialize to any register; will be overwritten before use.
- LiftoffRegister reg(Register::from_code<0>());
+ LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
RegClass rc = src.is_reg() ? src.reg_class() : reg_class_for(src.type());
if (src.is_reg() && is_free(src.reg())) {
reg = src.reg();
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index cfc412d671..673aa4125f 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -248,7 +248,7 @@ class LiftoffAssembler : public TurboAssembler {
};
LiftoffAssembler();
- ~LiftoffAssembler();
+ ~LiftoffAssembler() override;
LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
@@ -399,6 +399,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegList pinned = {});
inline void emit_i32_shr(Register dst, Register src, Register amount,
LiftoffRegList pinned = {});
+ inline void emit_i32_shr(Register dst, Register src, int amount);
// i32 unops.
inline bool emit_i32_clz(Register dst, Register src);
@@ -433,6 +434,8 @@ class LiftoffAssembler : public TurboAssembler {
Register amount, LiftoffRegList pinned = {});
inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount, LiftoffRegList pinned = {});
+ inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
+ int amount);
inline void emit_i32_to_intptr(Register dst, Register src);
@@ -452,6 +455,21 @@ class LiftoffAssembler : public TurboAssembler {
emit_i32_sub(dst, lhs, rhs);
}
}
+ inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) {
+ if (kPointerSize == 8) {
+ emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs),
+ LiftoffRegister(rhs));
+ } else {
+ emit_i32_and(dst, lhs, rhs);
+ }
+ }
+ inline void emit_ptrsize_shr(Register dst, Register src, int amount) {
+ if (kPointerSize == 8) {
+ emit_i64_shr(LiftoffRegister(dst), LiftoffRegister(src), amount);
+ } else {
+ emit_i32_shr(dst, src, amount);
+ }
+ }
// f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
@@ -466,6 +484,8 @@ class LiftoffAssembler : public TurboAssembler {
DoubleRegister rhs);
inline void emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
+ inline void emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
// f32 unops.
inline void emit_f32_abs(DoubleRegister dst, DoubleRegister src);
@@ -489,6 +509,8 @@ class LiftoffAssembler : public TurboAssembler {
DoubleRegister rhs);
inline void emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
+ inline void emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
// f64 unops.
inline void emit_f64_abs(DoubleRegister dst, DoubleRegister src);
@@ -499,10 +521,15 @@ class LiftoffAssembler : public TurboAssembler {
inline bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src);
inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src);
- // type conversions.
inline bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst,
LiftoffRegister src, Label* trap = nullptr);
+ inline void emit_i32_signextend_i8(Register dst, Register src);
+ inline void emit_i32_signextend_i16(Register dst, Register src);
+ inline void emit_i64_signextend_i8(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64_signextend_i16(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64_signextend_i32(LiftoffRegister dst, LiftoffRegister src);
+
inline void emit_jump(Label*);
inline void emit_jump(Register);
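The emit_ptrsize_* helpers above dispatch on the target's pointer size; since kPointerSize is a compile-time constant, the untaken branch folds away. A minimal sketch (plain C++) of the pattern:

#include <cstdint>

constexpr int kPointerSize = sizeof(void*);

uintptr_t PtrsizeShr(uintptr_t value, int amount) {
  if (kPointerSize == 8) {
    return static_cast<uint64_t>(value) >> amount;  // emit_i64_shr path
  }
  return static_cast<uint32_t>(value) >> amount;    // emit_i32_shr path
}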
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index dbd106d481..d77e7cde4a 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -16,6 +16,7 @@
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/memory-tracing.h"
+#include "src/wasm/object-access.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
@@ -39,11 +40,23 @@ namespace {
} while (false)
#define WASM_INSTANCE_OBJECT_OFFSET(name) \
- (WasmInstanceObject::k##name##Offset - kHeapObjectTag)
+ ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
-#define LOAD_INSTANCE_FIELD(dst, name, type) \
- __ LoadFromInstance(dst.gp(), WASM_INSTANCE_OBJECT_OFFSET(name), \
- LoadType(type).size());
+template <int expected_size, int actual_size>
+struct assert_field_size {
+ static_assert(expected_size == actual_size,
+ "field in WasmInstance does not have the expected size");
+ static constexpr int size = actual_size;
+};
+
+#define WASM_INSTANCE_OBJECT_SIZE(name) \
+ (WasmInstanceObject::k##name##OffsetEnd - \
+ WasmInstanceObject::k##name##Offset + 1) // NOLINT(whitespace/indent)
+
+#define LOAD_INSTANCE_FIELD(dst, name, load_size) \
+ __ LoadFromInstance( \
+ dst.gp(), WASM_INSTANCE_OBJECT_OFFSET(name), \
+ assert_field_size<WASM_INSTANCE_OBJECT_SIZE(name), load_size>::size);
#ifdef DEBUG
#define DEBUG_CODE_COMMENT(str) \
@@ -95,8 +108,6 @@ constexpr Vector<const ValueType> kTypes_ilfd = ArrayVector(kTypesArr_ilfd);
class LiftoffCompiler {
public:
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
-
// TODO(clemensh): Make this a template parameter.
static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
@@ -253,14 +264,14 @@ class LiftoffCompiler {
uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
const int num_lowered_params = 1 + needs_reg_pair(type);
// Initialize to anything, will be set in the loop and used afterwards.
- LiftoffRegister reg = LiftoffRegister::from_code(kGpReg, 0);
+ LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
RegClass rc = num_lowered_params == 1 ? reg_class_for(type) : kGpReg;
LiftoffRegList pinned;
for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
compiler::LinkageLocation param_loc =
descriptor_->GetInputLocation(input_idx + pair_idx);
// Initialize to anything, will be set in both arms of the if.
- LiftoffRegister in_reg = LiftoffRegister::from_code(kGpReg, 0);
+ LiftoffRegister in_reg = kGpCacheRegList.GetFirstRegSet();
if (param_loc.IsRegister()) {
DCHECK(!param_loc.IsAnyRegister());
int reg_code = param_loc.AsRegister();
@@ -273,7 +284,7 @@ class LiftoffCompiler {
// Move to a cache register (spill one if necessary).
// Note that we cannot create a {LiftoffRegister} for reg_code, since
// {LiftoffRegister} can only store cache regs.
- LiftoffRegister in_reg = __ GetUnusedRegister(rc, pinned);
+ in_reg = __ GetUnusedRegister(rc, pinned);
if (rc == kGpReg) {
__ Move(in_reg.gp(), Register::from_code(reg_code), type);
} else {
@@ -300,7 +311,7 @@ class LiftoffCompiler {
OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
OutOfLineCode& ool = out_of_line_code_.back();
LiftoffRegister limit_address = __ GetUnusedRegister(kGpReg);
- LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kPointerLoadType);
+ LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kPointerSize);
__ StackCheck(ool.label.get(), limit_address.gp());
__ bind(ool.continuation.get());
}
@@ -344,7 +355,7 @@ class LiftoffCompiler {
}
DCHECK_EQ(input_idx, descriptor_->InputCount());
// Set to a gp register, to mark this uninitialized.
- LiftoffRegister zero_double_reg(Register::from_code<0>());
+ LiftoffRegister zero_double_reg = kGpCacheRegList.GetFirstRegSet();
DCHECK(zero_double_reg.is_gp());
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
@@ -624,6 +635,20 @@ class LiftoffCompiler {
__ emit_##fn(dst.gp(), src.gp()); \
}); \
break;
+#define CASE_I32_SIGN_EXTENSION(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitUnOp<kWasmI32, kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister src) { \
+ __ emit_##fn(dst.gp(), src.gp()); \
+ }); \
+ break;
+#define CASE_I64_SIGN_EXTENSION(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitUnOp<kWasmI64, kWasmI64>( \
+ [=](LiftoffRegister dst, LiftoffRegister src) { \
+ __ emit_##fn(dst, src); \
+ }); \
+ break;
#define CASE_FLOAT_UNOP(opcode, type, fn) \
case WasmOpcode::kExpr##opcode: \
EmitUnOp<kWasm##type, kWasm##type>( \
@@ -692,6 +717,11 @@ class LiftoffCompiler {
&ExternalReference::wasm_uint64_to_float64, kNoTrap)
CASE_TYPE_CONVERSION(F64ConvertF32, F64, F32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(F64ReinterpretI64, F64, I64, nullptr, kNoTrap)
+ CASE_I32_SIGN_EXTENSION(I32SExtendI8, i32_signextend_i8)
+ CASE_I32_SIGN_EXTENSION(I32SExtendI16, i32_signextend_i16)
+ CASE_I64_SIGN_EXTENSION(I64SExtendI8, i64_signextend_i8)
+ CASE_I64_SIGN_EXTENSION(I64SExtendI16, i64_signextend_i16)
+ CASE_I64_SIGN_EXTENSION(I64SExtendI32, i64_signextend_i32)
case kExprI32Popcnt:
EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt,
&ExternalReference::wasm_word32_popcnt);
@@ -706,6 +736,8 @@ class LiftoffCompiler {
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
#undef CASE_I32_UNOP
+#undef CASE_I32_SIGN_EXTENSION
+#undef CASE_I64_SIGN_EXTENSION
#undef CASE_FLOAT_UNOP
#undef CASE_FLOAT_UNOP_WITH_CFALLBACK
#undef CASE_TYPE_CONVERSION
@@ -875,12 +907,14 @@ class LiftoffCompiler {
CASE_FLOAT_BINOP(F32Div, F32, f32_div)
CASE_FLOAT_BINOP(F32Min, F32, f32_min)
CASE_FLOAT_BINOP(F32Max, F32, f32_max)
+ CASE_FLOAT_BINOP(F32CopySign, F32, f32_copysign)
CASE_FLOAT_BINOP(F64Add, F64, f64_add)
CASE_FLOAT_BINOP(F64Sub, F64, f64_sub)
CASE_FLOAT_BINOP(F64Mul, F64, f64_mul)
CASE_FLOAT_BINOP(F64Div, F64, f64_div)
CASE_FLOAT_BINOP(F64Min, F64, f64_min)
CASE_FLOAT_BINOP(F64Max, F64, f64_max)
+ CASE_FLOAT_BINOP(F64CopySign, F64, f64_copysign)
case WasmOpcode::kExprI32DivS:
EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
@@ -1139,12 +1173,12 @@ class LiftoffCompiler {
uint32_t* offset) {
LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg));
if (global->mutability && global->imported) {
- LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kPointerLoadType);
+ LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kPointerSize);
__ Load(addr, addr.gp(), no_reg, global->index * sizeof(Address),
kPointerLoadType, pinned);
*offset = 0;
} else {
- LOAD_INSTANCE_FIELD(addr, GlobalsStart, kPointerLoadType);
+ LOAD_INSTANCE_FIELD(addr, GlobalsStart, kPointerSize);
*offset = global->offset;
}
return addr;
@@ -1161,7 +1195,7 @@ class LiftoffCompiler {
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
LoadType type = LoadType::ForValueType(global->type);
- __ Load(value, addr.gp(), no_reg, offset, type, pinned);
+ __ Load(value, addr.gp(), no_reg, offset, type, pinned, nullptr, true);
__ PushRegister(global->type, value);
}
@@ -1175,7 +1209,7 @@ class LiftoffCompiler {
LiftoffRegister addr = GetGlobalBaseAndOffset(global, pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
StoreType type = StoreType::ForValueType(global->type);
- __ Store(addr.gp(), no_reg, offset, reg, type, pinned);
+ __ Store(addr.gp(), no_reg, offset, reg, type, pinned, nullptr, true);
}
void Unreachable(FullDecoder* decoder) {
@@ -1353,7 +1387,7 @@ class LiftoffCompiler {
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerLoadType);
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerSize);
if (kPointerSize == 8) {
__ LoadConstant(end_offset_reg, WasmValue(end_offset));
@@ -1443,7 +1477,7 @@ class LiftoffCompiler {
// Set context to zero (Smi::kZero) for the runtime call.
__ TurboAssembler::Move(kContextRegister, Smi::kZero);
LiftoffRegister centry(kJavaScriptCallCodeStartRegister);
- LOAD_INSTANCE_FIELD(centry, CEntryStub, kPointerLoadType);
+ LOAD_INSTANCE_FIELD(centry, CEntryStub, kPointerSize);
__ CallRuntimeWithCEntry(runtime_function, centry.gp());
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
@@ -1464,9 +1498,9 @@ class LiftoffCompiler {
}
LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
__ LoadConstant(tmp, WasmValue(*offset));
- __ emit_i32_add(index.gp(), index.gp(), tmp.gp());
- LOAD_INSTANCE_FIELD(tmp, MemoryMask, LoadType::kI32Load);
- __ emit_i32_and(index.gp(), index.gp(), tmp.gp());
+ __ emit_ptrsize_add(index.gp(), index.gp(), tmp.gp());
+ LOAD_INSTANCE_FIELD(tmp, MemoryMask, kPointerSize);
+ __ emit_ptrsize_and(index.gp(), index.gp(), tmp.gp());
*offset = 0;
return index;
}
@@ -1485,7 +1519,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, pinned);
DEBUG_CODE_COMMENT("Load from memory");
LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerLoadType);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerSize);
RegClass rc = reg_class_for(value_type);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
uint32_t protected_load_pc = 0;
@@ -1498,7 +1532,7 @@ class LiftoffCompiler {
}
__ PushRegister(value_type, value);
- if (FLAG_wasm_trace_memory) {
+ if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index.gp(),
offset, decoder->position());
}
@@ -1519,7 +1553,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, pinned);
DEBUG_CODE_COMMENT("Store to memory");
LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerLoadType);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerSize);
uint32_t protected_store_pc = 0;
__ Store(addr.gp(), index.gp(), offset, value, type, pinned,
&protected_store_pc, true);
@@ -1528,22 +1562,16 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_store_pc);
}
- if (FLAG_wasm_trace_memory) {
+ if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(true, type.mem_rep(), index.gp(), offset,
decoder->position());
}
}
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
- LiftoffRegList pinned;
- LiftoffRegister mem_size = pinned.set(__ GetUnusedRegister(kGpReg));
- LiftoffRegister tmp_const =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
- // TODO(clemensh): Shift by immediate directly.
- __ LoadConstant(tmp_const,
- WasmValue(int32_t{WhichPowerOf2(kWasmPageSize)}));
- __ emit_i32_shr(mem_size.gp(), mem_size.gp(), tmp_const.gp(), pinned);
+ LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg);
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerSize);
+ __ emit_ptrsize_shr(mem_size.gp(), mem_size.gp(), kWasmPageSizeLog2);
__ PushRegister(kWasmI32, mem_size);
}
@@ -1602,17 +1630,17 @@ class LiftoffCompiler {
LiftoffRegister imported_targets = tmp;
LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
- kPointerLoadType);
+ kPointerSize);
__ Load(target, imported_targets.gp(), no_reg,
imm.index * sizeof(Address), kPointerLoadType, pinned);
LiftoffRegister imported_instances = tmp;
LOAD_INSTANCE_FIELD(imported_instances, ImportedFunctionInstances,
- kPointerLoadType);
+ kPointerSize);
LiftoffRegister target_instance = tmp;
__ Load(target_instance, imported_instances.gp(), no_reg,
- compiler::FixedArrayOffsetMinusTag(imm.index), kPointerLoadType,
- pinned);
+ ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index),
+ kPointerLoadType, pinned);
LiftoffRegister* explicit_instance = &target_instance;
Register target_reg = target.gp();
@@ -1684,8 +1712,7 @@ class LiftoffCompiler {
// Compare against table size stored in
// {instance->indirect_function_table_size}.
- LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize,
- LoadType::kI32Load);
+ LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size);
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
index.gp(), tmp_const.gp());
@@ -1714,7 +1741,7 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kPointerLoadType);
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kPointerSize);
__ LoadConstant(tmp_const,
WasmValue(static_cast<uint32_t>(sizeof(uint32_t))));
    // TODO(wasm): use an emit_i32_shli() instead of a multiply.

@@ -1739,14 +1766,13 @@ class LiftoffCompiler {
}
// Load the target from {instance->ift_targets[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kPointerLoadType);
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kPointerSize);
__ Load(scratch, table.gp(), index.gp(), 0, kPointerLoadType, pinned);
// Load the instance from {instance->ift_instances[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableInstances,
- kPointerLoadType);
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableInstances, kPointerSize);
__ Load(tmp_const, table.gp(), index.gp(),
- (FixedArray::kHeaderSize - kHeapObjectTag), kPointerLoadType,
+ ObjectAccess::ElementOffsetInTaggedFixedArray(0), kPointerLoadType,
pinned);
LiftoffRegister* explicit_instance = &tmp_const;
@@ -1835,6 +1861,8 @@ class LiftoffCompiler {
os << "\n";
#endif
}
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler);
};
} // namespace
@@ -1903,6 +1931,7 @@ WasmCode* LiftoffCompilationUnit::FinishCompilation(ErrorThrower*) {
#undef __
#undef TRACE
#undef WASM_INSTANCE_OBJECT_OFFSET
+#undef WASM_INSTANCE_OBJECT_SIZE
#undef LOAD_INSTANCE_FIELD
#undef DEBUG_CODE_COMMENT
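The assert_field_size template introduced above turns a mismatched LOAD_INSTANCE_FIELD into a compile error instead of a silently wrong-sized load. A self-contained sketch (hypothetical field layout, illustration only):

template <int expected_size, int actual_size>
struct assert_field_size {
  static_assert(expected_size == actual_size,
                "field does not have the expected size");
  static constexpr int size = actual_size;
};

// Hypothetical instance field: 8 bytes at offset 16.
constexpr int kFieldOffset = 16;
constexpr int kFieldOffsetEnd = 23;  // inclusive, so the field is 8 bytes
constexpr int kFieldSize = kFieldOffsetEnd - kFieldOffset + 1;

// OK: the requested load size matches the field size.
constexpr int checked = assert_field_size<kFieldSize, 8>::size;
// assert_field_size<kFieldSize, 4>::size would fail to compile.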
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 19bf88948d..c3f89eb506 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -46,11 +46,11 @@ static inline constexpr RegClass reg_class_for(ValueType type) {
// Maximum code of a gp cache register.
static constexpr int kMaxGpRegCode =
8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
- base::bits::CountLeadingZeros(kLiftoffAssemblerGpCacheRegs);
+ base::bits::CountLeadingZeros(kLiftoffAssemblerGpCacheRegs) - 1;
// Maximum code of an fp cache register.
static constexpr int kMaxFpRegCode =
8 * sizeof(kLiftoffAssemblerFpCacheRegs) -
- base::bits::CountLeadingZeros(kLiftoffAssemblerFpCacheRegs);
+ base::bits::CountLeadingZeros(kLiftoffAssemblerFpCacheRegs) - 1;
// LiftoffRegister encodes both gp and fp in a unified index space.
// [0 .. kMaxGpRegCode] encodes gp registers,
// [kMaxGpRegCode+1 .. kMaxGpRegCode + kMaxFpRegCode] encodes fp registers.
@@ -72,16 +72,23 @@ class LiftoffRegister {
using storage_t = std::conditional<
needed_bits <= 8, uint8_t,
std::conditional<needed_bits <= 16, uint16_t, uint32_t>::type>::type;
- static_assert(8 * sizeof(storage_t) >= needed_bits &&
- 8 * sizeof(storage_t) < 2 * needed_bits,
- "right type has been chosen");
+
+ static_assert(8 * sizeof(storage_t) >= needed_bits,
+ "chosen type is big enough");
+  // Check that the smallest sufficient data type was chosen.
+  // uint8_t is special-cased, as there is no smaller type.
+ static_assert((8 * sizeof(storage_t) < 2 * needed_bits) ||
+ (sizeof(storage_t) == sizeof(uint8_t)),
+ "chosen type is small enough");
public:
explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
+ DCHECK_NE(0, kLiftoffAssemblerGpCacheRegs & reg.bit());
DCHECK_EQ(reg, gp());
}
explicit LiftoffRegister(DoubleRegister reg)
: LiftoffRegister(kAfterMaxLiftoffGpRegCode + reg.code()) {
+ DCHECK_NE(0, kLiftoffAssemblerFpCacheRegs & reg.bit());
DCHECK_EQ(reg, fp());
}
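The kMaxGpRegCode/kMaxFpRegCode change above fixes an off-by-one: the index of the highest set bit in a register mask is bits-in-mask - clz(mask) - 1, not bits-in-mask - clz(mask). A self-contained sketch (hand-rolled clz so the check runs at compile time):

#include <cstdint>

constexpr int CountLeadingZeros8(uint8_t v) {
  int n = 0;
  for (uint8_t bit = 0x80; bit != 0 && !(v & bit); bit >>= 1) ++n;
  return n;
}

// Mask with only register code 5 set: two leading zeros in 8 bits.
constexpr uint8_t kCacheRegs = 0b0010'0000;
constexpr int kMaxRegCode =
    8 * sizeof(kCacheRegs) - CountLeadingZeros8(kCacheRegs) - 1;
static_assert(kMaxRegCode == 5, "highest register code is 5, not 6");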
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index bb18994618..cc8170b499 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -15,6 +15,14 @@ namespace wasm {
namespace liftoff {
+#if defined(V8_TARGET_BIG_ENDIAN)
+constexpr int32_t kLowWordOffset = 4;
+constexpr int32_t kHighWordOffset = 0;
+#else
+constexpr int32_t kLowWordOffset = 0;
+constexpr int32_t kHighWordOffset = 4;
+#endif
+
// fp-4 holds the stack marker, fp-8 is the instance parameter, first stack
// slot is located at fp-16.
constexpr int32_t kConstantStackSpace = 8;
@@ -41,8 +49,10 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
assm->lw(dst.gp(), src);
break;
case kWasmI64:
- assm->lw(dst.low_gp(), src);
- assm->lw(dst.high_gp(), MemOperand(base, offset + 4));
+ assm->lw(dst.low_gp(),
+ MemOperand(base, offset + liftoff::kLowWordOffset));
+ assm->lw(dst.high_gp(),
+ MemOperand(base, offset + liftoff::kHighWordOffset));
break;
case kWasmF32:
assm->lwc1(dst.fp(), src);
@@ -63,8 +73,10 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
assm->Usw(src.gp(), dst);
break;
case kWasmI64:
- assm->Usw(src.low_gp(), dst);
- assm->Usw(src.high_gp(), MemOperand(base, offset + 4));
+ assm->Usw(src.low_gp(),
+ MemOperand(base, offset + liftoff::kLowWordOffset));
+ assm->Usw(src.high_gp(),
+ MemOperand(base, offset + liftoff::kHighWordOffset));
break;
case kWasmF32:
assm->Uswc1(src.fp(), dst, t8);
@@ -106,11 +118,6 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
switch (type.value()) {
case LoadType::kI64Load8U:
case LoadType::kI64Load8S:
- // Swap low and high registers.
- assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
- assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
- assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
- V8_FALLTHROUGH;
case LoadType::kI32Load8U:
case LoadType::kI32Load8S:
// No need to change endianness for byte size.
@@ -140,20 +147,20 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
break;
case LoadType::kI64Load16U:
- assm->TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.high_gp(), 2);
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.low_gp(), 2);
assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
break;
case LoadType::kI64Load16S:
- assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 2);
- assm->sra(tmp.high_gp(), tmp.high_gp(), 31);
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 2);
+ assm->sra(tmp.high_gp(), tmp.low_gp(), 31);
break;
case LoadType::kI64Load32U:
- assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4);
assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
break;
case LoadType::kI64Load32S:
- assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
- assm->sra(tmp.high_gp(), tmp.high_gp(), 31);
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4);
+ assm->sra(tmp.high_gp(), tmp.low_gp(), 31);
break;
default:
UNREACHABLE();
@@ -179,11 +186,6 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
LiftoffRegister tmp = src;
switch (type.value()) {
case StoreType::kI64Store8:
- // Swap low and high registers.
- assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
- assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
- assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
- V8_FALLTHROUGH;
case StoreType::kI32Store8:
// No need to change endianness for byte size.
return;
@@ -193,21 +195,27 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
V8_FALLTHROUGH;
case StoreType::kI32Store:
- case StoreType::kI32Store16:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break;
+ case StoreType::kI32Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ break;
case StoreType::kF64Store:
is_float = true;
tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
V8_FALLTHROUGH;
case StoreType::kI64Store:
- case StoreType::kI64Store32:
- case StoreType::kI64Store16:
assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
break;
+ case StoreType::kI64Store32:
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4);
+ break;
+ case StoreType::kI64Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 2);
+ break;
default:
UNREACHABLE();
}
@@ -358,11 +366,16 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
sra(dst.high_gp(), dst.high_gp(), 31);
break;
case LoadType::kI64Load: {
- MemOperand src_op_upper = (offset_reg != no_reg)
- ? MemOperand(src, offset_imm + 4)
- : MemOperand(src_addr, offset_imm + 4);
- TurboAssembler::Ulw(dst.high_gp(), src_op_upper);
+ MemOperand src_op =
+ (offset_reg != no_reg)
+ ? MemOperand(src, offset_imm + liftoff::kLowWordOffset)
+ : MemOperand(src_addr, offset_imm + liftoff::kLowWordOffset);
+ MemOperand src_op_upper =
+ (offset_reg != no_reg)
+ ? MemOperand(src, offset_imm + liftoff::kHighWordOffset)
+ : MemOperand(src_addr, offset_imm + liftoff::kHighWordOffset);
TurboAssembler::Ulw(dst.low_gp(), src_op);
+ TurboAssembler::Ulw(dst.high_gp(), src_op_upper);
break;
}
case LoadType::kF32Load:
@@ -377,6 +390,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_load_mem) {
+ pinned.set(src_op.rm());
liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
}
#endif
@@ -396,6 +410,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_store_mem) {
+ pinned.set(dst_op.rm());
LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
// Save original value.
Move(tmp, src, type.value_type());
@@ -427,11 +442,16 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
TurboAssembler::Usw(src.gp(), dst_op);
break;
case StoreType::kI64Store: {
- MemOperand dst_op_upper = (offset_reg != no_reg)
- ? MemOperand(dst, offset_imm + 4)
- : MemOperand(dst_addr, offset_imm + 4);
- TurboAssembler::Usw(src.high_gp(), dst_op_upper);
+ MemOperand dst_op =
+ (offset_reg != no_reg)
+ ? MemOperand(dst, offset_imm + liftoff::kLowWordOffset)
+ : MemOperand(dst_addr, offset_imm + liftoff::kLowWordOffset);
+ MemOperand dst_op_upper =
+ (offset_reg != no_reg)
+ ? MemOperand(dst, offset_imm + liftoff::kHighWordOffset)
+ : MemOperand(dst_addr, offset_imm + liftoff::kHighWordOffset);
TurboAssembler::Usw(src.low_gp(), dst_op);
+ TurboAssembler::Usw(src.high_gp(), dst_op_upper);
break;
}
case StoreType::kF32Store:
@@ -624,12 +644,20 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
Register dst, Register src, Register amount, LiftoffRegList pinned) { \
instruction(dst, src, amount); \
}
+#define I32_SHIFTOP_I(name, instruction) \
+ I32_SHIFTOP(name, instruction##v) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
+ int amount) { \
+ DCHECK(is_uint5(amount)); \
+ instruction(dst, src, amount); \
+ }
I32_SHIFTOP(shl, sllv)
I32_SHIFTOP(sar, srav)
-I32_SHIFTOP(shr, srlv)
+I32_SHIFTOP_I(shr, srl)
#undef I32_SHIFTOP
+#undef I32_SHIFTOP_I
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -745,6 +773,13 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::ShrPair, pinned);
}
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
+ int amount) {
+ DCHECK(is_uint6(amount));
+ ShrPair(dst.high_gp(), dst.low_gp(), src.high_gp(), src.low_gp(), amount,
+ kScratchReg);
+}
+
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
// This is a nop on mips32.
}
@@ -779,6 +814,11 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
bind(&done);
}
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("f32_copysign");
+}
+
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
Label ool, done;
@@ -801,6 +841,11 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
bind(&done);
}
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("f64_copysign");
+}
+
#define FP_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
@@ -1027,6 +1072,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
}
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ BAILOUT("emit_i32_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ BAILOUT("emit_i32_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i32");
+}
+
void LiftoffAssembler::emit_jump(Label* label) {
TurboAssembler::Branch(label);
}
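The kLowWordOffset/kHighWordOffset constants introduced above fix word ordering on big-endian MIPS: when an i64 is stored as two 32-bit words, the low word sits at offset 0 on little-endian targets but at offset 4 on big-endian ones, so loading the halves from fixed offsets 0 and 4 would swap them. A sketch in plain C++ (the __BYTE_ORDER__ macro is a GCC/Clang predefine):

#include <cstdint>
#include <cstring>

uint32_t LoadLowWord(const uint64_t* p) {
  uint32_t words[2];
  std::memcpy(words, p, sizeof(words));
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return words[1];  // big-endian: the low word is the second 32-bit word
#else
  return words[0];  // little-endian: the low word comes first
#endif
}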
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 4bbfc18251..a2447d8b32 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -72,6 +72,9 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
switch (type) {
case kWasmI32:
+ assm->daddiu(sp, sp, -kPointerSize);
+ assm->sw(reg.gp(), MemOperand(sp, 0));
+ break;
case kWasmI64:
assm->push(reg.gp());
break;
@@ -107,22 +110,18 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
V8_FALLTHROUGH;
case LoadType::kI64Load32U:
assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
- assm->dsrl32(tmp.gp(), tmp.gp(), 0);
break;
case LoadType::kI32Load:
case LoadType::kI64Load32S:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
- assm->dsra32(tmp.gp(), tmp.gp(), 0);
break;
case LoadType::kI32Load16S:
case LoadType::kI64Load16S:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
- assm->dsra32(tmp.gp(), tmp.gp(), 0);
break;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
- assm->dsrl32(tmp.gp(), tmp.gp(), 0);
break;
case LoadType::kF64Load:
is_float = true;
@@ -165,18 +164,24 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
V8_FALLTHROUGH;
case StoreType::kI32Store:
- case StoreType::kI32Store16:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break;
+ case StoreType::kI32Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ break;
case StoreType::kF64Store:
is_float = true;
tmp = assm->GetUnusedRegister(kGpReg, pinned);
assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
V8_FALLTHROUGH;
case StoreType::kI64Store:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ break;
case StoreType::kI64Store32:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
case StoreType::kI64Store16:
- assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
break;
default:
UNREACHABLE();
@@ -274,12 +279,13 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- MemOperand src_op(src_addr, offset_imm);
+ Register src = no_reg;
if (offset_reg != no_reg) {
- Register src = GetUnusedRegister(kGpReg, pinned).gp();
+ src = GetUnusedRegister(kGpReg, pinned).gp();
emit_ptrsize_add(src, src_addr, offset_reg);
- src_op = MemOperand(src, offset_imm);
}
+ MemOperand src_op = (offset_reg != no_reg) ? MemOperand(src, offset_imm)
+ : MemOperand(src_addr, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) {
@@ -321,6 +327,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_load_mem) {
+ pinned.set(src_op.rm());
liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
}
#endif
@@ -340,6 +347,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_store_mem) {
+ pinned.set(dst_op.rm());
LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
// Save original value.
Move(tmp, src, type.value_type());
@@ -550,12 +558,20 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
Register dst, Register src, Register amount, LiftoffRegList pinned) { \
instruction(dst, src, amount); \
}
+#define I32_SHIFTOP_I(name, instruction) \
+ I32_SHIFTOP(name, instruction##v) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
+ int amount) { \
+ DCHECK(is_uint5(amount)); \
+ instruction(dst, src, amount); \
+ }
I32_SHIFTOP(shl, sllv)
I32_SHIFTOP(sar, srav)
-I32_SHIFTOP(shr, srlv)
+I32_SHIFTOP_I(shr, srl)
#undef I32_SHIFTOP
+#undef I32_SHIFTOP_I
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -628,12 +644,20 @@ I64_BINOP(xor, xor_)
LiftoffRegList pinned) { \
instruction(dst.gp(), src.gp(), amount); \
}
+#define I64_SHIFTOP_I(name, instruction) \
+ I64_SHIFTOP(name, instruction##v) \
+ void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \
+ LiftoffRegister src, int amount) { \
+ DCHECK(is_uint6(amount)); \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
I64_SHIFTOP(shl, dsllv)
I64_SHIFTOP(sar, dsrav)
-I64_SHIFTOP(shr, dsrlv)
+I64_SHIFTOP_I(shr, dsrl)
#undef I64_SHIFTOP
+#undef I64_SHIFTOP_I
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
addu(dst, src, zero_reg);
@@ -669,6 +693,11 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
bind(&done);
}
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("f32_copysign");
+}
+
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
Label ool, done;
@@ -691,6 +720,11 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
bind(&done);
}
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("f64_copysign");
+}
+
#define FP_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
@@ -928,6 +962,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
}
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ BAILOUT("emit_i32_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ BAILOUT("emit_i32_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i32");
+}
+
void LiftoffAssembler::emit_jump(Label* label) {
TurboAssembler::Branch(label);
}
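
The I32_SHIFTOP_I/I64_SHIFTOP_I macros above add an immediate-amount overload next to the existing register-amount one. Expanded by hand, I32_SHIFTOP_I(shr, srl) yields roughly the following (assuming the usual I32_SHIFTOP definition, whose tail is visible at the top of that hunk):

    void LiftoffAssembler::emit_i32_shr(Register dst, Register src,
                                        Register amount, LiftoffRegList pinned) {
      srlv(dst, src, amount);  // variable shift uses the 'v' instruction form
    }
    void LiftoffAssembler::emit_i32_shr(Register dst, Register src, int amount) {
      DCHECK(is_uint5(amount));  // MIPS srl encodes a 5-bit shift amount
      srl(dst, src, amount);     // immediate shift uses the plain form
    }

The ##v token pasting is what turns srl into srlv for the register-amount variant.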
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 9164db2188..e9dcd419ba 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -166,6 +166,7 @@ UNIMPLEMENTED_FP_BINOP(f32_mul)
UNIMPLEMENTED_FP_BINOP(f32_div)
UNIMPLEMENTED_FP_BINOP(f32_min)
UNIMPLEMENTED_FP_BINOP(f32_max)
+UNIMPLEMENTED_FP_BINOP(f32_copysign)
UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
UNIMPLEMENTED_FP_UNOP(f32_ceil)
@@ -179,6 +180,7 @@ UNIMPLEMENTED_FP_BINOP(f64_mul)
UNIMPLEMENTED_FP_BINOP(f64_div)
UNIMPLEMENTED_FP_BINOP(f64_min)
UNIMPLEMENTED_FP_BINOP(f64_max)
+UNIMPLEMENTED_FP_BINOP(f64_copysign)
UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_ceil)
@@ -217,6 +219,10 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
+ BAILOUT("i32_shr");
+}
+
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
@@ -246,6 +252,11 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister lhs,
+ int amount) {
+ BAILOUT("i64_shr");
+}
+
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_PPC64
BAILOUT("emit_i32_to_intptr");
@@ -261,6 +272,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ BAILOUT("emit_i32_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ BAILOUT("emit_i32_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i32");
+}
+
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index e39dd90166..970cfe5753 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -166,6 +166,7 @@ UNIMPLEMENTED_FP_BINOP(f32_mul)
UNIMPLEMENTED_FP_BINOP(f32_div)
UNIMPLEMENTED_FP_BINOP(f32_min)
UNIMPLEMENTED_FP_BINOP(f32_max)
+UNIMPLEMENTED_FP_BINOP(f32_copysign)
UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
UNIMPLEMENTED_FP_UNOP(f32_ceil)
@@ -179,6 +180,7 @@ UNIMPLEMENTED_FP_BINOP(f64_mul)
UNIMPLEMENTED_FP_BINOP(f64_div)
UNIMPLEMENTED_FP_BINOP(f64_min)
UNIMPLEMENTED_FP_BINOP(f64_max)
+UNIMPLEMENTED_FP_BINOP(f64_copysign)
UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_ceil)
@@ -217,6 +219,10 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
+ BAILOUT("i32_shr");
+}
+
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
@@ -246,6 +252,11 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister lhs,
+ int amount) {
+ BAILOUT("i64_shr");
+}
+
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
BAILOUT("emit_i32_to_intptr");
@@ -261,6 +272,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ BAILOUT("emit_i32_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ BAILOUT("emit_i32_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_i64_signextend_i32");
+}
+
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
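
Nearly every operation in the ppc and s390 ports is stubbed out this way. A BAILOUT records a reason and abandons the Liftoff (baseline) attempt, after which the function is compiled by TurboFan instead; a toy model of that contract (not V8 code):

    // First bailout wins; a bailed-out unit never uses its baseline result
    // and the function is routed to the optimizing tier instead.
    struct BaselineUnit {
      const char* bailout_reason = nullptr;
      void Bailout(const char* reason) {
        if (bailout_reason == nullptr) bailout_reason = reason;
      }
      bool ok() const { return bailout_reason == nullptr; }
    };

So on these architectures the new shift-by-immediate overloads and sign-extension operators still work correctly, just without a baseline fast path.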
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index f6a8e09b4e..6805e19a76 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -23,9 +23,11 @@ namespace wasm {
namespace liftoff {
+constexpr Register kScratchRegister2 = r11;
+static_assert(kScratchRegister != kScratchRegister2, "collision");
static_assert((kLiftoffAssemblerGpCacheRegs &
- Register::ListOf<kScratchRegister>()) == 0,
- "scratch register must not be used as cache registers");
+ Register::ListOf<kScratchRegister, kScratchRegister2>()) == 0,
+ "scratch registers must not be used as cache registers");
constexpr DoubleRegister kScratchDoubleReg2 = xmm14;
static_assert(kScratchDoubleReg != kScratchDoubleReg2, "collision");
@@ -619,6 +621,12 @@ void LiftoffAssembler::emit_i32_shr(Register dst, Register src, Register amount,
&Assembler::shrl_cl, pinned);
}
+void LiftoffAssembler::emit_i32_shr(Register dst, Register src, int amount) {
+ if (dst != src) movl(dst, src);
+ DCHECK(is_uint5(amount));
+ shrl(dst, Immediate(amount));
+}
+
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
Label nonzero_input;
Label continuation;
@@ -756,6 +764,13 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&Assembler::shrq_cl, pinned);
}
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
+ int amount) {
+ if (dst.gp() != src.gp()) movq(dst.gp(), src.gp());
+ DCHECK(is_uint6(amount));
+ shrq(dst.gp(), Immediate(amount));
+}
+
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
movsxlq(dst, src);
}
@@ -885,6 +900,17 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
liftoff::MinOrMax::kMax);
}
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ static constexpr int kF32SignBit = 1 << 31;
+ Movd(kScratchRegister, lhs);
+ andl(kScratchRegister, Immediate(~kF32SignBit));
+ Movd(liftoff::kScratchRegister2, rhs);
+ andl(liftoff::kScratchRegister2, Immediate(kF32SignBit));
+ orl(kScratchRegister, liftoff::kScratchRegister2);
+ Movd(dst, kScratchRegister);
+}
+
void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
static constexpr uint32_t kSignBit = uint32_t{1} << 31;
if (dst == src) {
@@ -994,6 +1020,20 @@ void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
liftoff::MinOrMax::kMin);
}
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ // Extract sign bit from {rhs} into {kScratchRegister2}.
+ Movq(liftoff::kScratchRegister2, rhs);
+ shrq(liftoff::kScratchRegister2, Immediate(63));
+ shlq(liftoff::kScratchRegister2, Immediate(63));
+ // Reset sign bit of {lhs} (in {kScratchRegister}).
+ Movq(kScratchRegister, lhs);
+ btrq(kScratchRegister, Immediate(63));
+ // Combine both values into {kScratchRegister} and move into {dst}.
+ orq(kScratchRegister, liftoff::kScratchRegister2);
+ Movq(dst, kScratchRegister);
+}
+
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
liftoff::EmitFloatMinOrMax<double>(this, dst, lhs, rhs,
@@ -1213,6 +1253,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
}
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ movsxbl(dst, src);
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ movsxwl(dst, src);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ movsxbq(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ movsxwq(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ movsxlq(dst.gp(), src.gp());
+}
+
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
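
Both copysign implementations above are sign-bit surgery on the raw bit pattern in integer registers. The same computation in portable C++ (a sketch mirroring the f64 shrq/shlq/btrq/orq sequence, not V8 code):

    #include <cstdint>
    #include <cstring>

    // Result has the magnitude of lhs and the sign of rhs.
    double F64CopySign(double lhs, double rhs) {
      uint64_t l, r;
      std::memcpy(&l, &lhs, sizeof(l));  // type-pun via memcpy, no UB
      std::memcpy(&r, &rhs, sizeof(r));
      const uint64_t kSignBit = uint64_t{1} << 63;
      const uint64_t bits = (l & ~kSignBit) | (r & kSignBit);
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;
    }

In the assembly, btrq clears bit 63 of lhs while the shrq/shlq pair isolates bit 63 of rhs; orq then merges the two halves.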
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 3dd9aff9c6..74955f9ede 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -56,7 +56,7 @@ class Decoder {
DCHECK_EQ(static_cast<uint32_t>(end - start), end - start);
}
- virtual ~Decoder() {}
+ virtual ~Decoder() = default;
inline bool validate_size(const byte* pc, uint32_t length, const char* msg) {
DCHECK_LE(start_, pc);
@@ -375,27 +375,6 @@ class Decoder {
}
};
-// Reference to a string in the wire bytes.
-class WireBytesRef {
- public:
- WireBytesRef() : WireBytesRef(0, 0) {}
- WireBytesRef(uint32_t offset, uint32_t length)
- : offset_(offset), length_(length) {
- DCHECK_IMPLIES(offset_ == 0, length_ == 0);
- DCHECK_LE(offset_, offset_ + length_); // no uint32_t overflow.
- }
-
- uint32_t offset() const { return offset_; }
- uint32_t length() const { return length_; }
- uint32_t end_offset() const { return offset_ + length_; }
- bool is_empty() const { return length_ == 0; }
- bool is_set() const { return offset_ != 0; }
-
- private:
- uint32_t offset_;
- uint32_t length_;
-};
-
#undef TRACE
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 3e0a0da46e..0e49ee7e97 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -487,7 +487,8 @@ enum ControlKind : uint8_t {
kControlBlock,
kControlLoop,
kControlTry,
- kControlTryCatch
+ kControlTryCatch,
+ kControlTryCatchAll
};
enum Reachability : uint8_t {
@@ -534,9 +535,12 @@ struct ControlBase {
bool is_if_else() const { return kind == kControlIfElse; }
bool is_block() const { return kind == kControlBlock; }
bool is_loop() const { return kind == kControlLoop; }
- bool is_try() const { return is_incomplete_try() || is_try_catch(); }
bool is_incomplete_try() const { return kind == kControlTry; }
bool is_try_catch() const { return kind == kControlTryCatch; }
+ bool is_try_catchall() const { return kind == kControlTryCatchAll; }
+ bool is_try() const {
+ return is_incomplete_try() || is_try_catch() || is_try_catchall();
+ }
inline Merge<Value>* br_merge() {
return is_loop() ? &this->start_merge : &this->end_merge;
@@ -737,6 +741,13 @@ class WasmDecoder : public Decoder {
}
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
+ case kLocalExceptRef:
+ if (enabled.eh) {
+ type = kWasmExceptRef;
+ break;
+ }
+ decoder->error(decoder->pc() - 1, "invalid local type");
+ return false;
case kLocalS128:
if (enabled.simd) {
type = kWasmS128;
@@ -776,7 +787,7 @@ class WasmDecoder : public Decoder {
break;
case kExprSetLocal: // fallthru
case kExprTeeLocal: {
- LocalIndexImmediate<Decoder::kValidate> imm(decoder, pc);
+ LocalIndexImmediate<validate> imm(decoder, pc);
if (assigned->length() > 0 &&
imm.index < static_cast<uint32_t>(assigned->length())) {
// Unverified code might have an out-of-bounds index.
@@ -806,8 +817,7 @@ class WasmDecoder : public Decoder {
return VALIDATE(decoder->ok()) ? assigned : nullptr;
}
- inline bool Validate(const byte* pc,
- LocalIndexImmediate<Decoder::kValidate>& imm) {
+ inline bool Validate(const byte* pc, LocalIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < total_locals())) {
errorf(pc + 1, "invalid local index: %u", imm.index);
return false;
@@ -1034,7 +1044,7 @@ class WasmDecoder : public Decoder {
case kExprSetLocal:
case kExprTeeLocal:
case kExprGetLocal: {
- LocalIndexImmediate<Decoder::kValidate> imm(decoder, pc);
+ LocalIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
case kExprBrTable: {
@@ -1466,7 +1476,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprThrow: {
CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexImmediate<Decoder::kValidate> imm(this, this->pc_);
+ ExceptionIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
PopArgs(imm.exception->ToFunctionSig());
@@ -1490,7 +1500,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprCatch: {
// TODO(kschimpf): Fix to use type signature of exception.
CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexImmediate<Decoder::kValidate> imm(this, this->pc_);
+ ExceptionIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
@@ -1524,9 +1534,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprCatchAll: {
- // TODO(kschimpf): Implement.
CHECK_PROTOTYPE_OPCODE(eh);
- OPCODE_ERROR(opcode, "not implemented yet");
+ if (!VALIDATE(!control_.empty())) {
+ this->error("catch-all does not match any try");
+ break;
+ }
+ Control* c = &control_.back();
+ if (!VALIDATE(c->is_try())) {
+ this->error("catch-all does not match any try");
+ break;
+ }
+ if (!VALIDATE(!c->is_try_catchall())) {
+ this->error("catch-all already present for try");
+ break;
+ }
+ c->kind = kControlTryCatchAll;
+ // TODO(mstarzinger): Implement control flow for catch-all.
break;
}
case kExprLoop: {
@@ -1581,7 +1604,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
Control* c = &control_.back();
if (!VALIDATE(!c->is_incomplete_try())) {
- this->error(this->pc_, "missing catch in try");
+ this->error(this->pc_, "missing catch or catch-all in try");
break;
}
if (c->is_onearmed_if()) {
@@ -1742,7 +1765,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprGetLocal: {
- LocalIndexImmediate<Decoder::kValidate> imm(this, this->pc_);
+ LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
auto* value = Push(imm.type);
CALL_INTERFACE_IF_REACHABLE(GetLocal, value, imm);
@@ -1750,7 +1773,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprSetLocal: {
- LocalIndexImmediate<Decoder::kValidate> imm(this, this->pc_);
+ LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
auto value = Pop(0, local_type_vec_[imm.index]);
CALL_INTERFACE_IF_REACHABLE(SetLocal, value, imm);
@@ -1758,7 +1781,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprTeeLocal: {
- LocalIndexImmediate<Decoder::kValidate> imm(this, this->pc_);
+ LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
auto value = Pop(0, local_type_vec_[imm.index]);
auto* result = Push(value.type);
@@ -2445,7 +2468,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return true;
}
- virtual void onFirstError() {
+ void onFirstError() override {
this->end_ = this->pc_; // Terminate decoding loop.
TRACE(" !%s\n", this->error_msg_.c_str());
CALL_INTERFACE(OnFirstError);
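
The decoder now tracks catch-all with its own control kind, though code generation for it is still pending (see the TODO above). A toy model of the transition rule the validator enforces, matching its error cases (not V8 code):

    #include <cassert>

    enum ControlKind { kControlTry, kControlTryCatch, kControlTryCatchAll };

    // catch_all is legal on an open try (with or without a preceding catch),
    // but at most once per try.
    bool OnCatchAll(ControlKind* kind) {
      if (*kind == kControlTryCatchAll) return false;  // "already present"
      *kind = kControlTryCatchAll;
      return true;
    }

    int main() {
      ControlKind c = kControlTry;
      assert(OnCatchAll(&c));   // try -> try/catch_all: accepted
      assert(!OnCatchAll(&c));  // second catch_all: rejected
    }

A try closed without any handler now reports "missing catch or catch-all in try".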
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index beb8716d9a..2c5ea465cc 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -37,7 +37,7 @@ struct SsaEnv {
compiler::WasmInstanceCacheNodes instance_cache;
TFNode** locals;
- bool go() { return state >= kReached; }
+ bool reached() const { return state >= kReached; }
void Kill(State new_state = kControlEnd) {
state = new_state;
locals = nullptr;
@@ -52,7 +52,7 @@ struct SsaEnv {
#define BUILD(func, ...) \
([&] { \
- DCHECK(ssa_env_->go()); \
+ DCHECK(ssa_env_->reached()); \
DCHECK(decoder->ok()); \
return CheckForException(decoder, builder_->func(__VA_ARGS__)); \
})()
@@ -99,6 +99,12 @@ class WasmGraphBuildingInterface {
// instance parameter.
TFNode* start = builder_->Start(
static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
+ ssa_env->effect = start;
+ ssa_env->control = start;
+ // Initialize effect and control before initializing the locals default
+ // values (which might require instance loads) or loading the context.
+ builder_->set_effect_ptr(&ssa_env->effect);
+ builder_->set_control_ptr(&ssa_env->control);
// Initialize the instance parameter (index 0).
builder_->set_instance_node(builder_->Param(kWasmInstanceParameterIndex));
// Initialize local variables. Parameters are shifted by 1 because of the
@@ -115,18 +121,13 @@ class WasmGraphBuildingInterface {
ssa_env->locals[index++] = node;
}
}
- ssa_env->effect = start;
- ssa_env->control = start;
- // Initialize effect and control before loading the context.
- builder_->set_effect_ptr(&ssa_env->effect);
- builder_->set_control_ptr(&ssa_env->control);
LoadContextIntoSsa(ssa_env);
SetEnv(ssa_env);
}
// Reload the instance cache entries into the Ssa Environment.
void LoadContextIntoSsa(SsaEnv* ssa_env) {
- if (!ssa_env || !ssa_env->go()) return;
+ if (!ssa_env || !ssa_env->reached()) return;
builder_->InitInstanceCache(&ssa_env->instance_cache);
}
@@ -180,7 +181,9 @@ class WasmGraphBuildingInterface {
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
- if (ssa_env_->go()) BUILD(BranchNoHint, cond.node, &if_true, &if_false);
+ if (ssa_env_->reached()) {
+ BUILD(BranchNoHint, cond.node, &if_true, &if_false);
+ }
SsaEnv* end_env = ssa_env_;
SsaEnv* false_env = Split(decoder, ssa_env_);
false_env->control = if_false;
@@ -427,55 +430,46 @@ class WasmGraphBuildingInterface {
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
+ TFNode* exception = block->try_info->exception;
current_catch_ = block->previous_catch;
SsaEnv* catch_env = block->try_info->catch_env;
SetEnv(catch_env);
- TFNode* compare_i32 = nullptr;
- if (block->try_info->exception == nullptr) {
- // Catch not applicable, no possible throws in the try
- // block. Create dummy code so that body of catch still
- // compiles. Note: This only happens because the current
- // implementation only builds a landing pad if some node in the
- // try block can (possibly) throw.
- //
- // TODO(kschimpf): Always generate a landing pad for a try block.
- compare_i32 = BUILD(Int32Constant, 0);
- } else {
- // Get the exception and see if wanted exception.
- TFNode* caught_tag = BUILD(GetExceptionRuntimeId);
- TFNode* exception_tag = BUILD(ConvertExceptionTagToRuntimeId, imm.index);
- compare_i32 = BUILD(Binop, kExprI32Eq, caught_tag, exception_tag);
- }
+ // The catch block is unreachable if the try block contains no possible
+ // throws. We only build a landing pad if some node in the try block can
+ // (possibly) throw; otherwise the catch environments below remain empty.
+ DCHECK_EQ(exception != nullptr, ssa_env_->reached());
TFNode* if_catch = nullptr;
TFNode* if_no_catch = nullptr;
- BUILD(BranchNoHint, compare_i32, &if_catch, &if_no_catch);
+ if (exception != nullptr) {
+ // Get the exception tag and see if it matches the expected one.
+ TFNode* caught_tag = BUILD(GetExceptionTag, exception);
+ TFNode* exception_tag = BUILD(LoadExceptionTagFromTable, imm.index);
+ TFNode* compare = BUILD(ExceptionTagEqual, caught_tag, exception_tag);
+ BUILD(BranchNoHint, compare, &if_catch, &if_no_catch);
+ }
SsaEnv* if_no_catch_env = Split(decoder, ssa_env_);
if_no_catch_env->control = if_no_catch;
SsaEnv* if_catch_env = Steal(decoder->zone(), ssa_env_);
if_catch_env->control = if_catch;
- // TODO(kschimpf): Generalize to allow more catches. Will force
- // moving no_catch code to END opcode.
SetEnv(if_no_catch_env);
- BUILD(Rethrow);
- Unreachable(decoder);
- EndControl(decoder, block);
+ if (exception != nullptr) {
+ // TODO(kschimpf): Generalize to allow more catches. Will force
+ // moving no_catch code to END opcode.
+ BUILD(Rethrow, exception);
+ Unreachable(decoder);
+ EndControl(decoder, block);
+ }
SetEnv(if_catch_env);
-
- if (block->try_info->exception == nullptr) {
- // No caught value, make up filler nodes so that catch block still
- // compiles.
- for (Value& value : values) {
- value.node = DefaultValue(value.type);
- }
- } else {
+ if (exception != nullptr) {
// TODO(kschimpf): Can't use BUILD() here, GetExceptionValues() returns
// TFNode** rather than TFNode*. Fix to add landing pads.
- TFNode** caught_values = builder_->GetExceptionValues(imm.exception);
+ TFNode** caught_values =
+ builder_->GetExceptionValues(exception, imm.exception);
for (size_t i = 0, e = values.size(); i < e; ++i) {
values[i].node = caught_values[i];
}
@@ -594,6 +588,9 @@ class WasmGraphBuildingInterface {
return builder_->Float64Constant(0);
case kWasmS128:
return builder_->S128Zero();
+ case kWasmAnyRef:
+ case kWasmExceptRef:
+ return builder_->RefNull();
default:
UNREACHABLE();
}
@@ -601,7 +598,7 @@ class WasmGraphBuildingInterface {
void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- if (!ssa_env_->go()) return;
+ if (!ssa_env_->reached()) return;
SsaEnv* target = c->end_env;
const bool first = target->state == SsaEnv::kUnreachable;
@@ -624,7 +621,7 @@ class WasmGraphBuildingInterface {
void Goto(FullDecoder* decoder, SsaEnv* from, SsaEnv* to) {
DCHECK_NOT_NULL(to);
- if (!from->go()) return;
+ if (!from->reached()) return;
switch (to->state) {
case SsaEnv::kUnreachable: { // Overwrite destination.
to->state = SsaEnv::kReached;
@@ -685,7 +682,7 @@ class WasmGraphBuildingInterface {
}
SsaEnv* PrepareForLoop(FullDecoder* decoder, SsaEnv* env) {
- if (!env->go()) return Split(decoder, env);
+ if (!env->reached()) return Split(decoder, env);
env->state = SsaEnv::kMerged;
env->control = builder_->Loop(env->control);
@@ -739,7 +736,7 @@ class WasmGraphBuildingInterface {
result->control = from->control;
result->effect = from->effect;
- if (from->go()) {
+ if (from->reached()) {
result->state = SsaEnv::kReached;
result->locals =
size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
@@ -759,7 +756,7 @@ class WasmGraphBuildingInterface {
// unreachable.
SsaEnv* Steal(Zone* zone, SsaEnv* from) {
DCHECK_NOT_NULL(from);
- if (!from->go()) return UnreachableEnv(zone);
+ if (!from->reached()) return UnreachableEnv(zone);
SsaEnv* result = reinterpret_cast<SsaEnv*>(zone->New(sizeof(SsaEnv)));
result->state = SsaEnv::kReached;
result->locals = from->locals;
@@ -791,11 +788,9 @@ class WasmGraphBuildingInterface {
arg_nodes[i + 1] = args[i].node;
}
if (index_node) {
- builder_->CallIndirect(index, arg_nodes, &return_nodes,
- decoder->position());
+ BUILD(CallIndirect, index, arg_nodes, &return_nodes, decoder->position());
} else {
- builder_->CallDirect(index, arg_nodes, &return_nodes,
- decoder->position());
+ BUILD(CallDirect, index, arg_nodes, &return_nodes, decoder->position());
}
int return_count = static_cast<int>(sig->return_count());
for (int i = 0; i < return_count; ++i) {
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index c4209d8c9c..4cec770ecc 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -45,13 +45,11 @@ ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier() {
WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine,
ModuleEnv* env,
NativeModule* native_module,
- FunctionBody body, WasmName name,
- int index, Counters* counters,
- ExecutionTier mode)
+ FunctionBody body, int index,
+ Counters* counters, ExecutionTier mode)
: env_(env),
wasm_engine_(wasm_engine),
func_body_(body),
- func_name_(name),
counters_(counters),
func_index_(index),
native_module_(native_module),
@@ -71,7 +69,7 @@ WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine,
// Declared here such that {LiftoffCompilationUnit} and
// {TurbofanWasmCompilationUnit} can be opaque in the header file.
-WasmCompilationUnit::~WasmCompilationUnit() {}
+WasmCompilationUnit::~WasmCompilationUnit() = default;
void WasmCompilationUnit::ExecuteCompilation(WasmFeatures* detected) {
auto size_histogram = SELECT_WASM_COUNTER(counters_, env_->module->origin,
@@ -155,7 +153,6 @@ WasmCode* WasmCompilationUnit::CompileWasmFunction(
WasmCompilationUnit unit(isolate->wasm_engine(), env, native_module,
function_body,
- wire_bytes.GetNameOrNull(function, env->module),
function->func_index, isolate->counters(), mode);
unit.ExecuteCompilation(detected);
return unit.FinishCompilation(thrower);
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index 7e19f4d12f..b821f1f64d 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -86,7 +86,7 @@ class WasmCompilationUnit final {
// If used exclusively from a foreground thread, Isolate::counters() may be
// used by callers to pass Counters.
WasmCompilationUnit(WasmEngine* wasm_engine, ModuleEnv*, NativeModule*,
- FunctionBody, WasmName, int index, Counters*,
+ FunctionBody, int index, Counters*,
ExecutionTier = GetDefaultExecutionTier());
~WasmCompilationUnit();
@@ -109,7 +109,6 @@ class WasmCompilationUnit final {
ModuleEnv* env_;
WasmEngine* wasm_engine_;
FunctionBody func_body_;
- WasmName func_name_;
Counters* counters_;
int func_index_;
NativeModule* native_module_;
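
With the function name gone from WasmCompilationUnit, names are now looked up on demand for tracing and error reporting (note GetNameOrNull above) instead of being threaded through every unit. The new constructor shape at a call site (a sketch against V8-internal types, mirroring CompileWasmFunction above):

    WasmCompilationUnit unit(isolate->wasm_engine(), env, native_module,
                             function_body, func_index, isolate->counters(),
                             ExecutionTier::kBaseline);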
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 892a4e980e..61ec6bc32a 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -180,23 +180,15 @@ void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
class JSToWasmWrapperCache {
public:
- Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate,
- const NativeModule* native_module,
- uint32_t func_index,
- UseTrapHandler use_trap_handler) {
- const WasmModule* module = native_module->module();
- const WasmFunction* func = &module->functions[func_index];
- bool is_import = func_index < module->num_imported_functions;
- std::pair<bool, FunctionSig> key(is_import, *func->sig);
+ Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
+ bool is_import) {
+ std::pair<bool, FunctionSig> key(is_import, *sig);
Handle<Code>& cached = cache_[key];
- if (!cached.is_null()) return cached;
-
- Handle<Code> code =
- compiler::CompileJSToWasmWrapper(isolate, native_module, func->sig,
- is_import, use_trap_handler)
- .ToHandleChecked();
- cached = code;
- return code;
+ if (cached.is_null()) {
+ cached = compiler::CompileJSToWasmWrapper(isolate, sig, is_import)
+ .ToHandleChecked();
+ }
+ return cached;
}
private:
@@ -245,6 +237,7 @@ class InstanceBuilder {
Handle<JSArrayBuffer> globals_;
std::vector<TableInstance> table_instances_;
std::vector<Handle<JSFunction>> js_wrappers_;
+ std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
Handle<WasmExportedFunction> start_function_;
JSToWasmWrapperCache js_to_wasm_cache_;
std::vector<SanitizedImport> sanitized_imports_;
@@ -324,6 +317,10 @@ class InstanceBuilder {
void InitializeTables(Handle<WasmInstanceObject> instance);
void LoadTableSegments(Handle<WasmInstanceObject> instance);
+
+ // Creates new exception tags for all exceptions. Note that some tags might
+ // already exist if they were imported; those tags will be re-used.
+ void InitializeExceptions(Handle<WasmInstanceObject> instance);
};
} // namespace
@@ -337,6 +334,7 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
if (!instance.is_null() && builder.ExecuteStartFunction()) {
return instance;
}
+ DCHECK(isolate->has_pending_exception() || thrower->error());
return {};
}
@@ -348,18 +346,8 @@ WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
compilation_timer.Start();
ModuleEnv* module_env = native_module->compilation_state()->module_env();
- // TODO(wasm): Refactor this to only get the name if it is really needed for
- // tracing / debugging.
- WasmName func_name;
- {
- ModuleWireBytes wire_bytes(native_module->wire_bytes());
- WireBytesRef name_ref =
- module_env->module->LookupFunctionName(wire_bytes, func_index);
- func_name = wire_bytes.GetName(name_ref);
- }
- TRACE_LAZY("Compiling function '%.*s' (#%d).\n", func_name.length(),
- func_name.start(), func_index);
+ TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
const uint8_t* module_start = native_module->wire_bytes().start();
@@ -370,7 +358,7 @@ WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
ErrorThrower thrower(isolate, "WasmLazyCompile");
WasmCompilationUnit unit(isolate->wasm_engine(), module_env, native_module,
- body, func_name, func_index, isolate->counters());
+ body, func_index, isolate->counters());
unit.ExecuteCompilation(
native_module->compilation_state()->detected_features());
WasmCode* wasm_code = unit.FinishCompilation(&thrower);
@@ -414,42 +402,6 @@ Address CompileLazy(Isolate* isolate, NativeModule* native_module,
return result->instruction_start();
}
-namespace {
-bool compile_lazy(const WasmModule* module) {
- return FLAG_wasm_lazy_compilation ||
- (FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
-}
-
-byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
- return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
-}
-
-void RecordStats(const Code* code, Counters* counters) {
- counters->wasm_generated_code_size()->Increment(code->body_size());
- counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
-}
-
-bool in_bounds(uint32_t offset, size_t size, size_t upper) {
- return offset + size <= upper && offset + size >= offset;
-}
-
-using WasmInstanceMap =
- IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
-
-double MonotonicallyIncreasingTimeInMs() {
- return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
- base::Time::kMillisecondsPerSecond;
-}
-
-ModuleEnv CreateDefaultModuleEnv(const WasmModule* module,
- bool allow_trap_handler = true) {
- UseTrapHandler use_trap_handler =
- trap_handler::IsTrapHandlerEnabled() && allow_trap_handler
- ? kUseTrapHandler
- : kNoTrapHandler;
- return ModuleEnv(module, use_trap_handler, kRuntimeExceptionSupport);
-}
-
// The CompilationUnitBuilder builds compilation units and stores them in an
// internal buffer. The buffer is moved into the working queue of the
// CompilationState when {Commit} is called.
@@ -460,17 +412,17 @@ class CompilationUnitBuilder {
compilation_state_(native_module->compilation_state()) {}
void AddUnit(const WasmFunction* function, uint32_t buffer_offset,
- Vector<const uint8_t> bytes, WasmName name) {
+ Vector<const uint8_t> bytes) {
switch (compilation_state_->compile_mode()) {
case CompileMode::kTiering:
- tiering_units_.emplace_back(CreateUnit(
- function, buffer_offset, bytes, name, ExecutionTier::kOptimized));
- baseline_units_.emplace_back(CreateUnit(
- function, buffer_offset, bytes, name, ExecutionTier::kBaseline));
+ tiering_units_.emplace_back(CreateUnit(function, buffer_offset, bytes,
+ ExecutionTier::kOptimized));
+ baseline_units_.emplace_back(CreateUnit(function, buffer_offset, bytes,
+ ExecutionTier::kBaseline));
return;
case CompileMode::kRegular:
baseline_units_.emplace_back(
- CreateUnit(function, buffer_offset, bytes, name,
+ CreateUnit(function, buffer_offset, bytes,
WasmCompilationUnit::GetDefaultExecutionTier()));
return;
}
@@ -493,13 +445,12 @@ class CompilationUnitBuilder {
std::unique_ptr<WasmCompilationUnit> CreateUnit(const WasmFunction* function,
uint32_t buffer_offset,
Vector<const uint8_t> bytes,
- WasmName name,
ExecutionTier mode) {
return base::make_unique<WasmCompilationUnit>(
compilation_state_->wasm_engine(), compilation_state_->module_env(),
native_module_,
FunctionBody{function->sig, buffer_offset, bytes.begin(), bytes.end()},
- name, function->func_index,
+ function->func_index,
compilation_state_->isolate()->async_counters().get(), mode);
}
@@ -509,6 +460,42 @@ class CompilationUnitBuilder {
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_;
};
+namespace {
+bool compile_lazy(const WasmModule* module) {
+ return FLAG_wasm_lazy_compilation ||
+ (FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
+}
+
+byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
+ return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
+}
+
+void RecordStats(const Code* code, Counters* counters) {
+ counters->wasm_generated_code_size()->Increment(code->body_size());
+ counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
+}
+
+bool in_bounds(uint32_t offset, size_t size, size_t upper) {
+ return offset + size <= upper && offset + size >= offset;
+}
+
+using WasmInstanceMap =
+ IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
+
+double MonotonicallyIncreasingTimeInMs() {
+ return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+ base::Time::kMillisecondsPerSecond;
+}
+
+ModuleEnv CreateDefaultModuleEnv(const WasmModule* module,
+ bool allow_trap_handler = true) {
+ UseTrapHandler use_trap_handler =
+ trap_handler::IsTrapHandlerEnabled() && allow_trap_handler
+ ? kUseTrapHandler
+ : kNoTrapHandler;
+ return ModuleEnv(module, use_trap_handler, kRuntimeExceptionSupport);
+}
+
// Run by each compilation task and by the main thread (i.e. in both
// foreground and background threads). The no_finisher_callback is called
// within the result_mutex_ lock when no finishing task is running, i.e. when
@@ -545,9 +532,8 @@ void InitializeCompilationUnits(NativeModule* native_module) {
Vector<const uint8_t> bytes(wire_bytes.start() + func->code.offset(),
func->code.end_offset() - func->code.offset());
- WasmName name = wire_bytes.GetName(func, module);
DCHECK_NOT_NULL(native_module);
- builder.AddUnit(func, buffer_offset, bytes, name);
+ builder.AddUnit(func, buffer_offset, bytes);
}
builder.Commit();
}
@@ -680,7 +666,7 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module,
WasmCode* code = WasmCompilationUnit::CompileWasmFunction(
isolate, native_module, &detected, thrower, module_env, &func);
if (code == nullptr) {
- TruncatedUserString<> name(wire_bytes.GetName(&func, module));
+ TruncatedUserString<> name(wire_bytes.GetNameOrNull(&func, module));
thrower->CompileError("Compilation of #%d:%.*s failed.", i, name.length(),
name.start());
break;
@@ -716,7 +702,7 @@ void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
&detected, body);
}
if (result.failed()) {
- TruncatedUserString<> name(wire_bytes.GetName(&func, module));
+ TruncatedUserString<> name(wire_bytes.GetNameOrNull(&func, module));
thrower->CompileError("Compiling function #%d:%.*s failed: %s @+%u", i,
name.length(), name.start(),
result.error_msg().c_str(), result.error_offset());
@@ -890,7 +876,7 @@ MaybeHandle<WasmModuleObject> CompileToModuleObject(
Handle<Script> script;
Handle<ByteArray> asm_js_offset_table;
if (asm_js_script.is_null()) {
- script = CreateWasmScript(isolate, wire_bytes);
+ script = CreateWasmScript(isolate, wire_bytes, wasm_module->source_map_url);
} else {
script = asm_js_script;
asm_js_offset_table =
@@ -1001,7 +987,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// (e.g. https://crbug.com/769637).
// Allocate memory if the initial size is more than 0 pages.
memory_ = AllocateMemory(initial_pages);
- if (memory_.is_null()) return {}; // failed to allocate memory
+ if (memory_.is_null()) {
+ // failed to allocate memory
+ DCHECK(isolate_->has_pending_exception() || thrower_->error());
+ return {};
+ }
}
//--------------------------------------------------------------------------
@@ -1015,6 +1005,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (!memory_tracker->HasFullGuardRegions(
memory_.ToHandleChecked()->backing_store())) {
if (!FLAG_wasm_trap_handler_fallback) {
+ thrower_->LinkError(
+ "Provided memory is lacking guard regions but fallback was "
+ "disabled.");
return {};
}
@@ -1047,7 +1040,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Set up the globals for the new instance.
//--------------------------------------------------------------------------
- MaybeHandle<JSArrayBuffer> old_globals;
uint32_t globals_buffer_size = module_->globals_buffer_size;
if (globals_buffer_size > 0) {
void* backing_store =
@@ -1085,6 +1077,17 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
+ // Set up the exception table used for exception tag checks.
+ //--------------------------------------------------------------------------
+ int exceptions_count = static_cast<int>(module_->exceptions.size());
+ if (exceptions_count > 0) {
+ Handle<FixedArray> exception_table =
+ isolate_->factory()->NewFixedArray(exceptions_count, TENURED);
+ instance->set_exceptions_table(*exception_table);
+ exception_wrappers_.resize(exceptions_count);
+ }
+
+ //--------------------------------------------------------------------------
// Reserve the metadata for indirect function tables.
//--------------------------------------------------------------------------
int table_count = static_cast<int>(module_->tables.size());
@@ -1109,6 +1112,13 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
+ // Initialize the exceptions table.
+ //--------------------------------------------------------------------------
+ if (exceptions_count > 0) {
+ InitializeExceptions(instance);
+ }
+
+ //--------------------------------------------------------------------------
// Create the WebAssembly.Memory object.
//--------------------------------------------------------------------------
if (module_->has_memory) {
@@ -1127,7 +1137,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (!memory_.is_null()) {
// Double-check the {memory} array buffer matches the instance.
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- CHECK_EQ(instance->memory_size(), memory->byte_length()->Number());
+ CHECK_EQ(instance->memory_size(), memory->byte_length());
CHECK_EQ(instance->memory_start(), memory->backing_store());
}
}
@@ -1177,11 +1187,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Install a finalizer on the new instance object.
- //--------------------------------------------------------------------------
- WasmInstanceObject::InstallFinalizer(isolate_, instance);
-
- //--------------------------------------------------------------------------
// Debugging support.
//--------------------------------------------------------------------------
// Set all breakpoints that were set on the shared module.
@@ -1206,14 +1211,14 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
if (module_->start_function_index >= 0) {
int start_index = module_->start_function_index;
- FunctionSig* sig = module_->functions[start_index].sig;
+ auto& function = module_->functions[start_index];
Handle<Code> wrapper_code = js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
- isolate_, native_module, start_index, use_trap_handler());
+ isolate_, function.sig, function.imported);
// TODO(clemensh): Don't generate an exported function for the start
// function. Use CWasmEntry instead.
start_function_ = WasmExportedFunction::New(
isolate_, instance, MaybeHandle<String>(), start_index,
- static_cast<int>(sig->parameter_count()), wrapper_code);
+ static_cast<int>(function.sig->parameter_count()), wrapper_code);
}
DCHECK(!isolate_->has_pending_exception());
@@ -1489,41 +1494,41 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
}
uint32_t func_index = import.index;
DCHECK_EQ(num_imported_functions, func_index);
+ auto js_receiver = Handle<JSReceiver>::cast(value);
FunctionSig* expected_sig = module_->functions[func_index].sig;
- if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
- // The imported function is a WASM function from another instance.
- Handle<WasmExportedFunction> imported_function(
- WasmExportedFunction::cast(*value), isolate_);
- Handle<WasmInstanceObject> imported_instance(
- imported_function->instance(), isolate_);
- FunctionSig* imported_sig =
- imported_instance->module()
- ->functions[imported_function->function_index()]
- .sig;
- if (*imported_sig != *expected_sig) {
+ auto kind = compiler::GetWasmImportCallKind(js_receiver, expected_sig);
+ switch (kind) {
+ case compiler::WasmImportCallKind::kLinkError:
ReportLinkError(
"imported function does not match the expected type", index,
module_name, import_name);
return -1;
+ case compiler::WasmImportCallKind::kWasmToWasm: {
+ // The imported function is a WASM function from another instance.
+ auto imported_function = Handle<WasmExportedFunction>::cast(value);
+ Handle<WasmInstanceObject> imported_instance(
+ imported_function->instance(), isolate_);
+ // The import reference is the instance object itself.
+ Address imported_target = imported_function->GetWasmCallTarget();
+ ImportedFunctionEntry entry(instance, func_index);
+ entry.set_wasm_to_wasm(*imported_instance, imported_target);
+ break;
+ }
+ default: {
+ // The imported function is a callable.
+ Handle<Code> wrapper_code =
+ compiler::CompileWasmImportCallWrapper(
+ isolate_, kind, expected_sig, func_index, module_->origin,
+ use_trap_handler())
+ .ToHandleChecked();
+ RecordStats(*wrapper_code, isolate_->counters());
+
+ WasmCode* wasm_code =
+ native_module->AddImportWrapper(wrapper_code, func_index);
+ ImportedFunctionEntry entry(instance, func_index);
+ entry.set_wasm_to_js(*js_receiver, wasm_code);
+ break;
}
- // The import reference is the instance object itself.
- Address imported_target = imported_function->GetWasmCallTarget();
- ImportedFunctionEntry entry(instance, func_index);
- entry.set_wasm_to_wasm(*imported_instance, imported_target);
- } else {
- // The imported function is a callable.
- Handle<JSReceiver> js_receiver(JSReceiver::cast(*value), isolate_);
- Handle<Code> wrapper_code =
- compiler::CompileWasmToJSWrapper(
- isolate_, js_receiver, expected_sig, func_index,
- module_->origin, use_trap_handler())
- .ToHandleChecked();
- RecordStats(*wrapper_code, isolate_->counters());
-
- WasmCode* wasm_code = native_module->AddCodeCopy(
- wrapper_code, WasmCode::kWasmToJsWrapper, func_index);
- ImportedFunctionEntry entry(instance, func_index);
- entry.set_wasm_to_js(*js_receiver, wasm_code);
}
num_imported_functions++;
break;
@@ -1619,8 +1624,8 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
// memory_ should have already been assigned in Build().
DCHECK_EQ(*memory_.ToHandleChecked(), *buffer);
- uint32_t imported_cur_pages = static_cast<uint32_t>(
- buffer->byte_length()->Number() / kWasmPageSize);
+ uint32_t imported_cur_pages =
+ static_cast<uint32_t>(buffer->byte_length() / kWasmPageSize);
if (imported_cur_pages < module_->initial_pages) {
thrower_->LinkError(
"memory import %d is smaller than initial %u, got %u", index,
@@ -1740,6 +1745,26 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
}
break;
}
+ case kExternalException: {
+ if (!value->IsWasmExceptionObject()) {
+ ReportLinkError("exception import requires a WebAssembly.Exception",
+ index, module_name, import_name);
+ return -1;
+ }
+ Handle<WasmExceptionObject> imported_exception =
+ Handle<WasmExceptionObject>::cast(value);
+ if (!imported_exception->IsSignatureEqual(
+ module_->exceptions[import.index].sig)) {
+ ReportLinkError("imported exception does not match the expected type",
+ index, module_name, import_name);
+ return -1;
+ }
+ Object* exception_tag = imported_exception->exception_tag();
+ DCHECK(instance->exceptions_table()->get(import.index)->IsUndefined());
+ instance->exceptions_table()->set(import.index, exception_tag);
+ exception_wrappers_[import.index] = imported_exception;
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -1833,7 +1858,7 @@ bool InstanceBuilder::NeedsWrappers() const {
}
// Process the exports, creating wrappers for functions, tables, memories,
-// and globals.
+// globals, and exceptions.
void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<FixedArray> export_wrappers(module_object_->export_wrappers(),
isolate_);
@@ -1964,9 +1989,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Address global_addr =
instance->imported_mutable_globals()[global.index];
- uint32_t buffer_size = 0;
- CHECK(buffer->byte_length()->ToUint32(&buffer_size));
-
+ size_t buffer_size = buffer->byte_length();
Address backing_store =
reinterpret_cast<Address>(buffer->backing_store());
CHECK(global_addr >= backing_store &&
@@ -2011,6 +2034,20 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
break;
}
+ case kExternalException: {
+ const WasmException& exception = module_->exceptions[exp.index];
+ Handle<WasmExceptionObject> wrapper = exception_wrappers_[exp.index];
+ if (wrapper.is_null()) {
+ Handle<HeapObject> exception_tag(
+ HeapObject::cast(instance->exceptions_table()->get(exp.index)),
+ isolate_);
+ wrapper =
+ WasmExceptionObject::New(isolate_, exception.sig, exception_tag);
+ exception_wrappers_[exp.index] = wrapper;
+ }
+ desc.set_value(wrapper);
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -2090,7 +2127,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
Handle<Code> wrapper_code =
js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
- isolate_, native_module, func_index, use_trap_handler());
+ isolate_, function->sig, function->imported);
MaybeHandle<String> func_name;
if (module_->origin == kAsmJsOrigin) {
// For modules arising from asm.js, honor the names section.
@@ -2127,6 +2164,20 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
}
}
+void InstanceBuilder::InitializeExceptions(
+ Handle<WasmInstanceObject> instance) {
+ Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate_);
+ for (int index = 0; index < exceptions_table->length(); ++index) {
+ if (!exceptions_table->get(index)->IsUndefined(isolate_)) continue;
+ // TODO(mstarzinger): Tags provide an object identity for each exception;
+ // using {JSObject} here is a gigantic hack and we should use a dedicated
+ // object with a much lighter footprint for this purpose.
+ Handle<HeapObject> exception_tag =
+ isolate_->factory()->NewJSObjectWithNullProto();
+ exceptions_table->set(index, *exception_tag);
+ }
+}
+
AsyncCompileJob::AsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
@@ -2218,6 +2269,12 @@ void AsyncCompileJob::FinishCompile() {
DCHECK_NOT_NULL(isolate_->context());
// Finish the wasm script now and make it public to the debugger.
Handle<Script> script(module_object_->script(), isolate_);
+ if (script->type() == Script::TYPE_WASM &&
+ module_object_->module()->source_map_url.size() != 0) {
+ MaybeHandle<String> src_map_str = isolate_->factory()->NewStringFromUtf8(
+ CStrVector(module_object_->module()->source_map_url.c_str()), TENURED);
+ script->set_source_mapping_url(*src_map_str.ToHandleChecked());
+ }
isolate_->debug()->OnAfterCompile(script);
// Log the code within the generated module for profiling.
@@ -2247,7 +2304,7 @@ void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
// task) and schedule the next step(s), if any.
class AsyncCompileJob::CompileStep {
public:
- virtual ~CompileStep() {}
+ virtual ~CompileStep() = default;
void Run(bool on_foreground) {
if (on_foreground) {
@@ -2278,7 +2335,7 @@ class AsyncCompileJob::CompileTask : public CancelableTask {
job_(job),
on_foreground_(on_foreground) {}
- ~CompileTask() {
+ ~CompileTask() override {
if (job_ != nullptr && on_foreground_) ResetPendingForegroundTask();
}
@@ -2418,7 +2475,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
public:
PrepareAndStartCompile(std::shared_ptr<const WasmModule> module,
bool start_compilation)
- : module_(module), start_compilation_(start_compilation) {}
+ : module_(std::move(module)), start_compilation_(start_compilation) {}
private:
std::shared_ptr<const WasmModule> module_;
@@ -2439,10 +2496,11 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// Create heap objects for script and module bytes to be stored in the
// module object. Asm.js is not compiled asynchronously.
- Handle<Script> script = CreateWasmScript(job_->isolate_, job_->wire_bytes_);
+ const WasmModule* module = module_.get();
+ Handle<Script> script = CreateWasmScript(job_->isolate_, job_->wire_bytes_,
+ module->source_map_url);
Handle<ByteArray> asm_js_offset_table;
- const WasmModule* module = module_.get();
ModuleEnv env = CreateDefaultModuleEnv(module);
// TODO(wasm): Improve efficiency of storing module wire bytes. Only store
// relevant sections, not function bodies
@@ -2713,13 +2771,12 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
uint32_t offset) {
TRACE_STREAMING("Process function body %d ...\n", next_function_);
 decoder_.DecodeFunctionBody(
     next_function_, static_cast<uint32_t>(bytes.length()), offset, false);
 uint32_t index = next_function_ + decoder_.module()->num_imported_functions;
 const WasmFunction* func = &decoder_.module()->functions[index];
- WasmName name = {nullptr, 0};
- compilation_unit_builder_->AddUnit(func, offset, bytes, name);
+ compilation_unit_builder_->AddUnit(func, offset, bytes);
++next_function_;
// This method always succeeds. The return value is necessary to comply with
// the StreamingProcessor interface.
@@ -2822,7 +2879,7 @@ void CompilationState::SetNumberOfFunctionsToCompile(size_t num_functions) {
void CompilationState::SetCallback(
std::function<void(CompilationEvent, ErrorThrower*)> callback) {
DCHECK_NULL(callback_);
- callback_ = callback;
+ callback_ = std::move(callback);
}
void CompilationState::AddCompilationUnits(
@@ -3014,13 +3071,12 @@ void CompileJsToWasmWrappers(Isolate* isolate,
int wrapper_index = 0;
Handle<FixedArray> export_wrappers(module_object->export_wrappers(), isolate);
NativeModule* native_module = module_object->native_module();
- UseTrapHandler use_trap_handler =
- native_module->use_trap_handler() ? kUseTrapHandler : kNoTrapHandler;
const WasmModule* module = native_module->module();
for (auto exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
+ auto& function = module->functions[exp.index];
Handle<Code> wrapper_code = js_to_wasm_cache.GetOrCompileJSToWasmWrapper(
- isolate, native_module, exp.index, use_trap_handler);
+ isolate, function.sig, function.imported);
export_wrappers->set(wrapper_index, *wrapper_code);
RecordStats(*wrapper_code, isolate->counters());
++wrapper_index;
@@ -3028,7 +3084,8 @@ void CompileJsToWasmWrappers(Isolate* isolate,
}
Handle<Script> CreateWasmScript(Isolate* isolate,
- const ModuleWireBytes& wire_bytes) {
+ const ModuleWireBytes& wire_bytes,
+ const std::string& source_map_url) {
Handle<Script> script =
isolate->factory()->NewScript(isolate->factory()->empty_string());
script->set_context_data(isolate->native_context()->debug_context_id());
@@ -3040,12 +3097,6 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
const int kBufferSize = 32;
char buffer[kBufferSize];
- int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
- DCHECK(url_chars >= 0 && url_chars < kBufferSize);
- MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
- TENURED);
- script->set_source_url(*url_str.ToHandleChecked());
int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
DCHECK(name_chars >= 0 && name_chars < kBufferSize);
@@ -3054,6 +3105,11 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
TENURED);
script->set_name(*name_str.ToHandleChecked());
+ if (source_map_url.size() != 0) {
+ MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
+ CStrVector(source_map_url.c_str()), TENURED);
+ script->set_source_mapping_url(*src_map_str.ToHandleChecked());
+ }
return script;
}
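
JSToWasmWrapperCache::GetOrCompileJSToWasmWrapper is now keyed purely on (is_import, signature) rather than on a concrete function index, so any two exports sharing a signature share one wrapper, and the wrapper no longer depends on the native module or trap-handler mode. A toy model of that caching shape (stand-in types, not V8 code):

    #include <functional>
    #include <map>
    #include <string>
    #include <utility>

    using Sig = std::string;  // stand-in for FunctionSig
    using Code = int;         // stand-in for Handle<Code>

    class WrapperCache {
     public:
      explicit WrapperCache(std::function<Code(bool, const Sig&)> compile)
          : compile_(std::move(compile)) {}

      Code GetOrCompile(bool is_import, const Sig& sig) {
        auto key = std::make_pair(is_import, sig);
        auto it = cache_.find(key);
        if (it == cache_.end()) {
          it = cache_.emplace(key, compile_(is_import, sig)).first;
        }
        return it->second;
      }

     private:
      std::function<Code(bool, const Sig&)> compile_;
      std::map<std::pair<bool, Sig>, Code> cache_;
    };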
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 934c978d49..f108a5f939 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -63,7 +63,8 @@ void CompileJsToWasmWrappers(Isolate* isolate,
Handle<WasmModuleObject> module_object);
V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
- Isolate* isolate, const ModuleWireBytes& wire_bytes);
+ Isolate* isolate, const ModuleWireBytes& wire_bytes,
+ const std::string& source_map_url);
// Triggered by the WasmCompileLazy builtin.
// Returns the instruction start of the compiled code object.
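
The module-decoder changes below teach the decoder to recognize a "sourceMappingURL" custom section. A sketch of the wire layout such a section could have, assuming single-byte LEB128 lengths and a hypothetical URL "a.map":

// Assumed byte layout of a "sourceMappingURL" custom section.
#include <cstdint>

const uint8_t kSourceMapSection[] = {
    0x00,  // section id: 0 = custom/unknown section
    0x17,  // section payload length: 23 bytes (name length + name + URL)
    0x10,  // section name length: 16
    's', 'o', 'u', 'r', 'c', 'e', 'M', 'a', 'p', 'p', 'i', 'n', 'g',
    'U', 'R', 'L',
    0x05,  // URL string length: 5
    'a', '.', 'm', 'a', 'p',
};

int main() { return 0; }
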
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index db9cf45049..6b0a3d6485 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -30,6 +30,7 @@ namespace wasm {
namespace {
constexpr char kNameString[] = "name";
+constexpr char kSourceMappingURLString[] = "sourceMappingURL";
constexpr char kExceptionString[] = "exception";
constexpr char kUnknownString[] = "<unknown>";
@@ -48,6 +49,8 @@ const char* ExternalKindName(ImportExportKindCode kind) {
return "memory";
case kExternalGlobal:
return "global";
+ case kExternalException:
+ return "exception";
}
return "unknown";
}
@@ -82,6 +85,8 @@ const char* SectionName(SectionCode code) {
return "Data";
case kNameSectionCode:
return kNameString;
+ case kSourceMappingURLSectionCode:
+ return kSourceMappingURLString;
case kExceptionSectionCode:
return kExceptionString;
default:
@@ -219,7 +224,7 @@ class WasmSectionIterator {
}
if (section_code == kUnknownSectionCode) {
- // Check for the known "name" section.
+ // Check for the known "name" or "sourceMappingURL" section.
section_code =
ModuleDecoder::IdentifyUnknownSection(decoder_, section_end_);
// As a side effect, the above function will forward the decoder to after
@@ -262,7 +267,7 @@ class ModuleDecoderImpl : public Decoder {
}
}
- virtual void onFirstError() {
+ void onFirstError() override {
pc_ = end_; // On error, terminate section decoding loop.
}
@@ -337,7 +342,8 @@ class ModuleDecoderImpl : public Decoder {
static_cast<const void*>(bytes.end()));
// Check if the section is out-of-order.
- if (section_code < next_section_) {
+ if (section_code < next_ordered_section_ &&
+ section_code < kFirstUnorderedSection) {
errorf(pc(), "unexpected section: %s", SectionName(section_code));
return;
}
@@ -346,19 +352,31 @@ class ModuleDecoderImpl : public Decoder {
case kUnknownSectionCode:
break;
case kExceptionSectionCode:
- // Note: kExceptionSectionCode > kCodeSectionCode, but must appear
- // before the code section. Hence, treat it as a special case.
- if (++number_of_exception_sections > 1) {
+ // Note: kExceptionSectionCode > kExportSectionCode, but must appear
+ // before the export (and code) section, as well as after the import
+ // section. Hence, treat it as a special case.
+ if (seen_unordered_sections_ & (1 << kExceptionSectionCode)) {
errorf(pc(), "Multiple exception sections not allowed");
return;
- } else if (next_section_ >= kCodeSectionCode) {
- errorf(pc(), "Exception section must appear before the code section");
+ } else if (next_ordered_section_ > kExportSectionCode) {
+ errorf(pc(), "Exception section must appear before export section");
return;
+ } else if (next_ordered_section_ < kImportSectionCode) {
+ next_ordered_section_ = kImportSectionCode + 1;
}
+ seen_unordered_sections_ |= 1 << kExceptionSectionCode;
+ break;
+ case kSourceMappingURLSectionCode:
+ // sourceMappingURL is a custom section and currently can occur anywhere
+ // in the module. In case of multiple sourceMappingURL sections, all
+ // except the first occurrence are ignored.
+ case kNameSectionCode:
+ // TODO(titzer): report out of place name section as a warning.
+ // Be lenient with placement of name section. All except first
+ // occurrence are ignored.
break;
default:
- next_section_ = section_code;
- ++next_section_;
+ next_ordered_section_ = section_code + 1;
break;
}
@@ -401,6 +419,9 @@ class ModuleDecoderImpl : public Decoder {
case kNameSectionCode:
DecodeNameSection();
break;
+ case kSourceMappingURLSectionCode:
+ DecodeSourceMappingURLSection();
+ break;
case kExceptionSectionCode:
if (enabled_features_.eh) {
DecodeExceptionSection();
@@ -521,6 +542,17 @@ class ModuleDecoderImpl : public Decoder {
}
break;
}
+ case kExternalException: {
+ // ===== Imported exception ======================================
+ if (!enabled_features_.eh) {
+ errorf(pos, "unknown import kind 0x%02x", import->kind);
+ break;
+ }
+ import->index = static_cast<uint32_t>(module_->exceptions.size());
+ module_->exceptions.emplace_back(
+ consume_exception_sig(module_->signature_zone.get()));
+ break;
+ }
default:
errorf(pos, "unknown import kind 0x%02x", import->kind);
break;
@@ -655,6 +687,15 @@ class ModuleDecoderImpl : public Decoder {
}
break;
}
+ case kExternalException: {
+ if (!enabled_features_.eh) {
+ errorf(pos, "invalid export kind 0x%02x", exp->kind);
+ break;
+ }
+ WasmException* exception = nullptr;
+ exp->index = consume_exception_index(module_.get(), &exception);
+ break;
+ }
default:
errorf(pos, "invalid export kind 0x%02x", exp->kind);
break;
@@ -803,30 +844,48 @@ class ModuleDecoderImpl : public Decoder {
void DecodeNameSection() {
// TODO(titzer): find a way to report name errors as warnings.
- // Use an inner decoder so that errors don't fail the outer decoder.
- Decoder inner(start_, pc_, end_, buffer_offset_);
- // Decode all name subsections.
- // Be lenient with their order.
- while (inner.ok() && inner.more()) {
- uint8_t name_type = inner.consume_u8("name type");
- if (name_type & 0x80) inner.error("name type is not varuint7");
-
- uint32_t name_payload_len = inner.consume_u32v("name payload length");
- if (!inner.checkAvailable(name_payload_len)) break;
-
- // Decode module name, ignore the rest.
- // Function and local names will be decoded when needed.
- if (name_type == NameSectionKindCode::kModule) {
- WireBytesRef name = consume_string(inner, false, "module name");
- if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
- } else {
- inner.consume_bytes(name_payload_len, "name subsection payload");
+ // Ignore all but the first occurrence of the name section.
+ if (!(seen_unordered_sections_ & (1 << kNameSectionCode))) {
+ seen_unordered_sections_ |= 1 << kNameSectionCode;
+ // Use an inner decoder so that errors don't fail the outer decoder.
+ Decoder inner(start_, pc_, end_, buffer_offset_);
+ // Decode all name subsections.
+ // Be lenient with their order.
+ while (inner.ok() && inner.more()) {
+ uint8_t name_type = inner.consume_u8("name type");
+ if (name_type & 0x80) inner.error("name type is not varuint7");
+
+ uint32_t name_payload_len = inner.consume_u32v("name payload length");
+ if (!inner.checkAvailable(name_payload_len)) break;
+
+ // Decode module name, ignore the rest.
+ // Function and local names will be decoded when needed.
+ if (name_type == NameSectionKindCode::kModule) {
+ WireBytesRef name = consume_string(inner, false, "module name");
+ if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
+ } else {
+ inner.consume_bytes(name_payload_len, "name subsection payload");
+ }
}
}
// Skip the whole names section in the outer decoder.
consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
}
+ void DecodeSourceMappingURLSection() {
+ Decoder inner(start_, pc_, end_, buffer_offset_);
+ WireBytesRef url = wasm::consume_string(inner, true, "source map url");
+ if (inner.ok() &&
+ !(seen_unordered_sections_ & (1 << kSourceMappingURLSectionCode))) {
+ const byte* url_start =
+ inner.start() + inner.GetBufferRelativeOffset(url.offset());
+ module_->source_map_url.assign(reinterpret_cast<const char*>(url_start),
+ url.length());
+ seen_unordered_sections_ |= 1 << kSourceMappingURLSectionCode;
+ }
+ consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
+ }
+
void DecodeExceptionSection() {
uint32_t exception_count =
consume_count("exception count", kV8MaxWasmExceptions);
@@ -935,13 +994,18 @@ class ModuleDecoderImpl : public Decoder {
std::shared_ptr<WasmModule> module_;
Counters* counters_ = nullptr;
// The type section is the first section in a module.
- uint8_t next_section_ = kFirstSectionInModule;
- uint32_t number_of_exception_sections = 0;
- // We store next_section_ as uint8_t instead of SectionCode so that we can
- // increment it. This static_assert should make sure that SectionCode does not
- // get bigger than uint8_t accidentally.
- static_assert(sizeof(ModuleDecoderImpl::next_section_) == sizeof(SectionCode),
+ uint8_t next_ordered_section_ = kFirstSectionInModule;
+ // We store next_ordered_section_ as uint8_t instead of SectionCode so that we
+ // can increment it. This static_assert should make sure that SectionCode does
+ // not get bigger than uint8_t accidentally.
+ static_assert(sizeof(ModuleDecoderImpl::next_ordered_section_) ==
+ sizeof(SectionCode),
"type mismatch");
+ uint32_t seen_unordered_sections_ = 0;
+ static_assert(kBitsPerByte *
+ sizeof(ModuleDecoderImpl::seen_unordered_sections_) >
+ kLastKnownModuleSection,
+ "not enough bits");
Result<bool> intermediate_result_;
ModuleOrigin origin_;
@@ -1108,6 +1172,10 @@ class ModuleDecoderImpl : public Decoder {
return consume_index("table index", module->tables, table);
}
+ uint32_t consume_exception_index(WasmModule* module, WasmException** except) {
+ return consume_index("exception index", module->exceptions, except);
+ }
+
template <typename T>
uint32_t consume_index(const char* name, std::vector<T>& vector, T** ptr) {
const byte* pos = pc_;
@@ -1482,6 +1550,11 @@ SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder,
strncmp(reinterpret_cast<const char*>(section_name_start), kNameString,
num_chars(kNameString)) == 0) {
return kNameSectionCode;
+ } else if (string.length() == num_chars(kSourceMappingURLString) &&
+ strncmp(reinterpret_cast<const char*>(section_name_start),
+ kSourceMappingURLString,
+ num_chars(kSourceMappingURLString)) == 0) {
+ return kSourceMappingURLSectionCode;
}
return kUnknownSectionCode;
}
@@ -1609,7 +1682,7 @@ std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,
namespace {
-bool FindSection(Decoder& decoder, SectionCode section_code) {
+bool FindNameSection(Decoder& decoder) {
static constexpr int kModuleHeaderSize = 8;
decoder.consume_bytes(kModuleHeaderSize, "module header");
@@ -1634,7 +1707,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
DCHECK(names->empty());
Decoder decoder(module_start, module_end);
- if (!FindSection(decoder, kNameSectionCode)) return;
+ if (!FindNameSection(decoder)) return;
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
@@ -1669,7 +1742,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
DCHECK(result->names.empty());
Decoder decoder(module_start, module_end);
- if (!FindSection(decoder, kNameSectionCode)) return;
+ if (!FindNameSection(decoder)) return;
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
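
The decoder above tracks the name, sourceMappingURL and exception sections in the seen_unordered_sections_ bitmask so that only the first occurrence of each is honored. A minimal standalone sketch of that scheme (enum values copied from wasm-constants.h below):

// One bit per unordered section code; a set bit means "already seen".
#include <cstdint>
#include <cstdio>

enum SectionCode : uint8_t {
  kNameSectionCode = 12,
  kExceptionSectionCode = 13,
  kSourceMappingURLSectionCode = 14,
};

int main() {
  uint32_t seen_unordered_sections = 0;
  auto see_once = [&](SectionCode code) {
    if (seen_unordered_sections & (1u << code)) return false;  // duplicate
    seen_unordered_sections |= 1u << code;
    return true;  // first occurrence
  };
  std::printf("%d\n", see_once(kSourceMappingURLSectionCode));  // 1: decoded
  std::printf("%d\n", see_once(kSourceMappingURLSectionCode));  // 0: ignored
}
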
diff --git a/deps/v8/src/wasm/object-access.h b/deps/v8/src/wasm/object-access.h
new file mode 100644
index 0000000000..0f4a4d447d
--- /dev/null
+++ b/deps/v8/src/wasm/object-access.h
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_OBJECT_ACCESS_H_
+#define V8_WASM_OBJECT_ACCESS_H_
+
+#include "src/globals.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/js-objects.h"
+#include "src/objects/shared-function-info.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class ObjectAccess : public AllStatic {
+ public:
+ // Convert an offset into an object to an offset into a tagged object.
+ static constexpr int ToTagged(int offset) { return offset - kHeapObjectTag; }
+
+ // Get the offset into a fixed array for a given {index}.
+ static constexpr int ElementOffsetInTaggedFixedArray(int index) {
+ return ToTagged(FixedArray::OffsetOfElementAt(index));
+ }
+
+ // Get the offset of the context stored in a {JSFunction} object.
+ static constexpr int ContextOffsetInTaggedJSFunction() {
+ return ToTagged(JSFunction::kContextOffset);
+ }
+
+ // Get the offset of the shared function info in a {JSFunction} object.
+ static constexpr int SharedFunctionInfoOffsetInTaggedJSFunction() {
+ return ToTagged(JSFunction::kSharedFunctionInfoOffset);
+ }
+
+ // Get the offset of the formal parameter count in a {SharedFunctionInfo}
+ // object.
+ static constexpr int FormalParameterCountOffsetInSharedFunctionInfo() {
+ return ToTagged(SharedFunctionInfo::kFormalParameterCountOffset);
+ }
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_OBJECT_ACCESS_H_
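
The ToTagged() helper above folds V8's heap-object tag into a field offset: tagged pointers carry a low tag bit, so dereferencing them with a raw offset would be off by one. A standalone sketch of the arithmetic, assuming kHeapObjectTag == 1 as in V8's pointer-tagging scheme:

// A field at raw offset N inside an object is reached from a *tagged*
// pointer p as p + (N - kHeapObjectTag).
constexpr int kHeapObjectTag = 1;
constexpr int ToTagged(int offset) { return offset - kHeapObjectTag; }

static_assert(ToTagged(16) == 15, "tag bit is folded into the offset");

int main() { return 0; }
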
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 8522b3a500..d34bc4bca9 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -21,6 +21,7 @@ enum ValueType : uint8_t {
kWasmS128,
kWasmAnyRef,
kWasmAnyFunc,
+ kWasmExceptRef,
kWasmVar,
};
@@ -220,6 +221,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
return kLocalS128;
case kWasmAnyRef:
return kLocalAnyRef;
+ case kWasmExceptRef:
+ return kLocalExceptRef;
case kWasmStmt:
return kLocalVoid;
default:
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index c2c425a44e..6495421b8f 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "src/assembler-inl.h"
+#include "src/base/adapters.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/codegen.h"
@@ -23,7 +24,7 @@
#define TRACE_HEAP(...) \
do { \
- if (FLAG_wasm_trace_native_heap) PrintF(__VA_ARGS__); \
+ if (FLAG_trace_wasm_native_heap) PrintF(__VA_ARGS__); \
} while (false)
namespace v8 {
@@ -44,53 +45,56 @@ struct WasmCodeUniquePtrComparator {
} // namespace
-void DisjointAllocationPool::Merge(AddressRange range) {
- auto dest_it = ranges_.begin();
- auto dest_end = ranges_.end();
+void DisjointAllocationPool::Merge(base::AddressRegion region) {
+ auto dest_it = regions_.begin();
+ auto dest_end = regions_.end();
- // Skip over dest ranges strictly before {range}.
- while (dest_it != dest_end && dest_it->end < range.start) ++dest_it;
+ // Skip over dest regions strictly before {region}.
+ while (dest_it != dest_end && dest_it->end() < region.begin()) ++dest_it;
- // After last dest range: insert and done.
+ // After last dest region: insert and done.
if (dest_it == dest_end) {
- ranges_.push_back(range);
+ regions_.push_back(region);
return;
}
// Adjacent (from below) to dest: merge and done.
- if (dest_it->start == range.end) {
- dest_it->start = range.start;
+ if (dest_it->begin() == region.end()) {
+ base::AddressRegion merged_region{region.begin(),
+ region.size() + dest_it->size()};
+ DCHECK_EQ(merged_region.end(), dest_it->end());
+ *dest_it = merged_region;
return;
}
// Before dest: insert and done.
- if (dest_it->start > range.end) {
- ranges_.insert(dest_it, range);
+ if (dest_it->begin() > region.end()) {
+ regions_.insert(dest_it, region);
return;
}
- // Src is adjacent from above. Merge and check whether the merged range is now
- // adjacent to the next range.
- DCHECK_EQ(dest_it->end, range.start);
- dest_it->end = range.end;
+ // Src is adjacent from above. Merge and check whether the merged region is
+ // now adjacent to the next region.
+ DCHECK_EQ(dest_it->end(), region.begin());
+ dest_it->set_size(dest_it->size() + region.size());
+ DCHECK_EQ(dest_it->end(), region.end());
auto next_dest = dest_it;
++next_dest;
- if (next_dest != dest_end && dest_it->end == next_dest->start) {
- dest_it->end = next_dest->end;
- ranges_.erase(next_dest);
+ if (next_dest != dest_end && dest_it->end() == next_dest->begin()) {
+ dest_it->set_size(dest_it->size() + next_dest->size());
+ DCHECK_EQ(dest_it->end(), next_dest->end());
+ regions_.erase(next_dest);
}
}
-AddressRange DisjointAllocationPool::Allocate(size_t size) {
- for (auto it = ranges_.begin(), end = ranges_.end(); it != end; ++it) {
- size_t range_size = it->size();
- if (size > range_size) continue;
- AddressRange ret{it->start, it->start + size};
- if (size == range_size) {
- ranges_.erase(it);
+base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
+ for (auto it = regions_.begin(), end = regions_.end(); it != end; ++it) {
+ if (size > it->size()) continue;
+ base::AddressRegion ret{it->begin(), size};
+ if (size == it->size()) {
+ regions_.erase(it);
} else {
- it->start += size;
- DCHECK_LT(it->start, it->end);
+ *it = base::AddressRegion{it->begin() + size, it->size() - size};
}
return ret;
}
@@ -141,31 +145,53 @@ bool WasmCode::ShouldBeLogged(Isolate* isolate) {
void WasmCode::LogCode(Isolate* isolate) const {
DCHECK(ShouldBeLogged(isolate));
if (IsAnonymous()) return;
+
ModuleWireBytes wire_bytes(native_module()->wire_bytes());
// TODO(herhut): Allow to log code without on-heap round-trip of the name.
ModuleEnv* module_env = GetModuleEnv(native_module()->compilation_state());
WireBytesRef name_ref =
module_env->module->LookupFunctionName(wire_bytes, index());
- WasmName name_vec = wire_bytes.GetName(name_ref);
- MaybeHandle<String> maybe_name =
- isolate->factory()->NewStringFromUtf8(Vector<const char>::cast(name_vec));
- Handle<String> name;
- if (!maybe_name.ToHandle(&name)) {
- name = isolate->factory()->NewStringFromAsciiChecked("<name too long>");
+ WasmName name_vec = wire_bytes.GetNameOrNull(name_ref);
+ if (!name_vec.is_empty()) {
+ MaybeHandle<String> maybe_name = isolate->factory()->NewStringFromUtf8(
+ Vector<const char>::cast(name_vec));
+ Handle<String> name;
+ if (!maybe_name.ToHandle(&name)) {
+ name = isolate->factory()->NewStringFromAsciiChecked("<name too long>");
+ }
+ int name_length;
+ auto cname =
+ name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
+ RobustnessFlag::ROBUST_STRING_TRAVERSAL, &name_length);
+ PROFILE(isolate,
+ CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
+ {cname.get(), static_cast<size_t>(name_length)}));
+ } else {
+ EmbeddedVector<char, 32> generated_name;
+ SNPrintF(generated_name, "wasm-function[%d]", index());
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
+ generated_name));
}
- int name_length;
- auto cname =
- name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
- RobustnessFlag::ROBUST_STRING_TRAVERSAL, &name_length);
- PROFILE(isolate,
- CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
- {cname.get(), static_cast<size_t>(name_length)}));
+
if (!source_positions().is_empty()) {
LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
source_positions()));
}
}
+const char* WasmCode::GetRuntimeStubName() const {
+ DCHECK_EQ(WasmCode::kRuntimeStub, kind());
+#define RETURN_NAME(Name) \
+ if (native_module_->runtime_stub_table_[WasmCode::k##Name] == this) { \
+ return #Name; \
+ }
+#define RETURN_NAME_TRAP(Name) RETURN_NAME(ThrowWasm##Name)
+ WASM_RUNTIME_STUB_LIST(RETURN_NAME, RETURN_NAME_TRAP)
+#undef RETURN_NAME_TRAP
+#undef RETURN_NAME
+ return "<unknown>";
+}
+
void WasmCode::Validate() const {
#ifdef DEBUG
// We expect certain relocation info modes to never appear in {WasmCode}
@@ -309,22 +335,20 @@ WasmCode::~WasmCode() {
}
NativeModule::NativeModule(Isolate* isolate, const WasmFeatures& enabled,
- bool can_request_more, VirtualMemory* code_space,
+ bool can_request_more, VirtualMemory code_space,
WasmCodeManager* code_manager,
std::shared_ptr<const WasmModule> module,
const ModuleEnv& env)
: enabled_features_(enabled),
module_(std::move(module)),
compilation_state_(NewCompilationState(isolate, env)),
- free_code_space_({code_space->address(), code_space->end()}),
+ free_code_space_(code_space.region()),
wasm_code_manager_(code_manager),
can_request_more_memory_(can_request_more),
use_trap_handler_(env.use_trap_handler) {
DCHECK_EQ(module_.get(), env.module);
DCHECK_NOT_NULL(module_);
- VirtualMemory my_mem;
- owned_code_space_.push_back(my_mem);
- owned_code_space_.back().TakeControl(code_space);
+ owned_code_space_.emplace_back(std::move(code_space));
owned_code_.reserve(num_functions());
uint32_t num_wasm_functions = module_->num_declared_functions;
@@ -359,9 +383,9 @@ void NativeModule::LogWasmCodes(Isolate* isolate) {
}
WasmCode* NativeModule::AddOwnedCode(
- Maybe<uint32_t> index, Vector<const byte> instructions,
- uint32_t stack_slots, size_t safepoint_table_offset,
- size_t handler_table_offset, size_t constant_pool_offset,
+ uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
+ size_t safepoint_table_offset, size_t handler_table_offset,
+ size_t constant_pool_offset,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
@@ -371,18 +395,13 @@ WasmCode* NativeModule::AddOwnedCode(
// Both allocation and insertion in owned_code_ happen in the same critical
// section, thus ensuring owned_code_'s elements are rarely if ever moved.
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
- Address executable_buffer = AllocateForCode(instructions.size());
- if (executable_buffer == kNullAddress) {
- V8::FatalProcessOutOfMemory(nullptr, "NativeModule::AddOwnedCode");
- UNREACHABLE();
- }
+ Vector<byte> executable_buffer = AllocateForCode(instructions.size());
// Ownership will be transferred to {owned_code_} below.
- code = new WasmCode(
- this, index,
- {reinterpret_cast<byte*>(executable_buffer), instructions.size()},
- stack_slots, safepoint_table_offset, handler_table_offset,
- constant_pool_offset, std::move(protected_instructions),
- std::move(reloc_info), std::move(source_position_table), kind, tier);
+ code = new WasmCode(this, index, executable_buffer, stack_slots,
+ safepoint_table_offset, handler_table_offset,
+ constant_pool_offset, std::move(protected_instructions),
+ std::move(reloc_info), std::move(source_position_table),
+ kind, tier);
if (owned_code_.empty() ||
code->instruction_start() > owned_code_.back()->instruction_start()) {
@@ -405,22 +424,25 @@ WasmCode* NativeModule::AddOwnedCode(
return code;
}
-WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
- uint32_t index) {
+WasmCode* NativeModule::AddImportWrapper(Handle<Code> code, uint32_t index) {
// TODO(wasm): Adding instance-specific wasm-to-js wrappers as owned code to
// this NativeModule is a memory leak until the whole NativeModule dies.
- WasmCode* ret = AddAnonymousCode(code, kind);
- ret->index_ = Just(index);
- if (index >= module_->num_imported_functions) set_code(index, ret);
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::kWasmToJsWrapper);
+ DCHECK_LT(index, module_->num_imported_functions);
+ ret->index_ = index;
return ret;
}
WasmCode* NativeModule::AddInterpreterEntry(Handle<Code> code, uint32_t index) {
WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterEntry);
- ret->index_ = Just(index);
+ ret->index_ = index;
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
- PatchJumpTable(index, ret->instruction_start(), WasmCode::kFlushICache);
- set_code(index, ret);
+ InstallCode(ret);
+ return ret;
+}
+
+WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::kFunction);
return ret;
}
@@ -445,15 +467,15 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
#define COPY_BUILTIN(Name) \
runtime_stub_table_[WasmCode::k##Name] = \
AddAnonymousCode(isolate->builtins()->builtin_handle(Builtins::k##Name), \
- WasmCode::kRuntimeStub);
+ WasmCode::kRuntimeStub, #Name);
#define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name)
- WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP);
+ WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP)
#undef COPY_BUILTIN_TRAP
#undef COPY_BUILTIN
}
-WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
- WasmCode::Kind kind) {
+WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
+ const char* name) {
// For off-heap builtins, we create a copy of the off-heap instruction stream
// instead of the on-heap code object containing the trampoline. Ensure that
// we do not apply the on-heap reloc info to the off-heap instructions.
@@ -473,17 +495,17 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
int safepoint_table_offset =
code->has_safepoint_info() ? code->safepoint_table_offset() : 0;
WasmCode* ret =
- AddOwnedCode(Nothing<uint32_t>(), // index
- instructions, // instructions
- stack_slots, // stack_slots
- safepoint_table_offset, // safepoint_table_offset
- code->handler_table_offset(), // handler_table_offset
- code->constant_pool_offset(), // constant_pool_offset
- {}, // protected_instructions
- std::move(reloc_info), // reloc_info
- std::move(source_pos), // source positions
- kind, // kind
- WasmCode::kOther); // tier
+ AddOwnedCode(WasmCode::kAnonymousFuncIndex, // index
+ instructions, // instructions
+ stack_slots, // stack_slots
+ safepoint_table_offset, // safepoint_table_offset
+ code->handler_table_offset(), // handler_table_offset
+ code->constant_pool_offset(), // constant_pool_offset
+ {}, // protected_instructions
+ std::move(reloc_info), // reloc_info
+ std::move(source_pos), // source positions
+ kind, // kind
+ WasmCode::kOther); // tier
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = ret->instruction_start() - code->InstructionStart();
@@ -510,7 +532,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
// made while iterating over the RelocInfo above.
Assembler::FlushICache(ret->instructions().start(),
ret->instructions().size());
- if (FLAG_print_code || FLAG_print_wasm_code) ret->Print();
+ if (FLAG_print_code || FLAG_print_wasm_code) ret->Print(name);
ret->Validate();
return ret;
}
@@ -523,12 +545,12 @@ WasmCode* NativeModule::AddCode(
OwnedVector<byte> reloc_info = OwnedVector<byte>::New(desc.reloc_size);
memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
desc.reloc_size);
- WasmCode* ret = AddOwnedCode(
- Just(index), {desc.buffer, static_cast<size_t>(desc.instr_size)},
- stack_slots, safepoint_table_offset, handler_table_offset,
- desc.instr_size - desc.constant_pool_size,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_pos_table), WasmCode::kFunction, tier);
+ WasmCode* ret =
+ AddOwnedCode(index, {desc.buffer, static_cast<size_t>(desc.instr_size)},
+ stack_slots, safepoint_table_offset, handler_table_offset,
+ desc.instr_size - desc.constant_pool_size,
+ std::move(protected_instructions), std::move(reloc_info),
+ std::move(source_pos_table), WasmCode::kFunction, tier);
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = ret->instructions().start() - desc.buffer;
@@ -571,17 +593,17 @@ WasmCode* NativeModule::AddDeserializedCode(
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Tier tier) {
- WasmCode* code = AddOwnedCode(
- Just(index), instructions, stack_slots, safepoint_table_offset,
- handler_table_offset, constant_pool_offset,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_position_table), WasmCode::kFunction, tier);
+ WasmCode* code =
+ AddOwnedCode(index, instructions, stack_slots, safepoint_table_offset,
+ handler_table_offset, constant_pool_offset,
+ std::move(protected_instructions), std::move(reloc_info),
+ std::move(source_position_table), WasmCode::kFunction, tier);
if (!code->protected_instructions_.is_empty()) {
code->RegisterTrapHandlerData();
}
- set_code(index, code);
- PatchJumpTable(index, code->instruction_start(), WasmCode::kFlushICache);
+ base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ InstallCode(code);
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
return code;
@@ -598,10 +620,7 @@ void NativeModule::PublishCode(WasmCode* code) {
if (!code->protected_instructions_.is_empty()) {
code->RegisterTrapHandlerData();
}
- DCHECK(!code->IsAnonymous());
- set_code(code->index(), code);
- PatchJumpTable(code->index(), code->instruction_start(),
- WasmCode::kFlushICache);
+ InstallCode(code);
}
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
@@ -618,52 +637,66 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
OwnedVector<byte> instructions = OwnedVector<byte>::New(
JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
memset(instructions.start(), 0, instructions.size());
- return AddOwnedCode(Nothing<uint32_t>(), // index
- instructions.as_vector(), // instructions
- 0, // stack_slots
- 0, // safepoint_table_offset
- 0, // handler_table_offset
- 0, // constant_pool_offset
- {}, // protected_instructions
- {}, // reloc_info
- {}, // source_pos
- WasmCode::kJumpTable, // kind
- WasmCode::kOther); // tier
+ return AddOwnedCode(WasmCode::kAnonymousFuncIndex, // index
+ instructions.as_vector(), // instructions
+ 0, // stack_slots
+ 0, // safepoint_table_offset
+ 0, // handler_table_offset
+ 0, // constant_pool_offset
+ {}, // protected_instructions
+ {}, // reloc_info
+ {}, // source_pos
+ WasmCode::kJumpTable, // kind
+ WasmCode::kOther); // tier
}
-void NativeModule::PatchJumpTable(uint32_t func_index, Address target,
- WasmCode::FlushICache flush_icache) {
- DCHECK_LE(module_->num_imported_functions, func_index);
- uint32_t slot_idx = func_index - module_->num_imported_functions;
+void NativeModule::InstallCode(WasmCode* code) {
+ DCHECK_LT(code->index(), num_functions());
+ DCHECK_LE(module_->num_imported_functions, code->index());
+
+ // Update code table.
+ code_table_[code->index() - module_->num_imported_functions] = code;
+
+ // Patch jump table.
+ uint32_t slot_idx = code->index() - module_->num_imported_functions;
JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
- slot_idx, target, flush_icache);
+ slot_idx, code->instruction_start(),
+ WasmCode::kFlushICache);
}
-Address NativeModule::AllocateForCode(size_t size) {
+Vector<byte> NativeModule::AllocateForCode(size_t size) {
+ DCHECK_LT(0, size);
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
// This happens under a lock assumed by the caller.
size = RoundUp(size, kCodeAlignment);
- AddressRange mem = free_code_space_.Allocate(size);
+ base::AddressRegion mem = free_code_space_.Allocate(size);
if (mem.is_empty()) {
- if (!can_request_more_memory_) return kNullAddress;
+ if (!can_request_more_memory_) {
+ V8::FatalProcessOutOfMemory(nullptr,
+ "NativeModule::AllocateForCode reservation");
+ UNREACHABLE();
+ }
Address hint = owned_code_space_.empty() ? kNullAddress
: owned_code_space_.back().end();
- VirtualMemory empty_mem;
- owned_code_space_.push_back(empty_mem);
- VirtualMemory& new_mem = owned_code_space_.back();
- wasm_code_manager_->TryAllocate(size, &new_mem,
- reinterpret_cast<void*>(hint));
- if (!new_mem.IsReserved()) return kNullAddress;
- base::LockGuard<base::Mutex> lock(
- &wasm_code_manager_->native_modules_mutex_);
+
+ VirtualMemory new_mem =
+ wasm_code_manager_->TryAllocate(size, reinterpret_cast<void*>(hint));
+ if (!new_mem.IsReserved()) {
+ V8::FatalProcessOutOfMemory(nullptr,
+ "NativeModule::AllocateForCode reservation");
+ UNREACHABLE();
+ }
wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
- free_code_space_.Merge({new_mem.address(), new_mem.end()});
+ free_code_space_.Merge(new_mem.region());
+ owned_code_space_.emplace_back(std::move(new_mem));
mem = free_code_space_.Allocate(size);
- if (mem.is_empty()) return kNullAddress;
+ DCHECK(!mem.is_empty());
}
- Address commit_start = RoundUp(mem.start, AllocatePageSize());
- Address commit_end = RoundUp(mem.end, AllocatePageSize());
+ const Address page_size = page_allocator->AllocatePageSize();
+ Address commit_start = RoundUp(mem.begin(), page_size);
+ Address commit_end = RoundUp(mem.end(), page_size);
// {commit_start} will be either mem.start or the start of the next page.
// {commit_end} will be the start of the page after the one in which
// the allocation ends.
@@ -673,38 +706,43 @@ Address NativeModule::AllocateForCode(size_t size) {
// start is already committed (or we start at the beginning of a page).
// The end needs to be committed all through the end of the page.
if (commit_start < commit_end) {
+ committed_code_space_.fetch_add(commit_end - commit_start);
+ // Committed code cannot grow bigger than maximum code space size.
+ DCHECK_LE(committed_code_space_.load(), kMaxWasmCodeMemory);
#if V8_OS_WIN
- // On Windows, we cannot commit a range that straddles different
+ // On Windows, we cannot commit a region that straddles different
// reservations of virtual memory. Because we bump-allocate, and because, if
// we need more memory, we append that memory at the end of the
// owned_code_space_ list, we traverse that list in reverse order to find
// the reservation(s) that guide how to chunk the region to commit.
- for (auto it = owned_code_space_.crbegin(),
- rend = owned_code_space_.crend();
- it != rend && commit_start < commit_end; ++it) {
- if (commit_end > it->end() || it->address() >= commit_end) continue;
- Address start = std::max(commit_start, it->address());
- size_t commit_size = static_cast<size_t>(commit_end - start);
- DCHECK(IsAligned(commit_size, AllocatePageSize()));
+ for (auto& vmem : base::Reversed(owned_code_space_)) {
+ if (commit_end <= vmem.address() || vmem.end() <= commit_start) continue;
+ Address start = std::max(commit_start, vmem.address());
+ Address end = std::min(commit_end, vmem.end());
+ size_t commit_size = static_cast<size_t>(end - start);
if (!wasm_code_manager_->Commit(start, commit_size)) {
- return kNullAddress;
+ V8::FatalProcessOutOfMemory(nullptr,
+ "NativeModule::AllocateForCode commit");
+ UNREACHABLE();
}
- committed_code_space_.fetch_add(commit_size);
- commit_end = start;
+ // Opportunistically reduce the commit range. This might terminate the
+ // loop early.
+ if (commit_start == start) commit_start = end;
+ if (commit_end == end) commit_end = start;
+ if (commit_start >= commit_end) break;
}
#else
- size_t commit_size = static_cast<size_t>(commit_end - commit_start);
- DCHECK(IsAligned(commit_size, AllocatePageSize()));
- if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
- return kNullAddress;
+ if (!wasm_code_manager_->Commit(commit_start, commit_end - commit_start)) {
+ V8::FatalProcessOutOfMemory(nullptr,
+ "NativeModule::AllocateForCode commit");
+ UNREACHABLE();
}
- committed_code_space_.fetch_add(commit_size);
#endif
}
- DCHECK(IsAligned(mem.start, kCodeAlignment));
- allocated_code_space_.Merge(std::move(mem));
- TRACE_HEAP("Code alloc for %p: %" PRIuPTR ",+%zu\n", this, mem.start, size);
- return mem.start;
+ DCHECK(IsAligned(mem.begin(), kCodeAlignment));
+ allocated_code_space_.Merge(mem);
+ TRACE_HEAP("Code alloc for %p: %" PRIxPTR ",+%zu\n", this, mem.begin(), size);
+ return {reinterpret_cast<byte*>(mem.begin()), mem.size()};
}
WasmCode* NativeModule::Lookup(Address pc) const {
@@ -787,7 +825,8 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
? PageAllocator::kReadWrite
: PageAllocator::kReadWriteExecute;
- bool ret = SetPermissions(start, size, permission);
+ bool ret =
+ SetPermissions(GetPlatformPageAllocator(), start, size, permission);
TRACE_HEAP("Setting rw permissions for %p:%p\n",
reinterpret_cast<void*>(start),
reinterpret_cast<void*>(start + size));
@@ -802,24 +841,37 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
void WasmCodeManager::AssignRanges(Address start, Address end,
NativeModule* native_module) {
+ base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
+ lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
+}
+
+void WasmCodeManager::AssignRangesAndAddModule(Address start, Address end,
+ NativeModule* native_module) {
+ base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
+ native_modules_.emplace(native_module);
}
-void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
+VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
DCHECK_GT(size, 0);
- size = RoundUp(size, AllocatePageSize());
- DCHECK(!ret->IsReserved());
- if (!memory_tracker_->ReserveAddressSpace(size)) return;
- if (hint == nullptr) hint = GetRandomMmapAddr();
-
- if (!AlignedAllocVirtualMemory(size, static_cast<size_t>(AllocatePageSize()),
- hint, ret)) {
- DCHECK(!ret->IsReserved());
+ size = RoundUp(size, page_allocator->AllocatePageSize());
+ if (!memory_tracker_->ReserveAddressSpace(size,
+ WasmMemoryTracker::kHardLimit)) {
+ return {};
+ }
+ if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
+
+ VirtualMemory mem(page_allocator, size, hint,
+ page_allocator->AllocatePageSize());
+ if (!mem.IsReserved()) {
memory_tracker_->ReleaseReservation(size);
+ return {};
}
TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
- reinterpret_cast<void*>(ret->address()),
- reinterpret_cast<void*>(ret->end()), ret->size());
+ reinterpret_cast<void*>(mem.address()),
+ reinterpret_cast<void*>(mem.end()), mem.size());
+ return mem;
}
void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const {
@@ -897,7 +949,7 @@ std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
static constexpr int kAllocationRetries = 2;
VirtualMemory mem;
for (int retries = 0;; ++retries) {
- TryAllocate(vmem_size, &mem);
+ mem = TryAllocate(vmem_size);
if (mem.IsReserved()) break;
if (retries == kAllocationRetries) {
V8::FatalProcessOutOfMemory(isolate, "WasmCodeManager::NewNativeModule");
@@ -911,13 +963,12 @@ std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
Address start = mem.address();
size_t size = mem.size();
Address end = mem.end();
- std::unique_ptr<NativeModule> ret(new NativeModule(
- isolate, enabled, can_request_more, &mem, this, std::move(module), env));
+ std::unique_ptr<NativeModule> ret(
+ new NativeModule(isolate, enabled, can_request_more, std::move(mem), this,
+ std::move(module), env));
TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", this, start,
size);
- base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
- AssignRanges(start, end, ret.get());
- native_modules_.emplace(ret.get());
+ AssignRangesAndAddModule(start, end, ret.get());
return ret;
}
@@ -925,6 +976,8 @@ bool NativeModule::SetExecutable(bool executable) {
if (is_executable_ == executable) return true;
TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+
if (FLAG_wasm_write_protect_code_memory) {
PageAllocator::Permission permission =
executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
@@ -939,7 +992,8 @@ bool NativeModule::SetExecutable(bool executable) {
// committed or not.
if (can_request_more_memory_) {
for (auto& vmem : owned_code_space_) {
- if (!SetPermissions(vmem.address(), vmem.size(), permission)) {
+ if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
+ permission)) {
return false;
}
TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
@@ -949,16 +1003,18 @@ bool NativeModule::SetExecutable(bool executable) {
return true;
}
#endif
- for (auto& range : allocated_code_space_.ranges()) {
+ for (auto& region : allocated_code_space_.regions()) {
// allocated_code_space_ is fine-grained, so we need to
// page-align it.
- size_t range_size = RoundUp(range.size(), AllocatePageSize());
- if (!SetPermissions(range.start, range_size, permission)) {
+ size_t region_size =
+ RoundUp(region.size(), page_allocator->AllocatePageSize());
+ if (!SetPermissions(page_allocator, region.begin(), region_size,
+ permission)) {
return false;
}
TRACE_HEAP("Set %p:%p to executable:%d\n",
- reinterpret_cast<void*>(range.start),
- reinterpret_cast<void*>(range.end), executable);
+ reinterpret_cast<void*>(region.begin()),
+ reinterpret_cast<void*>(region.end()), executable);
}
}
is_executable_ = executable;
@@ -970,29 +1026,22 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
DCHECK_EQ(1, native_modules_.count(native_module));
native_modules_.erase(native_module);
TRACE_HEAP("Freeing NativeModule %p\n", this);
- for (auto& vmem : native_module->owned_code_space_) {
- lookup_map_.erase(vmem.address());
- Free(&vmem);
- DCHECK(!vmem.IsReserved());
+ for (auto& mem : native_module->owned_code_space_) {
+ DCHECK(mem.IsReserved());
+ TRACE_HEAP("VMem Release: %" PRIxPTR ":%" PRIxPTR " (%zu)\n", mem.address(),
+ mem.end(), mem.size());
+ lookup_map_.erase(mem.address());
+ memory_tracker_->ReleaseReservation(mem.size());
+ mem.Free();
+ DCHECK(!mem.IsReserved());
}
native_module->owned_code_space_.clear();
size_t code_size = native_module->committed_code_space_.load();
DCHECK(IsAligned(code_size, AllocatePageSize()));
remaining_uncommitted_code_space_.fetch_add(code_size);
-}
-
-// TODO(wasm): We can make this more efficient if needed. For
-// example, we can preface the first instruction with a pointer to
-// the WasmCode. In the meantime, we have a separate API so we can
-// easily identify those places where we know we have the first
-// instruction PC.
-WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const {
- WasmCode* code = LookupCode(pc);
- // This method can only be called for valid instruction start addresses.
- DCHECK_NOT_NULL(code);
- DCHECK_EQ(pc, code->instruction_start());
- return code;
+ // Remaining code space cannot grow bigger than maximum code space size.
+ DCHECK_LE(remaining_uncommitted_code_space_.load(), kMaxWasmCodeMemory);
}
NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
@@ -1002,12 +1051,12 @@ NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
auto iter = lookup_map_.upper_bound(pc);
if (iter == lookup_map_.begin()) return nullptr;
--iter;
- Address range_start = iter->first;
- Address range_end = iter->second.first;
+ Address region_start = iter->first;
+ Address region_end = iter->second.first;
NativeModule* candidate = iter->second.second;
DCHECK_NOT_NULL(candidate);
- return range_start <= pc && pc < range_end ? candidate : nullptr;
+ return region_start <= pc && pc < region_end ? candidate : nullptr;
}
WasmCode* WasmCodeManager::LookupCode(Address pc) const {
@@ -1015,16 +1064,6 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const {
return candidate ? candidate->Lookup(pc) : nullptr;
}
-void WasmCodeManager::Free(VirtualMemory* mem) {
- DCHECK(mem->IsReserved());
- void* start = reinterpret_cast<void*>(mem->address());
- void* end = reinterpret_cast<void*>(mem->end());
- size_t size = mem->size();
- mem->Free();
- memory_tracker_->ReleaseReservation(size);
- TRACE_HEAP("VMem Release: %p:%p (%zu)\n", start, end, size);
-}
-
size_t WasmCodeManager::remaining_uncommitted_code_space() const {
return remaining_uncommitted_code_space_.load();
}
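
DisjointAllocationPool::Merge above keeps a sorted list of disjoint [begin, end) regions and coalesces a returned region with its neighbours on either side. A simplified re-implementation sketch of the same invariant, with plain integer regions standing in for V8's base::AddressRegion:

// Simplified sketch: sorted list of disjoint [begin, end) regions; Merge
// coalesces the incoming region with adjacent neighbours on both sides.
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>

struct Region { uintptr_t begin, end; };

void Merge(std::list<Region>& regions, Region r) {
  auto it = regions.begin();
  // Skip regions strictly below {r}.
  while (it != regions.end() && it->end < r.begin) ++it;
  if (it != regions.end() && it->begin == r.end) {  // {r} adjacent from below
    it->begin = r.begin;
    return;
  }
  if (it == regions.end() || it->begin > r.end) {  // fully disjoint
    regions.insert(it, r);
    return;
  }
  assert(it->end == r.begin);  // {r} adjacent from above: extend, maybe bridge
  it->end = r.end;
  auto next = std::next(it);
  if (next != regions.end() && it->end == next->begin) {
    it->end = next->end;
    regions.erase(next);
  }
}

int main() {
  std::list<Region> pool;
  Merge(pool, {0x1000, 0x2000});
  Merge(pool, {0x3000, 0x4000});
  Merge(pool, {0x2000, 0x3000});  // bridges both: one region remains
  assert(pool.size() == 1 && pool.front().begin == 0x1000 &&
         pool.front().end == 0x4000);
}
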
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index ffcc05fbcd..65156b7457 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -18,6 +18,7 @@
#include "src/vector.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-limits.h"
namespace v8 {
namespace internal {
@@ -32,47 +33,34 @@ class WasmCodeManager;
class WasmMemoryTracker;
struct WasmModule;
-struct AddressRange {
- Address start;
- Address end;
-
- AddressRange(Address s, Address e) : start(s), end(e) {
- DCHECK_LE(start, end);
- DCHECK_IMPLIES(start == kNullAddress, end == kNullAddress);
- }
- AddressRange() : AddressRange(kNullAddress, kNullAddress) {}
-
- size_t size() const { return static_cast<size_t>(end - start); }
- bool is_empty() const { return start == end; }
- operator bool() const { return start == kNullAddress; }
-};
-
-// Sorted, disjoint and non-overlapping memory ranges. A range is of the
+// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
// because that should have been reduced to [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
public:
DisjointAllocationPool() = default;
- explicit DisjointAllocationPool(AddressRange range) : ranges_({range}) {}
+ explicit DisjointAllocationPool(base::AddressRegion region)
+ : regions_({region}) {}
DisjointAllocationPool(DisjointAllocationPool&& other) = default;
DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;
- // Merge the parameter range into this object while preserving ordering of the
- // ranges. The assumption is that the passed parameter is not intersecting
- // this object - for example, it was obtained from a previous Allocate.
- void Merge(AddressRange);
+ // Merge the parameter region into this object while preserving ordering of
+ // the regions. The assumption is that the passed parameter is not
+ // intersecting this object - for example, it was obtained from a previous
+ // Allocate.
+ void Merge(base::AddressRegion);
- // Allocate a contiguous range of size {size}. Return an empty pool on
+ // Allocate a contiguous region of size {size}. Return an empty pool on
// failure.
- AddressRange Allocate(size_t size);
+ base::AddressRegion Allocate(size_t size);
- bool IsEmpty() const { return ranges_.empty(); }
- const std::list<AddressRange>& ranges() const { return ranges_; }
+ bool IsEmpty() const { return regions_.empty(); }
+ const std::list<base::AddressRegion>& regions() const { return regions_; }
private:
- std::list<AddressRange> ranges_;
+ std::list<base::AddressRegion> regions_;
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool)
};
@@ -113,9 +101,12 @@ class V8_EXPORT_PRIVATE WasmCode final {
return source_position_table_.as_vector();
}
- uint32_t index() const { return index_.ToChecked(); }
+ uint32_t index() const {
+ DCHECK(!IsAnonymous());
+ return index_;
+ }
// Anonymous functions are functions that don't carry an index.
- bool IsAnonymous() const { return index_.IsNothing(); }
+ bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
Kind kind() const { return kind_; }
NativeModule* native_module() const { return native_module_; }
Tier tier() const { return tier_; }
@@ -135,6 +126,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
return protected_instructions_.as_vector();
}
+ const char* GetRuntimeStubName() const;
+
void Validate() const;
void Print(const char* name = nullptr) const;
void Disassemble(const char* name, std::ostream& os,
@@ -150,7 +143,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
private:
friend class NativeModule;
- WasmCode(NativeModule* native_module, Maybe<uint32_t> index,
+ WasmCode(NativeModule* native_module, uint32_t index,
Vector<byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
size_t constant_pool_offset,
@@ -185,11 +178,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
// trap_handler_index.
void RegisterTrapHandlerData();
+ static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
+ STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
+
Vector<byte> instructions_;
OwnedVector<const byte> reloc_info_;
OwnedVector<const byte> source_position_table_;
NativeModule* native_module_ = nullptr;
- Maybe<uint32_t> index_;
+ uint32_t index_;
Kind kind_;
size_t constant_pool_offset_ = 0;
uint32_t stack_slots_ = 0;
@@ -216,8 +212,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
static constexpr bool kCanAllocateMoreMemory = true;
#endif
- // {AddCode} is thread safe w.r.t. other calls to {AddCode} or {AddCodeCopy},
- // i.e. it can be called concurrently from background threads.
+ // {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
+ // code below, i.e. it can be called concurrently from background threads.
WasmCode* AddCode(uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
OwnedVector<trap_handler::ProtectedInstructionData>
@@ -234,16 +230,19 @@ class V8_EXPORT_PRIVATE NativeModule final {
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Tier tier);
- // A way to copy over JS-allocated code. This is because we compile
- // certain wrappers using a different pipeline.
- WasmCode* AddCodeCopy(Handle<Code> code, WasmCode::Kind kind, uint32_t index);
+ // Add an import wrapper for wasm-to-JS transitions. This method copies over
+ // JS-allocated code, because we compile wrappers using a different pipeline.
+ WasmCode* AddImportWrapper(Handle<Code> code, uint32_t index);
- // Add an interpreter entry. For the same reason as AddCodeCopy, we
+ // Add an interpreter entry. For the same reason as AddImportWrapper, we
// currently compile these using a different pipeline and we can't get a
// CodeDesc here. When adding interpreter wrappers, we do not insert them in
// the code_table, however, we let them self-identify as the {index} function.
WasmCode* AddInterpreterEntry(Handle<Code> code, uint32_t index);
+ // Adds anonymous code for testing purposes.
+ WasmCode* AddCodeForTesting(Handle<Code> code);
+
// When starting lazy compilation, provide the WasmLazyCompile builtin by
// calling SetLazyBuiltin. It will be copied into this NativeModule and the
// jump table will be populated with that copy.
@@ -346,18 +345,20 @@ class V8_EXPORT_PRIVATE NativeModule final {
friend class NativeModuleModificationScope;
NativeModule(Isolate* isolate, const WasmFeatures& enabled_features,
- bool can_request_more, VirtualMemory* code_space,
+ bool can_request_more, VirtualMemory code_space,
WasmCodeManager* code_manager,
std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
- WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
- Address AllocateForCode(size_t size);
+ WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind,
+ const char* name = nullptr);
+ // Allocate code space. Returns a valid buffer or fails with OOM (crash).
+ Vector<byte> AllocateForCode(size_t size);
// Primitive for adding code to the native module. All code added to a native
// module is owned by that module. Various callers get to decide on how the
// code is obtained (CodeDesc vs, as a point in time, Code*), the kind,
// whether it has an index or is anonymous, etc.
- WasmCode* AddOwnedCode(Maybe<uint32_t> index, Vector<const byte> instructions,
+ WasmCode* AddOwnedCode(uint32_t index, Vector<const byte> instructions,
uint32_t stack_slots, size_t safepoint_table_offset,
size_t handler_table_offset,
size_t constant_pool_offset,
@@ -368,18 +369,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);
- void PatchJumpTable(uint32_t func_index, Address target,
- WasmCode::FlushICache);
+ // Hold the {allocation_mutex_} when calling this method.
+ void InstallCode(WasmCode* code);
Vector<WasmCode*> code_table() const {
return {code_table_.get(), module_->num_declared_functions};
}
- void set_code(uint32_t index, WasmCode* code) {
- DCHECK_LT(index, num_functions());
- DCHECK_LE(module_->num_imported_functions, index);
- DCHECK_EQ(code->index(), index);
- code_table_[index - module_->num_imported_functions] = code;
- }
// Features enabled for this module. We keep a copy of the features that
// were enabled at the time of the creation of this native module,
@@ -390,12 +385,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// AsyncCompileJob).
std::shared_ptr<const WasmModule> module_;
- // Holds all allocated code objects; it is maintained in ascending order of
- // the code's instruction start address to allow lookups.
- std::vector<std::unique_ptr<WasmCode>> owned_code_;
-
- std::unique_ptr<WasmCode* []> code_table_;
-
OwnedVector<const byte> wire_bytes_;
WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};
@@ -408,13 +397,25 @@ class V8_EXPORT_PRIVATE NativeModule final {
// hence needs to be destructed first when this native module dies.
std::unique_ptr<CompilationState, CompilationStateDeleter> compilation_state_;
- // This mutex protects concurrent calls to {AddCode} and {AddCodeCopy}.
+ // This mutex protects concurrent calls to {AddCode} and friends.
mutable base::Mutex allocation_mutex_;
+ //////////////////////////////////////////////////////////////////////////////
+ // Protected by {allocation_mutex_}:
+
+ // Holds all allocated code objects; it is maintained in ascending order of
+ // the code's instruction start address to allow lookups.
+ std::vector<std::unique_ptr<WasmCode>> owned_code_;
+
+ std::unique_ptr<WasmCode* []> code_table_;
+
DisjointAllocationPool free_code_space_;
DisjointAllocationPool allocated_code_space_;
std::list<VirtualMemory> owned_code_space_;
+ // End of fields protected by {allocation_mutex_}.
+ //////////////////////////////////////////////////////////////////////////////
+
WasmCodeManager* wasm_code_manager_;
std::atomic<size_t> committed_code_space_{0};
int modification_scope_depth_ = 0;
@@ -443,7 +444,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const;
- WasmCode* GetCodeFromStartAddress(Address pc) const;
size_t remaining_uncommitted_code_space() const;
// Add a sample of all module sizes.
@@ -459,22 +459,30 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
private:
friend class NativeModule;
- void TryAllocate(size_t size, VirtualMemory*, void* hint = nullptr);
+ V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
+ void* hint = nullptr);
bool Commit(Address, size_t);
// Currently, we uncommit a whole module, so all we need is account
// for the freed memory size. We do that in FreeNativeModule.
// There's no separate Uncommit.
void FreeNativeModule(NativeModule*);
- void Free(VirtualMemory* mem);
void AssignRanges(Address start, Address end, NativeModule*);
+ void AssignRangesAndAddModule(Address start, Address end, NativeModule*);
bool ShouldForceCriticalMemoryPressureNotification();
WasmMemoryTracker* const memory_tracker_;
+ std::atomic<size_t> remaining_uncommitted_code_space_;
mutable base::Mutex native_modules_mutex_;
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Protected by {native_modules_mutex_}:
+
std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
std::unordered_set<NativeModule*> native_modules_;
- std::atomic<size_t> remaining_uncommitted_code_space_;
+
+ // End of fields protected by {native_modules_mutex_}.
+ //////////////////////////////////////////////////////////////////////////////
DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};
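
WasmCode::index_ changes above from Maybe<uint32_t> to a plain uint32_t with a sentinel: anonymous code (jump tables, stubs, wrappers) carries an index that no real function can have. A sketch of the pattern, where the kV8MaxWasmFunctions value is an assumed stand-in:

// Sketch: a sentinel index marks anonymous code; assert() plays the role of
// V8's DCHECK to keep index() from being read on anonymous code.
#include <cassert>
#include <cstdint>

constexpr uint32_t kV8MaxWasmFunctions = 1000000;  // assumed stand-in value
constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
static_assert(kAnonymousFuncIndex > kV8MaxWasmFunctions,
              "the sentinel collides with no valid function index");

struct Code {
  uint32_t index_;
  bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
  uint32_t index() const {
    assert(!IsAnonymous());
    return index_;
  }
};

int main() {
  Code jump_table{kAnonymousFuncIndex};
  assert(jump_table.IsAnonymous());
  Code fn{42};
  assert(fn.index() == 42);
}
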
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 70794fc7ab..8e1f508979 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -25,7 +25,8 @@ enum ValueTypeCode : uint8_t {
kLocalF64 = 0x7c,
kLocalS128 = 0x7b,
kLocalAnyFunc = 0x70,
- kLocalAnyRef = 0x6f
+ kLocalAnyRef = 0x6f,
+ kLocalExceptRef = 0x68,
};
// Binary encoding of other types.
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
@@ -35,7 +36,8 @@ enum ImportExportKindCode : uint8_t {
kExternalFunction = 0,
kExternalTable = 1,
kExternalMemory = 2,
- kExternalGlobal = 3
+ kExternalGlobal = 3,
+ kExternalException = 4
};
// Binary encoding of maximum and shared flags for memories.
@@ -64,10 +66,12 @@ enum SectionCode : int8_t {
kDataSectionCode = 11, // Data segments
kNameSectionCode = 12, // Name section (encoded as a string)
kExceptionSectionCode = 13, // Exception section
+ kSourceMappingURLSectionCode = 14, // Source Map URL section
// Helper values
kFirstSectionInModule = kTypeSectionCode,
- kLastKnownModuleSection = kExceptionSectionCode,
+ kLastKnownModuleSection = kSourceMappingURLSectionCode,
+ kFirstUnorderedSection = kNameSectionCode,
};
// Binary encoding of name section kinds.
@@ -75,7 +79,7 @@ enum NameSectionKindCode : uint8_t { kModule = 0, kFunction = 1, kLocal = 2 };
constexpr size_t kWasmPageSize = 0x10000;
constexpr uint32_t kWasmPageSizeLog2 = 16;
-constexpr int kInvalidExceptionTag = -1;
+static_assert(kWasmPageSize == size_t{1} << kWasmPageSizeLog2, "consistency");
// TODO(wasm): Wrap WasmCodePosition in a struct.
using WasmCodePosition = int;
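
kFirstUnorderedSection above splits SectionCode in two: ordered sections must appear in increasing code order, while unordered ones (name, exception, sourceMappingURL) may appear out of sequence. A standalone sketch of the check the decoder applies, with an abbreviated enum and the exception-section special case omitted:

// Abbreviated SectionCode enum; only the ordering logic matters here.
#include <cstdint>
#include <cstdio>

enum SectionCode : int8_t {
  kTypeSectionCode = 1,
  kImportSectionCode = 2,
  kNameSectionCode = 12,
  kFirstUnorderedSection = kNameSectionCode,
};

bool CheckSectionOrder(uint8_t& next_ordered_section, SectionCode code) {
  if (code < next_ordered_section && code < kFirstUnorderedSection) {
    return false;  // out-of-order ordered section: the decoder errors out
  }
  if (code < kFirstUnorderedSection) {
    next_ordered_section = static_cast<uint8_t>(code + 1);
  }
  return true;
}

int main() {
  uint8_t next = kTypeSectionCode;
  std::printf("%d\n", CheckSectionOrder(next, kImportSectionCode));  // 1: ok
  std::printf("%d\n", CheckSectionOrder(next, kTypeSectionCode));    // 0: late
  std::printf("%d\n", CheckSectionOrder(next, kNameSectionCode));    // 1: free
}
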
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 0d8b1f18aa..425681a5e1 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -138,11 +138,10 @@ class InterpreterHandle {
}
public:
- // TODO(wasm): properly handlify this constructor.
- InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
+ InterpreterHandle(Isolate* isolate, Handle<WasmDebugInfo> debug_info)
: isolate_(isolate),
module_(debug_info->wasm_instance()->module_object()->module()),
- interpreter_(isolate, module_, GetBytes(debug_info),
+ interpreter_(isolate, module_, GetBytes(*debug_info),
handle(debug_info->wasm_instance(), isolate)) {}
~InterpreterHandle() { DCHECK_EQ(0, activations_.size()); }
@@ -416,10 +415,8 @@ class InterpreterHandle {
STATIC_CHAR_VECTOR("memory"));
Handle<JSArrayBuffer> memory_buffer(
instance->memory_object()->array_buffer(), isolate_);
- uint32_t byte_length;
- CHECK(memory_buffer->byte_length()->ToUint32(&byte_length));
Handle<JSTypedArray> uint8_array = isolate_->factory()->NewJSTypedArray(
- kExternalUint8Array, memory_buffer, 0, byte_length);
+ kExternalUint8Array, memory_buffer, 0, memory_buffer->byte_length());
JSObject::SetOwnPropertyIgnoreAttributes(global_scope_object, name,
uint8_array, NONE)
.Assert();
@@ -535,7 +532,7 @@ wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
// of the stack.
size_t interpreter_size = FLAG_stack_size * KB * 2;
handle = Managed<wasm::InterpreterHandle>::Allocate(
- isolate, interpreter_size, isolate, *debug_info);
+ isolate, interpreter_size, isolate, debug_info);
debug_info->set_interpreter_handle(*handle);
}
@@ -556,16 +553,17 @@ wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
Handle<FixedArray> GetOrCreateInterpretedFunctions(
Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
- Handle<Object> obj(debug_info->interpreted_functions(), isolate);
- if (!obj->IsUndefined(isolate)) return Handle<FixedArray>::cast(obj);
-
+ Handle<FixedArray> arr(debug_info->interpreted_functions(), isolate);
int num_functions = debug_info->wasm_instance()
->module_object()
->native_module()
->num_functions();
- Handle<FixedArray> new_arr = isolate->factory()->NewFixedArray(num_functions);
- debug_info->set_interpreted_functions(*new_arr);
- return new_arr;
+ if (arr->length() == 0 && num_functions > 0) {
+ arr = isolate->factory()->NewFixedArray(num_functions);
+ debug_info->set_interpreted_functions(*arr);
+ }
+ DCHECK_EQ(num_functions, arr->length());
+ return arr;
}
} // namespace
@@ -576,6 +574,7 @@ Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(
factory->NewStruct(WASM_DEBUG_INFO_TYPE, TENURED));
debug_info->set_wasm_instance(*instance);
+ debug_info->set_interpreted_functions(*factory->empty_fixed_array());
instance->set_debug_info(*debug_info);
return debug_info;
}
@@ -590,7 +589,7 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
// account for the growing strategy for the backing store of the stack.
size_t interpreter_size = FLAG_stack_size * KB * 2;
auto interp_handle = Managed<wasm::InterpreterHandle>::Allocate(
- isolate, interpreter_size, isolate, *debug_info);
+ isolate, interpreter_size, isolate, debug_info);
debug_info->set_interpreter_handle(*interp_handle);
auto ret = interp_handle->raw()->interpreter();
ret->SetCallIndirectTestMode();
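
The rewritten GetOrCreateInterpretedFunctions starts from the empty array installed in WasmDebugInfo::New and sizes it lazily on first use, so later callers always observe a correctly sized table. A rough sketch of that lazy-initialization pattern with standard containers (names are illustrative):

#include <cassert>
#include <cstddef>
#include <vector>

// Debug info starts with an empty table and allocates it on first request.
struct DebugInfo {
  std::vector<void*> interpreted_functions;  // empty until first use
};

std::vector<void*>& GetOrCreateInterpretedFunctions(DebugInfo* info,
                                                    size_t num_functions) {
  if (info->interpreted_functions.empty() && num_functions > 0) {
    info->interpreted_functions.resize(num_functions, nullptr);
  }
  assert(info->interpreted_functions.size() == num_functions);
  return info->interpreted_functions;
}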
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 4f772d9bdd..dc78797365 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -24,6 +24,8 @@ WasmEngine::WasmEngine()
WasmEngine::~WasmEngine() {
// All AsyncCompileJobs have been canceled.
DCHECK(jobs_.empty());
+ // All Isolates have been deregistered.
+ DCHECK(isolates_.empty());
}
bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
@@ -189,9 +191,11 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
Isolate* isolate, std::shared_ptr<NativeModule> shared_module) {
CHECK_EQ(code_manager(), shared_module->code_manager());
Vector<const byte> wire_bytes = shared_module->wire_bytes();
- Handle<Script> script = CreateWasmScript(isolate, wire_bytes);
+ const WasmModule* module = shared_module->module();
+ Handle<Script> script =
+ CreateWasmScript(isolate, wire_bytes, module->source_map_url);
Handle<WasmModuleObject> module_object =
- WasmModuleObject::New(isolate, shared_module, script);
+ WasmModuleObject::New(isolate, std::move(shared_module), script);
// TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}.
// This requires unlocking the code space here. This should eventually be
@@ -249,6 +253,7 @@ std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
for (auto& entry : jobs_) {
if (entry.first->isolate() == isolate) return true;
}
@@ -257,6 +262,7 @@ bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
for (auto it = jobs_.begin(); it != jobs_.end();) {
if (it->first->isolate() == isolate) {
it = jobs_.erase(it);
@@ -266,6 +272,18 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
}
}
+void WasmEngine::AddIsolate(Isolate* isolate) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK_EQ(0, isolates_.count(isolate));
+ isolates_.insert(isolate);
+}
+
+void WasmEngine::RemoveIsolate(Isolate* isolate) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
+ isolates_.erase(isolate);
+}
+
namespace {
struct WasmEnginePointerConstructTrait final {
@@ -284,16 +302,19 @@ base::LazyStaticInstance<std::shared_ptr<WasmEngine>,
} // namespace
+// static
void WasmEngine::InitializeOncePerProcess() {
if (!FLAG_wasm_shared_engine) return;
global_wasm_engine.Pointer()->reset(new WasmEngine());
}
+// static
void WasmEngine::GlobalTearDown() {
if (!FLAG_wasm_shared_engine) return;
global_wasm_engine.Pointer()->reset();
}
+// static
std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
if (FLAG_wasm_shared_engine) return global_wasm_engine.Get();
return std::shared_ptr<WasmEngine>(new WasmEngine());
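
AddIsolate/RemoveIsolate above keep a mutex-guarded set with exactly-once registration, enforced by DCHECKs and by the destructor. A standalone sketch of the same registry discipline, with assert standing in for DCHECK (illustrative, not V8 API):

#include <cassert>
#include <mutex>
#include <unordered_set>

struct Isolate {};

class EngineRegistry {
 public:
  void AddIsolate(Isolate* isolate) {
    std::lock_guard<std::mutex> guard(mutex_);
    assert(isolates_.count(isolate) == 0);  // register at most once
    isolates_.insert(isolate);
  }

  void RemoveIsolate(Isolate* isolate) {
    std::lock_guard<std::mutex> guard(mutex_);
    assert(isolates_.count(isolate) == 1);  // must be registered
    isolates_.erase(isolate);
  }

  // All isolates must deregister before the engine is torn down.
  ~EngineRegistry() { assert(isolates_.empty()); }

 private:
  std::mutex mutex_;
  std::unordered_set<Isolate*> isolates_;  // protected by mutex_
};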
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 66c12404b7..4f4cddb550 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -6,6 +6,7 @@
#define V8_WASM_WASM_ENGINE_H_
#include <memory>
+#include <unordered_set>
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-memory.h"
@@ -30,14 +31,14 @@ class V8_EXPORT_PRIVATE CompilationResultResolver {
public:
virtual void OnCompilationSucceeded(Handle<WasmModuleObject> result) = 0;
virtual void OnCompilationFailed(Handle<Object> error_reason) = 0;
- virtual ~CompilationResultResolver() {}
+ virtual ~CompilationResultResolver() = default;
};
class V8_EXPORT_PRIVATE InstantiationResultResolver {
public:
virtual void OnInstantiationSucceeded(Handle<WasmInstanceObject> result) = 0;
virtual void OnInstantiationFailed(Handle<Object> error_reason) = 0;
- virtual ~InstantiationResultResolver() {}
+ virtual ~InstantiationResultResolver() = default;
};
// The central data structure that represents an engine instance capable of
@@ -135,6 +136,10 @@ class V8_EXPORT_PRIVATE WasmEngine {
// for tearing down an isolate, or to clean it up to be reused.
void DeleteCompileJobsOnIsolate(Isolate* isolate);
+ // Manage the set of Isolates that use this WasmEngine.
+ void AddIsolate(Isolate* isolate);
+ void RemoveIsolate(Isolate* isolate);
+
// Call on process start and exit.
static void InitializeOncePerProcess();
static void GlobalTearDown();
@@ -168,6 +173,9 @@ class V8_EXPORT_PRIVATE WasmEngine {
std::unique_ptr<CompilationStatistics> compilation_stats_;
std::unique_ptr<CodeTracer> code_tracer_;
+ // Set of isolates which use this WasmEngine. Used for cross-isolate GCs.
+ std::unordered_set<Isolate*> isolates_;
+
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 0c7fb25b67..e724e73078 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -641,6 +641,8 @@ const char* OpcodeName(uint32_t val) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}
+} // namespace
+
class SideTable;
// Code and metadata needed to execute a function.
@@ -902,32 +904,6 @@ class SideTable : public ZoneObject {
}
};
-struct ExternalCallResult {
- enum Type {
- // The function should be executed inside this interpreter.
- INTERNAL,
- // For indirect calls: Table or function does not exist.
- INVALID_FUNC,
- // For indirect calls: Signature does not match expected signature.
- SIGNATURE_MISMATCH,
- // The function was executed and returned normally.
- EXTERNAL_RETURNED,
- // The function was executed, threw an exception, and the stack was unwound.
- EXTERNAL_UNWOUND
- };
- Type type;
- // If type is INTERNAL, this field holds the function to call internally.
- InterpreterCode* interpreter_code;
-
- ExternalCallResult(Type type) : type(type) { // NOLINT
- DCHECK_NE(INTERNAL, type);
- }
- ExternalCallResult(Type type, InterpreterCode* code)
- : type(type), interpreter_code(code) {
- DCHECK_EQ(INTERNAL, type);
- }
-};
-
// The main storage for interpreter code. It maps {WasmFunction} to the
// metadata needed to execute each function.
class CodeMap {
@@ -1037,6 +1013,34 @@ class CodeMap {
}
};
+namespace {
+
+struct ExternalCallResult {
+ enum Type {
+ // The function should be executed inside this interpreter.
+ INTERNAL,
+ // For indirect calls: Table or function does not exist.
+ INVALID_FUNC,
+ // For indirect calls: Signature does not match expected signature.
+ SIGNATURE_MISMATCH,
+ // The function was executed and returned normally.
+ EXTERNAL_RETURNED,
+ // The function was executed, threw an exception, and the stack was unwound.
+ EXTERNAL_UNWOUND
+ };
+ Type type;
+ // If type is INTERNAL, this field holds the function to call internally.
+ InterpreterCode* interpreter_code;
+
+ ExternalCallResult(Type type) : type(type) { // NOLINT
+ DCHECK_NE(INTERNAL, type);
+ }
+ ExternalCallResult(Type type, InterpreterCode* code)
+ : type(type), interpreter_code(code) {
+ DCHECK_EQ(INTERNAL, type);
+ }
+};
+
// Like a static_cast from src to dst, but specialized for boxed floats.
template <typename dst, typename src>
struct converter {
@@ -1073,6 +1077,8 @@ V8_INLINE bool has_nondeterminism<double>(double val) {
return std::isnan(val);
}
+} // namespace
+
// Responsible for executing code directly.
class ThreadImpl {
struct Activation {
@@ -1426,7 +1432,7 @@ class ThreadImpl {
Push(result);
len = 1 + imm.length;
- if (FLAG_wasm_trace_memory) {
+ if (FLAG_trace_wasm_memory) {
MemoryTracingInfo info(imm.offset + index, false, rep);
TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
code->function->func_index, static_cast<int>(pc),
@@ -1452,7 +1458,7 @@ class ThreadImpl {
WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
len = 1 + imm.length;
- if (FLAG_wasm_trace_memory) {
+ if (FLAG_trace_wasm_memory) {
MemoryTracingInfo info(imm.offset + index, true, rep);
TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
code->function->func_index, static_cast<int>(pc),
@@ -2762,17 +2768,19 @@ class ThreadImpl {
arg_buffer.resize(return_size);
}
- // Wrap the arg_buffer data pointer in a handle. As
- // this is an aligned pointer, to the GC it will look like a Smi.
+ // Wrap the arg_buffer and the code target data pointers in handles. As
+ // these are aligned pointers, to the GC it will look like Smis.
Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
isolate);
DCHECK(!arg_buffer_obj->IsHeapObject());
+ Handle<Object> code_entry_obj(
+ reinterpret_cast<Object*>(code->instruction_start()), isolate);
+ DCHECK(!code_entry_obj->IsHeapObject());
static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
"code below needs adaption");
Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
- args[compiler::CWasmEntryParameters::kCodeObject] = Handle<Object>::cast(
- isolate->factory()->NewForeign(code->instruction_start(), TENURED));
+ args[compiler::CWasmEntryParameters::kCodeEntry] = code_entry_obj;
args[compiler::CWasmEntryParameters::kWasmInstance] = instance;
args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
@@ -2983,6 +2991,8 @@ class InterpretedFrameImpl {
}
};
+namespace {
+
// Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
// Thread* is the public interface, without knowledge of the object layout.
// This cast is potentially risky, but as long as we always cast it back before
@@ -3090,20 +3100,21 @@ class WasmInterpreterInternals : public ZoneObject {
};
namespace {
-// TODO(wasm): a finalizer is only required to delete the global handle.
-void GlobalHandleDeleter(const v8::WeakCallbackInfo<void>& data) {
- GlobalHandles::Destroy(reinterpret_cast<Object**>(
- reinterpret_cast<JSObject**>(data.GetParameter())));
+void NopFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ Object** global_handle_location =
+ reinterpret_cast<Object**>(data.GetParameter());
+ GlobalHandles::Destroy(global_handle_location);
}
Handle<WasmInstanceObject> MakeWeak(
Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
- Handle<Object> handle = isolate->global_handles()->Create(*instance_object);
- // TODO(wasm): use a phantom handle in the WasmInterpreter.
- GlobalHandles::MakeWeak(handle.location(), handle.location(),
- &GlobalHandleDeleter,
- v8::WeakCallbackType::kFinalizer);
- return Handle<WasmInstanceObject>::cast(handle);
+ Handle<WasmInstanceObject> weak_instance =
+ isolate->global_handles()->Create<WasmInstanceObject>(*instance_object);
+ Object** global_handle_location =
+ Handle<Object>::cast(weak_instance).location();
+ GlobalHandles::MakeWeak(global_handle_location, global_handle_location,
+ &NopFinalizer, v8::WeakCallbackType::kParameter);
+ return weak_instance;
}
} // namespace
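
Both the arg-buffer and code-entry handles above rely on V8's pointer tagging: heap object pointers carry a set low bit, so an aligned (even) word is indistinguishable from a Smi and the GC leaves it alone. A tiny sketch of that invariant (the tag value matches V8's kHeapObjectTag; the helper and buffer are illustrative):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;

// True if the GC would treat this word as a small integer, not a pointer.
bool LooksLikeSmi(const void* p) {
  return (reinterpret_cast<uintptr_t>(p) & kHeapObjectTag) == 0;
}

int main() {
  alignas(8) static int buffer[4];  // aligned storage, low bit is zero
  assert(LooksLikeSmi(buffer));     // safe to smuggle through a Handle
}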
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index 896196ef67..31021a5d4e 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -208,7 +208,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
private:
Zone zone_;
- WasmInterpreterInternals* const internals_;
+ WasmInterpreterInternals* internals_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 1a20b88f10..314db914ed 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -212,7 +212,7 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
AsyncCompilationResolver(i::Isolate* isolate, i::Handle<i::JSPromise> promise)
: promise_(isolate->global_handles()->Create(*promise)) {}
- ~AsyncCompilationResolver() {
+ ~AsyncCompilationResolver() override {
i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
}
@@ -248,7 +248,7 @@ class InstantiateModuleResultResolver
i::Handle<i::JSPromise> promise)
: promise_(isolate->global_handles()->Create(*promise)) {}
- ~InstantiateModuleResultResolver() {
+ ~InstantiateModuleResultResolver() override {
i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
}
@@ -284,7 +284,7 @@ class InstantiateBytesResultResolver
promise_(isolate_->global_handles()->Create(*promise)),
module_(isolate_->global_handles()->Create(*module)) {}
- ~InstantiateBytesResultResolver() {
+ ~InstantiateBytesResultResolver() override {
i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(module_).location());
}
@@ -349,7 +349,7 @@ class AsyncInstantiateCompileResultResolver
: isolate_->global_handles()->Create(
*maybe_imports.ToHandleChecked())) {}
- ~AsyncInstantiateCompileResultResolver() {
+ ~AsyncInstantiateCompileResultResolver() override {
i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
if (!maybe_imports_.is_null()) {
i::GlobalHandles::Destroy(
@@ -956,7 +956,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::MaybeLocal<v8::Value> maybe = descriptor->Get(context, shared_key);
v8::Local<v8::Value> value;
if (maybe.ToLocal(&value)) {
- if (!value->BooleanValue(context).To(&is_shared_memory)) return;
+ is_shared_memory = value->BooleanValue(isolate);
}
}
// Throw TypeError if shared is true, and the descriptor has no "maximum"
@@ -1012,7 +1012,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::MaybeLocal<v8::Value> maybe = descriptor->Get(context, mutable_key);
v8::Local<v8::Value> value;
if (maybe.ToLocal(&value)) {
- if (!value->BooleanValue(context).To(&is_mutable)) return;
+ is_mutable = value->BooleanValue(isolate);
}
}
@@ -1095,6 +1095,15 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(Utils::ToLocal(global_js_object));
}
+// WebAssembly.Exception
+void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Exception()");
+ thrower.TypeError("WebAssembly.Exception cannot be called");
+}
+
constexpr const char* kName_WasmGlobalObject = "WebAssembly.Global";
constexpr const char* kName_WasmMemoryObject = "WebAssembly.Memory";
constexpr const char* kName_WasmInstanceObject = "WebAssembly.Instance";
@@ -1250,8 +1259,7 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.RangeError("This memory cannot be grown");
return;
}
- uint32_t old_size =
- old_buffer->byte_length()->Number() / i::wasm::kWasmPageSize;
+ int64_t old_size = old_buffer->byte_length() / i::wasm::kWasmPageSize;
int64_t new_size64 = old_size + delta_size;
if (delta_size < 0 || max_size64 < new_size64 || new_size64 < old_size) {
thrower.RangeError(new_size64 < old_size ? "trying to shrink memory"
@@ -1580,11 +1588,11 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSObject::AddProperty(isolate, memory_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
- // Setup Global
-
// The context is not set up completely yet. That's why we cannot use
// {WasmFeaturesFromIsolate} and have to use {WasmFeaturesFromFlags} instead.
auto enabled_features = i::wasm::WasmFeaturesFromFlags();
+
+ // Setup Global
if (enabled_features.mut_global) {
Handle<JSFunction> global_constructor =
InstallFunc(isolate, webassembly, "Global", WebAssemblyGlobal, 1);
@@ -1604,6 +1612,21 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
v8_str(isolate, "WebAssembly.Global"), ro_attributes);
}
+ // Setup Exception
+ if (enabled_features.eh) {
+ Handle<JSFunction> exception_constructor =
+ InstallFunc(isolate, webassembly, "Exception", WebAssemblyException, 1);
+ context->set_wasm_exception_constructor(*exception_constructor);
+ SetDummyInstanceTemplate(isolate, exception_constructor);
+ JSFunction::EnsureHasInitialMap(exception_constructor);
+ Handle<JSObject> exception_proto(
+ JSObject::cast(exception_constructor->instance_prototype()), isolate);
+ i::Handle<i::Map> exception_map = isolate->factory()->NewMap(
+ i::WASM_EXCEPTION_TYPE, WasmExceptionObject::kSize);
+ JSFunction::SetInitialMap(exception_constructor, exception_map,
+ exception_proto);
+ }
+
// Setup errors
attributes = static_cast<PropertyAttributes>(DONT_ENUM);
Handle<JSFunction> compile_error(
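
The WebAssemblyMemoryGrow hunk earlier in this file switches the page arithmetic to int64_t so that old size plus delta is range-checked before any narrowing back to 32 bits. A hedged sketch of the same check (names are illustrative):

#include <cstdint>

// Do the page arithmetic in 64-bit so "old + delta" cannot wrap before it
// is range-checked.
bool CanGrow(int64_t old_pages, int64_t delta_pages, int64_t max_pages) {
  int64_t new_pages = old_pages + delta_pages;
  if (delta_pages < 0) return false;        // shrinking is not allowed
  if (new_pages > max_pages) return false;  // beyond the declared maximum
  if (new_pages < old_pages) return false;  // overflow guard
  return true;
}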
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index 92390cc556..6f022207bf 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -40,6 +40,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {xmm1, xmm2};
// ===========================================================================
constexpr Register kGpParamRegisters[] = {r3, r0, r1, r2};
constexpr Register kGpReturnRegisters[] = {r0, r1};
+// ARM d-registers must be in ascending order for correct allocation.
constexpr DoubleRegister kFpParamRegisters[] = {d0, d1, d2, d3, d4, d5, d6, d7};
constexpr DoubleRegister kFpReturnRegisters[] = {d0, d1};
@@ -129,9 +130,11 @@ class LinkageAllocator {
#if V8_TARGET_ARCH_ARM
switch (rep) {
case MachineRepresentation::kFloat32:
- return extra_float_reg >= 0 || fp_offset_ < fp_count_;
+ return extra_float_reg_ >= 0 ||
+ (extra_double_reg_ >= 0 && extra_double_reg_ < 16) ||
+ (fp_offset_ < fp_count_ && fp_regs_[fp_offset_].code() < 16);
case MachineRepresentation::kFloat64:
- return extra_double_reg >= 0 || fp_offset_ < fp_count_;
+ return extra_double_reg_ >= 0 || fp_offset_ < fp_count_;
case MachineRepresentation::kSimd128:
return ((fp_offset_ + 1) & ~1) + 1 < fp_count_;
default:
@@ -151,10 +154,10 @@ class LinkageAllocator {
#if V8_TARGET_ARCH_ARM
switch (rep) {
case MachineRepresentation::kFloat32: {
- // Use the extra S-register if we can.
- if (extra_float_reg >= 0) {
- int reg_code = extra_float_reg;
- extra_float_reg = -1;
+ // Use the extra S-register if there is one.
+ if (extra_float_reg_ >= 0) {
+ int reg_code = extra_float_reg_;
+ extra_float_reg_ = -1;
return reg_code;
}
// Allocate a D-register and split into 2 float registers.
@@ -162,15 +165,15 @@ class LinkageAllocator {
DCHECK_GT(16, d_reg_code); // D-registers 16 - 31 can't split.
int reg_code = d_reg_code * 2;
// Save the extra S-register.
- DCHECK_EQ(-1, extra_float_reg);
- extra_float_reg = reg_code + 1;
+ DCHECK_EQ(-1, extra_float_reg_);
+ extra_float_reg_ = reg_code + 1;
return reg_code;
}
case MachineRepresentation::kFloat64: {
- // Use an extra D-register if we can.
- if (extra_double_reg >= 0) {
- int reg_code = extra_double_reg;
- extra_double_reg = -1;
+ // Use the extra D-register if there is one.
+ if (extra_double_reg_ >= 0) {
+ int reg_code = extra_double_reg_;
+ extra_double_reg_ = -1;
return reg_code;
}
DCHECK_LT(fp_offset_, fp_count_);
@@ -178,16 +181,16 @@ class LinkageAllocator {
}
case MachineRepresentation::kSimd128: {
// Q-register must be an even-odd pair, so we must try to allocate at
- // the end, not using extra_double_reg. If we are at an odd D-register,
- // skip past it (saving it to extra_double_reg).
+ // the end, not using extra_double_reg_. If we are at an odd D-register,
+ // skip past it (saving it to extra_double_reg_).
DCHECK_LT(((fp_offset_ + 1) & ~1) + 1, fp_count_);
int d_reg1_code = fp_regs_[fp_offset_++].code();
if (d_reg1_code % 2 != 0) {
- // If we're misaligned then extra_double_reg must have been consumed.
- DCHECK_EQ(-1, extra_double_reg);
+ // If we're misaligned then extra_double_reg_ must have been consumed.
+ DCHECK_EQ(-1, extra_double_reg_);
int odd_double_reg = d_reg1_code;
d_reg1_code = fp_regs_[fp_offset_++].code();
- extra_double_reg = odd_double_reg;
+ extra_double_reg_ = odd_double_reg;
}
// Combine the current D-register with the next to form a Q-register.
int d_reg2_code = fp_regs_[fp_offset_++].code();
@@ -244,8 +247,8 @@ class LinkageAllocator {
// ARM FP register aliasing may require splitting or merging double registers.
// Track fragments of registers below fp_offset_ here. There can only be one
// extra float and double register.
- int extra_float_reg = -1;
- int extra_double_reg = -1;
+ int extra_float_reg_ = -1;
+ int extra_double_reg_ = -1;
#endif
int stack_offset_ = 0;
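
The renamed extra_float_reg_/extra_double_reg_ fields track fragments created by ARM's FP register aliasing: for n < 16, d&lt;n&gt; overlaps s&lt;2n&gt; and s&lt;2n+1&gt;, so allocating a float may split a double register and bank the second half for later. A small sketch of that register-code arithmetic (illustrative, not V8 API):

#include <cassert>

// Splitting D-register d<n> (n < 16) yields S-register codes 2n and 2n+1.
struct SplitResult {
  int s_lo;  // s<2n>, handed out immediately
  int s_hi;  // s<2n+1>, saved as the "extra" float register
};

SplitResult SplitDoubleRegister(int d_reg_code) {
  assert(d_reg_code < 16);  // d16..d31 have no S-register aliases
  return {d_reg_code * 2, d_reg_code * 2 + 1};
}

int main() {
  SplitResult r = SplitDoubleRegister(3);  // d3 -> s6 + s7
  assert(r.s_lo == 6 && r.s_hi == 7);
}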
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index f7cc70a9e7..cf18817bb1 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -27,28 +27,14 @@ void AddAllocationStatusSample(Isolate* isolate,
}
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
- size_t size, bool require_full_guard_regions,
- void** allocation_base,
+ size_t size, void** allocation_base,
size_t* allocation_length) {
using AllocationStatus = WasmMemoryTracker::AllocationStatus;
-#if V8_TARGET_ARCH_32_BIT
- DCHECK(!require_full_guard_regions);
+#if V8_TARGET_ARCH_64_BIT
+ bool require_full_guard_regions = true;
+#else
+ bool require_full_guard_regions = false;
#endif
- // We always allocate the largest possible offset into the heap, so the
- // addressable memory after the guard page can be made inaccessible.
- //
- // To protect against 32-bit integer overflow issues, we also protect the 2GiB
- // before the valid part of the memory buffer.
- // TODO(7881): do not use static_cast<uint32_t>() here
- *allocation_length =
- require_full_guard_regions
- ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
- : RoundUp(
- base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
- kWasmPageSize);
- DCHECK_GE(*allocation_length, size);
- DCHECK_GE(*allocation_length, kWasmPageSize);
-
// Let the WasmMemoryTracker know we are going to reserve a bunch of
// address space.
// Try up to three times; getting rid of dead JSArrayBuffer allocations might
@@ -57,16 +43,43 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
static constexpr int kAllocationRetries = 2;
bool did_retry = false;
for (int trial = 0;; ++trial) {
- if (memory_tracker->ReserveAddressSpace(*allocation_length)) break;
+ // For guard regions, we always allocate the largest possible offset into
+ // the heap, so the addressable memory after the guard page can be made
+ // inaccessible.
+ //
+ // To protect against 32-bit integer overflow issues, we also protect the
+ // 2GiB before the valid part of the memory buffer.
+ // TODO(7881): do not use static_cast<uint32_t>() here
+ *allocation_length =
+ require_full_guard_regions
+ ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
+ : RoundUp(base::bits::RoundUpToPowerOfTwo32(
+ static_cast<uint32_t>(size)),
+ kWasmPageSize);
+ DCHECK_GE(*allocation_length, size);
+ DCHECK_GE(*allocation_length, kWasmPageSize);
+
+ auto limit = require_full_guard_regions ? WasmMemoryTracker::kSoftLimit
+ : WasmMemoryTracker::kHardLimit;
+ if (memory_tracker->ReserveAddressSpace(*allocation_length, limit)) break;
+
did_retry = true;
// After first and second GC: retry.
if (trial == kAllocationRetries) {
+ // If we fail to allocate guard regions and the fallback is enabled, then
+ // retry without full guard regions.
+ if (require_full_guard_regions && FLAG_wasm_trap_handler_fallback) {
+ require_full_guard_regions = false;
+ --trial; // one more try.
+ continue;
+ }
+
// We are over the address space limit. Fail.
//
// When running under the correctness fuzzer (i.e.
- // --abort-on-stack-or-string-length-overflow is preset), we crash instead
- // so it is not incorrectly reported as a correctness violation. See
- // https://crbug.com/828293#c4
+ // --abort-on-stack-or-string-length-overflow is preset), we crash
+ // instead so it is not incorrectly reported as a correctness
+ // violation. See https://crbug.com/828293#c4
if (FLAG_abort_on_stack_or_string_length_overflow) {
FATAL("could not allocate wasm memory");
}
@@ -81,8 +94,9 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
// The Reserve makes the whole region inaccessible by default.
DCHECK_NULL(*allocation_base);
for (int trial = 0;; ++trial) {
- *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
- PageAllocator::kNoAccess);
+ *allocation_base =
+ AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length,
+ kWasmPageSize, PageAllocator::kNoAccess);
if (*allocation_base != nullptr) break;
if (trial == kAllocationRetries) {
memory_tracker->ReleaseReservation(*allocation_length);
@@ -99,8 +113,9 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
// Make the part we care about accessible.
if (size > 0) {
- bool result = SetPermissions(memory, RoundUp(size, kWasmPageSize),
- PageAllocator::kReadWrite);
+ bool result =
+ SetPermissions(GetPlatformPageAllocator(), memory,
+ RoundUp(size, kWasmPageSize), PageAllocator::kReadWrite);
// SetPermissions commits the extra memory, which may put us over the
// process memory limit. If so, report this as an OOM.
if (!result) {
@@ -115,6 +130,20 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
: AllocationStatus::kSuccess);
return memory;
}
+
+#if V8_TARGET_ARCH_MIPS64
+// MIPS64 has a user space of 2^40 bytes on most processors,
+// address space limits need to be smaller.
+constexpr size_t kAddressSpaceSoftLimit = 0x2100000000L; // 132 GiB
+constexpr size_t kAddressSpaceHardLimit = 0x4000000000L; // 256 GiB
+#elif V8_TARGET_ARCH_64_BIT
+constexpr size_t kAddressSpaceSoftLimit = 0x6000000000L; // 384 GiB
+constexpr size_t kAddressSpaceHardLimit = 0x10100000000L; // 1 TiB + 4 GiB
+#else
+constexpr size_t kAddressSpaceSoftLimit = 0x90000000; // 2 GiB + 256 MiB
+constexpr size_t kAddressSpaceHardLimit = 0xC0000000; // 3 GiB
+#endif
+
} // namespace
WasmMemoryTracker::~WasmMemoryTracker() {
@@ -124,33 +153,19 @@ WasmMemoryTracker::~WasmMemoryTracker() {
DCHECK_EQ(allocated_address_space_, 0u);
}
-bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
-// Address space reservations are currently only meaningful using guard
-// regions, which is currently only supported on 64-bit systems. On other
-// platforms, we always fall back on bounds checks.
-#if V8_TARGET_ARCH_MIPS64
- // MIPS64 has a user space of 2^40 bytes on most processors,
- // address space limits needs to be smaller.
- constexpr size_t kAddressSpaceLimit = 0x2100000000L; // 132 GiB
-#elif V8_TARGET_ARCH_64_BIT
- // We set the limit to 1 TiB + 4 GiB so that there is room for mini-guards
- // once we fill everything up with full-sized guard regions.
- constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
-#else
- constexpr size_t kAddressSpaceLimit = 0x90000000; // 2 GiB + 256 MiB
-#endif
-
- int retries = 5; // cmpxchng can fail, retry some number of times.
- do {
- size_t old_count = reserved_address_space_;
- if ((kAddressSpaceLimit - old_count) < num_bytes) return false;
+bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
+ ReservationLimit limit) {
+ size_t reservation_limit =
+ limit == kSoftLimit ? kAddressSpaceSoftLimit : kAddressSpaceHardLimit;
+ while (true) {
+ size_t old_count = reserved_address_space_.load();
+ if (old_count > reservation_limit) return false;
+ if (reservation_limit - old_count < num_bytes) return false;
if (reserved_address_space_.compare_exchange_weak(old_count,
old_count + num_bytes)) {
return true;
}
- } while (retries-- > 0);
-
- return false;
+ }
}
void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
@@ -233,7 +248,8 @@ bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
const void* buffer_start) {
if (IsWasmMemory(buffer_start)) {
const AllocationData allocation = ReleaseAllocation(isolate, buffer_start);
- CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
+ CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
+ allocation.allocation_length));
return true;
}
return false;
@@ -272,25 +288,9 @@ MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
void* allocation_base = nullptr;
size_t allocation_length = 0;
-#if V8_TARGET_ARCH_64_BIT
- bool require_full_guard_regions = true;
-#else
- bool require_full_guard_regions = false;
-#endif
void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
- require_full_guard_regions,
&allocation_base, &allocation_length);
- if (memory == nullptr && FLAG_wasm_trap_handler_fallback) {
- // If we failed to allocate with full guard regions, fall back on
- // mini-guards.
- require_full_guard_regions = false;
- memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
- require_full_guard_regions,
- &allocation_base, &allocation_length);
- }
- if (memory == nullptr) {
- return {};
- }
+ if (memory == nullptr) return {};
#if DEBUG
// Double check the API allocator actually zero-initialized the memory.
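
The rewritten ReserveAddressSpace above replaces the bounded retry count with an unbounded compare-exchange loop, since compare_exchange_weak can fail spuriously without the limit actually being reached. A standalone sketch of the same lock-free reservation counter (illustrative, not V8 API):

#include <atomic>
#include <cstddef>

class ReservationCounter {
 public:
  // Retry the compare-exchange until the counter is raised or the limit
  // would be exceeded; spurious CAS failures simply loop again.
  bool Reserve(size_t num_bytes, size_t limit) {
    while (true) {
      size_t old_count = reserved_.load();
      if (old_count > limit) return false;
      if (limit - old_count < num_bytes) return false;
      if (reserved_.compare_exchange_weak(old_count, old_count + num_bytes)) {
        return true;
      }
    }
  }

 private:
  std::atomic<size_t> reserved_{0};
};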
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index d95f7a88c8..5a919fe71c 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -24,13 +24,16 @@ namespace wasm {
// that buffer.
class WasmMemoryTracker {
public:
- WasmMemoryTracker() {}
+ WasmMemoryTracker() = default;
V8_EXPORT_PRIVATE ~WasmMemoryTracker();
// ReserveAddressSpace attempts to increase the reserved address space counter
// by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
// and reserve {num_bytes} bytes), false otherwise.
- bool ReserveAddressSpace(size_t num_bytes);
+ // Use {kSoftLimit} if you can implement a fallback which needs less reserved
+ // memory.
+ enum ReservationLimit { kSoftLimit, kHardLimit };
+ bool ReserveAddressSpace(size_t num_bytes, ReservationLimit limit);
void RegisterAllocation(Isolate* isolate, void* allocation_base,
size_t allocation_length, void* buffer_start,
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index ab603bfb3a..7e4621571a 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -27,15 +27,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-// static
-const WasmExceptionSig WasmException::empty_sig_(0, 0, nullptr);
-
-// static
-constexpr const char* WasmException::kRuntimeIdStr;
-
-// static
-constexpr const char* WasmException::kRuntimeValuesStr;
-
WireBytesRef WasmModule::LookupFunctionName(const ModuleWireBytes& wire_bytes,
uint32_t function_index) const {
if (!function_names) {
@@ -57,20 +48,6 @@ void WasmModule::AddFunctionNameForTesting(int function_index,
}
// Get a string stored in the module bytes representing a name.
-WasmName ModuleWireBytes::GetName(WireBytesRef ref) const {
- if (ref.is_empty()) return {"<?>", 3}; // no name.
- CHECK(BoundsCheck(ref.offset(), ref.length()));
- return WasmName::cast(
- module_bytes_.SubVector(ref.offset(), ref.end_offset()));
-}
-
-// Get a string stored in the module bytes representing a function name.
-WasmName ModuleWireBytes::GetName(const WasmFunction* function,
- const WasmModule* module) const {
- return GetName(module->LookupFunctionName(*this, function->func_index));
-}
-
-// Get a string stored in the module bytes representing a name.
WasmName ModuleWireBytes::GetNameOrNull(WireBytesRef ref) const {
if (!ref.is_set()) return {nullptr, 0}; // no name.
CHECK(BoundsCheck(ref.offset(), ref.length()));
@@ -129,6 +106,7 @@ Handle<JSArray> GetImports(Isolate* isolate,
Handle<String> table_string = factory->InternalizeUtf8String("table");
Handle<String> memory_string = factory->InternalizeUtf8String("memory");
Handle<String> global_string = factory->InternalizeUtf8String("global");
+ Handle<String> exception_string = factory->InternalizeUtf8String("exception");
// Create the result array.
const WasmModule* module = module_object->module();
@@ -161,6 +139,9 @@ Handle<JSArray> GetImports(Isolate* isolate,
case kExternalGlobal:
import_kind = global_string;
break;
+ case kExternalException:
+ import_kind = exception_string;
+ break;
default:
UNREACHABLE();
}
@@ -196,6 +177,7 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<String> table_string = factory->InternalizeUtf8String("table");
Handle<String> memory_string = factory->InternalizeUtf8String("memory");
Handle<String> global_string = factory->InternalizeUtf8String("global");
+ Handle<String> exception_string = factory->InternalizeUtf8String("exception");
// Create the result array.
const WasmModule* module = module_object->module();
@@ -226,6 +208,9 @@ Handle<JSArray> GetExports(Isolate* isolate,
case kExternalGlobal:
export_kind = global_string;
break;
+ case kExternalException:
+ export_kind = exception_string;
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index b1020661ab..d188cf59e1 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -9,7 +9,7 @@
#include "src/globals.h"
#include "src/handles.h"
-#include "src/wasm/decoder.h"
+#include "src/vector.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-opcodes.h"
@@ -22,8 +22,31 @@ class WasmModuleObject;
namespace wasm {
+using WasmName = Vector<const char>;
+
class ErrorThrower;
+// Reference to a string in the wire bytes.
+class WireBytesRef {
+ public:
+ WireBytesRef() : WireBytesRef(0, 0) {}
+ WireBytesRef(uint32_t offset, uint32_t length)
+ : offset_(offset), length_(length) {
+ DCHECK_IMPLIES(offset_ == 0, length_ == 0);
+ DCHECK_LE(offset_, offset_ + length_); // no uint32_t overflow.
+ }
+
+ uint32_t offset() const { return offset_; }
+ uint32_t length() const { return length_; }
+ uint32_t end_offset() const { return offset_ + length_; }
+ bool is_empty() const { return length_ == 0; }
+ bool is_set() const { return offset_ != 0; }
+
+ private:
+ uint32_t offset_;
+ uint32_t length_;
+};
+
// Static representation of a wasm function.
struct WasmFunction {
FunctionSig* sig; // signature of the function.
@@ -51,19 +74,12 @@ struct WasmGlobal {
// function signature.
typedef FunctionSig WasmExceptionSig;
+// Static representation of a wasm exception type.
struct WasmException {
- explicit WasmException(const WasmExceptionSig* sig = &empty_sig_)
- : sig(sig) {}
+ explicit WasmException(const WasmExceptionSig* sig) : sig(sig) {}
FunctionSig* ToFunctionSig() const { return const_cast<FunctionSig*>(sig); }
const WasmExceptionSig* sig; // type signature of the exception.
-
- // Used to hold data on runtime exceptions.
- static constexpr const char* kRuntimeIdStr = "WasmExceptionRuntimeId";
- static constexpr const char* kRuntimeValuesStr = "WasmExceptionValues";
-
- private:
- static const WasmExceptionSig empty_sig_;
};
// Static representation of a wasm data segment.
@@ -156,6 +172,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
ModuleOrigin origin = kWasmOrigin; // origin of the module
mutable std::unique_ptr<std::unordered_map<uint32_t, WireBytesRef>>
function_names;
+ std::string source_map_url;
explicit WasmModule(std::unique_ptr<Zone> owned = nullptr);
@@ -179,13 +196,6 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
}
// Get a string stored in the module bytes representing a name.
- WasmName GetName(WireBytesRef ref) const;
-
- // Get a string stored in the module bytes representing a function name.
- WasmName GetName(const WasmFunction* function,
- const WasmModule* module) const;
-
- // Get a string stored in the module bytes representing a name.
WasmName GetNameOrNull(WireBytesRef ref) const;
// Get a string stored in the module bytes representing a function name.
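
WireBytesRef, moved into wasm-module.h above, encodes a name as an (offset, length) slice of the wire bytes, with offset zero doubling as "not set"; GetNameOrNull returns an empty result in that case. A rough usage sketch with illustrative stand-in types:

#include <cassert>
#include <cstdint>
#include <string>

struct WireBytesRef {
  uint32_t offset = 0;
  uint32_t length = 0;
  bool is_set() const { return offset != 0; }
  uint32_t end_offset() const { return offset + length; }
};

std::string GetNameOrNull(const std::string& wire_bytes, WireBytesRef ref) {
  if (!ref.is_set()) return {};                   // no name recorded
  assert(ref.end_offset() <= wire_bytes.size());  // bounds check
  return wire_bytes.substr(ref.offset, ref.length);
}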
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 481d2274bf..0144b8af5b 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -22,6 +22,7 @@ namespace v8 {
namespace internal {
CAST_ACCESSOR(WasmDebugInfo)
+CAST_ACCESSOR(WasmExceptionObject)
CAST_ACCESSOR(WasmExportedFunctionData)
CAST_ACCESSOR(WasmGlobalObject)
CAST_ACCESSOR(WasmInstanceObject)
@@ -101,10 +102,7 @@ int WasmGlobalObject::type_size() const {
}
Address WasmGlobalObject::address() const {
- uint32_t buffer_size = 0;
- DCHECK(array_buffer()->byte_length()->ToUint32(&buffer_size));
- DCHECK_LE(offset() + type_size(), buffer_size);
- USE(buffer_size);
+ DCHECK_LE(offset() + type_size(), array_buffer()->byte_length());
return Address(array_buffer()->backing_store()) + offset();
}
@@ -187,6 +185,8 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_instances,
FixedArray, kIndirectFunctionTableInstancesOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, exceptions_table, FixedArray,
+ kExceptionsTableOffset)
ACCESSORS(WasmInstanceObject, undefined_value, Oddball, kUndefinedValueOffset)
ACCESSORS(WasmInstanceObject, null_value, Oddball, kNullValueOffset)
ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
@@ -209,6 +209,11 @@ ImportedFunctionEntry::ImportedFunctionEntry(
DCHECK_LT(index, instance->module()->num_imported_functions);
}
+// WasmExceptionObject
+ACCESSORS(WasmExceptionObject, serialized_signature, PodArray<wasm::ValueType>,
+ kSerializedSignatureOffset)
+ACCESSORS(WasmExceptionObject, exception_tag, HeapObject, kExceptionTagOffset)
+
// WasmExportedFunctionData
ACCESSORS(WasmExportedFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
@@ -220,7 +225,7 @@ SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
// WasmDebugInfo
ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
-ACCESSORS(WasmDebugInfo, interpreted_functions, Object,
+ACCESSORS(WasmDebugInfo, interpreted_functions, FixedArray,
kInterpretedFunctionsOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 4cd66a81c5..9d0e20ab2b 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -72,16 +72,14 @@ class WasmInstanceNativeAllocations {
reinterpret_cast<Address*>(
calloc(num_imported_mutable_globals, sizeof(Address))));
}
- ~WasmInstanceNativeAllocations() { free(); }
- // Frees natively-allocated storage.
- void free() {
+ ~WasmInstanceNativeAllocations() {
::free(indirect_function_table_sig_ids_);
- ::free(indirect_function_table_targets_);
- ::free(imported_function_targets_);
- ::free(imported_mutable_globals_);
indirect_function_table_sig_ids_ = nullptr;
+ ::free(indirect_function_table_targets_);
indirect_function_table_targets_ = nullptr;
+ ::free(imported_function_targets_);
imported_function_targets_ = nullptr;
+ ::free(imported_mutable_globals_);
imported_mutable_globals_ = nullptr;
}
// Resizes the indirect function table.
@@ -262,9 +260,9 @@ bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object,
isolate);
for (int i = 0; i < weak_instance_list->length(); ++i) {
MaybeObject* maybe_instance = weak_instance_list->Get(i);
- if (maybe_instance->IsWeakHeapObject()) {
+ if (maybe_instance->IsWeak()) {
Handle<WasmInstanceObject> instance(
- WasmInstanceObject::cast(maybe_instance->ToWeakHeapObject()),
+ WasmInstanceObject::cast(maybe_instance->GetHeapObjectAssumeWeak()),
isolate);
Handle<WasmDebugInfo> debug_info =
WasmInstanceObject::GetOrCreateDebugInfo(instance);
@@ -690,7 +688,7 @@ Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
wasm::WireBytesRef name_ref =
module()->LookupFunctionName(wire_bytes, func_index);
- wasm::WasmName name = wire_bytes.GetName(name_ref);
+ wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
return Vector<const uint8_t>::cast(name);
}
@@ -728,6 +726,14 @@ int WasmModuleObject::GetContainingFunction(uint32_t byte_offset) {
bool WasmModuleObject::GetPositionInfo(uint32_t position,
Script::PositionInfo* info) {
+ if (script()->source_mapping_url()->IsString()) {
+ if (module()->functions.size() == 0) return false;
+ info->line = 0;
+ info->column = position;
+ info->line_start = module()->functions[0].code.offset();
+ info->line_end = module()->functions.back().code.end_offset();
+ return true;
+ }
int func_index = GetContainingFunction(position);
if (func_index < 0) return false;
@@ -881,7 +887,7 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
uint32_t maximum_pages) {
if (!old_buffer->is_growable()) return {};
void* old_mem_start = old_buffer->backing_store();
- size_t old_size = old_buffer->byte_length()->Number();
+ size_t old_size = old_buffer->byte_length();
CHECK_GE(wasm::kV8MaxWasmMemoryBytes, old_size);
CHECK_EQ(0, old_size % wasm::kWasmPageSize);
size_t old_pages = old_size / wasm::kWasmPageSize;
@@ -904,8 +910,8 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
DCHECK_NOT_NULL(old_buffer->backing_store());
// If adjusting permissions fails, propagate error back to return
// failure to grow.
- if (!i::SetPermissions(old_mem_start, new_size,
- PageAllocator::kReadWrite)) {
+ if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start,
+ new_size, PageAllocator::kReadWrite)) {
return {};
}
reinterpret_cast<v8::Isolate*>(isolate)
@@ -949,7 +955,7 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
void SetInstanceMemory(Handle<WasmInstanceObject> instance,
Handle<JSArrayBuffer> buffer) {
instance->SetRawMemory(reinterpret_cast<byte*>(buffer->backing_store()),
- buffer->byte_length()->Number());
+ buffer->byte_length());
#if DEBUG
if (!FLAG_mock_arraybuffer_allocator) {
// To flush out bugs earlier, in DEBUG mode, check that all pages of the
@@ -992,9 +998,8 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
}
uint32_t WasmMemoryObject::current_pages() {
- uint32_t byte_length;
- CHECK(array_buffer()->byte_length()->ToUint32(&byte_length));
- return byte_length / wasm::kWasmPageSize;
+ return static_cast<uint32_t>(array_buffer()->byte_length() /
+ wasm::kWasmPageSize);
}
bool WasmMemoryObject::has_full_guard_region(Isolate* isolate) {
@@ -1049,8 +1054,7 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
uint32_t pages) {
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
if (!old_buffer->is_growable()) return -1;
- uint32_t old_size = 0;
- CHECK(old_buffer->byte_length()->ToUint32(&old_size));
+ size_t old_size = old_buffer->byte_length();
DCHECK_EQ(0, old_size % wasm::kWasmPageSize);
Handle<JSArrayBuffer> new_buffer;
@@ -1069,17 +1073,17 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
for (int i = 0; i < instances->length(); i++) {
MaybeObject* elem = instances->Get(i);
HeapObject* heap_object;
- if (elem->ToWeakHeapObject(&heap_object)) {
+ if (elem->GetHeapObjectIfWeak(&heap_object)) {
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(heap_object), isolate);
SetInstanceMemory(instance, new_buffer);
} else {
- DCHECK(elem->IsClearedWeakHeapObject());
+ DCHECK(elem->IsCleared());
}
}
}
memory_object->set_array_buffer(*new_buffer);
- return old_size / wasm::kWasmPageSize;
+ return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
}
// static
@@ -1107,9 +1111,7 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
}
// Check that the offset is in bounds.
- uint32_t buffer_size = 0;
- CHECK(buffer->byte_length()->ToUint32(&buffer_size));
- CHECK(offset + type_size <= buffer_size);
+ CHECK_LE(offset + type_size, buffer->byte_length());
global_obj->set_array_buffer(*buffer);
global_obj->set_flags(0);
@@ -1297,50 +1299,52 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
return instance;
}
-namespace {
-void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
- DisallowHeapAllocation no_gc;
- JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
- WasmInstanceObject* instance = reinterpret_cast<WasmInstanceObject*>(*p);
- Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
- // If a link to shared memory instances exists, update the list of memory
- // instances before the instance is destroyed.
- TRACE("Finalizing instance of %p {\n",
- instance->module_object()->native_module());
-
- // Since the order of finalizers is not guaranteed, it can be the case
- // that {instance->compiled_module()->module()}, which is a
- // {Managed<WasmModule>} has been collected earlier in this GC cycle.
- // Weak references to this instance won't be cleared until
- // the next GC cycle, so we need to manually break some links (such as
- // the weak references from {WasmMemoryObject::instances}.
- if (instance->has_memory_object()) {
- WasmMemoryObject::RemoveInstance(handle(instance->memory_object(), isolate),
- handle(instance, isolate));
+Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
+ wasm::NativeModule* native_module = module_object()->native_module();
+ if (func_index < native_module->num_imported_functions()) {
+ return imported_function_targets()[func_index];
}
-
- // Free raw C++ memory associated with the instance.
- GetNativeAllocations(instance)->free();
-
- GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
- TRACE("}\n");
+ return native_module->GetCallTargetForFunction(func_index);
}
-} // namespace
+// static
+Handle<WasmExceptionObject> WasmExceptionObject::New(
+ Isolate* isolate, const wasm::FunctionSig* sig,
+ Handle<HeapObject> exception_tag) {
+ Handle<JSFunction> exception_cons(
+ isolate->native_context()->wasm_exception_constructor(), isolate);
+ Handle<JSObject> exception_object =
+ isolate->factory()->NewJSObject(exception_cons, TENURED);
+ Handle<WasmExceptionObject> exception =
+ Handle<WasmExceptionObject>::cast(exception_object);
+
+ // Serialize the signature.
+ DCHECK_EQ(0, sig->return_count());
+ DCHECK_LE(sig->parameter_count(), std::numeric_limits<int>::max());
+ int sig_size = static_cast<int>(sig->parameter_count());
+ Handle<PodArray<wasm::ValueType>> serialized_sig =
+ PodArray<wasm::ValueType>::New(isolate, sig_size, TENURED);
+ int index = 0; // Index into the {PodArray} above.
+ for (wasm::ValueType param : sig->parameters()) {
+ serialized_sig->set(index++, param);
+ }
+ exception->set_serialized_signature(*serialized_sig);
+ exception->set_exception_tag(*exception_tag);
-void WasmInstanceObject::InstallFinalizer(Isolate* isolate,
- Handle<WasmInstanceObject> instance) {
- Handle<Object> global_handle = isolate->global_handles()->Create(*instance);
- GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
- InstanceFinalizer, v8::WeakCallbackType::kFinalizer);
+ return exception;
}
-Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
- wasm::NativeModule* native_module = module_object()->native_module();
- if (func_index < native_module->num_imported_functions()) {
- return imported_function_targets()[func_index];
+bool WasmExceptionObject::IsSignatureEqual(const wasm::FunctionSig* sig) {
+ DCHECK_EQ(0, sig->return_count());
+ DCHECK_LE(sig->parameter_count(), std::numeric_limits<int>::max());
+ int sig_size = static_cast<int>(sig->parameter_count());
+ if (sig_size != serialized_signature()->length()) return false;
+ for (int index = 0; index < sig_size; ++index) {
+ if (sig->GetParam(index) != serialized_signature()->get(index)) {
+ return false;
+ }
}
- return native_module->GetCallTargetForFunction(func_index);
+ return true;
}
bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
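
WasmExceptionObject::New above serializes only the parameter types (exception signatures have no returns), and IsSignatureEqual then compares them element-wise against a candidate signature. A minimal sketch of that comparison (illustrative types, not V8 API):

#include <cstddef>
#include <vector>

enum class ValueType { kI32, kI64, kF32, kF64 };

// Matches iff the serialized parameter types agree element-wise.
bool IsSignatureEqual(const std::vector<ValueType>& serialized,
                      const std::vector<ValueType>& params) {
  if (serialized.size() != params.size()) return false;
  for (size_t i = 0; i < serialized.size(); ++i) {
    if (serialized[i] != params[i]) return false;
  }
  return true;
}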
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index a493f97e95..084b70489c 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -387,6 +387,7 @@ class WasmInstanceObject : public JSObject {
DECL_ACCESSORS(imported_function_callables, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_instances, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
+ DECL_OPTIONAL_ACCESSORS(exceptions_table, FixedArray)
DECL_ACCESSORS(undefined_value, Oddball)
DECL_ACCESSORS(null_value, Oddball)
DECL_ACCESSORS(centry_stub, Code)
@@ -422,6 +423,7 @@ class WasmInstanceObject : public JSObject {
V(kImportedFunctionCallablesOffset, kPointerSize) \
V(kIndirectFunctionTableInstancesOffset, kPointerSize) \
V(kManagedNativeAllocationsOffset, kPointerSize) \
+ V(kExceptionsTableOffset, kPointerSize) \
V(kUndefinedValueOffset, kPointerSize) \
V(kNullValueOffset, kPointerSize) \
V(kCEntryStubOffset, kPointerSize) \
@@ -461,15 +463,37 @@ class WasmInstanceObject : public JSObject {
static Handle<WasmInstanceObject> New(Isolate*, Handle<WasmModuleObject>);
- static void InstallFinalizer(Isolate* isolate,
- Handle<WasmInstanceObject> instance);
-
Address GetCallTarget(uint32_t func_index);
// Iterates all fields in the object except the untagged fields.
class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
+};
+
+// Representation of WebAssembly.Exception JavaScript-level object.
+class WasmExceptionObject : public JSObject {
+ public:
+ DECL_CAST(WasmExceptionObject)
+
+ DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
+ DECL_ACCESSORS(exception_tag, HeapObject)
+
+// Layout description.
+#define WASM_EXCEPTION_OBJECT_FIELDS(V) \
+ V(kSerializedSignatureOffset, kPointerSize) \
+ V(kExceptionTagOffset, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ WASM_EXCEPTION_OBJECT_FIELDS)
+#undef WASM_EXCEPTION_OBJECT_FIELDS
+
+ // Checks whether the given {sig} has the same parameter types as the
+ // serialized signature stored within this exception object.
+ bool IsSignatureEqual(const wasm::FunctionSig* sig);
+
+ static Handle<WasmExceptionObject> New(Isolate* isolate,
+ const wasm::FunctionSig* sig,
+ Handle<HeapObject> exception_tag);
};
// A WASM function that is wrapped and exported to JavaScript.
@@ -521,12 +545,9 @@ class WasmExportedFunctionData : public Struct {
class WasmDebugInfo : public Struct, public NeverReadOnlySpaceObject {
public:
- using NeverReadOnlySpaceObject::GetHeap;
- using NeverReadOnlySpaceObject::GetIsolate;
-
DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
- DECL_ACCESSORS(interpreter_handle, Object);
- DECL_ACCESSORS(interpreted_functions, Object);
+ DECL_ACCESSORS(interpreter_handle, Object); // Foreign or undefined
+ DECL_ACCESSORS(interpreted_functions, FixedArray);
DECL_OPTIONAL_ACCESSORS(locals_names, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entries, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entry_map, Managed<wasm::SignatureMap>)
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index dff02f8147..238873228f 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -6,7 +6,6 @@
#define V8_WASM_WASM_OPCODES_H_
#include "src/globals.h"
-#include "src/vector.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-constants.h"
@@ -22,8 +21,6 @@ using FunctionSig = Signature<ValueType>;
std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
bool IsJSCompatibleSignature(const FunctionSig* sig);
-using WasmName = Vector<const char>;
-
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
V(Unreachable, 0x00, _) \
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 314f320752..1bcad13030 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -149,7 +149,7 @@ ErrorThrower::ErrorThrower(ErrorThrower&& other) V8_NOEXCEPT
: isolate_(other.isolate_),
context_(other.context_),
error_type_(other.error_type_),
- error_msg_(other.error_msg_) {
+ error_msg_(std::move(other.error_msg_)) {
other.error_type_ = kNone;
}
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 2edc412afa..b676a5b61b 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -46,9 +46,9 @@ class Writer {
DCHECK_GE(current_size(), sizeof(T));
WriteUnalignedValue(reinterpret_cast<Address>(current_location()), value);
pos_ += sizeof(T);
- if (FLAG_wasm_trace_serialization) {
- StdoutStream{} << "wrote: " << (size_t)value << " sized: " << sizeof(T)
- << std::endl;
+ if (FLAG_trace_wasm_serialization) {
+ StdoutStream{} << "wrote: " << static_cast<size_t>(value)
+ << " sized: " << sizeof(T) << std::endl;
}
}
@@ -58,7 +58,7 @@ class Writer {
memcpy(current_location(), v.start(), v.size());
pos_ += v.size();
}
- if (FLAG_wasm_trace_serialization) {
+ if (FLAG_trace_wasm_serialization) {
StdoutStream{} << "wrote vector of " << v.size() << " elements"
<< std::endl;
}
@@ -90,9 +90,9 @@ class Reader {
T value =
ReadUnalignedValue<T>(reinterpret_cast<Address>(current_location()));
pos_ += sizeof(T);
- if (FLAG_wasm_trace_serialization) {
- StdoutStream{} << "read: " << (size_t)value << " sized: " << sizeof(T)
- << std::endl;
+ if (FLAG_trace_wasm_serialization) {
+ StdoutStream{} << "read: " << static_cast<size_t>(value)
+ << " sized: " << sizeof(T) << std::endl;
}
return value;
}
@@ -103,7 +103,7 @@ class Reader {
memcpy(v.start(), current_location(), v.size());
pos_ += v.size();
}
- if (FLAG_wasm_trace_serialization) {
+ if (FLAG_trace_wasm_serialization) {
StdoutStream{} << "read vector of " << v.size() << " elements"
<< std::endl;
}
@@ -127,14 +127,6 @@ void WriteVersion(Isolate* isolate, Writer* writer) {
writer->Write(FlagList::Hash());
}
-bool IsSupportedVersion(Isolate* isolate, const Vector<const byte> version) {
- if (version.size() < kVersionSize) return false;
- byte current_version[kVersionSize];
- Writer writer({current_version, kVersionSize});
- WriteVersion(isolate, &writer);
- return memcmp(version.start(), current_version, kVersionSize) == 0;
-}
-
// On Intel, call sites are encoded as a displacement. For linking and for
// serialization/deserialization, we want to store/retrieve a tag (the function
// index). On Intel, that means accessing the raw displacement.
@@ -537,6 +529,14 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
return true;
}
+bool IsSupportedVersion(Isolate* isolate, Vector<const byte> version) {
+ if (version.size() < kVersionSize) return false;
+ byte current_version[kVersionSize];
+ Writer writer({current_version, kVersionSize});
+ WriteVersion(isolate, &writer);
+ return memcmp(version.start(), current_version, kVersionSize) == 0;
+}
+
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes) {
if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
@@ -553,7 +553,8 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
if (!decode_result.ok()) return {};
CHECK_NOT_NULL(decode_result.val);
WasmModule* module = decode_result.val.get();
- Handle<Script> script = CreateWasmScript(isolate, wire_bytes);
+ Handle<Script> script =
+ CreateWasmScript(isolate, wire_bytes, module->source_map_url);
// TODO(eholk): We need to properly preserve the flag whether the trap
// handler was used or not when serializing.
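
[editor note] Moving IsSupportedVersion below ReadCode is a pure reordering: it still writes the current version header into a scratch buffer and memcmps it against the input. A hedged sketch of the caller-side guard this function enables (the setup of `data` and `wire_bytes` is assumed):

  // Reject snapshots produced by a different V8 build before decoding.
  if (!IsSupportedVersion(isolate, data)) return {};
  MaybeHandle<WasmModuleObject> module =
      DeserializeNativeModule(isolate, data, wire_bytes);
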
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index 436a369fb6..93f79a59de 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -11,9 +11,9 @@ namespace v8 {
namespace internal {
namespace wasm {
-// Support to serialize WebAssembly {NativeModule} objects. This class intends
-// to be thread-safe in that it takes a consistent snapshot of the module state
-// at instantiation, allowing other threads to mutate the module concurrently.
+// Support for serializing WebAssembly {NativeModule} objects. This class takes
+// a snapshot of the module state at instantiation, and other code that modifies
+// the module after that won't affect the serialized result.
class WasmSerializer {
public:
WasmSerializer(Isolate* isolate, NativeModule* native_module);
@@ -31,7 +31,11 @@ class WasmSerializer {
std::vector<WasmCode*> code_table_;
};
-// Support to deserialize WebAssembly {NativeModule} objects.
+// Support for deserializing WebAssembly {NativeModule} objects.
+// Checks the version header of the data against the current version.
+bool IsSupportedVersion(Isolate* isolate, Vector<const byte> data);
+
+// Deserializes the given data to create a compiled Wasm module.
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes);
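
[editor note] For orientation, a rough round trip through the two entry points in this header; this is a sketch assuming WasmSerializer exposes GetSerializedNativeModuleSize() and SerializeNativeModule(Vector<byte>), which this hunk does not show:

  WasmSerializer serializer(isolate, native_module);
  size_t size = serializer.GetSerializedNativeModuleSize();
  std::vector<byte> buffer(size);
  if (serializer.SerializeNativeModule({buffer.data(), size})) {
    // IsSupportedVersion(isolate, {buffer.data(), size}) would hold here.
    MaybeHandle<WasmModuleObject> copy =
        DeserializeNativeModule(isolate, {buffer.data(), size}, wire_bytes);
  }
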
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 27120e2d15..e52c35a532 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -21,6 +21,7 @@
#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
+#include "src/string-constants.h"
#include "src/v8.h"
namespace v8 {
@@ -82,7 +83,10 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
- if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
+ if (cpu.has_sse41() && FLAG_enable_sse4_1) {
+ supported_ |= 1u << SSE4_1;
+ supported_ |= 1u << SSSE3;
+ }
if (cpu.has_ssse3() && FLAG_enable_ssse3) supported_ |= 1u << SSSE3;
if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
// SAHF is not generally available in long mode.
@@ -335,6 +339,7 @@ bool Operand::AddressUsesRegister(Register reg) const {
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
switch (request.kind()) {
@@ -349,6 +354,13 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
UpdateCodeTarget(Memory<int32_t>(pc), request.code_stub()->GetCode());
break;
}
+ case HeapObjectRequest::kStringConstant: {
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ Handle<String> allocated = str->AllocateStringConstant(isolate);
+ Memory<Handle<Object>>(pc) = allocated;
+ break;
+ }
}
}
}
@@ -449,6 +461,9 @@ Assembler::Assembler(const AssemblerOptions& options, void* buffer,
ReserveCodeTargetSpace(100);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ EnableCpuFeature(SSSE3);
+ }
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
@@ -943,7 +958,7 @@ void Assembler::bswapq(Register dst) {
emit(0xC8 + dst.low_bits());
}
-void Assembler::bt(Operand dst, Register src) {
+void Assembler::btq(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
emit(0x0F);
@@ -951,7 +966,7 @@ void Assembler::bt(Operand dst, Register src) {
emit_operand(src, dst);
}
-void Assembler::bts(Operand dst, Register src) {
+void Assembler::btsq(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
emit(0x0F);
@@ -959,6 +974,23 @@ void Assembler::bts(Operand dst, Register src) {
emit_operand(src, dst);
}
+void Assembler::btsq(Register dst, Immediate imm8) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst);
+ emit(0x0F);
+ emit(0xBA);
+ emit_modrm(0x5, dst);
+ emit(imm8.value_);
+}
+
+void Assembler::btrq(Register dst, Immediate imm8) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst);
+ emit(0x0F);
+ emit(0xBA);
+ emit_modrm(0x6, dst);
+ emit(imm8.value_);
+}
void Assembler::bsrl(Register dst, Register src) {
EnsureSpace ensure_space(this);
@@ -1818,6 +1850,14 @@ void Assembler::movp_heap_number(Register dst, double value) {
emitp(0, RelocInfo::EMBEDDED_OBJECT);
}
+void Assembler::movp_string(Register dst, const StringConstantBase* str) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, kPointerSize);
+ emit(0xB8 | dst.low_bits());
+ RequestHeapObject(HeapObjectRequest(str));
+ emitp(0, RelocInfo::EMBEDDED_OBJECT);
+}
+
void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
if (constpool_.TryRecordEntry(value, rmode)) {
// Emit rip-relative move with offset = 0
@@ -4962,12 +5002,7 @@ void Assembler::dq(Label* label) {
// Relocation information implementations.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- DCHECK(!RelocInfo::IsNone(rmode));
- if (options().disable_reloc_info_for_patching) return;
- if (RelocInfo::IsOnlyForSerializer(rmode) &&
- !options().record_reloc_info_for_serialization && !emit_debug_code()) {
- return;
- }
+ if (!ShouldRecordRelocInfo(rmode)) return;
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
reloc_info_writer.Write(&rinfo);
}
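
[editor note] The bt/bts renames make the 64-bit operand size explicit (btq/btsq), and the new immediate forms use the 0F BA opcode group, selecting BTS or BTR through the ModRM reg field (/5 and /6), as the emitters above show. Illustrative use in a code generator (register choice hypothetical):

  // CF receives the old value of the tested bit in both cases.
  masm.btsq(rax, Immediate(3));  // rax |= (1 << 3)
  masm.btrq(rax, Immediate(3));  // rax &= ~(1 << 3)
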
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 8823334a46..a0af3a3509 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -219,7 +219,6 @@ typedef XMMRegister Simd128Register;
constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
-constexpr DoubleRegister no_double_reg = DoubleRegister::no_reg();
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
enum Condition {
@@ -488,7 +487,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
- virtual ~Assembler() {}
+ ~Assembler() override = default;
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -689,6 +688,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// move.
void movp_heap_number(Register dst, double value);
+ void movp_string(Register dst, const StringConstantBase* str);
+
// Loads a 64-bit immediate into a register.
void movq(Register dst, int64_t value,
RelocInfo::Mode rmode = RelocInfo::NONE);
@@ -856,8 +857,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Bit operations.
void bswapl(Register dst);
void bswapq(Register dst);
- void bt(Operand dst, Register src);
- void bts(Operand dst, Register src);
+ void btq(Operand dst, Register src);
+ void btsq(Operand dst, Register src);
+ void btsq(Register dst, Immediate imm8);
+ void btrq(Register dst, Immediate imm8);
void bsrq(Register dst, Register src);
void bsrq(Register dst, Operand src);
void bsrl(Register dst, Register src);
@@ -2433,7 +2436,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// instructions and relocation information. The constructor makes
// sure that there is enough space and (in debug mode) the destructor
// checks that we did not generate too much.
-class EnsureSpace BASE_EMBEDDED {
+class EnsureSpace {
public:
explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
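
[editor note] EnsureSpace is the RAII guard the emitters above rely on: constructing it grows the buffer on overflow so the following bytes can be written unchecked. For example, bswapq from earlier in this diff:

  void Assembler::bswapq(Register dst) {
    EnsureSpace ensure_space(this);  // grows the buffer if needed
    emit_rex_64(dst);
    emit(0x0F);
    emit(0xC8 + dst.low_bits());
  }
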
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 5310a64714..d13181eea3 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -107,7 +107,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_exception = ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, isolate());
__ Store(pending_exception, rax);
- __ LoadRoot(rax, Heap::kExceptionRootIndex);
+ __ LoadRoot(rax, RootIndex::kException);
__ jmp(&exit);
// Invoke: Link this frame into the handler chain.
@@ -352,19 +352,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, &ok, Label::kNear);
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ CompareRoot(map, RootIndex::kHeapNumberMap);
__ j(equal, &ok, Label::kNear);
- __ CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
+ __ CompareRoot(return_value, RootIndex::kUndefinedValue);
__ j(equal, &ok, Label::kNear);
- __ CompareRoot(return_value, Heap::kTrueValueRootIndex);
+ __ CompareRoot(return_value, RootIndex::kTrueValue);
__ j(equal, &ok, Label::kNear);
- __ CompareRoot(return_value, Heap::kFalseValueRootIndex);
+ __ CompareRoot(return_value, RootIndex::kFalseValue);
__ j(equal, &ok, Label::kNear);
- __ CompareRoot(return_value, Heap::kNullValueRootIndex);
+ __ CompareRoot(return_value, RootIndex::kNullValue);
__ j(equal, &ok, Label::kNear);
__ Abort(AbortReason::kAPICallReturnedInvalidObject);
@@ -428,15 +428,15 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(return_address);
// new target
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// call data
__ Push(call_data);
// return value
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// return value default
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushRoot(RootIndex::kUndefinedValue);
// isolate
Register scratch = call_data;
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
@@ -526,7 +526,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(scratch);
__ Push(receiver);
__ Push(FieldOperand(callback, AccessorInfo::kDataOffset));
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
__ Push(kScratchRegister); // return value
__ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 41fe3dc363..bced4a0fd3 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -5,7 +5,6 @@
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
-#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/x64/assembler-x64-inl.h"
@@ -14,13 +13,14 @@ namespace internal {
#define __ masm.
-UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+UnaryMathFunction CreateSqrtFunction() {
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
- byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(page_allocator,
+ page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- CodeObjectRequired::kNo);
+ MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
// xmm0: raw double input.
// Move double input into registers.
@@ -28,12 +28,13 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+ CHECK(SetPermissions(page_allocator, buffer, allocated,
+ PageAllocator::kReadExecute));
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
#undef __
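
[editor note] CreateSqrtFunction now allocates its code page from the platform page allocator and assembles without an Isolate, so the generated stub is isolate-independent. A hedged usage sketch (UnaryMathFunction is the double(*)(double) typedef this file returns):

  UnaryMathFunction fast_sqrt = CreateSqrtFunction();
  if (fast_sqrt != nullptr) {
    double y = fast_sqrt(2.0);  // runs the freshly generated sqrtsd stub
  }
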
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 371735590c..4b57221571 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1816,7 +1816,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer(",0x%x", (*current) & 7);
current += 1;
} else {
- const char* mnemonic = "?";
+ const char* mnemonic;
if (opcode == 0x54) {
mnemonic = "andpd";
} else if (opcode == 0x56) {
@@ -2166,7 +2166,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0xA2) {
// CPUID
AppendToBuffer("%s", mnemonic);
-
} else if ((opcode & 0xF0) == 0x40) {
// CMOVcc: conditional move.
int condition = opcode & 0x0F;
@@ -2229,13 +2228,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
opcode == 0xB7 || opcode == 0xAF) {
// Size-extending moves, IMUL.
current += PrintOperands(mnemonic, REG_OPER_OP_ORDER, current);
-
} else if ((opcode & 0xF0) == 0x90) {
// SETcc: Set byte on condition. Needs pointer to beginning of instruction.
current = data + SetCC(data);
-
- } else if (opcode == 0xAB || opcode == 0xA5 || opcode == 0xAD) {
- // SHLD, SHRD (double-precision shift), BTS (bit set).
+ } else if (opcode == 0xA3 || opcode == 0xA5 || opcode == 0xAB ||
+ opcode == 0xAD) {
+ // BT (bit test), SHLD, BTS (bit test and set),
+ // SHRD (double-precision shift)
AppendToBuffer("%s ", mnemonic);
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
@@ -2245,6 +2244,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (opcode == 0xBA) {
+ // BTS / BTR (bit test and set/reset) with immediate
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ mnemonic = regop == 5 ? "bts" : regop == 6 ? "btr" : "?";
+ AppendToBuffer("%s ", mnemonic);
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%d", *current++);
} else if (opcode == 0xB8 || opcode == 0xBC || opcode == 0xBD) {
// POPCNT, CTZ, CLZ.
AppendToBuffer("%s%c ", mnemonic, operand_size_code());
@@ -2297,6 +2304,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return (group_1_prefix_ == 0xF2) ? "maxsd" : "maxss";
case 0xA2:
return "cpuid";
+ case 0xA3:
+ return "bt";
case 0xA5:
return "shld";
case 0xAB:
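
[editor note] With the 0xBA case added, the immediate bit-test forms emitted by the new btsq/btrq assembler methods now disassemble instead of falling through as unknown. For instance, the bytes produced by btsq(rax, Immediate(3)):

  // 48 0f ba e8 03  ->  bts rax,3
  // REX.W=48, opcode 0F BA, ModRM e8 (mod=11, reg=5 selects BTS, rm=rax), imm8=3
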
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 65c708024d..0115fcf75d 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -90,9 +90,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments (on the stack, not including receiver)
// rdi : the target to call
- // rbx : arguments list (FixedArray)
// rcx : arguments list length (untagged)
- Register registers[] = {rdi, rax, rbx, rcx};
+ // rbx : arguments list (FixedArray)
+ Register registers[] = {rdi, rax, rcx, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -127,9 +127,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific(
// rax : number of arguments (on the stack, not including receiver)
// rdi : the target to call
// rdx : the new target
- // rbx : arguments list (FixedArray)
// rcx : arguments list length (untagged)
- Register registers[] = {rdi, rdx, rax, rbx, rcx};
+ // rbx : arguments list (FixedArray)
+ Register registers[] = {rdi, rdx, rax, rcx, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -195,7 +195,7 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
rdi, // JSFunction
@@ -239,10 +239,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
rax, // argument count (not including receiver)
+ rcx, // address of first argument
+ rdi, // constructor to call
rdx, // new target
- rdi, // constructor
rbx, // allocation site feedback if available, undefined otherwise
- rcx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index adb52afac9..16dc893ab7 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -21,6 +21,7 @@
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/snapshot/snapshot.h"
+#include "src/string-constants.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h" // Cannot be the first include.
@@ -135,8 +136,8 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- Heap::kBuiltinsConstantsTableRootIndex));
- LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
+ RootIndex::kBuiltinsConstantsTable));
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
movp(destination,
FieldOperand(destination,
FixedArray::kHeaderSize + constant_index * kPointerSize));
@@ -192,22 +193,22 @@ void MacroAssembler::PushAddress(ExternalReference source) {
Push(kScratchRegister);
}
-void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
DCHECK(root_array_available_);
movp(destination, Operand(kRootRegister, RootRegisterOffset(index)));
}
-void MacroAssembler::PushRoot(Heap::RootListIndex index) {
+void MacroAssembler::PushRoot(RootIndex index) {
DCHECK(root_array_available_);
Push(Operand(kRootRegister, RootRegisterOffset(index)));
}
-void TurboAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
+void TurboAssembler::CompareRoot(Register with, RootIndex index) {
DCHECK(root_array_available_);
cmpp(with, Operand(kRootRegister, RootRegisterOffset(index)));
}
-void TurboAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
+void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
DCHECK(root_array_available_);
DCHECK(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
@@ -285,8 +286,6 @@ void TurboAssembler::CallRecordWriteStub(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register isolate_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
@@ -311,8 +310,6 @@ void TurboAssembler::CallRecordWriteStub(
xchgq(slot_parameter, object_parameter);
}
- LoadAddress(isolate_parameter, ExternalReference::isolate_address(isolate()));
-
Smi* smi_rsa = Smi::FromEnum(remembered_set_action);
Smi* smi_fm = Smi::FromEnum(fp_mode);
Move(remembered_set_parameter, smi_rsa);
@@ -1343,6 +1340,12 @@ void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
movp(dst, kScratchRegister);
}
+void TurboAssembler::MoveStringConstant(Register result,
+ const StringConstantBase* string,
+ RelocInfo::Mode rmode) {
+ movp_string(result, string);
+}
+
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
addp(rsp, Immediate(stack_elements * kPointerSize));
@@ -2173,7 +2176,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
- LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ LoadRoot(rdx, RootIndex::kUndefinedValue);
}
Label done;
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 6b96c3dcb3..25c488ad35 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -50,6 +50,8 @@ constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
+class StringConstantBase;
+
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
@@ -66,7 +68,7 @@ enum StackArgumentsAccessorReceiverMode {
ARGUMENTS_DONT_CONTAIN_RECEIVER
};
-class StackArgumentsAccessor BASE_EMBEDDED {
+class StackArgumentsAccessor {
public:
StackArgumentsAccessor(Register base_reg, int argument_count_immediate,
StackArgumentsAccessorReceiverMode receiver_mode =
@@ -114,6 +116,9 @@ class StackArgumentsAccessor BASE_EMBEDDED {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
+ TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
+ : TurboAssemblerBase(options, buffer, buffer_size) {}
+
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
@@ -215,8 +220,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Set(Operand dst, intptr_t x);
// Operations on roots in the root-array.
- void LoadRoot(Register destination, Heap::RootListIndex index) override;
- void LoadRoot(Operand destination, Heap::RootListIndex index) {
+ void LoadRoot(Register destination, RootIndex index) override;
+ void LoadRoot(Operand destination, RootIndex index) {
LoadRoot(kScratchRegister, index);
movp(destination, kScratchRegister);
}
@@ -350,6 +355,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
movp(dst, ptr, rmode);
}
+ void MoveStringConstant(Register result, const StringConstantBase* string,
+ RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
+
// Convert smi to word-size sign-extended value.
void SmiUntag(Register dst, Register src);
void SmiUntag(Register dst, Operand src);
@@ -401,8 +409,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, Operand src, int8_t imm8);
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(Operand with, Heap::RootListIndex index);
+ void CompareRoot(Register with, RootIndex index);
+ void CompareRoot(Operand with, RootIndex index);
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
@@ -517,11 +525,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
- // TODO(titzer): inline this utility constructor.
+ MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
+ : TurboAssembler(options, buffer, size) {}
+
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
+
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
@@ -541,29 +552,27 @@ class MacroAssembler : public TurboAssembler {
// Load a root value where the index (or part of it) is variable.
// The variable_offset register is added to the fixed_offset value
// to get the index into the root-array.
- void PushRoot(Heap::RootListIndex index);
+ void PushRoot(RootIndex index);
// Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal,
Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
- void JumpIfRoot(Operand with, Heap::RootListIndex index, Label* if_equal,
+ void JumpIfRoot(Operand with, RootIndex index, Label* if_equal,
Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
// Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(Register with, Heap::RootListIndex index,
- Label* if_not_equal,
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal,
Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
- void JumpIfNotRoot(Operand with, Heap::RootListIndex index,
- Label* if_not_equal,
+ void JumpIfNotRoot(Operand with, RootIndex index, Label* if_not_equal,
Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
diff --git a/deps/v8/src/zone/zone-allocator.h b/deps/v8/src/zone/zone-allocator.h
index 3914241e04..b3ce2473b6 100644
--- a/deps/v8/src/zone/zone-allocator.h
+++ b/deps/v8/src/zone/zone-allocator.h
@@ -42,7 +42,7 @@ class ZoneAllocator {
T* address(T& x) const { return &x; }
const T* address(const T& x) const { return &x; }
- T* allocate(size_t n, const void* hint = 0) {
+ T* allocate(size_t n, const void* hint = nullptr) {
return static_cast<T*>(zone_->NewArray<T>(static_cast<int>(n)));
}
void deallocate(T* p, size_t) { /* noop for Zones */
@@ -103,7 +103,7 @@ class RecyclingZoneAllocator : public ZoneAllocator<T> {
template <typename U>
friend class RecyclingZoneAllocator;
- T* allocate(size_t n, const void* hint = 0) {
+ T* allocate(size_t n, const void* hint = nullptr) {
// Only check top block in free list, since this will be equal to or larger
// than the other blocks in the free list.
if (free_list_ && free_list_->size >= n) {
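
[editor note] The nullptr default for the unused allocation hint is a cosmetic C++ modernization. For context, ZoneAllocator is what lets standard containers draw from a Zone; a sketch:

  Zone zone(&accounting_allocator, ZONE_NAME);
  ZoneAllocator<int> alloc(&zone);
  std::vector<int, ZoneAllocator<int>> v(alloc);
  v.push_back(42);  // storage comes from the zone; deallocate() is a no-op
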
diff --git a/deps/v8/src/zone/zone-chunk-list.h b/deps/v8/src/zone/zone-chunk-list.h
index 9c0c073a81..049e8f52a9 100644
--- a/deps/v8/src/zone/zone-chunk-list.h
+++ b/deps/v8/src/zone/zone-chunk-list.h
@@ -155,8 +155,8 @@ class ZoneChunkListIterator
using ChunkList = maybe_const<ZoneChunkList<T>>;
public:
- maybe_const<T>& operator*() { return current_->items()[position_]; }
- maybe_const<T>* operator->() { return &current_->items()[position_]; }
+ maybe_const<T>& operator*() const { return current_->items()[position_]; }
+ maybe_const<T>* operator->() const { return &current_->items()[position_]; }
bool operator==(const ZoneChunkListIterator& other) const {
return other.current_ == current_ && other.position_ == position_;
}
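
[editor note] Const-qualifying operator* and operator-> lets a const iterator object be dereferenced, which generic code frequently requires; constness of the element itself is still carried by maybe_const<T>. A tiny illustration (template parameters elided):

  template <typename Iter>
  auto& First(const Iter& it) {
    return *it;  // compiles only if operator*() is const-qualified
  }
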
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 1988826779..86c4bd0702 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -161,10 +161,10 @@ class ZoneUnorderedMap
ZoneAllocator<std::pair<const K, V>>> {
public:
// Constructs an empty map.
- explicit ZoneUnorderedMap(Zone* zone)
+ explicit ZoneUnorderedMap(Zone* zone, size_t bucket_count = 100)
: std::unordered_map<K, V, Hash, KeyEqual,
ZoneAllocator<std::pair<const K, V>>>(
- 100, Hash(), KeyEqual(),
+ bucket_count, Hash(), KeyEqual(),
ZoneAllocator<std::pair<const K, V>>(zone)) {}
};
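
[editor note] The new bucket_count parameter lets callers size the hash table up front instead of always paying for the previously hard-coded 100 buckets; the default preserves the old behavior. Sketch (key/value types and the zone pointer are illustrative):

  ZoneUnorderedMap<int, int> small_map(zone, 8);  // expected to stay tiny
  ZoneUnorderedMap<int, int> big_map(zone);       // old default of 100 buckets
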
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index 295d7815cb..a851f6797a 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -43,7 +43,6 @@ Zone::Zone(AccountingAllocator* allocator, const char* name,
Zone::~Zone() {
allocator_->ZoneDestruction(this);
-
DeleteAll();
DCHECK_EQ(segment_bytes_allocated_, 0);
@@ -77,6 +76,12 @@ void* Zone::New(size_t size) {
return reinterpret_cast<void*>(result);
}
+void Zone::ReleaseMemory() {
+ allocator_->ZoneDestruction(this);
+ DeleteAll();
+ allocator_->ZoneCreation(this);
+}
+
void Zone::DeleteAll() {
// Traverse the chained list of segments and return them all to the allocator.
for (Segment* current = segment_head_; current;) {
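
[editor note] ReleaseMemory makes a Zone reusable in place: it fires the same accounting events as a destroy/create pair and returns all segments to the allocator. A sketch of the intended reuse pattern (Process and work_items are hypothetical):

  Zone zone(&allocator, ZONE_NAME);
  for (auto& item : work_items) {
    Process(&zone, item);   // allocates freely into the zone
    zone.ReleaseMemory();   // reset the zone for the next iteration
  }
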
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index 6f863f27fd..5fcc25b350 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -9,8 +9,10 @@
#include "src/base/hashmap.h"
#include "src/base/logging.h"
+#include "src/base/threaded-list.h"
#include "src/globals.h"
#include "src/splay-tree.h"
+#include "src/utils.h"
#include "src/zone/accounting-allocator.h"
#ifndef ZONE_NAME
@@ -56,6 +58,10 @@ class V8_EXPORT_PRIVATE Zone final {
// Seals the zone to prevent any further allocation.
void Seal() { sealed_ = true; }
+ // Allows the zone to be safely reused. Releases the memory and fires zone
+ // destruction and creation events for the accounting allocator.
+ void ReleaseMemory();
+
// Returns true if more memory has been allocated in zones than
// the limit allows.
bool excess_allocation() const {
@@ -69,6 +75,9 @@ class V8_EXPORT_PRIVATE Zone final {
AccountingAllocator* allocator() const { return allocator_; }
private:
+ // Deletes all objects and frees all memory allocated in the Zone.
+ void DeleteAll();
+
// All pointers returned from New() are 8-byte aligned.
static const size_t kAlignmentInBytes = 8;
@@ -81,9 +90,6 @@ class V8_EXPORT_PRIVATE Zone final {
// Report zone excess when allocation exceeds this limit.
static const size_t kExcessLimit = 256 * MB;
- // Deletes all objects and free all memory allocated in the Zone.
- void DeleteAll();
-
// The number of bytes allocated in this zone so far.
size_t allocation_size_;
@@ -295,6 +301,11 @@ class ZoneList final {
template <typename T>
using ZonePtrList = ZoneList<T*>;
+// ZoneThreadedList is a special variant of the ThreadedList that can be put
+// into a Zone.
+template <typename T, typename TLTraits = base::ThreadedListTraits<T>>
+using ZoneThreadedList = base::ThreadedListBase<T, ZoneObject, TLTraits>;
+
// A zone splay tree. The config type parameter encapsulates the
// different configurations of a concrete splay tree (see splay-tree.h).
// The tree itself and all its elements are allocated in the Zone.
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index dbae4e6216..8333f49c78 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -105,7 +105,7 @@ class TestSuite(testsuite.TestSuite):
return TestCase
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def _get_files_params(self):
path = self.path
testroot = self.suite.testroot
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index c4aa51b818..df37a3d4c5 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -214,6 +214,7 @@ v8_source_set("cctest_sources") {
"test-roots.cc",
"test-sampler-api.cc",
"test-serialize.cc",
+ "test-smi-lexicographic-compare.cc",
"test-strings.cc",
"test-strtod.cc",
"test-symbols.cc",
@@ -237,6 +238,7 @@ v8_source_set("cctest_sources") {
"trace-extension.cc",
"trace-extension.h",
"types-fuzz.h",
+ "unicode-helpers.cc",
"unicode-helpers.h",
"wasm/test-c-wasm-entry.cc",
"wasm/test-jump-table-assembler.cc",
@@ -285,6 +287,7 @@ v8_source_set("cctest_sources") {
"test-code-stubs.h",
"test-disasm-arm.cc",
"test-macro-assembler-arm.cc",
+ "test-poison-disasm-arm.cc",
"test-sync-primitives-arm.cc",
]
} else if (v8_current_cpu == "arm64") {
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 0919f39efe..1b1eeb5d41 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -123,16 +123,21 @@ void CcTest::CollectGarbage(i::AllocationSpace space) {
heap()->CollectGarbage(space, i::GarbageCollectionReason::kTesting);
}
-void CcTest::CollectAllGarbage() {
- CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+void CcTest::CollectAllGarbage(i::Isolate* isolate) {
+ i::Isolate* iso = isolate ? isolate : i_isolate();
+ iso->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
}
-void CcTest::CollectAllGarbage(int flags) {
- heap()->CollectAllGarbage(flags, i::GarbageCollectionReason::kTesting);
+void CcTest::CollectAllAvailableGarbage(i::Isolate* isolate) {
+ i::Isolate* iso = isolate ? isolate : i_isolate();
+ iso->heap()->CollectAllAvailableGarbage(i::GarbageCollectionReason::kTesting);
}
-void CcTest::CollectAllAvailableGarbage() {
- heap()->CollectAllAvailableGarbage(i::GarbageCollectionReason::kTesting);
+void CcTest::PreciseCollectAllGarbage(i::Isolate* isolate) {
+ i::Isolate* iso = isolate ? isolate : i_isolate();
+ iso->heap()->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
}
v8::base::RandomNumberGenerator* CcTest::random_number_generator() {
@@ -210,12 +215,12 @@ InitializedHandleScope::InitializedHandleScope()
initialized_handle_scope_impl_(
new InitializedHandleScopeImpl(main_isolate_)) {}
-InitializedHandleScope::~InitializedHandleScope() {}
+InitializedHandleScope::~InitializedHandleScope() = default;
HandleAndZoneScope::HandleAndZoneScope()
: main_zone_(new i::Zone(&allocator_, ZONE_NAME)) {}
-HandleAndZoneScope::~HandleAndZoneScope() {}
+HandleAndZoneScope::~HandleAndZoneScope() = default;
static void PrintTestList(CcTest* current) {
if (current == nullptr) return;
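
[editor note] The GC helpers now default to the main test isolate, and PreciseCollectAllGarbage replaces the flag-taking CollectAllGarbage overload. Typical call sites after this change (sketch):

  CcTest::CollectAllGarbage();               // main isolate, kNoGCFlags
  CcTest::CollectAllGarbage(other_isolate);  // explicit isolate
  CcTest::PreciseCollectAllGarbage();        // precise collection, kNoGCFlags
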
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 383771710a..9d4af5af3f 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -37,6 +37,7 @@
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/register-configuration.h"
#include "src/utils.h"
#include "src/v8.h"
#include "src/zone/accounting-allocator.h"
@@ -50,6 +51,13 @@ class RandomNumberGenerator;
namespace internal {
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+// TODO(v8:6666): Fold into Default config once root is fully supported.
+const auto GetRegConfig = RegisterConfiguration::PreserveRootIA32;
+#else
+const auto GetRegConfig = RegisterConfiguration::Default;
+#endif
+
class HandleScope;
class Zone;
@@ -131,9 +139,9 @@ class CcTest {
static i::Heap* heap();
static void CollectGarbage(i::AllocationSpace space);
- static void CollectAllGarbage();
- static void CollectAllGarbage(int flags);
- static void CollectAllAvailableGarbage();
+ static void CollectAllGarbage(i::Isolate* isolate = nullptr);
+ static void CollectAllAvailableGarbage(i::Isolate* isolate = nullptr);
+ static void PreciseCollectAllGarbage(i::Isolate* isolate = nullptr);
static v8::base::RandomNumberGenerator* random_number_generator();
@@ -193,7 +201,7 @@ class ApiTestFuzzer: public v8::base::Thread {
void CallTest();
// The ApiTestFuzzer is also a Thread, so it has a Run method.
- virtual void Run();
+ void Run() override;
enum PartOfTest {
FIRST_PART,
@@ -220,7 +228,7 @@ class ApiTestFuzzer: public v8::base::Thread {
test_number_(num),
gate_(0),
active_(true) {}
- ~ApiTestFuzzer() {}
+ ~ApiTestFuzzer() override = default;
static bool fuzzing_;
static int tests_being_run_;
@@ -275,14 +283,15 @@ class RegisterThreadedTest {
// A LocalContext holds a reference to a v8::Context.
class LocalContext {
public:
- LocalContext(v8::Isolate* isolate, v8::ExtensionConfiguration* extensions = 0,
+ LocalContext(v8::Isolate* isolate,
+ v8::ExtensionConfiguration* extensions = nullptr,
v8::Local<v8::ObjectTemplate> global_template =
v8::Local<v8::ObjectTemplate>(),
v8::Local<v8::Value> global_object = v8::Local<v8::Value>()) {
Initialize(isolate, extensions, global_template, global_object);
}
- LocalContext(v8::ExtensionConfiguration* extensions = 0,
+ LocalContext(v8::ExtensionConfiguration* extensions = nullptr,
v8::Local<v8::ObjectTemplate> global_template =
v8::Local<v8::ObjectTemplate>(),
v8::Local<v8::Value> global_object = v8::Local<v8::Value>()) {
@@ -506,9 +515,7 @@ static inline void ExpectInt32(const char* code, int expected) {
static inline void ExpectBoolean(const char* code, bool expected) {
v8::Local<v8::Value> result = CompileRun(code);
CHECK(result->IsBoolean());
- CHECK_EQ(expected,
- result->BooleanValue(v8::Isolate::GetCurrent()->GetCurrentContext())
- .FromJust());
+ CHECK_EQ(expected, result->BooleanValue(v8::Isolate::GetCurrent()));
}
@@ -597,11 +604,11 @@ class StaticOneByteResource : public v8::String::ExternalOneByteStringResource {
public:
explicit StaticOneByteResource(const char* data) : data_(data) {}
- ~StaticOneByteResource() {}
+ ~StaticOneByteResource() override = default;
- const char* data() const { return data_; }
+ const char* data() const override { return data_; }
- size_t length() const { return strlen(data_); }
+ size_t length() const override { return strlen(data_); }
private:
const char* data_;
@@ -677,13 +684,14 @@ class TestPlatform : public v8::Platform {
}
void CallOnForegroundThread(v8::Isolate* isolate, v8::Task* task) override {
- old_platform_->CallOnForegroundThread(isolate, task);
+ // This is a deprecated function and should not be called anymore.
+ UNREACHABLE();
}
void CallDelayedOnForegroundThread(v8::Isolate* isolate, v8::Task* task,
double delay_in_seconds) override {
- old_platform_->CallDelayedOnForegroundThread(isolate, task,
- delay_in_seconds);
+ // This is a deprecated function and should not be called anymore.
+ UNREACHABLE();
}
double MonotonicallyIncreasingTime() override {
@@ -696,7 +704,8 @@ class TestPlatform : public v8::Platform {
void CallIdleOnForegroundThread(v8::Isolate* isolate,
v8::IdleTask* task) override {
- old_platform_->CallIdleOnForegroundThread(isolate, task);
+ // This is a deprecated function and should not be called anymore.
+ UNREACHABLE();
}
bool IdleTasksEnabled(v8::Isolate* isolate) override {
@@ -709,7 +718,7 @@ class TestPlatform : public v8::Platform {
protected:
TestPlatform() : old_platform_(i::V8::GetCurrentPlatform()) {}
- ~TestPlatform() { i::V8::SetPlatformForTesting(old_platform_); }
+ ~TestPlatform() override { i::V8::SetPlatformForTesting(old_platform_); }
v8::Platform* old_platform() const { return old_platform_; }
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index c59c443b06..feaf2eb327 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -93,6 +93,9 @@
'test-cpu-profiler/TracingCpuProfiler': [SKIP],
'test-sampler/LibSamplerCollectSample': [SKIP],
+ # BUG(v8:8209). Flaky.
+ 'test-cpu-profiler/Issue1398': [SKIP],
+
# BUG(7702). Flaky data race and other test failures.
'test-cpu-profiler/MultipleProfilers': [SKIP],
@@ -154,7 +157,7 @@
'test-mark-compact/Promotion': [PASS, FAIL],
# BUG(v8:3434).
- ' test-api/LoadICFastApi_DirectCall_GCMoveStubWithProfiler': [SKIP],
+ 'test-api/LoadICFastApi_DirectCall_GCMoveStubWithProfiler': [SKIP],
}], # 'arch == arm64'
['arch == arm64 and simulator_run', {
@@ -256,6 +259,9 @@
# BUG(5920): Flaky crash.
'test-serialize/PartialSerializerContext': [PASS, ['arch == x64 and mode == debug', SKIP]],
+
+ # BUG(v8:8220). Flaky.
+ 'test-log/LogAll': [SKIP],
}], # 'system == windows'
##############################################################################
@@ -285,10 +291,13 @@
'test-serialize/StartupSerializerTwice': [SKIP],
'test-serialize/StartupSerializerOnceRunScript': [SKIP],
'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
+}], # 'arch == arm'
+##############################################################################
+['arch == arm and not simulator_run', {
# crbug.com/v8/7605
- 'test-heap/OutOfMemorySmallObjects': [PASS, ['not simulator_run', SKIP]],
-}], # 'arch == arm'
+ 'test-heap/OutOfMemorySmallObjects': [SKIP],
+}], # 'arch == arm and not simulator_run'
##############################################################################
['arch == mipsel or arch == mips', {
@@ -314,12 +323,10 @@
}], # 'arch == mips'
##############################################################################
-['arch == mips or arch == mips64', {
- # TODO(mips-team): Implement LiftOff on big-endian
- 'test-run-wasm/RunWasmLiftoff*': [SKIP],
- 'test-run-wasm-64/RunWasmLiftoff*': [SKIP],
-}], # 'arch == mips or arch == mips64'
-
+['arch == mips64', {
+ # TODO(mips-team): Currently fails on mips64 board.
+ 'test-run-wasm/RunWasmLiftoff_I32Binop*': [SKIP],
+}], # 'arch == mips64'
##############################################################################
['arch == mips64el or arch == mips64', {
# BUG(v8:3154).
@@ -345,11 +352,10 @@
}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'
##############################################################################
-['arch == mipsel or arch == mips64el or arch == mips or arch == mips64 or arch == ppc or arch == ppc64 or arch == s390 or arch == s390x', {
+['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
# TODO(mips-team): Implement I64Atomic operations on MIPS
- # TODO(ppc-team, s390-team): Implement I64Atomic operations on PPC/s390
'test-run-wasm-atomics64/*': [SKIP],
-}], # 'arch == mipsel or arch == mips64el or arch == mips or arch == mips64 or arch == ppc or arch == ppc64 or arch == s390 or arch == s390x'
+}], # 'arch == mipsel or arch == mips64el or arch == mips or arch == mips64'
##############################################################################
['mips_arch_variant == r6', {
@@ -372,6 +378,12 @@
}], # 'arch == android_arm or arch == android_ia32'
##############################################################################
+['system == android', {
+ # Uses too much memory.
+ 'test-api/NewStringRangeError': [SKIP],
+}], # 'system == android'
+
+##############################################################################
['system == aix and arch == ppc64', {
# BUG 2857
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index 8a0ea70a2a..4bf06a9ba3 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -21,7 +21,7 @@ class CallHelper {
: csig_(csig), isolate_(isolate) {
USE(isolate_);
}
- virtual ~CallHelper() {}
+ virtual ~CallHelper() = default;
template <typename... Params>
R Call(Params... args) {
@@ -46,7 +46,7 @@ class CodeRunner : public CallHelper<T> {
public:
CodeRunner(Isolate* isolate, Handle<Code> code, MachineSignature* csig)
: CallHelper<T>(isolate, csig), code_(code) {}
- virtual ~CodeRunner() {}
+ ~CodeRunner() override = default;
Address Generate() override { return code_->entry(); }
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index c58eb3b485..f9fbd4af3a 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -39,7 +39,25 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()) {}
- virtual ~RawMachineAssemblerTester() {}
+ template <typename... ParamMachTypes>
+ RawMachineAssemblerTester(Code::Kind kind, ParamMachTypes... p)
+ : HandleAndZoneScope(),
+ CallHelper<ReturnType>(
+ main_isolate(),
+ CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p...)),
+ RawMachineAssembler(
+ main_isolate(), new (main_zone()) Graph(main_zone()),
+ Linkage::GetSimplifiedCDescriptor(
+ main_zone(),
+ CSignature::New(main_zone(), MachineTypeForC<ReturnType>(),
+ p...),
+ true),
+ MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()),
+ kind_(kind) {}
+
+ ~RawMachineAssemblerTester() override = default;
void CheckNumber(double expected, Object* number) {
CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
@@ -59,13 +77,12 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
}
protected:
- virtual Address Generate() {
+ Address Generate() override {
if (code_.is_null()) {
Schedule* schedule = this->Export();
auto call_descriptor = this->call_descriptor();
Graph* graph = this->graph();
- OptimizedCompilationInfo info(ArrayVector("testing"), main_zone(),
- Code::STUB);
+ OptimizedCompilationInfo info(ArrayVector("testing"), main_zone(), kind_);
code_ = Pipeline::GenerateCodeForTesting(
&info, main_isolate(), call_descriptor, graph,
AssemblerOptions::Default(main_isolate()), schedule);
@@ -74,6 +91,7 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
}
private:
+ Code::Kind kind_ = Code::Kind::STUB;
MaybeHandle<Code> code_;
};
@@ -395,7 +413,7 @@ class BinopGen {
public:
virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) = 0;
virtual T expected(T a, T b) = 0;
- virtual ~BinopGen() {}
+ virtual ~BinopGen() = default;
};
// A helper class to generate various combinations of input shapes
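
[editor note] The new constructor lets a test choose the Code::Kind used for code generation instead of the previously hard-coded Code::STUB. test-multiple-return.cc below uses exactly this to build a JS-to-Wasm entry:

  RawMachineAssemblerTester<int32_t> mt(Code::Kind::JS_TO_WASM_FUNCTION);
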
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index c16feae340..e0045979d4 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -62,7 +62,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
Begin(static_cast<int>(parameter_count()));
InitParameters();
}
- virtual ~GraphBuilderTester() {}
+ ~GraphBuilderTester() override = default;
void GenerateCode() { Generate(); }
Node* Parameter(size_t index) {
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index ab17ff0992..090a0f23cd 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -309,12 +309,12 @@ class CmpMaterializeBoolGen : public BinopGen<int32_t> {
CmpMaterializeBoolGen(IrOpcode::Value opcode, bool i)
: w(opcode), invert(i) {}
- virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+ void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) override {
Node* cond = w.MakeNode(m, a, b);
if (invert) cond = m->Word32Equal(cond, m->Int32Constant(0));
m->Return(cond);
}
- virtual int32_t expected(int32_t a, int32_t b) {
+ int32_t expected(int32_t a, int32_t b) override {
if (invert) return !w.Int32Compare(a, b) ? 1 : 0;
return w.Int32Compare(a, b) ? 1 : 0;
}
@@ -333,7 +333,7 @@ class CmpBranchGen : public BinopGen<int32_t> {
CmpBranchGen(IrOpcode::Value opcode, bool i, bool t, int32_t eq, int32_t ne)
: w(opcode), invert(i), true_first(t), eq_constant(eq), ne_constant(ne) {}
- virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+ void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) override {
RawMachineLabel blocka, blockb;
Node* cond = w.MakeNode(m, a, b);
if (invert) cond = m->Word32Equal(cond, m->Int32Constant(0));
@@ -350,7 +350,7 @@ class CmpBranchGen : public BinopGen<int32_t> {
m->Return(m->Int32Constant(eq_constant));
}
}
- virtual int32_t expected(int32_t a, int32_t b) {
+ int32_t expected(int32_t a, int32_t b) override {
if (invert) return !w.Int32Compare(a, b) ? eq_constant : ne_constant;
return w.Int32Compare(a, b) ? eq_constant : ne_constant;
}
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index 91065cd546..a2243e6edd 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -30,7 +30,7 @@ Node* SmiTag(CodeAssembler& m, Node* value) {
}
Node* UndefinedConstant(CodeAssembler& m) {
- return m.LoadRoot(Heap::kUndefinedValueRootIndex);
+ return m.LoadRoot(RootIndex::kUndefinedValue);
}
Node* SmiFromInt32(CodeAssembler& m, Node* value) {
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index a3b80bc887..8bf29dca69 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -50,7 +50,7 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
// arguments:
// ~~~
// FixedArray setup(CodeObject* test, FixedArray state_in) {
-// FixedArray state_out = AllocateFixedArray(state_in.length());
+// FixedArray state_out = AllocateZeroedFixedArray(state_in.length());
// // `test` will tail-call to its first parameter which will be `teardown`.
// return test(teardown, state_out, state_in[0], state_in[1],
// state_in[2], ...);
@@ -83,8 +83,8 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
// First allocate the FixedArray which will hold the final results. Here we
// should take care of all allocations, meaning we allocate HeapNumbers and
// FixedArrays representing Simd128 values.
- TNode<FixedArray> state_out = __ Cast(__ AllocateFixedArray(
- PACKED_ELEMENTS, __ IntPtrConstant(parameters.size())));
+ TNode<FixedArray> state_out =
+ __ AllocateZeroedFixedArray(__ IntPtrConstant(parameters.size()));
for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
switch (parameters[i].representation()) {
case MachineRepresentation::kTagged:
@@ -94,8 +94,8 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
__ StoreFixedArrayElement(state_out, i, __ AllocateHeapNumber());
break;
case MachineRepresentation::kSimd128: {
- TNode<FixedArray> vector = __ Cast(
- __ AllocateFixedArray(PACKED_SMI_ELEMENTS, __ IntPtrConstant(4)));
+ TNode<FixedArray> vector =
+ __ AllocateZeroedFixedArray(__ IntPtrConstant(4));
for (int lane = 0; lane < 4; lane++) {
__ StoreFixedArrayElement(vector, lane, __ SmiConstant(0));
}
@@ -361,7 +361,11 @@ class TestEnvironment : public HandleAndZoneScope {
public:
// These constants may be tuned to experiment with different environments.
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+ static constexpr int kGeneralRegisterCount = 3;
+#else
static constexpr int kGeneralRegisterCount = 4;
+#endif
static constexpr int kDoubleRegisterCount = 6;
static constexpr int kTaggedSlotCount = 64;
@@ -431,13 +435,10 @@ class TestEnvironment : public HandleAndZoneScope {
// kReturnRegister0 as the first parameter, and the call will need a
// register to hold the CodeObject address. So the maximum number of
// registers left to test with is the number of available registers minus 2.
- DCHECK_LE(
- kGeneralRegisterCount,
- RegisterConfiguration::Default()->num_allocatable_general_registers() -
- 2);
+ DCHECK_LE(kGeneralRegisterCount,
+ GetRegConfig()->num_allocatable_general_registers() - 2);
- int32_t general_mask =
- RegisterConfiguration::Default()->allocatable_general_codes_mask();
+ int32_t general_mask = GetRegConfig()->allocatable_general_codes_mask();
// kReturnRegister0 is used to hold the "teardown" code object, do not
// generate moves using it.
std::unique_ptr<const RegisterConfiguration> registers(
@@ -639,18 +640,21 @@ class TestEnvironment : public HandleAndZoneScope {
case MachineRepresentation::kTagged:
state->set(i, Smi::FromInt(rng_->NextInt(Smi::kMaxValue)));
break;
- case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat32: {
// HeapNumbers are Float64 values. However, we will convert it to a
// Float32 and back inside `setup` and `teardown`. Make sure the value
// we pick fits in a Float32.
- state->set(
- i, *main_isolate()->factory()->NewHeapNumber(
- static_cast<double>(DoubleToFloat32(rng_->NextDouble()))));
+ Handle<HeapNumber> num = main_isolate()->factory()->NewHeapNumber(
+ static_cast<double>(DoubleToFloat32(rng_->NextDouble())));
+ state->set(i, *num);
break;
- case MachineRepresentation::kFloat64:
- state->set(
- i, *main_isolate()->factory()->NewHeapNumber(rng_->NextDouble()));
+ }
+ case MachineRepresentation::kFloat64: {
+ Handle<HeapNumber> num =
+ main_isolate()->factory()->NewHeapNumber(rng_->NextDouble());
+ state->set(i, *num);
break;
+ }
case MachineRepresentation::kSimd128: {
Handle<FixedArray> vector =
main_isolate()->factory()->NewFixedArray(4);
@@ -968,7 +972,7 @@ class CodeGeneratorTester {
linkage_(environment->test_descriptor()),
frame_(environment->test_descriptor()->CalculateFixedFrameSize()) {
// Pick half of the stack parameters at random and move them into spill
- // slots, seperated by `extra_stack_space` bytes.
+ // slots, separated by `extra_stack_space` bytes.
// When testing a move with stack slots using CheckAssembleMove or
// CheckAssembleSwap, we'll transparently make use of local spill slots
// instead of stack parameters for those that were picked. This allows us to
@@ -1285,7 +1289,7 @@ TEST(FuzzAssembleMoveAndSwap) {
}
TEST(AssembleTailCallGap) {
- const RegisterConfiguration* conf = RegisterConfiguration::Default();
+ const RegisterConfiguration* conf = GetRegConfig();
TestEnvironment env;
// This test assumes at least 4 registers are allocatable.
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index eec562cf36..559ed1088c 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -32,7 +32,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
simplified(main_zone()),
common(main_zone()),
graph(main_zone()),
- typer(main_isolate(), &js_heap_broker, Typer::kNoFlags, &graph),
+ typer(&js_heap_broker, Typer::kNoFlags, &graph),
context_node(nullptr) {
graph.SetStart(graph.NewNode(common.Start(num_parameters)));
graph.SetEnd(graph.NewNode(common.End(1), graph.start()));
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index b591d193e7..dccdbd9b92 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -190,11 +190,10 @@ void TestReturnMultipleValues(MachineType type) {
std::unique_ptr<wasm::NativeModule> module = AllocateNativeModule(
handles.main_isolate(), code->raw_instruction_size());
- byte* code_start = module->AddCodeCopy(code, wasm::WasmCode::kFunction, 0)
- ->instructions()
- .start();
+ byte* code_start =
+ module->AddCodeForTesting(code)->instructions().start();
- RawMachineAssemblerTester<int32_t> mt;
+ RawMachineAssemblerTester<int32_t> mt(Code::Kind::JS_TO_WASM_FUNCTION);
const int input_count = 2 + param_count;
Node* call_inputs[2 + kMaxParamCount];
call_inputs[0] = mt.PointerConstant(code_start);
@@ -280,9 +279,7 @@ void ReturnLastValue(MachineType type) {
std::unique_ptr<wasm::NativeModule> module = AllocateNativeModule(
handles.main_isolate(), code->raw_instruction_size());
- byte* code_start = module->AddCodeCopy(code, wasm::WasmCode::kFunction, 0)
- ->instructions()
- .start();
+ byte* code_start = module->AddCodeForTesting(code)->instructions().start();
// Generate caller.
int expect = return_count - 1;
@@ -343,9 +340,7 @@ void ReturnSumOfReturns(MachineType type) {
std::unique_ptr<wasm::NativeModule> module = AllocateNativeModule(
handles.main_isolate(), code->raw_instruction_size());
- byte* code_start = module->AddCodeCopy(code, wasm::WasmCode::kFunction, 0)
- ->instructions()
- .start();
+ byte* code_start = module->AddCodeForTesting(code)->instructions().start();
// Generate caller.
RawMachineAssemblerTester<int32_t> mt;
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 894338b3e2..c334ecb383 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -6,6 +6,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/representation-change.h"
+#include "src/compiler/type-cache.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
@@ -46,6 +47,12 @@ class RepresentationChangerTester : public HandleAndZoneScope,
CHECK_EQ(expected, m.Value());
}
+ void CheckInt64Constant(Node* n, int64_t expected) {
+ Int64Matcher m(n);
+ CHECK(m.HasValue());
+ CHECK_EQ(expected, m.Value());
+ }
+
void CheckUint32Constant(Node* n, uint32_t expected) {
Uint32Matcher m(n);
CHECK(m.HasValue());
@@ -267,6 +274,18 @@ TEST(ToUint32_constant) {
}
}
+TEST(ToInt64_constant) {
+ RepresentationChangerTester r;
+ FOR_INT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Constant(*i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kTagged, TypeCache::Get().kSafeInteger, use,
+ UseInfo(MachineRepresentation::kWord64, Truncation::None()));
+ r.CheckInt64Constant(c, *i);
+ }
+}
+
static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
Type from_type, UseInfo use_info) {
RepresentationChangerTester r;
@@ -291,7 +310,7 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
Type from_type, MachineRepresentation to) {
- CheckChange(expected, from, from_type, UseInfo(to, Truncation::None()));
+ CheckChange(expected, from, from_type, UseInfo(to, Truncation::Any()));
}
static void CheckTwoChanges(IrOpcode::Value expected2,
@@ -328,6 +347,132 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
CHECK_EQ(n, c->InputAt(0));
}
+TEST(Word64) {
+ CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord8,
+ TypeCache::Get().kInt8, MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord8,
+ TypeCache::Get().kUint8, MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord16,
+ TypeCache::Get().kInt16, MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord16,
+ TypeCache::Get().kUint16, MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord32,
+ Type::Signed32(), MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord32,
+ Type::Unsigned32(), MachineRepresentation::kWord64);
+
+ CheckChange(IrOpcode::kTruncateInt64ToInt32, MachineRepresentation::kWord64,
+ Type::Signed32(), MachineRepresentation::kWord32);
+ CheckChange(IrOpcode::kTruncateInt64ToInt32, MachineRepresentation::kWord64,
+ Type::Unsigned32(), MachineRepresentation::kWord32);
+ CheckChange(IrOpcode::kTruncateInt64ToInt32, MachineRepresentation::kWord64,
+ TypeCache::Get().kSafeInteger, MachineRepresentation::kWord32,
+ UseInfo::TruncatingWord32());
+ CheckChange(
+ IrOpcode::kCheckedInt64ToInt32, MachineRepresentation::kWord64,
+ TypeCache::Get().kSafeInteger, MachineRepresentation::kWord32,
+ UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair()));
+ CheckChange(
+ IrOpcode::kCheckedUint64ToInt32, MachineRepresentation::kWord64,
+ TypeCache::Get().kPositiveSafeInteger, MachineRepresentation::kWord32,
+ UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair()));
+
+ CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
+ Type::Signed32(), MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
+ Type::Unsigned32(), MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
+ TypeCache::Get().kSafeInteger, MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
+ TypeCache::Get().kInt64, MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeFloat64ToUint64, MachineRepresentation::kFloat64,
+ TypeCache::Get().kUint64, MachineRepresentation::kWord64);
+
+ CheckChange(IrOpcode::kChangeInt64ToFloat64, MachineRepresentation::kWord64,
+ Type::Signed32(), MachineRepresentation::kFloat64);
+ CheckChange(IrOpcode::kChangeInt64ToFloat64, MachineRepresentation::kWord64,
+ Type::Unsigned32(), MachineRepresentation::kFloat64);
+ CheckChange(IrOpcode::kChangeInt64ToFloat64, MachineRepresentation::kWord64,
+ TypeCache::Get().kSafeInteger, MachineRepresentation::kFloat64);
+
+ CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
+ IrOpcode::kChangeFloat64ToInt64,
+ MachineRepresentation::kFloat32, Type::Signed32(),
+ MachineRepresentation::kWord64);
+ CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
+ IrOpcode::kChangeFloat64ToInt64,
+ MachineRepresentation::kFloat32, Type::Unsigned32(),
+ MachineRepresentation::kWord64);
+ CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
+ IrOpcode::kChangeFloat64ToInt64,
+ MachineRepresentation::kFloat32, TypeCache::Get().kInt64,
+ MachineRepresentation::kWord64);
+ CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
+ IrOpcode::kChangeFloat64ToUint64,
+ MachineRepresentation::kFloat32, TypeCache::Get().kUint64,
+ MachineRepresentation::kWord64);
+
+ CheckTwoChanges(IrOpcode::kChangeInt64ToFloat64,
+ IrOpcode::kTruncateFloat64ToFloat32,
+ MachineRepresentation::kWord64, Type::Signed32(),
+ MachineRepresentation::kFloat32);
+
+ CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged,
+ Type::Signed32(), MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged,
+ Type::Unsigned32(), MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged,
+ TypeCache::Get().kSafeInteger, MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged,
+ TypeCache::Get().kInt64, MachineRepresentation::kWord64);
+ CheckChange(IrOpcode::kChangeTaggedSignedToInt64,
+ MachineRepresentation::kTaggedSigned, Type::SignedSmall(),
+ MachineRepresentation::kWord64);
+
+ CheckTwoChanges(IrOpcode::kTruncateInt64ToInt32,
+ IrOpcode::kChangeInt31ToTaggedSigned,
+ MachineRepresentation::kWord64, Type::Signed31(),
+ MachineRepresentation::kTagged);
+ CheckTwoChanges(IrOpcode::kTruncateInt64ToInt32,
+ IrOpcode::kChangeInt32ToTagged,
+ MachineRepresentation::kWord64, Type::Signed32(),
+ MachineRepresentation::kTagged);
+ CheckTwoChanges(IrOpcode::kTruncateInt64ToInt32,
+ IrOpcode::kChangeUint32ToTagged,
+ MachineRepresentation::kWord64, Type::Unsigned32(),
+ MachineRepresentation::kTagged);
+ CheckChange(IrOpcode::kChangeInt64ToTagged, MachineRepresentation::kWord64,
+ TypeCache::Get().kSafeInteger, MachineRepresentation::kTagged);
+ CheckChange(IrOpcode::kChangeUint64ToTagged, MachineRepresentation::kWord64,
+ TypeCache::Get().kPositiveSafeInteger,
+ MachineRepresentation::kTagged);
+
+ CheckTwoChanges(IrOpcode::kTruncateInt64ToInt32,
+ IrOpcode::kChangeInt31ToTaggedSigned,
+ MachineRepresentation::kWord64, Type::Signed31(),
+ MachineRepresentation::kTaggedSigned);
+ if (SmiValuesAre32Bits()) {
+ CheckTwoChanges(IrOpcode::kTruncateInt64ToInt32,
+ IrOpcode::kChangeInt32ToTagged,
+ MachineRepresentation::kWord64, Type::Signed32(),
+ MachineRepresentation::kTaggedSigned);
+ }
+ CheckChange(IrOpcode::kCheckedInt64ToTaggedSigned,
+ MachineRepresentation::kWord64, TypeCache::Get().kSafeInteger,
+ MachineRepresentation::kTaggedSigned,
+ UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair()));
+ CheckChange(IrOpcode::kCheckedUint64ToTaggedSigned,
+ MachineRepresentation::kWord64,
+ TypeCache::Get().kPositiveSafeInteger,
+ MachineRepresentation::kTaggedSigned,
+ UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair()));
+
+ CheckTwoChanges(IrOpcode::kChangeInt64ToFloat64,
+ IrOpcode::kChangeFloat64ToTaggedPointer,
+ MachineRepresentation::kWord64, TypeCache::Get().kSafeInteger,
+ MachineRepresentation::kTaggedPointer);
+}
+
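The kChangeInt32ToInt64 versus kChangeUint32ToUint64 split exercised above is ordinary sign extension versus zero extension; a standalone illustration in plain C++:

#include <cassert>
#include <cstdint>

int main() {
  int32_t v = -1;
  // Signed32 sources sign-extend, which is what kChangeInt32ToInt64 performs:
  int64_t sign_extended = static_cast<int64_t>(v);
  assert(sign_extended == -1);  // bit pattern 0xFFFFFFFFFFFFFFFF
  // Unsigned32 sources zero-extend, matching kChangeUint32ToUint64:
  int64_t zero_extended = static_cast<int64_t>(static_cast<uint32_t>(v));
  assert(zero_extended == 0xFFFFFFFF);  // bit pattern 0x00000000FFFFFFFF
  return 0;
}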
TEST(SingleChanges) {
CheckChange(IrOpcode::kChangeTaggedToBit, MachineRepresentation::kTagged,
Type::Boolean(), MachineRepresentation::kBit);
@@ -371,6 +516,10 @@ TEST(SingleChanges) {
// Int32,Uint32 <-> Float64 are actually machine conversions.
CheckChange(IrOpcode::kChangeInt32ToFloat64, MachineRepresentation::kWord32,
Type::Signed32(), MachineRepresentation::kFloat64);
+ CheckChange(IrOpcode::kChangeInt32ToFloat64, MachineRepresentation::kWord32,
+ Type::Signed32OrMinusZero(), MachineRepresentation::kFloat64,
+ UseInfo(MachineRepresentation::kFloat64,
+ Truncation::Any(kIdentifyZeros)));
CheckChange(IrOpcode::kChangeUint32ToFloat64, MachineRepresentation::kWord32,
Type::Unsigned32(), MachineRepresentation::kFloat64);
CheckChange(IrOpcode::kChangeFloat64ToInt32, MachineRepresentation::kFloat64,
@@ -425,7 +574,8 @@ TEST(SignednessInWord32) {
Type::Signed32(), MachineRepresentation::kWord32);
CheckChange(IrOpcode::kTruncateFloat64ToWord32,
MachineRepresentation::kFloat64, Type::Number(),
- MachineRepresentation::kWord32);
+ MachineRepresentation::kWord32,
+ UseInfo(MachineRepresentation::kWord32, Truncation::Word32()));
CheckChange(IrOpcode::kCheckedTruncateTaggedToWord32,
MachineRepresentation::kTagged, Type::NonInternal(),
MachineRepresentation::kWord32,
@@ -523,16 +673,10 @@ TEST(TypeErrors) {
MachineRepresentation::kWord64);
r.CheckTypeError(MachineRepresentation::kTagged, Type::Boolean(),
MachineRepresentation::kWord64);
-
- // Word64 / Word32 shouldn't be implicitly converted.
r.CheckTypeError(MachineRepresentation::kWord64, Type::Internal(),
MachineRepresentation::kWord32);
r.CheckTypeError(MachineRepresentation::kWord32, Type::Number(),
MachineRepresentation::kWord64);
- r.CheckTypeError(MachineRepresentation::kWord32, Type::Signed32(),
- MachineRepresentation::kWord64);
- r.CheckTypeError(MachineRepresentation::kWord32, Type::Unsigned32(),
- MachineRepresentation::kWord64);
}
} // namespace compiler
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index c62ed69105..681669f334 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -60,7 +60,7 @@ class BytecodeGraphCallable {
public:
BytecodeGraphCallable(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate), function_(function) {}
- virtual ~BytecodeGraphCallable() {}
+ virtual ~BytecodeGraphCallable() = default;
MaybeHandle<Object> operator()(A... args) {
return CallFunction(isolate_, function_, args...);
@@ -79,7 +79,7 @@ class BytecodeGraphTester {
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
}
- virtual ~BytecodeGraphTester() {}
+ virtual ~BytecodeGraphTester() = default;
template <class... A>
BytecodeGraphCallable<A...> GetCallable(
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index efae91343f..82c4c447f2 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -20,21 +20,6 @@ TEST(Call) {
T.CheckCall(T.Val("6x"), T.NewObject("({d:'x'})"), T.NewObject("f"));
}
-
-TEST(ClassOf) {
- FunctionTester T("(function(a) { return %_ClassOf(a); })", flags);
-
- T.CheckCall(T.Val("Function"), T.NewObject("(function() {})"));
- T.CheckCall(T.Val("Array"), T.NewObject("([1])"));
- T.CheckCall(T.Val("Object"), T.NewObject("({})"));
- T.CheckCall(T.Val("RegExp"), T.NewObject("(/x/)"));
- T.CheckCall(T.null(), T.undefined());
- T.CheckCall(T.null(), T.null());
- T.CheckCall(T.null(), T.Val("x"));
- T.CheckCall(T.null(), T.Val(1));
-}
-
-
TEST(IsArray) {
FunctionTester T("(function(a) { return %_IsArray(a); })", flags);
@@ -50,36 +35,6 @@ TEST(IsArray) {
}
-TEST(IsDate) {
- FunctionTester T("(function(a) { return %_IsDate(a); })", flags);
-
- T.CheckTrue(T.NewObject("new Date()"));
- T.CheckFalse(T.NewObject("(function() {})"));
- T.CheckFalse(T.NewObject("([1])"));
- T.CheckFalse(T.NewObject("({})"));
- T.CheckFalse(T.NewObject("(/x/)"));
- T.CheckFalse(T.undefined());
- T.CheckFalse(T.null());
- T.CheckFalse(T.Val("x"));
- T.CheckFalse(T.Val(1));
-}
-
-
-TEST(IsFunction) {
- FunctionTester T("(function(a) { return %_IsFunction(a); })", flags);
-
- T.CheckFalse(T.NewObject("new Date()"));
- T.CheckTrue(T.NewObject("(function() {})"));
- T.CheckFalse(T.NewObject("([1])"));
- T.CheckFalse(T.NewObject("({})"));
- T.CheckFalse(T.NewObject("(/x/)"));
- T.CheckFalse(T.undefined());
- T.CheckFalse(T.null());
- T.CheckFalse(T.Val("x"));
- T.CheckFalse(T.Val(1));
-}
-
-
TEST(IsSmi) {
FunctionTester T("(function(a) { return %_IsSmi(a); })", flags);
@@ -96,15 +51,6 @@ TEST(IsSmi) {
T.CheckFalse(T.Val(-2.3));
}
-
-TEST(StringAdd) {
- FunctionTester T("(function(a,b) { return %_StringAdd(a,b); })", flags);
-
- T.CheckCall(T.Val("aaabbb"), T.Val("aaa"), T.Val("bbb"));
- T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(""));
- T.CheckCall(T.Val("bbb"), T.Val(""), T.Val("bbb"));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 71adbc738d..419d1b0699 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -4174,7 +4174,6 @@ TEST(RunChangeFloat64ToInt32_B) {
}
}
-
TEST(RunChangeFloat64ToUint32) {
BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Float64());
m.Return(m.ChangeFloat64ToUint32(m.Parameter(0)));
@@ -6340,6 +6339,29 @@ TEST(RunCallCFunction9) {
#if V8_TARGET_ARCH_64_BIT
// TODO(titzer): run int64 tests on all platforms when supported.
+TEST(RunChangeFloat64ToInt64) {
+ BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float64());
+ m.Return(m.ChangeFloat64ToInt64(m.Parameter(0)));
+
+ FOR_INT64_INPUTS(i) {
+ double input = static_cast<double>(*i);
+ if (static_cast<int64_t>(input) == *i) {
+ CHECK_EQ(static_cast<int64_t>(input), m.Call(input));
+ }
+ }
+}
+
+TEST(RunChangeInt64ToFloat64) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Int64());
+ m.Return(m.ChangeInt64ToFloat64(m.Parameter(0)));
+ FOR_INT64_INPUTS(i) {
+ double output = static_cast<double>(*i);
+ if (static_cast<int64_t>(output) == *i) {
+ CHECK_EQ(output, m.Call(*i));
+ }
+ }
+}
+
TEST(RunBitcastInt64ToFloat64) {
int64_t input = 1;
Float64 output;
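Both new tests above skip any input whose double image is lossy, since a double's 53-bit significand cannot hold every int64. A standalone illustration of the boundary the guard protects against:

#include <cassert>
#include <cstdint>

int main() {
  int64_t exact = int64_t{1} << 53;  // largest power of two that round-trips
  int64_t inexact = exact + 1;       // has no exact double representation
  assert(static_cast<int64_t>(static_cast<double>(exact)) == exact);
  // 2^53 + 1 rounds to 2^53, so the round trip is lossy and the tests'
  // `static_cast<int64_t>(...) == *i` guard would filter it out:
  assert(static_cast<int64_t>(static_cast<double>(inexact)) == exact);
  return 0;
}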
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index b23bd500c6..2ddaa1bc07 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <vector>
+
#include "src/assembler.h"
#include "src/codegen.h"
#include "src/compiler/linkage.h"
@@ -9,6 +11,7 @@
#include "src/machine-type.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
+#include "src/wasm/wasm-linkage.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
@@ -20,8 +23,6 @@ namespace internal {
namespace compiler {
namespace test_run_native_calls {
-const auto GetRegConfig = RegisterConfiguration::Default;
-
namespace {
typedef float float32;
typedef double float64;
@@ -84,21 +85,12 @@ class RegisterPairs : public Pairs {
GetRegConfig()->allocatable_general_codes()) {}
};
-
-// Pairs of double registers.
+// Pairs of float registers.
class Float32RegisterPairs : public Pairs {
public:
Float32RegisterPairs()
- : Pairs(
- 100,
-#if V8_TARGET_ARCH_ARM
- // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
- GetRegConfig()->num_allocatable_double_registers() / 2 - 2,
-#else
- GetRegConfig()->num_allocatable_double_registers(),
-#endif
- GetRegConfig()->allocatable_double_codes()) {
- }
+ : Pairs(100, GetRegConfig()->num_allocatable_float_registers(),
+ GetRegConfig()->allocatable_float_codes()) {}
};
@@ -112,48 +104,39 @@ class Float64RegisterPairs : public Pairs {
// Helper for allocating either a GP or FP reg, or the next stack slot.
-struct Allocator {
- Allocator(int* gp, int gpc, int* fp, int fpc)
- : gp_count(gpc),
- gp_offset(0),
- gp_regs(gp),
- fp_count(fpc),
- fp_offset(0),
- fp_regs(fp),
- stack_offset(0) {}
-
- int gp_count;
- int gp_offset;
- int* gp_regs;
-
- int fp_count;
- int fp_offset;
- int* fp_regs;
-
- int stack_offset;
+class Allocator {
+ public:
+ Allocator(int* gp, int gpc, int* fp, int fpc) : stack_offset_(0) {
+ for (int i = 0; i < gpc; ++i) {
+ gp_.push_back(Register::from_code(gp[i]));
+ }
+ for (int i = 0; i < fpc; ++i) {
+ fp_.push_back(DoubleRegister::from_code(fp[i]));
+ }
+ Reset();
+ }
+
+ int stack_offset() const { return stack_offset_; }
LinkageLocation Next(MachineType type) {
if (IsFloatingPoint(type.representation())) {
// Allocate a floating point register/stack location.
- if (fp_offset < fp_count) {
- int code = fp_regs[fp_offset++];
-#if V8_TARGET_ARCH_ARM
- // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
- if (type.representation() == MachineRepresentation::kFloat32) code *= 2;
-#endif
+ if (reg_allocator_->CanAllocateFP(type.representation())) {
+ int code = reg_allocator_->NextFpReg(type.representation());
return LinkageLocation::ForRegister(code, type);
} else {
- int offset = -1 - stack_offset;
- stack_offset += StackWords(type);
+ int offset = -1 - stack_offset_;
+ stack_offset_ += StackWords(type);
return LinkageLocation::ForCallerFrameSlot(offset, type);
}
} else {
// Allocate a general purpose register/stack location.
- if (gp_offset < gp_count) {
- return LinkageLocation::ForRegister(gp_regs[gp_offset++], type);
+ if (reg_allocator_->CanAllocateGP()) {
+ int code = reg_allocator_->NextGpReg();
+ return LinkageLocation::ForRegister(code, type);
} else {
- int offset = -1 - stack_offset;
- stack_offset += StackWords(type);
+ int offset = -1 - stack_offset_;
+ stack_offset_ += StackWords(type);
return LinkageLocation::ForCallerFrameSlot(offset, type);
}
}
@@ -163,10 +146,17 @@ struct Allocator {
return size <= kPointerSize ? 1 : size / kPointerSize;
}
void Reset() {
- fp_offset = 0;
- gp_offset = 0;
- stack_offset = 0;
+ stack_offset_ = 0;
+ reg_allocator_.reset(
+ new wasm::LinkageAllocator(gp_.data(), static_cast<int>(gp_.size()),
+ fp_.data(), static_cast<int>(fp_.size())));
}
+
+ private:
+ std::vector<Register> gp_;
+ std::vector<DoubleRegister> fp_;
+ std::unique_ptr<wasm::LinkageAllocator> reg_allocator_;
+ int stack_offset_;
};
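The rewritten Allocator keeps only the stack offset and delegates register choice to wasm::LinkageAllocator. A dependency-free sketch of the same register-else-stack-slot policy (every type here is a simplified stand-in, not the V8 one):

#include <cstddef>
#include <utility>
#include <vector>

struct ToyLocation {
  bool on_stack;
  int code_or_offset;  // register code, or a negative caller-frame slot
};

class ToyLinkageAllocator {
 public:
  explicit ToyLinkageAllocator(std::vector<int> regs) : regs_(std::move(regs)) {}
  bool CanAllocate() const { return next_ < regs_.size(); }
  int NextReg() { return regs_[next_++]; }

 private:
  std::vector<int> regs_;
  std::size_t next_ = 0;
};

// Mirrors Next() above: hand out a register while one is left, then fall
// back to caller-frame slots using the same -1 - offset numbering.
ToyLocation NextLocation(ToyLinkageAllocator* regs, int* stack_offset,
                         int words) {
  if (regs->CanAllocate()) return {false, regs->NextReg()};
  int offset = -1 - *stack_offset;
  *stack_offset += words;
  return {true, offset};
}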
@@ -197,7 +187,7 @@ class RegisterConfig {
MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
- int stack_param_count = params.stack_offset;
+ int stack_param_count = params.stack_offset();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
target_type, // target MachineType
@@ -868,7 +858,7 @@ TEST(Float32Select_registers) {
return;
}
- int rarray[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
+ int rarray[] = {GetRegConfig()->GetAllocatableFloatCode(0)};
ArgsBuffer<float32>::Sig sig(2);
Float32RegisterPairs pairs;
@@ -912,7 +902,7 @@ TEST(Float64Select_registers) {
TEST(Float32Select_stack_params_return_reg) {
- int rarray[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
+ int rarray[] = {GetRegConfig()->GetAllocatableFloatCode(0)};
Allocator params(nullptr, 0, nullptr, 0);
Allocator rets(nullptr, 0, rarray, 1);
RegisterConfig config(params, rets);
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index e66c1ff454..8e652ec3b5 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -345,29 +345,37 @@ template <typename type>
struct FloatCompareWrapper {
type value;
explicit FloatCompareWrapper(type x) : value(x) {}
- bool operator==(type other) const {
+ bool operator==(FloatCompareWrapper<type> const& other) const {
return std::isnan(value)
- ? std::isnan(other)
- : value == other && std::signbit(value) == std::signbit(other);
+ ? std::isnan(other.value)
+ : value == other.value &&
+ std::signbit(value) == std::signbit(other.value);
}
};
template <typename type>
std::ostream& operator<<(std::ostream& out, FloatCompareWrapper<type> wrapper) {
- return out << wrapper.value;
+ uint8_t bytes[sizeof(type)];
+ memcpy(bytes, &wrapper.value, sizeof(type));
+ out << wrapper.value << " (0x";
+ const char* kHexDigits = "0123456789ABCDEF";
+ for (unsigned i = 0; i < sizeof(type); ++i) {
+ out << kHexDigits[bytes[i] >> 4] << kHexDigits[bytes[i] & 15];
+ }
+ return out << ")";
}
#define CHECK_FLOAT_EQ(lhs, rhs) \
do { \
using FloatWrapper = ::v8::internal::compiler::FloatCompareWrapper<float>; \
- CHECK_EQ(FloatWrapper(lhs), rhs); \
+ CHECK_EQ(FloatWrapper(lhs), FloatWrapper(rhs)); \
} while (false)
#define CHECK_DOUBLE_EQ(lhs, rhs) \
do { \
using DoubleWrapper = \
::v8::internal::compiler::FloatCompareWrapper<double>; \
- CHECK_EQ(DoubleWrapper(lhs), rhs); \
+ CHECK_EQ(DoubleWrapper(lhs), DoubleWrapper(rhs)); \
} while (false)
} // namespace compiler
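The wrapper's operator== is now symmetric over two wrapped values: NaN only equals NaN, and equal magnitudes must also agree in sign, so 0.0 and -0.0 compare unequal; the streaming operator additionally dumps the raw bytes so sign and payload mismatches are visible in failure output. A standalone copy of the comparison policy, assuming only standard C++:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

bool BitAwareEqual(double a, double b) {
  if (std::isnan(a)) return std::isnan(b);  // NaN matches only NaN
  return a == b && std::signbit(a) == std::signbit(b);  // 0.0 != -0.0 here
}

int main() {
  std::printf("%d\n", BitAwareEqual(NAN, NAN));   // 1
  std::printf("%d\n", BitAwareEqual(0.0, -0.0));  // 0, though 0.0 == -0.0
  // Dumping the bits, as the new operator<< does, exposes the sign:
  uint64_t bits;
  double minus_zero = -0.0;
  std::memcpy(&bits, &minus_zero, sizeof minus_zero);
  std::printf("0x%016llX\n", static_cast<unsigned long long>(bits));
  return 0;
}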
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 5beed7f4cb..8f70847c9a 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -9,16 +9,15 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
+#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
namespace heap {
void SealCurrentObjects(Heap* heap) {
- heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
- GarbageCollectionReason::kTesting);
- heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
- GarbageCollectionReason::kTesting);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
heap->mark_compact_collector()->EnsureSweepingCompleted();
heap->old_space()->FreeLinearAllocationArea();
for (Page* page : *heap->old_space()) {
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index 5a19f806bc..a669233b3a 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -168,84 +168,10 @@ TEST(StressJS) {
.ToLocalChecked()
->Run(env)
.ToLocalChecked();
- CHECK_EQ(true, result->BooleanValue(env).FromJust());
+ CHECK_EQ(true, result->BooleanValue(CcTest::isolate()));
env->Exit();
}
-
-// CodeRange test.
-// Tests memory management in a CodeRange by allocating and freeing blocks,
-// using a pseudorandom generator to choose block sizes geometrically
-// distributed between 2 * Page::kPageSize and 2^5 + 1 * Page::kPageSize.
-// Ensure that the freed chunks are collected and reused by allocating (in
-// total) more than the size of the CodeRange.
-
-// This pseudorandom generator does not need to be particularly good.
-// Use the lower half of the V8::Random() generator.
-unsigned int Pseudorandom() {
- static uint32_t lo = 2345;
- lo = 18273 * (lo & 0xFFFF) + (lo >> 16); // Provably not 0.
- return lo & 0xFFFF;
-}
-
-namespace {
-
-// Plain old data class. Represents a block of allocated memory.
-class Block {
- public:
- Block(Address base_arg, int size_arg)
- : base(base_arg), size(size_arg) {}
-
- Address base;
- int size;
-};
-
-} // namespace
-
-TEST(CodeRange) {
- const size_t code_range_size = 32*MB;
- CcTest::InitializeVM();
- CodeRange code_range(reinterpret_cast<Isolate*>(CcTest::isolate()),
- code_range_size);
- size_t current_allocated = 0;
- size_t total_allocated = 0;
- std::vector<Block> blocks;
- blocks.reserve(1000);
-
- while (total_allocated < 5 * code_range_size) {
- if (current_allocated < code_range_size / 10) {
- // Allocate a block.
- // Geometrically distributed sizes, greater than
- // kMaxRegularHeapObjectSize (which is greater than code page area).
- // TODO(gc): instead of using 3 use some constant based on code_range_size
- // and kMaxRegularHeapObjectSize.
- size_t requested = (kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
- Pseudorandom() % 5000 + 1;
- requested = RoundUp(requested, MemoryAllocator::GetCommitPageSize());
- size_t allocated = 0;
-
- // The request size has to be at least 2 code guard pages larger than the
- // actual commit size.
- Address base = code_range.AllocateRawMemory(
- requested, requested - (2 * MemoryAllocator::CodePageGuardSize()),
- &allocated);
- CHECK_NE(base, kNullAddress);
- blocks.emplace_back(base, static_cast<int>(allocated));
- current_allocated += static_cast<int>(allocated);
- total_allocated += static_cast<int>(allocated);
- } else {
- // Free a block.
- size_t index = Pseudorandom() % blocks.size();
- code_range.FreeRawMemory(blocks[index].base, blocks[index].size);
- current_allocated -= blocks[index].size;
- if (index < blocks.size() - 1) {
- blocks[index] = blocks.back();
- }
- blocks.pop_back();
- }
- }
-}
-
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index cb35a73126..272c8831fd 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -323,10 +323,8 @@ UNINITIALIZED_TEST(ArrayBuffer_SemiSpaceCopyMultipleTasks) {
Heap* heap = i_isolate->heap();
// Ensure heap is in a clean state.
- heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
- GarbageCollectionReason::kTesting);
- heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
- GarbageCollectionReason::kTesting);
+ CcTest::CollectAllGarbage(i_isolate);
+ CcTest::CollectAllGarbage(i_isolate);
Local<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::New(isolate, 100);
Handle<JSArrayBuffer> buf1 = v8::Utils::OpenHandle(*ab1);
diff --git a/deps/v8/test/cctest/heap/test-concurrent-marking.cc b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
index 0f10b1b9bd..d49ccf6213 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-marking.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
@@ -39,9 +39,10 @@ TEST(ConcurrentMarking) {
}
ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+ ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
- ConcurrentMarking* concurrent_marking =
- new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
+ ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
+ heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
@@ -61,9 +62,10 @@ TEST(ConcurrentMarkingReschedule) {
}
ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+ ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
- ConcurrentMarking* concurrent_marking =
- new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
+ ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
+ heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
@@ -87,9 +89,10 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
}
ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+ ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
- ConcurrentMarking* concurrent_marking =
- new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
+ ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
+ heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
for (int i = 0; i < 5000; i++)
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
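All three tests in this file change identically: the ConcurrentMarking constructor grows a sixth argument, a worklist for objects that need embedder tracing. The post-change construction pattern, shown once and taken verbatim from the hunks:

ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
ConcurrentMarking::EmbedderTracingWorklist embedder_objects;  // new worklist
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
    heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);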
diff --git a/deps/v8/test/cctest/heap/test-external-string-tracker.cc b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
index 501825a296..cfade38da7 100644
--- a/deps/v8/test/cctest/heap/test-external-string-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
@@ -27,14 +27,14 @@ class TestOneByteResource : public v8::String::ExternalOneByteStringResource {
length_(strlen(data) - offset),
counter_(counter) {}
- ~TestOneByteResource() {
+ ~TestOneByteResource() override {
i::DeleteArray(orig_data_);
if (counter_ != nullptr) ++*counter_;
}
- const char* data() const { return data_; }
+ const char* data() const override { return data_; }
- size_t length() const { return length_; }
+ size_t length() const override { return length_; }
private:
const char* orig_data_;
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index f73f6f0195..8c6a3c446c 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -898,14 +898,14 @@ static const char* not_so_random_string_table[] = {
"volatile",
"while",
"with",
- 0
+ nullptr
};
-
static void CheckInternalizedStrings(const char** strings) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- for (const char* string = *strings; *strings != 0; string = *strings++) {
+ for (const char* string = *strings; *strings != nullptr;
+ string = *strings++) {
HandleScope scope(isolate);
Handle<String> a =
isolate->factory()->InternalizeUtf8String(CStrVector(string));
@@ -2192,9 +2192,9 @@ HEAP_TEST(GCFlags) {
heap->set_current_gc_flags(Heap::kNoGCFlags);
CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags_);
- // Set the flags to check whether we appropriately resets them after the GC.
- heap->set_current_gc_flags(Heap::kAbortIncrementalMarkingMask);
- CcTest::CollectAllGarbage(Heap::kReduceMemoryFootprintMask);
+ // Check whether we appropriately reset flags after GC.
+ CcTest::heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
+ GarbageCollectionReason::kTesting);
CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags_);
MarkCompactCollector* collector = heap->mark_compact_collector();
@@ -2212,7 +2212,7 @@ HEAP_TEST(GCFlags) {
// NewSpace scavenges should not overwrite the flags.
CHECK_NE(0, heap->current_gc_flags_ & Heap::kReduceMemoryFootprintMask);
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags_);
}
@@ -3097,14 +3097,14 @@ TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
CHECK_EQ(expected_slots, feedback_helper.slot_count());
int slot1 = 0;
int slot2 = 1;
- CHECK(feedback_vector->Get(feedback_helper.slot(slot1))->IsWeakHeapObject());
- CHECK(feedback_vector->Get(feedback_helper.slot(slot2))->IsWeakHeapObject());
+ CHECK(feedback_vector->Get(feedback_helper.slot(slot1))->IsWeak());
+ CHECK(feedback_vector->Get(feedback_helper.slot(slot2))->IsWeak());
heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::CollectAllGarbage();
- feedback_vector->Get(feedback_helper.slot(slot1))->IsWeakHeapObject();
- feedback_vector->Get(feedback_helper.slot(slot2))->IsWeakHeapObject();
+ CHECK(feedback_vector->Get(feedback_helper.slot(slot1))->IsWeak());
+ CHECK(feedback_vector->Get(feedback_helper.slot(slot2))->IsWeak());
}
@@ -3134,12 +3134,12 @@ TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
Handle<FeedbackVector> vector(f->feedback_vector(), f->GetIsolate());
- CHECK(vector->Get(FeedbackSlot(0))->IsWeakOrClearedHeapObject());
+ CHECK(vector->Get(FeedbackSlot(0))->IsWeakOrCleared());
heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::CollectAllGarbage();
- CHECK(vector->Get(FeedbackSlot(0))->IsWeakOrClearedHeapObject());
+ CHECK(vector->Get(FeedbackSlot(0))->IsWeakOrCleared());
}
TEST(IncrementalMarkingPreservesMonomorphicIC) {
@@ -3247,14 +3247,14 @@ class SourceResource : public v8::String::ExternalOneByteStringResource {
explicit SourceResource(const char* data)
: data_(data), length_(strlen(data)) { }
- virtual void Dispose() {
+ void Dispose() override {
i::DeleteArray(data_);
data_ = nullptr;
}
- const char* data() const { return data_; }
+ const char* data() const override { return data_; }
- size_t length() const { return length_; }
+ size_t length() const override { return length_; }
bool IsDisposed() { return data_ == nullptr; }
@@ -3636,9 +3636,9 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
dependency->group() ==
DependentCode::kAllocationSiteTenuringChangedGroup);
CHECK_EQ(1, dependency->count());
- CHECK(dependency->object_at(0)->IsWeakHeapObject());
+ CHECK(dependency->object_at(0)->IsWeak());
Code* function_bar =
- Code::cast(dependency->object_at(0)->ToWeakHeapObject());
+ Code::cast(dependency->object_at(0)->GetHeapObjectAssumeWeak());
CHECK_EQ(bar_handle->code(), function_bar);
dependency = dependency->next_link();
dependency_group_count++;
@@ -3655,7 +3655,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
// The site still exists because of our global handle, but the code is no
// longer referred to by dependent_code().
- CHECK(site->dependent_code()->object_at(0)->IsClearedWeakHeapObject());
+ CHECK(site->dependent_code()->object_at(0)->IsCleared());
}
void CheckNumberOfAllocations(Heap* heap, const char* source,
@@ -4133,18 +4133,18 @@ TEST(WeakFunctionInConstructor) {
Handle<FeedbackVector>(createObj->feedback_vector(), CcTest::i_isolate());
for (int i = 0; i < 20; i++) {
MaybeObject* slot_value = feedback_vector->Get(FeedbackSlot(0));
- CHECK(slot_value->IsWeakOrClearedHeapObject());
- if (slot_value->IsClearedWeakHeapObject()) break;
+ CHECK(slot_value->IsWeakOrCleared());
+ if (slot_value->IsCleared()) break;
CcTest::CollectAllGarbage();
}
MaybeObject* slot_value = feedback_vector->Get(FeedbackSlot(0));
- CHECK(slot_value->IsClearedWeakHeapObject());
+ CHECK(slot_value->IsCleared());
CompileRun(
"function coat() { this.x = 6; }"
"createObj(coat);");
slot_value = feedback_vector->Get(FeedbackSlot(0));
- CHECK(slot_value->IsWeakHeapObject());
+ CHECK(slot_value->IsWeak());
}
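This hunk and the surrounding ones are a mechanical rename of the MaybeObject predicates: IsWeakHeapObject becomes IsWeak, IsClearedWeakHeapObject becomes IsCleared, ToWeakHeapObject becomes GetHeapObjectIfWeak, and so on. The resulting inspection idiom, assembled only from calls that appear in these hunks:

MaybeObject* slot = feedback_vector->Get(FeedbackSlot(0));
HeapObject* target;
if (slot->GetHeapObjectIfWeak(&target)) {
  // The referent is still alive and weakly held.
} else if (slot->IsCleared()) {
  // The referent was collected and the weak slot cleared.
}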
@@ -4511,8 +4511,7 @@ HEAP_TEST(Regress538257) {
heap::ForceEvacuationCandidate(Page::FromAddress(objects[i]->address()));
}
heap::SimulateFullSpace(old_space);
- heap->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
- i::GarbageCollectionReason::kTesting);
+ CcTest::CollectAllGarbage();
// If we get this far, we've successfully aborted compaction. Any further
// allocations might trigger OOM.
}
@@ -4704,7 +4703,7 @@ TEST(Regress3877) {
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
weak_prototype_holder->Set(0, HeapObjectReference::Weak(*proto));
}
- CHECK(!weak_prototype_holder->Get(0)->IsClearedWeakHeapObject());
+ CHECK(!weak_prototype_holder->Get(0)->IsCleared());
CompileRun(
"var a = { };"
"a.x = new cls();"
@@ -4713,13 +4712,13 @@ TEST(Regress3877) {
CcTest::CollectAllGarbage();
}
// The map of a.x keeps prototype alive
- CHECK(!weak_prototype_holder->Get(0)->IsClearedWeakHeapObject());
+ CHECK(!weak_prototype_holder->Get(0)->IsCleared());
// Change the map of a.x and make the previous map garbage collectable.
CompileRun("a.x.__proto__ = {};");
for (int i = 0; i < 4; i++) {
CcTest::CollectAllGarbage();
}
- CHECK(weak_prototype_holder->Get(0)->IsClearedWeakHeapObject());
+ CHECK(weak_prototype_holder->Get(0)->IsCleared());
}
Handle<WeakFixedArray> AddRetainedMap(Isolate* isolate, Heap* heap) {
@@ -4742,15 +4741,15 @@ void CheckMapRetainingFor(int n) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
Handle<WeakFixedArray> array_with_map = AddRetainedMap(isolate, heap);
- CHECK(array_with_map->Get(0)->IsWeakHeapObject());
+ CHECK(array_with_map->Get(0)->IsWeak());
for (int i = 0; i < n; i++) {
heap::SimulateIncrementalMarking(heap);
CcTest::CollectGarbage(OLD_SPACE);
}
- CHECK(array_with_map->Get(0)->IsWeakHeapObject());
+ CHECK(array_with_map->Get(0)->IsWeak());
heap::SimulateIncrementalMarking(heap);
CcTest::CollectGarbage(OLD_SPACE);
- CHECK(array_with_map->Get(0)->IsClearedWeakHeapObject());
+ CHECK(array_with_map->Get(0)->IsCleared());
}
@@ -4766,8 +4765,8 @@ TEST(MapRetaining) {
}
TEST(WritableVsImmortalRoots) {
- for (int i = 0; i < Heap::kStrongRootListLength; ++i) {
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
+ for (RootIndex root_index = RootIndex::kFirstRoot;
+ root_index <= RootIndex::kLastRoot; ++root_index) {
bool writable = Heap::RootCanBeWrittenAfterInitialization(root_index);
bool immortal = Heap::RootIsImmortalImmovable(root_index);
// A root value can be writable, immortal, or neither, but not both.
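The loop now presupposes that RootIndex is a scoped enum with kFirstRoot/kLastRoot sentinels and an overloaded increment, since ++ is not defined on scoped enums by default. A minimal standalone version of the idiom (the enumerators are stand-ins):

#include <cstdio>

enum class RootIndex : int {
  kFirstRoot = 0,
  kStringTable = 0,
  kEmptyFixedArray,
  kLastRoot = kEmptyFixedArray
};

inline RootIndex& operator++(RootIndex& index) {
  index = static_cast<RootIndex>(static_cast<int>(index) + 1);
  return index;
}

int main() {
  for (RootIndex i = RootIndex::kFirstRoot; i <= RootIndex::kLastRoot; ++i) {
    std::printf("root %d\n", static_cast<int>(i));
  }
  return 0;
}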
@@ -5427,11 +5426,11 @@ class StaticOneByteResource : public v8::String::ExternalOneByteStringResource {
public:
explicit StaticOneByteResource(const char* data) : data_(data) {}
- ~StaticOneByteResource() {}
+ ~StaticOneByteResource() override = default;
- const char* data() const { return data_; }
+ const char* data() const override { return data_; }
- size_t length() const { return strlen(data_); }
+ size_t length() const override { return strlen(data_); }
private:
const char* data_;
@@ -5676,6 +5675,7 @@ TEST(Regress618958) {
}
TEST(YoungGenerationLargeObjectAllocation) {
+ if (FLAG_minor_mc) return;
FLAG_young_generation_large_objects = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -5684,13 +5684,26 @@ TEST(YoungGenerationLargeObjectAllocation) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
- CHECK(chunk->owner()->identity() == LO_SPACE);
+ CHECK_EQ(LO_SPACE, chunk->owner()->identity());
CHECK(!chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(20000);
chunk = MemoryChunk::FromAddress(array_small->address());
- CHECK(chunk->owner()->identity() == NEW_LO_SPACE);
+ CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+
+ Handle<Object> number = isolate->factory()->NewHeapNumber(123.456);
+ array_small->set(0, *number);
+
+ CcTest::CollectGarbage(NEW_SPACE);
+
+ // After the first young generation GC array_small will be in the old
+ // generation large object space.
+ chunk = MemoryChunk::FromAddress(array_small->address());
+ CHECK_EQ(LO_SPACE, chunk->owner()->identity());
+ CHECK(!chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+
+ CcTest::CollectAllAvailableGarbage();
}
TEST(UncommitUnusedLargeObjectMemory) {
diff --git a/deps/v8/test/cctest/heap/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index bbf630f0ba..8213ea6080 100644
--- a/deps/v8/test/cctest/heap/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -33,12 +33,13 @@ namespace heap {
class MockPlatform : public TestPlatform {
public:
- MockPlatform() : task_(nullptr), old_platform_(i::V8::GetCurrentPlatform()) {
+ MockPlatform()
+ : taskrunner_(new MockTaskRunner()),
+ old_platform_(i::V8::GetCurrentPlatform()) {
// Now that it's completely constructed, make this the current platform.
i::V8::SetPlatformForTesting(this);
}
- virtual ~MockPlatform() {
- delete task_;
+ ~MockPlatform() override {
i::V8::SetPlatformForTesting(old_platform_);
for (auto& task : worker_tasks_) {
old_platform_->CallOnWorkerThread(std::move(task));
@@ -46,8 +47,9 @@ class MockPlatform : public TestPlatform {
worker_tasks_.clear();
}
- void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
- task_ = task;
+ std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
+ v8::Isolate* isolate) override {
+ return taskrunner_;
}
void CallOnWorkerThread(std::unique_ptr<Task> task) override {
@@ -56,17 +58,40 @@ class MockPlatform : public TestPlatform {
bool IdleTasksEnabled(v8::Isolate* isolate) override { return false; }
- bool PendingTask() { return task_ != nullptr; }
+ bool PendingTask() { return taskrunner_->PendingTask(); }
- void PerformTask() {
- Task* task = task_;
- task_ = nullptr;
- task->Run();
- delete task;
- }
+ void PerformTask() { taskrunner_->PerformTask(); }
private:
- Task* task_;
+ class MockTaskRunner : public v8::TaskRunner {
+ public:
+ void PostTask(std::unique_ptr<v8::Task> task) override {
+ task_ = std::move(task);
+ }
+
+ void PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) override {
+ UNREACHABLE();
+ };
+
+ void PostIdleTask(std::unique_ptr<IdleTask> task) override {
+ UNREACHABLE();
+ }
+
+ bool IdleTasksEnabled() override { return false; };
+
+ bool PendingTask() { return task_ != nullptr; }
+
+ void PerformTask() {
+ std::unique_ptr<Task> task = std::move(task_);
+ task->Run();
+ }
+
+ private:
+ std::unique_ptr<Task> task_;
+ };
+
+ std::shared_ptr<MockTaskRunner> taskrunner_;
std::vector<std::unique_ptr<Task>> worker_tasks_;
v8::Platform* old_platform_;
};
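The mock migrates from the removed CallOnForegroundThread hook to the TaskRunner-based platform API, but tests keep driving it through the same two helpers. A usage sketch relying only on members defined above:

MockPlatform platform;
// ... start incremental marking so that V8 posts a foreground task ...
if (platform.PendingTask()) {
  platform.PerformTask();  // moves the task out of the runner, then runs it
}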
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/cctest/heap/test-lab.cc
index 42a3eec04c..ae0bfd969a 100644
--- a/deps/v8/test/cctest/heap/test-lab.cc
+++ b/deps/v8/test/cctest/heap/test-lab.cc
@@ -63,7 +63,7 @@ TEST(InvalidLab) {
TEST(UnusedLabImplicitClose) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- heap->root(Heap::kOnePointerFillerMapRootIndex);
+ heap->root(RootIndex::kOnePointerFillerMap);
const int kLabSize = 4 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index b930361eb9..e03d8229b3 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -27,6 +27,7 @@
#include <stdlib.h>
+#include "src/base/bounded-page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/heap/factory.h"
#include "src/heap/spaces-inl.h"
@@ -59,36 +60,43 @@ class TestMemoryAllocatorScope {
DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};
-
-// Temporarily sets a given code range in an isolate.
-class TestCodeRangeScope {
+// Temporarily sets a given code page allocator in an isolate.
+class TestCodePageAllocatorScope {
public:
- TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
+ TestCodePageAllocatorScope(Isolate* isolate,
+ v8::PageAllocator* code_page_allocator)
: isolate_(isolate),
- old_code_range_(isolate->heap()->memory_allocator()->code_range()) {
- isolate->heap()->memory_allocator()->code_range_ = code_range;
+ old_code_page_allocator_(
+ isolate->heap()->memory_allocator()->code_page_allocator()) {
+ isolate->heap()->memory_allocator()->code_page_allocator_ =
+ code_page_allocator;
}
- ~TestCodeRangeScope() {
- isolate_->heap()->memory_allocator()->code_range_ = old_code_range_;
+ ~TestCodePageAllocatorScope() {
+ isolate_->heap()->memory_allocator()->code_page_allocator_ =
+ old_code_page_allocator_;
}
private:
Isolate* isolate_;
- CodeRange* old_code_range_;
+ v8::PageAllocator* old_code_page_allocator_;
- DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
+ DISALLOW_COPY_AND_ASSIGN(TestCodePageAllocatorScope);
};
static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
- CodeRange* code_range, size_t reserve_area_size,
- size_t commit_area_size, Executability executable,
- Space* space) {
+ v8::PageAllocator* code_page_allocator,
+ size_t reserve_area_size, size_t commit_area_size,
+ Executability executable, Space* space) {
MemoryAllocator* memory_allocator =
new MemoryAllocator(isolate, heap->MaxReserved(), 0);
{
TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
- TestCodeRangeScope test_code_range_scope(isolate, code_range);
+ TestCodePageAllocatorScope test_code_page_allocator_scope(
+ isolate, code_page_allocator);
+
+ v8::PageAllocator* page_allocator =
+ memory_allocator->page_allocator(executable);
size_t header_size = (executable == EXECUTABLE)
? MemoryAllocator::CodePageGuardStartOffset()
@@ -98,14 +106,12 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
reserve_area_size, commit_area_size, executable, space);
- size_t alignment = code_range != nullptr && code_range->valid()
- ? MemoryChunk::kAlignment
- : CommitPageSize();
size_t reserved_size =
((executable == EXECUTABLE))
? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
- alignment)
- : RoundUp(header_size + reserve_area_size, CommitPageSize());
+ page_allocator->CommitPageSize())
+ : RoundUp(header_size + reserve_area_size,
+ page_allocator->CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() <
memory_chunk->address() + memory_chunk->size());
@@ -119,38 +125,6 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
delete memory_allocator;
}
-TEST(Regress3540) {
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- MemoryAllocator* memory_allocator =
- new MemoryAllocator(isolate, heap->MaxReserved(), 0);
- TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
- size_t code_range_size =
- kMinimumCodeRangeSize > 0 ? kMinimumCodeRangeSize : 3 * Page::kPageSize;
- CodeRange* code_range = new CodeRange(isolate, code_range_size);
-
- Address address;
- size_t size;
- size_t request_size = code_range_size - Page::kPageSize;
- address = code_range->AllocateRawMemory(
- request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
- &size);
- CHECK_NE(address, kNullAddress);
-
- Address null_address;
- size_t null_size;
- request_size = code_range_size - Page::kPageSize;
- null_address = code_range->AllocateRawMemory(
- request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
- &null_size);
- CHECK_EQ(null_address, kNullAddress);
-
- code_range->FreeRawMemory(address, size);
- delete code_range;
- memory_allocator->TearDown();
- delete memory_allocator;
-}
-
static unsigned int PseudorandomAreaSize() {
static uint32_t lo = 2345;
lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
@@ -162,24 +136,31 @@ TEST(MemoryChunk) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+
size_t reserve_area_size = 1 * MB;
size_t initial_commit_area_size;
for (int i = 0; i < 100; i++) {
initial_commit_area_size =
- RoundUp(PseudorandomAreaSize(), CommitPageSize());
+ RoundUp(PseudorandomAreaSize(), page_allocator->CommitPageSize());
// With CodeRange.
const size_t code_range_size = 32 * MB;
- CodeRange* code_range = new CodeRange(isolate, code_range_size);
+ VirtualMemory code_range_reservation(page_allocator, code_range_size,
+ nullptr, MemoryChunk::kAlignment);
+ CHECK(code_range_reservation.IsReserved());
+
+ base::BoundedPageAllocator code_page_allocator(
+ page_allocator, code_range_reservation.address(),
+ code_range_reservation.size(), MemoryChunk::kAlignment);
- VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
+ VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, EXECUTABLE, heap->code_space());
- VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
+ VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, NOT_EXECUTABLE,
heap->old_space());
- delete code_range;
}
}
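With CodeRange gone, tests now build an executable range themselves: reserve a chunk of address space, then wrap it in a base::BoundedPageAllocator that serves pages only from inside that reservation. The pattern from the loop above, condensed:

v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
VirtualMemory reservation(page_allocator, 32 * MB, nullptr,
                          MemoryChunk::kAlignment);
CHECK(reservation.IsReserved());
base::BoundedPageAllocator code_page_allocator(
    page_allocator, reservation.address(), reservation.size(),
    MemoryChunk::kAlignment);
// code_page_allocator now hands out pages only within the reservation.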
@@ -240,7 +221,8 @@ TEST(NewSpace) {
new MemoryAllocator(isolate, heap->MaxReserved(), 0);
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
- NewSpace new_space(heap, CcTest::heap()->InitialSemiSpaceSize(),
+ NewSpace new_space(heap, memory_allocator->data_page_allocator(),
+ CcTest::heap()->InitialSemiSpaceSize(),
CcTest::heap()->InitialSemiSpaceSize());
CHECK(new_space.MaximumCapacity());
@@ -522,9 +504,7 @@ UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
// Clear out any pre-existing garbage to make the test consistent
// across snapshot/no-snapshot builds.
- i_isolate->heap()->CollectAllGarbage(
- i::Heap::kFinalizeIncrementalMarkingMask,
- i::GarbageCollectionReason::kTesting);
+ CcTest::CollectAllGarbage(i_isolate);
NewSpace* new_space = i_isolate->heap()->new_space();
diff --git a/deps/v8/test/cctest/heap/test-unmapper.cc b/deps/v8/test/cctest/heap/test-unmapper.cc
index 880c54457c..1fbe5c1f5c 100644
--- a/deps/v8/test/cctest/heap/test-unmapper.cc
+++ b/deps/v8/test/cctest/heap/test-unmapper.cc
@@ -23,7 +23,7 @@ class MockPlatformForUnmapper : public TestPlatform {
// Now that it's completely constructed, make this the current platform.
i::V8::SetPlatformForTesting(this);
}
- virtual ~MockPlatformForUnmapper() {
+ ~MockPlatformForUnmapper() override {
delete task_;
i::V8::SetPlatformForTesting(old_platform_);
for (auto& task : worker_tasks_) {
diff --git a/deps/v8/test/cctest/heap/test-weak-references.cc b/deps/v8/test/cctest/heap/test-weak-references.cc
index a54b13afd2..bbe4776b93 100644
--- a/deps/v8/test/cctest/heap/test-weak-references.cc
+++ b/deps/v8/test/cctest/heap/test-weak-references.cc
@@ -62,19 +62,19 @@ TEST(WeakReferencesBasic) {
fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*code));
HeapObject* code_heap_object;
- CHECK(
- fv->optimized_code_weak_or_smi()->ToWeakHeapObject(&code_heap_object));
+ CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(
+ &code_heap_object));
CHECK_EQ(*code, code_heap_object);
CcTest::CollectAllGarbage();
- CHECK(
- fv->optimized_code_weak_or_smi()->ToWeakHeapObject(&code_heap_object));
+ CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(
+ &code_heap_object));
CHECK_EQ(*code, code_heap_object);
} // code will go out of scope.
CcTest::CollectAllGarbage();
- CHECK(fv->optimized_code_weak_or_smi()->IsClearedWeakHeapObject());
+ CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
}
TEST(WeakReferencesOldToOld) {
@@ -103,7 +103,7 @@ TEST(WeakReferencesOldToOld) {
CHECK(heap->InOldSpace(*fixed_array));
HeapObject* heap_object;
- CHECK(fv->optimized_code_weak_or_smi()->ToWeakHeapObject(&heap_object));
+ CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -128,7 +128,7 @@ TEST(WeakReferencesOldToNew) {
CcTest::CollectAllGarbage();
HeapObject* heap_object;
- CHECK(fv->optimized_code_weak_or_smi()->ToWeakHeapObject(&heap_object));
+ CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -153,7 +153,7 @@ TEST(WeakReferencesOldToNewScavenged) {
CcTest::CollectGarbage(NEW_SPACE);
HeapObject* heap_object;
- CHECK(fv->optimized_code_weak_or_smi()->ToWeakHeapObject(&heap_object));
+ CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -174,7 +174,7 @@ TEST(WeakReferencesOldToCleared) {
fv->set_optimized_code_weak_or_smi(HeapObjectReference::ClearedValue());
CcTest::CollectAllGarbage();
- CHECK(fv->optimized_code_weak_or_smi()->IsClearedWeakHeapObject());
+ CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
}
TEST(ObjectMovesBeforeClearingWeakField) {
@@ -210,11 +210,11 @@ TEST(ObjectMovesBeforeClearingWeakField) {
CcTest::CollectGarbage(NEW_SPACE);
FeedbackVector* new_fv_location = *fv;
CHECK_NE(fv_location, new_fv_location);
- CHECK(fv->optimized_code_weak_or_smi()->IsWeakHeapObject());
+ CHECK(fv->optimized_code_weak_or_smi()->IsWeak());
// Now we try to clear *fv.
CcTest::CollectAllGarbage();
- CHECK(fv->optimized_code_weak_or_smi()->IsClearedWeakHeapObject());
+ CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
}
TEST(ObjectWithWeakFieldDies) {
@@ -277,7 +277,7 @@ TEST(ObjectWithWeakReferencePromoted) {
CHECK(heap->InOldSpace(*fixed_array));
HeapObject* heap_object;
- CHECK(fv->optimized_code_weak_or_smi()->ToWeakHeapObject(&heap_object));
+ CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -296,14 +296,14 @@ TEST(ObjectWithClearedWeakReferencePromoted) {
CcTest::CollectGarbage(NEW_SPACE);
CHECK(Heap::InNewSpace(*fv));
- CHECK(fv->optimized_code_weak_or_smi()->IsClearedWeakHeapObject());
+ CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
CcTest::CollectGarbage(NEW_SPACE);
CHECK(heap->InOldSpace(*fv));
- CHECK(fv->optimized_code_weak_or_smi()->IsClearedWeakHeapObject());
+ CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
CcTest::CollectAllGarbage();
- CHECK(fv->optimized_code_weak_or_smi()->IsClearedWeakHeapObject());
+ CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
}
TEST(WeakReferenceWriteBarrier) {
@@ -343,7 +343,7 @@ TEST(WeakReferenceWriteBarrier) {
CcTest::CollectAllGarbage();
// Check that the write barrier treated the weak reference as strong.
- CHECK(fv->optimized_code_weak_or_smi()->IsWeakHeapObject());
+ CHECK(fv->optimized_code_weak_or_smi()->IsWeak());
}
TEST(EmptyWeakArray) {
@@ -375,7 +375,7 @@ TEST(WeakArraysBasic) {
for (int i = 0; i < length; ++i) {
HeapObject* heap_object;
- CHECK(array->Get(i)->ToStrongHeapObject(&heap_object));
+ CHECK(array->Get(i)->GetHeapObjectIfStrong(&heap_object));
CHECK_EQ(heap_object, ReadOnlyRoots(heap).undefined_value());
}
@@ -407,23 +407,23 @@ TEST(WeakArraysBasic) {
// space.
CcTest::CollectGarbage(NEW_SPACE);
HeapObject* heap_object;
- CHECK(array->Get(0)->ToWeakHeapObject(&heap_object));
+ CHECK(array->Get(0)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2016);
- CHECK(array->Get(1)->ToWeakHeapObject(&heap_object));
+ CHECK(array->Get(1)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2017);
- CHECK(array->Get(2)->ToStrongHeapObject(&heap_object));
+ CHECK(array->Get(2)->GetHeapObjectIfStrong(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2018);
- CHECK(array->Get(3)->ToWeakHeapObject(&heap_object));
+ CHECK(array->Get(3)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2019);
CcTest::CollectAllGarbage();
CHECK(heap->InOldSpace(*array));
- CHECK(array->Get(0)->IsClearedWeakHeapObject());
- CHECK(array->Get(1)->ToWeakHeapObject(&heap_object));
+ CHECK(array->Get(0)->IsCleared());
+ CHECK(array->Get(1)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2017);
- CHECK(array->Get(2)->ToStrongHeapObject(&heap_object));
+ CHECK(array->Get(2)->GetHeapObjectIfStrong(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2018);
- CHECK(array->Get(3)->IsClearedWeakHeapObject());
+ CHECK(array->Get(3)->IsCleared());
}
TEST(WeakArrayListBasic) {
@@ -481,13 +481,13 @@ TEST(WeakArrayListBasic) {
CHECK(Heap::InNewSpace(*array));
CHECK_EQ(array->Get(0), HeapObjectReference::Weak(*index0));
- CHECK_EQ(Smi::ToInt(array->Get(1)->ToSmi()), 1);
+ CHECK_EQ(Smi::ToInt(array->Get(1)->cast<Smi>()), 1);
CHECK_EQ(array->Get(2), HeapObjectReference::Weak(*index2));
- CHECK_EQ(Smi::ToInt(array->Get(3)->ToSmi()), 3);
+ CHECK_EQ(Smi::ToInt(array->Get(3)->cast<Smi>()), 3);
CHECK_EQ(array->Get(4), HeapObjectReference::Weak(*index4));
- CHECK_EQ(Smi::ToInt(array->Get(5)->ToSmi()), 5);
+ CHECK_EQ(Smi::ToInt(array->Get(5)->cast<Smi>()), 5);
CHECK_EQ(array->Get(6), HeapObjectReference::Weak(*index6));
array = inner_scope.CloseAndEscape(array);
@@ -502,37 +502,37 @@ TEST(WeakArrayListBasic) {
CcTest::CollectGarbage(NEW_SPACE);
HeapObject* heap_object;
CHECK_EQ(array->length(), 8);
- CHECK(array->Get(0)->ToWeakHeapObject(&heap_object));
+ CHECK(array->Get(0)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2016);
- CHECK_EQ(Smi::ToInt(array->Get(1)->ToSmi()), 1);
+ CHECK_EQ(Smi::ToInt(array->Get(1)->cast<Smi>()), 1);
- CHECK(array->Get(2)->ToWeakHeapObject(&heap_object));
+ CHECK(array->Get(2)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2017);
- CHECK_EQ(Smi::ToInt(array->Get(3)->ToSmi()), 3);
+ CHECK_EQ(Smi::ToInt(array->Get(3)->cast<Smi>()), 3);
- CHECK(array->Get(4)->ToWeakHeapObject(&heap_object));
+ CHECK(array->Get(4)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2018);
- CHECK_EQ(Smi::ToInt(array->Get(5)->ToSmi()), 5);
+ CHECK_EQ(Smi::ToInt(array->Get(5)->cast<Smi>()), 5);
- CHECK(array->Get(6)->ToWeakHeapObject(&heap_object));
+ CHECK(array->Get(6)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2019);
- CHECK_EQ(Smi::ToInt(array->Get(7)->ToSmi()), 7);
+ CHECK_EQ(Smi::ToInt(array->Get(7)->cast<Smi>()), 7);
CcTest::CollectAllGarbage();
CHECK(heap->InOldSpace(*array));
CHECK_EQ(array->length(), 8);
- CHECK(array->Get(0)->IsClearedWeakHeapObject());
- CHECK_EQ(Smi::ToInt(array->Get(1)->ToSmi()), 1);
+ CHECK(array->Get(0)->IsCleared());
+ CHECK_EQ(Smi::ToInt(array->Get(1)->cast<Smi>()), 1);
- CHECK(array->Get(2)->ToWeakHeapObject(&heap_object));
+ CHECK(array->Get(2)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2017);
- CHECK_EQ(Smi::ToInt(array->Get(3)->ToSmi()), 3);
+ CHECK_EQ(Smi::ToInt(array->Get(3)->cast<Smi>()), 3);
- CHECK(array->Get(4)->IsClearedWeakHeapObject());
- CHECK_EQ(Smi::ToInt(array->Get(5)->ToSmi()), 5);
+ CHECK(array->Get(4)->IsCleared());
+ CHECK_EQ(Smi::ToInt(array->Get(5)->cast<Smi>()), 5);
- CHECK(array->Get(6)->IsClearedWeakHeapObject());
- CHECK_EQ(Smi::ToInt(array->Get(7)->ToSmi()), 7);
+ CHECK(array->Get(6)->IsCleared());
+ CHECK_EQ(Smi::ToInt(array->Get(7)->cast<Smi>()), 7);
}

TEST(WeakArrayListRemove) {
@@ -753,7 +753,7 @@ TEST(PrototypeUsersCompacted) {
PrototypeUsers::MarkSlotEmpty(*array, 1);
CcTest::CollectAllGarbage();
- CHECK(array->Get(3)->IsClearedWeakHeapObject());
+ CHECK(array->Get(3)->IsCleared());
CHECK_EQ(array->length(), 3 + PrototypeUsers::kFirstIndex);
WeakArrayList* new_array =
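
Note (not part of the patch): the test hunks above track a MaybeObject accessor rename — ToWeakHeapObject becomes GetHeapObjectIfWeak, ToStrongHeapObject becomes GetHeapObjectIfStrong, IsClearedWeakHeapObject becomes IsCleared, and ToSmi becomes cast<Smi>. A minimal sketch of the new API shape follows; it uses only the calls that appear in the hunks above, while the helper name, header paths, and namespace are assumptions for illustration.

    // Hypothetical helper, assuming V8-internal headers such as
    // src/objects-inl.h and the v8::internal namespace; only the
    // MaybeObject calls mirror the tests in this diff.
    void DescribeSlot(WeakArrayList* array, int index) {
      HeapObject* heap_object;
      if (array->Get(index)->GetHeapObjectIfStrong(&heap_object)) {
        // Strong reference: the target is kept alive by this slot.
      } else if (array->Get(index)->GetHeapObjectIfWeak(&heap_object)) {
        // Weak reference whose target is still alive after GC.
      } else if (array->Get(index)->IsCleared()) {
        // Weak reference whose target has been collected.
      } else {
        // Remaining case exercised by these tests: the slot holds a Smi.
        int value = Smi::ToInt(array->Get(index)->cast<Smi>());
        (void)value;
      }
    }
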
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index ae8d050914..31272f1c29 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -35,17 +35,17 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(1),
- B(LdaZero),
B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
B(Ldar), R(0),
- /* 54 E> */ B(StaKeyedProperty), R(1), R(2), U8(1),
+ /* 54 E> */ B(StaInArrayLiteral), R(2), R(1), U8(1),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star), R(1),
B(Ldar), R(0),
/* 59 E> */ B(AddSmi), I8(1), U8(3),
- B(StaKeyedProperty), R(1), R(2), U8(1),
- B(Ldar), R(1),
+ B(StaInArrayLiteral), R(2), R(1), U8(1),
+ B(Ldar), R(2),
/* 65 S> */ B(Return),
]
constant pool: [
@@ -84,29 +84,29 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(4),
- B(Star), R(1),
- B(LdaZero),
B(Star), R(2),
- B(CreateArrayLiteral), U8(1), U8(3), U8(37),
- B(Star), R(3),
B(LdaZero),
+ B(Star), R(1),
+ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
B(Ldar), R(0),
- /* 56 E> */ B(StaKeyedProperty), R(3), R(4), U8(4),
- B(Ldar), R(3),
- B(StaKeyedProperty), R(1), R(2), U8(1),
+ /* 56 E> */ B(StaInArrayLiteral), R(4), R(3), U8(2),
+ B(Ldar), R(4),
+ B(StaInArrayLiteral), R(2), R(1), U8(4),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star), R(1),
B(CreateArrayLiteral), U8(2), U8(6), U8(37),
- B(Star), R(3),
- B(LdaZero),
B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
B(Ldar), R(0),
- /* 68 E> */ B(AddSmi), I8(2), U8(9),
- B(StaKeyedProperty), R(3), R(4), U8(7),
- B(Ldar), R(3),
- B(StaKeyedProperty), R(1), R(2), U8(1),
- B(Ldar), R(1),
+ /* 68 E> */ B(AddSmi), I8(2), U8(7),
+ B(StaInArrayLiteral), R(4), R(3), U8(8),
+ B(Ldar), R(4),
+ B(StaInArrayLiteral), R(2), R(1), U8(4),
+ B(Ldar), R(2),
/* 76 S> */ B(Return),
]
constant pool: [
@@ -121,50 +121,18 @@ handlers: [
snippet: "
var a = [ 1, 2 ]; return [ ...a ];
"
-frame size: 8
+frame size: 1
parameter count: 1
-bytecode array length: 86
+bytecode array length: 9
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(0),
- /* 52 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(1),
- B(LdaConstant), U8(2),
- /* 64 S> */ B(Star), R(2),
- B(LdaNamedProperty), R(0), U8(3), U8(7),
- B(Star), R(7),
- B(CallProperty0), R(7), R(0), U8(9),
- B(Mov), R(0), R(6),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
- B(LdaNamedProperty), R(5), U8(4), U8(11),
- B(Star), R(4),
- B(CallProperty0), R(4), R(5), U8(13),
- B(Star), R(3),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(5), U8(15),
- B(JumpIfToBooleanTrue), U8(21),
- B(LdaNamedProperty), R(3), U8(6), U8(17),
- B(Star), R(3),
- B(StaInArrayLiteral), R(1), R(2), U8(2),
- B(Ldar), R(2),
- B(Inc), U8(4),
- B(Star), R(2),
- B(JumpLoop), U8(35), I8(0),
- B(Ldar), R(1),
+ /* 52 S> */ B(CreateArrayFromIterable),
/* 68 S> */ B(Return),
]
constant pool: [
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
- ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
- Smi [0],
- SYMBOL_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
]
handlers: [
]
@@ -181,32 +149,32 @@ bytecodes: [
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(0),
/* 52 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(1),
+ B(Star), R(2),
B(LdaConstant), U8(2),
- /* 67 S> */ B(Star), R(2),
- B(LdaNamedProperty), R(0), U8(3), U8(7),
+ /* 67 S> */ B(Star), R(1),
+ B(LdaNamedProperty), R(0), U8(3), U8(5),
B(Star), R(7),
- B(CallProperty0), R(7), R(0), U8(9),
+ B(CallProperty0), R(7), R(0), U8(7),
B(Mov), R(0), R(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(5),
- B(LdaNamedProperty), R(5), U8(4), U8(11),
+ B(LdaNamedProperty), R(5), U8(4), U8(9),
B(Star), R(4),
- B(CallProperty0), R(4), R(5), U8(13),
+ B(CallProperty0), R(4), R(5), U8(11),
B(Star), R(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(5), U8(15),
+ B(LdaNamedProperty), R(3), U8(5), U8(13),
B(JumpIfToBooleanTrue), U8(21),
- B(LdaNamedProperty), R(3), U8(6), U8(17),
+ B(LdaNamedProperty), R(3), U8(6), U8(15),
B(Star), R(3),
- B(StaInArrayLiteral), R(1), R(2), U8(2),
- B(Ldar), R(2),
- B(Inc), U8(4),
- B(Star), R(2),
- B(JumpLoop), U8(35), I8(0),
+ B(StaInArrayLiteral), R(2), R(1), U8(3),
B(Ldar), R(1),
+ B(Inc), U8(2),
+ B(Star), R(1),
+ B(JumpLoop), U8(35), I8(0),
+ B(Ldar), R(2),
/* 71 S> */ B(Return),
]
constant pool: [
@@ -225,55 +193,25 @@ handlers: [
snippet: "
var a = [ 1, 2 ]; return [ ...a, 3 ];
"
-frame size: 8
+frame size: 3
parameter count: 1
-bytecode array length: 98
+bytecode array length: 25
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(0),
- /* 52 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(1),
- B(LdaConstant), U8(2),
- /* 64 S> */ B(Star), R(2),
- B(LdaNamedProperty), R(0), U8(3), U8(7),
- B(Star), R(7),
- B(CallProperty0), R(7), R(0), U8(9),
- B(Mov), R(0), R(6),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
- B(LdaNamedProperty), R(5), U8(4), U8(11),
- B(Star), R(4),
- B(CallProperty0), R(4), R(5), U8(13),
- B(Star), R(3),
- B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(5), U8(15),
- B(JumpIfToBooleanTrue), U8(21),
- B(LdaNamedProperty), R(3), U8(6), U8(17),
- B(Star), R(3),
- B(StaInArrayLiteral), R(1), R(2), U8(2),
- B(Ldar), R(2),
- B(Inc), U8(4),
+ /* 52 S> */ B(CreateArrayFromIterable),
B(Star), R(2),
- B(JumpLoop), U8(35), I8(0),
+ B(LdaNamedProperty), R(2), U8(1), U8(1),
+ B(Star), R(1),
B(LdaSmi), I8(3),
- B(StaInArrayLiteral), R(1), R(2), U8(2),
+ B(StaInArrayLiteral), R(2), R(1), U8(3),
B(Ldar), R(2),
- B(Inc), U8(4),
- B(Star), R(2),
- B(Ldar), R(1),
/* 71 S> */ B(Return),
]
constant pool: [
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
- ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
- Smi [0],
- SYMBOL_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["length"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index f5cbed6a7a..f3ddec23a0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -252,19 +252,19 @@ frame size: 22
parameter count: 1
bytecode array length: 490
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(3),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(3),
B(Mov), R(closure), R(11),
B(Mov), R(this), R(12),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(11), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 17 E> */ B(StackCheck),
B(Mov), R(context), R(13),
B(Mov), R(context), R(14),
- B(Ldar), R(2),
- /* 17 E> */ B(SuspendGenerator), R(2), R(0), U8(15), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(15),
+ B(Ldar), R(0),
+ /* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(15), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(15),
B(Star), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
B(Ldar), R(15),
/* 17 E> */ B(Throw),
@@ -300,16 +300,16 @@ bytecodes: [
B(Star), R(7),
B(Mov), R(8), R(3),
/* 22 E> */ B(StackCheck),
- B(Mov), R(3), R(0),
+ B(Mov), R(3), R(1),
/* 42 S> */ B(LdaFalse),
B(Star), R(21),
- B(Mov), R(2), R(19),
- B(Mov), R(0), R(20),
+ B(Mov), R(0), R(19),
+ B(Mov), R(1), R(20),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(19), U8(3),
- /* 42 E> */ B(SuspendGenerator), R(2), R(0), U8(19), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(19),
+ /* 42 E> */ B(SuspendGenerator), R(0), R(0), U8(19), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(19),
B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
B(Ldar), R(19),
/* 42 E> */ B(Throw),
@@ -362,7 +362,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(18),
B(LdaConstant), U8(14),
B(Star), R(19),
@@ -398,12 +398,12 @@ bytecodes: [
B(ReThrow),
B(LdaUndefined),
B(Star), R(16),
- B(Mov), R(2), R(15),
+ B(Mov), R(0), R(15),
B(CallJSRuntime), U8(%async_generator_await_uncaught), R(15), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(15), U8(2),
- B(ResumeGenerator), R(2), R(0), U8(15),
+ B(SuspendGenerator), R(0), R(0), U8(15), U8(2),
+ B(ResumeGenerator), R(0), R(0), U8(15),
B(Star), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(16),
B(LdaZero),
B(TestReferenceEqual), R(16),
@@ -424,7 +424,7 @@ bytecodes: [
B(PushContext), R(15),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(17),
- B(Mov), R(2), R(16),
+ B(Mov), R(0), R(16),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(16), U8(2),
B(PopContext), R(15),
B(Star), R(12),
@@ -441,7 +441,7 @@ bytecodes: [
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(0), U8(1),
B(Ldar), R(13),
B(SetPendingMessage),
B(Ldar), R(11),
@@ -449,7 +449,7 @@ bytecodes: [
B(Jump), U8(22),
B(LdaTrue),
B(Star), R(16),
- B(Mov), R(2), R(14),
+ B(Mov), R(0), R(14),
B(Mov), R(12), R(15),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(14), U8(3),
/* 50 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
index 779e12c4ec..4c27e3a8d4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
@@ -67,7 +67,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 112
+bytecode array length: 109
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
@@ -75,38 +75,36 @@ bytecodes: [
B(LdaNamedProperty), R(0), U8(1), U8(2),
B(Star), R(1),
B(CreateArrayLiteral), U8(2), U8(4), U8(37),
- B(Star), R(3),
- B(LdaConstant), U8(3),
B(Star), R(4),
- /* 49 S> */ B(CreateArrayLiteral), U8(4), U8(10), U8(37),
+ B(LdaConstant), U8(3),
+ B(Star), R(3),
+ /* 49 S> */ B(CreateArrayLiteral), U8(4), U8(8), U8(37),
B(Star), R(8),
- B(LdaNamedProperty), R(8), U8(5), U8(11),
+ B(LdaNamedProperty), R(8), U8(5), U8(9),
B(Star), R(9),
- B(CallProperty0), R(9), R(8), U8(13),
+ B(CallProperty0), R(9), R(8), U8(11),
B(Mov), R(0), R(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(7),
- B(LdaNamedProperty), R(7), U8(6), U8(15),
+ B(LdaNamedProperty), R(7), U8(6), U8(13),
B(Star), R(6),
- B(CallProperty0), R(6), R(7), U8(17),
+ B(CallProperty0), R(6), R(7), U8(15),
B(Star), R(5),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(7), U8(19),
+ B(LdaNamedProperty), R(5), U8(7), U8(17),
B(JumpIfToBooleanTrue), U8(21),
- B(LdaNamedProperty), R(5), U8(8), U8(21),
+ B(LdaNamedProperty), R(5), U8(8), U8(19),
B(Star), R(5),
- B(StaInArrayLiteral), R(3), R(4), U8(5),
- B(Ldar), R(4),
- B(Inc), U8(7),
- B(Star), R(4),
+ B(StaInArrayLiteral), R(4), R(3), U8(6),
+ B(Ldar), R(3),
+ B(Inc), U8(5),
+ B(Star), R(3),
B(JumpLoop), U8(35), I8(0),
B(LdaSmi), I8(4),
- B(StaInArrayLiteral), R(3), R(4), U8(5),
- B(Ldar), R(4),
- B(Inc), U8(7),
- B(Star), R(4),
+ B(StaInArrayLiteral), R(4), R(3), U8(6),
+ B(Mov), R(4), R(3),
B(CallJSRuntime), U8(%reflect_apply), R(1), U8(3),
B(LdaUndefined),
/* 64 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index 737d423fcb..c56e29436e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -103,12 +103,12 @@ bytecodes: [
B(Star), R(3),
B(LdaConstant), U8(3),
B(Star), R(4),
- B(LdaImmutableCurrentContextSlot), U8(4),
- /* 75 E> */ B(ToName), R(7),
+ /* 75 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(ToName), R(7),
B(CreateClosure), U8(5), U8(1), U8(2),
B(Star), R(8),
- B(LdaImmutableCurrentContextSlot), U8(5),
- /* 106 E> */ B(ToName), R(9),
+ /* 106 S> */ B(LdaImmutableCurrentContextSlot), U8(5),
+ B(ToName), R(9),
B(LdaConstant), U8(6),
B(TestEqualStrict), R(9), U8(2),
B(Mov), R(3), R(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index c5fae1f4f6..098130c480 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -16,20 +16,20 @@ snippet: "
"
frame size: 23
parameter count: 1
-bytecode array length: 514
+bytecode array length: 518
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(3),
+ B(SwitchOnGeneratorState), R(3), U8(0), U8(3),
B(Mov), R(closure), R(12),
B(Mov), R(this), R(13),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
- B(Star), R(2),
+ B(Star), R(3),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(11),
+ B(Star), R(0),
B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
B(LdaZero),
- B(Star), R(7),
+ B(Star), R(8),
B(Mov), R(context), R(18),
B(Mov), R(context), R(19),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
@@ -46,40 +46,40 @@ bytecodes: [
B(CallProperty0), R(21), R(20), U8(7),
B(Star), R(21),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
- B(Star), R(4),
- /* 43 E> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
B(Star), R(5),
- /* 40 S> */ B(CallProperty0), R(5), R(4), U8(11),
+ /* 43 E> */ B(LdaNamedProperty), R(5), U8(6), U8(9),
+ B(Star), R(6),
+ /* 40 S> */ B(CallProperty0), R(6), R(5), U8(11),
B(Star), R(21),
- B(Mov), R(2), R(20),
- B(Mov), R(11), R(22),
+ B(Mov), R(3), R(20),
+ B(Mov), R(0), R(22),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- /* 40 E> */ B(SuspendGenerator), R(2), R(0), U8(20), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(20),
+ /* 40 E> */ B(SuspendGenerator), R(3), R(0), U8(20), U8(0),
+ B(ResumeGenerator), R(3), R(0), U8(20),
B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(21),
B(LdaZero),
B(TestReferenceEqual), R(21),
B(JumpIfTrue), U8(5),
B(Ldar), R(20),
B(ReThrow),
- B(Mov), R(20), R(6),
+ B(Mov), R(20), R(7),
/* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(7), U8(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+ B(LdaNamedProperty), R(7), U8(7), U8(13),
B(JumpIfToBooleanTrue), U8(25),
- B(LdaNamedProperty), R(6), U8(8), U8(15),
- B(Star), R(8),
+ B(LdaNamedProperty), R(7), U8(8), U8(15),
+ B(Star), R(9),
B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Mov), R(8), R(3),
+ B(Star), R(8),
+ B(Mov), R(9), R(4),
/* 23 E> */ B(StackCheck),
- B(Mov), R(3), R(0),
+ B(Mov), R(4), R(1),
B(LdaZero),
- B(Star), R(7),
+ B(Star), R(8),
B(JumpLoop), U8(82), I8(0),
B(Jump), U8(37),
B(Star), R(20),
@@ -90,10 +90,10 @@ bytecodes: [
B(Ldar), R(19),
B(PushContext), R(20),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(7), U8(17),
+ B(TestEqualStrict), R(8), U8(17),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(7),
+ B(Star), R(8),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
@@ -109,38 +109,38 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(18),
B(LdaZero),
- B(TestEqualStrict), R(7), U8(18),
+ B(TestEqualStrict), R(8), U8(18),
B(JumpIfTrue), U8(167),
- B(LdaNamedProperty), R(4), U8(10), U8(19),
- B(Star), R(9),
+ B(LdaNamedProperty), R(5), U8(10), U8(19),
+ B(Star), R(10),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(156),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(21),
+ B(TestEqualStrict), R(8), U8(21),
B(JumpIfFalse), U8(86),
- B(Ldar), R(9),
+ B(Ldar), R(10),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(19),
B(LdaConstant), U8(11),
B(Star), R(20),
B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
B(Throw),
B(Mov), R(context), R(19),
- B(Mov), R(9), R(20),
- B(Mov), R(4), R(21),
+ B(Mov), R(10), R(20),
+ B(Mov), R(5), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Star), R(21),
- B(Mov), R(2), R(20),
- B(Mov), R(11), R(22),
+ B(Mov), R(3), R(20),
+ B(Mov), R(0), R(22),
B(CallJSRuntime), U8(%async_function_await_caught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(20),
+ B(SuspendGenerator), R(3), R(0), U8(20), U8(1),
+ B(ResumeGenerator), R(3), R(0), U8(20),
B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(21),
B(LdaZero),
B(TestReferenceEqual), R(21),
@@ -153,28 +153,28 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(19),
B(Jump), U8(65),
- B(Mov), R(9), R(19),
- B(Mov), R(4), R(20),
+ B(Mov), R(10), R(19),
+ B(Mov), R(5), R(20),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
B(Star), R(20),
- B(Mov), R(2), R(19),
- B(Mov), R(11), R(21),
+ B(Mov), R(3), R(19),
+ B(Mov), R(0), R(21),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(19), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(19), U8(2),
- B(ResumeGenerator), R(2), R(0), U8(19),
+ B(SuspendGenerator), R(3), R(0), U8(19), U8(2),
+ B(ResumeGenerator), R(3), R(0), U8(19),
B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(20),
B(LdaZero),
B(TestReferenceEqual), R(20),
B(JumpIfTrue), U8(5),
B(Ldar), R(19),
B(ReThrow),
- B(Mov), R(19), R(10),
+ B(Mov), R(19), R(11),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(Ldar), R(18),
B(SetPendingMessage),
B(LdaZero),
@@ -183,14 +183,11 @@ bytecodes: [
B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(17),
- B(Mov), R(11), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(16), U8(2),
+ B(Star), R(13),
B(LdaZero),
B(Star), R(12),
- B(Mov), R(11), R(13),
- B(Jump), U8(55),
- B(Jump), U8(39),
+ B(Jump), U8(56),
+ B(Jump), U8(40),
B(Star), R(16),
B(CreateCatchContext), R(16), U8(12),
B(Star), R(15),
@@ -202,32 +199,37 @@ bytecodes: [
B(Star), R(18),
B(LdaFalse),
B(Star), R(19),
- B(Mov), R(11), R(17),
+ B(Mov), R(0), R(17),
B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
B(PopContext), R(16),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(12),
- B(Mov), R(11), R(13),
+ B(Mov), R(0), R(13),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
B(Star), R(13),
B(Star), R(12),
B(Jump), U8(8),
B(Star), R(13),
- B(LdaSmi), I8(1),
+ B(LdaSmi), I8(2),
B(Star), R(12),
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(14),
B(LdaTrue),
B(Star), R(16),
- B(Mov), R(11), R(15),
+ B(Mov), R(0), R(15),
B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
B(Ldar), R(14),
B(SetPendingMessage),
B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(0),
- B(Jump), U8(8),
+ B(SwitchOnSmiNoFeedback), U8(13), U8(3), I8(0),
+ B(Jump), U8(21),
+ B(Mov), R(0), R(15),
+ B(Mov), R(13), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(15), U8(2),
+ B(Ldar), R(0),
+ /* 57 S> */ B(Return),
B(Ldar), R(13),
/* 57 S> */ B(Return),
B(Ldar), R(13),
@@ -250,11 +252,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
Smi [6],
- Smi [9],
+ Smi [19],
+ Smi [22],
]
handlers: [
- [26, 467, 475],
- [29, 428, 430],
+ [26, 458, 466],
+ [29, 418, 420],
[35, 211, 219],
[38, 174, 176],
[279, 328, 330],
@@ -269,20 +272,20 @@ snippet: "
"
frame size: 23
parameter count: 1
-bytecode array length: 543
+bytecode array length: 532
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(3),
+ B(SwitchOnGeneratorState), R(3), U8(0), U8(3),
B(Mov), R(closure), R(12),
B(Mov), R(this), R(13),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
- B(Star), R(2),
+ B(Star), R(3),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(11),
+ B(Star), R(0),
B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
B(LdaZero),
- B(Star), R(7),
+ B(Star), R(8),
B(Mov), R(context), R(18),
B(Mov), R(context), R(19),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
@@ -299,41 +302,41 @@ bytecodes: [
B(CallProperty0), R(21), R(20), U8(7),
B(Star), R(21),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
- B(Star), R(4),
- /* 43 E> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
B(Star), R(5),
- /* 40 S> */ B(CallProperty0), R(5), R(4), U8(11),
+ /* 43 E> */ B(LdaNamedProperty), R(5), U8(6), U8(9),
+ B(Star), R(6),
+ /* 40 S> */ B(CallProperty0), R(6), R(5), U8(11),
B(Star), R(21),
- B(Mov), R(2), R(20),
- B(Mov), R(11), R(22),
+ B(Mov), R(3), R(20),
+ B(Mov), R(0), R(22),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- /* 40 E> */ B(SuspendGenerator), R(2), R(0), U8(20), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(20),
+ /* 40 E> */ B(SuspendGenerator), R(3), R(0), U8(20), U8(0),
+ B(ResumeGenerator), R(3), R(0), U8(20),
B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(21),
B(LdaZero),
B(TestReferenceEqual), R(21),
B(JumpIfTrue), U8(5),
B(Ldar), R(20),
B(ReThrow),
- B(Mov), R(20), R(6),
+ B(Mov), R(20), R(7),
/* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(7), U8(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+ B(LdaNamedProperty), R(7), U8(7), U8(13),
B(JumpIfToBooleanTrue), U8(27),
- B(LdaNamedProperty), R(6), U8(8), U8(15),
- B(Star), R(8),
+ B(LdaNamedProperty), R(7), U8(8), U8(15),
+ B(Star), R(9),
B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Mov), R(8), R(3),
+ B(Star), R(8),
+ B(Mov), R(9), R(4),
/* 23 E> */ B(StackCheck),
- B(Mov), R(3), R(0),
+ B(Mov), R(4), R(1),
/* 56 S> */ B(LdaZero),
B(Star), R(16),
- B(Mov), R(8), R(17),
+ B(Mov), R(9), R(17),
B(Jump), U8(53),
B(Jump), U8(37),
B(Star), R(20),
@@ -344,10 +347,10 @@ bytecodes: [
B(Ldar), R(19),
B(PushContext), R(20),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(7), U8(17),
+ B(TestEqualStrict), R(8), U8(17),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(7),
+ B(Star), R(8),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
@@ -363,38 +366,38 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(18),
B(LdaZero),
- B(TestEqualStrict), R(7), U8(18),
+ B(TestEqualStrict), R(8), U8(18),
B(JumpIfTrue), U8(167),
- B(LdaNamedProperty), R(4), U8(10), U8(19),
- B(Star), R(9),
+ B(LdaNamedProperty), R(5), U8(10), U8(19),
+ B(Star), R(10),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(156),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(21),
+ B(TestEqualStrict), R(8), U8(21),
B(JumpIfFalse), U8(86),
- B(Ldar), R(9),
+ B(Ldar), R(10),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(19),
B(LdaConstant), U8(11),
B(Star), R(20),
B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
B(Throw),
B(Mov), R(context), R(19),
- B(Mov), R(9), R(20),
- B(Mov), R(4), R(21),
+ B(Mov), R(10), R(20),
+ B(Mov), R(5), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Star), R(21),
- B(Mov), R(2), R(20),
- B(Mov), R(11), R(22),
+ B(Mov), R(3), R(20),
+ B(Mov), R(0), R(22),
B(CallJSRuntime), U8(%async_function_await_caught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(20),
+ B(SuspendGenerator), R(3), R(0), U8(20), U8(1),
+ B(ResumeGenerator), R(3), R(0), U8(20),
B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(21),
B(LdaZero),
B(TestReferenceEqual), R(21),
@@ -407,28 +410,28 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(19),
B(Jump), U8(65),
- B(Mov), R(9), R(19),
- B(Mov), R(4), R(20),
+ B(Mov), R(10), R(19),
+ B(Mov), R(5), R(20),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
B(Star), R(20),
- B(Mov), R(2), R(19),
- B(Mov), R(11), R(21),
+ B(Mov), R(3), R(19),
+ B(Mov), R(0), R(21),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(19), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(19), U8(2),
- B(ResumeGenerator), R(2), R(0), U8(19),
+ B(SuspendGenerator), R(3), R(0), U8(19), U8(2),
+ B(ResumeGenerator), R(3), R(0), U8(19),
B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(20),
B(LdaZero),
B(TestReferenceEqual), R(20),
B(JumpIfTrue), U8(5),
B(Ldar), R(19),
B(ReThrow),
- B(Mov), R(19), R(10),
+ B(Mov), R(19), R(11),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(Ldar), R(18),
B(SetPendingMessage),
B(Ldar), R(16),
@@ -437,16 +440,13 @@ bytecodes: [
B(LdaZero),
B(Star), R(12),
B(Mov), R(17), R(13),
- B(Jump), U8(78),
+ B(Jump), U8(67),
B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(17),
- B(Mov), R(11), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(16), U8(2),
- B(LdaSmi), I8(1),
+ B(Star), R(13),
+ B(LdaZero),
B(Star), R(12),
- B(Mov), R(11), R(13),
B(Jump), U8(56),
B(Jump), U8(40),
B(Star), R(16),
@@ -460,12 +460,12 @@ bytecodes: [
B(Star), R(18),
B(LdaFalse),
B(Star), R(19),
- B(Mov), R(11), R(17),
+ B(Mov), R(0), R(17),
B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
B(PopContext), R(16),
B(LdaSmi), I8(1),
B(Star), R(12),
- B(Mov), R(11), R(13),
+ B(Mov), R(0), R(13),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
B(Star), R(13),
@@ -479,17 +479,17 @@ bytecodes: [
B(Star), R(14),
B(LdaTrue),
B(Star), R(16),
- B(Mov), R(11), R(15),
+ B(Mov), R(0), R(15),
B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
B(Ldar), R(14),
B(SetPendingMessage),
B(Ldar), R(12),
B(SwitchOnSmiNoFeedback), U8(15), U8(3), I8(0),
B(Jump), U8(21),
- B(Mov), R(11), R(15),
+ B(Mov), R(0), R(15),
B(Mov), R(13), R(16),
B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(15), U8(2),
- B(Ldar), R(11),
+ B(Ldar), R(0),
/* 68 S> */ B(Return),
B(Ldar), R(13),
/* 68 S> */ B(Return),
@@ -519,8 +519,8 @@ constant pool: [
Smi [22],
]
handlers: [
- [26, 483, 491],
- [29, 443, 445],
+ [26, 472, 480],
+ [29, 432, 434],
[35, 213, 221],
[38, 176, 178],
[282, 331, 333],
@@ -538,20 +538,20 @@ snippet: "
"
frame size: 23
parameter count: 1
-bytecode array length: 532
+bytecode array length: 536
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(3),
+ B(SwitchOnGeneratorState), R(3), U8(0), U8(3),
B(Mov), R(closure), R(12),
B(Mov), R(this), R(13),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
- B(Star), R(2),
+ B(Star), R(3),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(11),
+ B(Star), R(0),
B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
B(LdaZero),
- B(Star), R(7),
+ B(Star), R(8),
B(Mov), R(context), R(18),
B(Mov), R(context), R(19),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
@@ -568,48 +568,48 @@ bytecodes: [
B(CallProperty0), R(21), R(20), U8(7),
B(Star), R(21),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
- B(Star), R(4),
- /* 43 E> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
B(Star), R(5),
- /* 40 S> */ B(CallProperty0), R(5), R(4), U8(11),
+ /* 43 E> */ B(LdaNamedProperty), R(5), U8(6), U8(9),
+ B(Star), R(6),
+ /* 40 S> */ B(CallProperty0), R(6), R(5), U8(11),
B(Star), R(21),
- B(Mov), R(2), R(20),
- B(Mov), R(11), R(22),
+ B(Mov), R(3), R(20),
+ B(Mov), R(0), R(22),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- /* 40 E> */ B(SuspendGenerator), R(2), R(0), U8(20), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(20),
+ /* 40 E> */ B(SuspendGenerator), R(3), R(0), U8(20), U8(0),
+ B(ResumeGenerator), R(3), R(0), U8(20),
B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(21),
B(LdaZero),
B(TestReferenceEqual), R(21),
B(JumpIfTrue), U8(5),
B(Ldar), R(20),
B(ReThrow),
- B(Mov), R(20), R(6),
+ B(Mov), R(20), R(7),
/* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(7), U8(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+ B(LdaNamedProperty), R(7), U8(7), U8(13),
B(JumpIfToBooleanTrue), U8(43),
- B(LdaNamedProperty), R(6), U8(8), U8(15),
- B(Star), R(8),
+ B(LdaNamedProperty), R(7), U8(8), U8(15),
+ B(Star), R(9),
B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Mov), R(8), R(3),
+ B(Star), R(8),
+ B(Mov), R(9), R(4),
/* 23 E> */ B(StackCheck),
- B(Mov), R(3), R(0),
+ B(Mov), R(4), R(1),
/* 63 S> */ B(LdaSmi), I8(10),
- /* 69 E> */ B(TestEqual), R(0), U8(17),
+ /* 69 E> */ B(TestEqual), R(1), U8(17),
B(JumpIfFalse), U8(4),
/* 76 S> */ B(Jump), U8(14),
/* 90 S> */ B(LdaSmi), I8(20),
- /* 96 E> */ B(TestEqual), R(0), U8(18),
+ /* 96 E> */ B(TestEqual), R(1), U8(18),
B(JumpIfFalse), U8(4),
/* 103 S> */ B(Jump), U8(8),
B(LdaZero),
- B(Star), R(7),
+ B(Star), R(8),
B(JumpLoop), U8(100), I8(0),
B(Jump), U8(37),
B(Star), R(20),
@@ -620,10 +620,10 @@ bytecodes: [
B(Ldar), R(19),
B(PushContext), R(20),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(7), U8(19),
+ B(TestEqualStrict), R(8), U8(19),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(7),
+ B(Star), R(8),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
@@ -639,38 +639,38 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(18),
B(LdaZero),
- B(TestEqualStrict), R(7), U8(20),
+ B(TestEqualStrict), R(8), U8(20),
B(JumpIfTrue), U8(167),
- B(LdaNamedProperty), R(4), U8(10), U8(21),
- B(Star), R(9),
+ B(LdaNamedProperty), R(5), U8(10), U8(21),
+ B(Star), R(10),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(156),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(23),
+ B(TestEqualStrict), R(8), U8(23),
B(JumpIfFalse), U8(86),
- B(Ldar), R(9),
+ B(Ldar), R(10),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(19),
B(LdaConstant), U8(11),
B(Star), R(20),
B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
B(Throw),
B(Mov), R(context), R(19),
- B(Mov), R(9), R(20),
- B(Mov), R(4), R(21),
+ B(Mov), R(10), R(20),
+ B(Mov), R(5), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Star), R(21),
- B(Mov), R(2), R(20),
- B(Mov), R(11), R(22),
+ B(Mov), R(3), R(20),
+ B(Mov), R(0), R(22),
B(CallJSRuntime), U8(%async_function_await_caught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(20),
+ B(SuspendGenerator), R(3), R(0), U8(20), U8(1),
+ B(ResumeGenerator), R(3), R(0), U8(20),
B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(21),
B(LdaZero),
B(TestReferenceEqual), R(21),
@@ -683,28 +683,28 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(19),
B(Jump), U8(65),
- B(Mov), R(9), R(19),
- B(Mov), R(4), R(20),
+ B(Mov), R(10), R(19),
+ B(Mov), R(5), R(20),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
B(Star), R(20),
- B(Mov), R(2), R(19),
- B(Mov), R(11), R(21),
+ B(Mov), R(3), R(19),
+ B(Mov), R(0), R(21),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(19), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(19), U8(2),
- B(ResumeGenerator), R(2), R(0), U8(19),
+ B(SuspendGenerator), R(3), R(0), U8(19), U8(2),
+ B(ResumeGenerator), R(3), R(0), U8(19),
B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(20),
B(LdaZero),
B(TestReferenceEqual), R(20),
B(JumpIfTrue), U8(5),
B(Ldar), R(19),
B(ReThrow),
- B(Mov), R(19), R(10),
+ B(Mov), R(19), R(11),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(Ldar), R(18),
B(SetPendingMessage),
B(LdaZero),
@@ -713,14 +713,11 @@ bytecodes: [
B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(17),
- B(Mov), R(11), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(16), U8(2),
+ B(Star), R(13),
B(LdaZero),
B(Star), R(12),
- B(Mov), R(11), R(13),
- B(Jump), U8(55),
- B(Jump), U8(39),
+ B(Jump), U8(56),
+ B(Jump), U8(40),
B(Star), R(16),
B(CreateCatchContext), R(16), U8(12),
B(Star), R(15),
@@ -732,32 +729,37 @@ bytecodes: [
B(Star), R(18),
B(LdaFalse),
B(Star), R(19),
- B(Mov), R(11), R(17),
+ B(Mov), R(0), R(17),
B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
B(PopContext), R(16),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(12),
- B(Mov), R(11), R(13),
+ B(Mov), R(0), R(13),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
B(Star), R(13),
B(Star), R(12),
B(Jump), U8(8),
B(Star), R(13),
- B(LdaSmi), I8(1),
+ B(LdaSmi), I8(2),
B(Star), R(12),
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(14),
B(LdaTrue),
B(Star), R(16),
- B(Mov), R(11), R(15),
+ B(Mov), R(0), R(15),
B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
B(Ldar), R(14),
B(SetPendingMessage),
B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(0),
- B(Jump), U8(8),
+ B(SwitchOnSmiNoFeedback), U8(13), U8(3), I8(0),
+ B(Jump), U8(21),
+ B(Mov), R(0), R(15),
+ B(Mov), R(13), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(15), U8(2),
+ B(Ldar), R(0),
+ /* 114 S> */ B(Return),
B(Ldar), R(13),
/* 114 S> */ B(Return),
B(Ldar), R(13),
@@ -780,11 +782,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
Smi [6],
- Smi [9],
+ Smi [19],
+ Smi [22],
]
handlers: [
- [26, 485, 493],
- [29, 446, 448],
+ [26, 476, 484],
+ [29, 436, 438],
[35, 229, 237],
[38, 192, 194],
[297, 346, 348],
@@ -800,17 +803,17 @@ snippet: "
"
frame size: 20
parameter count: 1
-bytecode array length: 403
+bytecode array length: 392
bytecodes: [
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(9),
+ B(Star), R(0),
B(Mov), R(context), R(12),
B(Mov), R(context), R(13),
/* 31 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(14),
- B(Mov), R(14), R(1),
+ B(Mov), R(14), R(2),
B(LdaZero),
- B(Star), R(5),
+ B(Star), R(6),
B(Mov), R(context), R(16),
B(Mov), R(context), R(17),
/* 68 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
@@ -820,25 +823,25 @@ bytecodes: [
B(CallProperty0), R(19), R(18), U8(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(2),
- /* 68 E> */ B(LdaNamedProperty), R(2), U8(3), U8(6),
B(Star), R(3),
- /* 59 S> */ B(CallProperty0), R(3), R(2), U8(8),
+ /* 68 E> */ B(LdaNamedProperty), R(3), U8(3), U8(6),
B(Star), R(4),
- /* 59 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
+ /* 59 S> */ B(CallProperty0), R(4), R(3), U8(8),
+ B(Star), R(5),
+ /* 59 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
- B(LdaNamedProperty), R(4), U8(4), U8(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
+ B(LdaNamedProperty), R(5), U8(4), U8(10),
B(JumpIfToBooleanTrue), U8(30),
- /* 58 E> */ B(LdaNamedProperty), R(4), U8(5), U8(12),
- B(Star), R(6),
+ /* 58 E> */ B(LdaNamedProperty), R(5), U8(5), U8(12),
+ B(Star), R(7),
B(LdaSmi), I8(2),
- B(Star), R(5),
- B(Ldar), R(6),
- B(StaNamedProperty), R(1), U8(6), U8(14),
+ B(Star), R(6),
+ B(Ldar), R(7),
+ B(StaNamedProperty), R(2), U8(6), U8(14),
/* 53 E> */ B(StackCheck),
- /* 87 S> */ B(LdaNamedProperty), R(1), U8(6), U8(16),
+ /* 87 S> */ B(LdaNamedProperty), R(2), U8(6), U8(16),
B(Star), R(15),
B(LdaZero),
B(Star), R(14),
@@ -852,10 +855,10 @@ bytecodes: [
B(Ldar), R(17),
B(PushContext), R(18),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(5), U8(18),
+ B(TestEqualStrict), R(6), U8(18),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(5),
+ B(Star), R(6),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(19),
B(CallRuntime), U16(Runtime::kReThrow), R(19), U8(1),
@@ -871,43 +874,43 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(16),
B(LdaZero),
- B(TestEqualStrict), R(5), U8(19),
+ B(TestEqualStrict), R(6), U8(19),
B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(2), U8(8), U8(20),
- B(Star), R(7),
+ B(LdaNamedProperty), R(3), U8(8), U8(20),
+ B(Star), R(8),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(5), U8(22),
+ B(TestEqualStrict), R(6), U8(22),
B(JumpIfFalse), U8(47),
- B(Ldar), R(7),
+ B(Ldar), R(8),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(17),
B(LdaConstant), U8(9),
B(Star), R(18),
B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
B(Throw),
B(Mov), R(context), R(17),
- B(Mov), R(7), R(18),
- B(Mov), R(2), R(19),
+ B(Mov), R(8), R(18),
+ B(Mov), R(3), R(19),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(18), U8(2),
B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(17),
B(Jump), U8(27),
- B(Mov), R(7), R(17),
- B(Mov), R(2), R(18),
+ B(Mov), R(8), R(17),
+ B(Mov), R(3), R(18),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
- B(Star), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
+ B(Star), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
B(Ldar), R(16),
B(SetPendingMessage),
B(Ldar), R(14),
@@ -916,16 +919,13 @@ bytecodes: [
B(LdaZero),
B(Star), R(10),
B(Mov), R(15), R(11),
- B(Jump), U8(78),
+ B(Jump), U8(67),
B(Ldar), R(15),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(15),
- B(Mov), R(9), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(14), U8(2),
- B(LdaSmi), I8(1),
+ B(Star), R(11),
+ B(LdaZero),
B(Star), R(10),
- B(Mov), R(9), R(11),
B(Jump), U8(56),
B(Jump), U8(40),
B(Star), R(14),
@@ -939,12 +939,12 @@ bytecodes: [
B(Star), R(16),
B(LdaFalse),
B(Star), R(17),
- B(Mov), R(9), R(15),
+ B(Mov), R(0), R(15),
B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(15), U8(3),
B(PopContext), R(14),
B(LdaSmi), I8(1),
B(Star), R(10),
- B(Mov), R(9), R(11),
+ B(Mov), R(0), R(11),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
B(Star), R(11),
@@ -958,17 +958,17 @@ bytecodes: [
B(Star), R(12),
B(LdaFalse),
B(Star), R(14),
- B(Mov), R(9), R(13),
+ B(Mov), R(0), R(13),
B(CallJSRuntime), U8(%async_function_promise_release), R(13), U8(2),
B(Ldar), R(12),
B(SetPendingMessage),
B(Ldar), R(10),
B(SwitchOnSmiNoFeedback), U8(13), U8(3), I8(0),
B(Jump), U8(21),
- B(Mov), R(9), R(13),
+ B(Mov), R(0), R(13),
B(Mov), R(11), R(14),
B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(13), U8(2),
- B(Ldar), R(9),
+ B(Ldar), R(0),
/* 96 S> */ B(Return),
B(Ldar), R(11),
/* 96 S> */ B(Return),
@@ -996,8 +996,8 @@ constant pool: [
Smi [22],
]
handlers: [
- [10, 343, 351],
- [13, 303, 305],
+ [10, 332, 340],
+ [13, 292, 294],
[27, 150, 158],
[30, 113, 115],
[219, 229, 231],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index bcb462bc75..b43429e008 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -85,7 +85,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(12),
B(LdaConstant), U8(7),
B(Star), R(13),
@@ -217,7 +217,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(13),
B(LdaConstant), U8(7),
B(Star), R(14),
@@ -361,7 +361,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(12),
B(LdaConstant), U8(7),
B(Star), R(13),
@@ -495,7 +495,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(11),
B(LdaConstant), U8(9),
B(Star), R(12),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index d4fe1a091c..9755e0af17 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -89,7 +89,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(14),
B(LdaConstant), U8(6),
B(Star), R(15),
@@ -256,7 +256,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(14),
B(LdaConstant), U8(11),
B(Star), R(15),
@@ -401,7 +401,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(12),
B(LdaConstant), U8(8),
B(Star), R(13),
@@ -550,7 +550,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(17),
B(LdaConstant), U8(8),
B(Star), R(18),
@@ -697,7 +697,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(15),
B(LdaConstant), U8(9),
B(Star), R(16),
@@ -859,7 +859,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(14),
B(LdaConstant), U8(12),
B(Star), R(15),
@@ -926,15 +926,15 @@ snippet: "
"
frame size: 23
parameter count: 2
-bytecode array length: 363
+bytecode array length: 367
bytecodes: [
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(12),
+ B(Star), R(0),
B(Mov), R(context), R(15),
B(Mov), R(context), R(16),
B(LdaZero),
- B(Star), R(8),
+ B(Star), R(9),
B(Mov), R(context), R(19),
B(Mov), R(context), R(20),
/* 40 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
@@ -943,27 +943,27 @@ bytecodes: [
B(Mov), R(arg0), R(21),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
- /* 40 E> */ B(LdaNamedProperty), R(5), U8(1), U8(4),
B(Star), R(6),
- /* 35 S> */ B(CallProperty0), R(6), R(5), U8(6),
+ /* 40 E> */ B(LdaNamedProperty), R(6), U8(1), U8(4),
B(Star), R(7),
- /* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
+ /* 35 S> */ B(CallProperty0), R(7), R(6), U8(6),
+ B(Star), R(8),
+ /* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(LdaNamedProperty), R(7), U8(2), U8(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+ B(LdaNamedProperty), R(8), U8(2), U8(8),
B(JumpIfToBooleanTrue), U8(28),
- B(LdaNamedProperty), R(7), U8(3), U8(10),
- B(Star), R(9),
+ B(LdaNamedProperty), R(8), U8(3), U8(10),
+ B(Star), R(10),
B(LdaSmi), I8(2),
- B(Star), R(8),
- B(Mov), R(9), R(4),
+ B(Star), R(9),
+ B(Mov), R(10), R(5),
/* 26 E> */ B(StackCheck),
- B(Mov), R(4), R(1),
- /* 55 S> */ B(Mov), R(1), R(0),
+ B(Mov), R(5), R(2),
+ /* 55 S> */ B(Mov), R(2), R(1),
B(LdaZero),
- B(Star), R(8),
+ B(Star), R(9),
B(JumpLoop), U8(47), I8(0),
B(Jump), U8(37),
B(Star), R(21),
@@ -974,10 +974,10 @@ bytecodes: [
B(Ldar), R(20),
B(PushContext), R(21),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(8), U8(12),
+ B(TestEqualStrict), R(9), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(8),
+ B(Star), R(9),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(22),
B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
@@ -993,43 +993,43 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(19),
B(LdaZero),
- B(TestEqualStrict), R(8), U8(13),
+ B(TestEqualStrict), R(9), U8(13),
B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(5), U8(5), U8(14),
- B(Star), R(10),
+ B(LdaNamedProperty), R(6), U8(5), U8(14),
+ B(Star), R(11),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(8), U8(16),
+ B(TestEqualStrict), R(9), U8(16),
B(JumpIfFalse), U8(47),
- B(Ldar), R(10),
+ B(Ldar), R(11),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(20),
B(LdaConstant), U8(6),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
B(Throw),
B(Mov), R(context), R(20),
- B(Mov), R(10), R(21),
- B(Mov), R(5), R(22),
+ B(Mov), R(11), R(21),
+ B(Mov), R(6), R(22),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(20),
B(Jump), U8(27),
- B(Mov), R(10), R(20),
- B(Mov), R(5), R(21),
+ B(Mov), R(11), R(20),
+ B(Mov), R(6), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
B(Ldar), R(19),
B(SetPendingMessage),
B(LdaZero),
@@ -1038,14 +1038,11 @@ bytecodes: [
B(Ldar), R(18),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(18),
- B(Mov), R(12), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(17), U8(2),
+ B(Star), R(14),
B(LdaZero),
B(Star), R(13),
- B(Mov), R(12), R(14),
- B(Jump), U8(55),
- B(Jump), U8(39),
+ B(Jump), U8(56),
+ B(Jump), U8(40),
B(Star), R(17),
B(CreateCatchContext), R(17), U8(7),
B(Star), R(16),
@@ -1057,32 +1054,37 @@ bytecodes: [
B(Star), R(19),
B(LdaFalse),
B(Star), R(20),
- B(Mov), R(12), R(18),
+ B(Mov), R(0), R(18),
B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(18), U8(3),
B(PopContext), R(17),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(13),
- B(Mov), R(12), R(14),
+ B(Mov), R(0), R(14),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
B(Star), R(14),
B(Star), R(13),
B(Jump), U8(8),
B(Star), R(14),
- B(LdaSmi), I8(1),
+ B(LdaSmi), I8(2),
B(Star), R(13),
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(15),
B(LdaFalse),
B(Star), R(17),
- B(Mov), R(12), R(16),
+ B(Mov), R(0), R(16),
B(CallJSRuntime), U8(%async_function_promise_release), R(16), U8(2),
B(Ldar), R(15),
B(SetPendingMessage),
B(Ldar), R(13),
- B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
- B(Jump), U8(8),
+ B(SwitchOnSmiNoFeedback), U8(8), U8(3), I8(0),
+ B(Jump), U8(21),
+ B(Mov), R(0), R(16),
+ B(Mov), R(14), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(16), U8(2),
+ B(Ldar), R(0),
+ /* 60 S> */ B(Return),
B(Ldar), R(14),
/* 60 S> */ B(Return),
B(Ldar), R(14),
@@ -1100,11 +1102,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
Smi [6],
- Smi [9],
+ Smi [19],
+ Smi [22],
]
handlers: [
- [10, 316, 324],
- [13, 277, 279],
+ [10, 307, 315],
+ [13, 267, 269],
[19, 137, 145],
[22, 100, 102],
[205, 215, 217],
@@ -1119,20 +1122,20 @@ snippet: "
"
frame size: 23
parameter count: 2
-bytecode array length: 414
+bytecode array length: 418
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(1),
+ B(SwitchOnGeneratorState), R(3), U8(0), U8(1),
B(Mov), R(closure), R(12),
B(Mov), R(this), R(13),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
- B(Star), R(2),
+ B(Star), R(3),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(11),
+ B(Star), R(0),
B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
B(LdaZero),
- B(Star), R(7),
+ B(Star), R(8),
B(Mov), R(context), R(18),
B(Mov), R(context), R(19),
/* 40 S> */ B(LdaNamedProperty), R(arg0), U8(1), U8(0),
@@ -1141,32 +1144,32 @@ bytecodes: [
B(Mov), R(arg0), R(20),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(4),
- /* 40 E> */ B(LdaNamedProperty), R(4), U8(2), U8(4),
B(Star), R(5),
- /* 35 S> */ B(CallProperty0), R(5), R(4), U8(6),
+ /* 40 E> */ B(LdaNamedProperty), R(5), U8(2), U8(4),
B(Star), R(6),
- /* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ /* 35 S> */ B(CallProperty0), R(6), R(5), U8(6),
+ B(Star), R(7),
+ /* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(3), U8(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+ B(LdaNamedProperty), R(7), U8(3), U8(8),
B(JumpIfToBooleanTrue), U8(63),
- B(LdaNamedProperty), R(6), U8(4), U8(10),
- B(Star), R(8),
+ B(LdaNamedProperty), R(7), U8(4), U8(10),
+ B(Star), R(9),
B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Mov), R(8), R(3),
+ B(Star), R(8),
+ B(Mov), R(9), R(4),
/* 26 E> */ B(StackCheck),
- B(Mov), R(3), R(0),
- /* 45 S> */ B(Mov), R(2), R(20),
- B(Mov), R(0), R(21),
- B(Mov), R(11), R(22),
+ B(Mov), R(4), R(1),
+ /* 45 S> */ B(Mov), R(3), R(20),
+ B(Mov), R(1), R(21),
+ B(Mov), R(0), R(22),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- /* 45 E> */ B(SuspendGenerator), R(2), R(0), U8(20), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(20),
+ /* 45 E> */ B(SuspendGenerator), R(3), R(0), U8(20), U8(0),
+ B(ResumeGenerator), R(3), R(0), U8(20),
B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(21),
B(LdaZero),
B(TestReferenceEqual), R(21),
@@ -1174,7 +1177,7 @@ bytecodes: [
B(Ldar), R(20),
B(ReThrow),
B(LdaZero),
- B(Star), R(7),
+ B(Star), R(8),
B(JumpLoop), U8(82), I8(0),
B(Jump), U8(37),
B(Star), R(20),
@@ -1185,10 +1188,10 @@ bytecodes: [
B(Ldar), R(19),
B(PushContext), R(20),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(7), U8(12),
+ B(TestEqualStrict), R(8), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(7),
+ B(Star), R(8),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(21),
B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
@@ -1204,43 +1207,43 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(18),
B(LdaZero),
- B(TestEqualStrict), R(7), U8(13),
+ B(TestEqualStrict), R(8), U8(13),
B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(4), U8(6), U8(14),
- B(Star), R(9),
+ B(LdaNamedProperty), R(5), U8(6), U8(14),
+ B(Star), R(10),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(79),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(16),
+ B(TestEqualStrict), R(8), U8(16),
B(JumpIfFalse), U8(47),
- B(Ldar), R(9),
+ B(Ldar), R(10),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(19),
B(LdaConstant), U8(7),
B(Star), R(20),
B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
B(Throw),
B(Mov), R(context), R(19),
- B(Mov), R(9), R(20),
- B(Mov), R(4), R(21),
+ B(Mov), R(10), R(20),
+ B(Mov), R(5), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(19),
B(Jump), U8(27),
- B(Mov), R(9), R(19),
- B(Mov), R(4), R(20),
+ B(Mov), R(10), R(19),
+ B(Mov), R(5), R(20),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(Ldar), R(18),
B(SetPendingMessage),
B(LdaZero),
@@ -1249,14 +1252,11 @@ bytecodes: [
B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(17),
- B(Mov), R(11), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(16), U8(2),
+ B(Star), R(13),
B(LdaZero),
B(Star), R(12),
- B(Mov), R(11), R(13),
- B(Jump), U8(55),
- B(Jump), U8(39),
+ B(Jump), U8(56),
+ B(Jump), U8(40),
B(Star), R(16),
B(CreateCatchContext), R(16), U8(8),
B(Star), R(15),
@@ -1268,32 +1268,37 @@ bytecodes: [
B(Star), R(18),
B(LdaFalse),
B(Star), R(19),
- B(Mov), R(11), R(17),
+ B(Mov), R(0), R(17),
B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
B(PopContext), R(16),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(12),
- B(Mov), R(11), R(13),
+ B(Mov), R(0), R(13),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
B(Star), R(13),
B(Star), R(12),
B(Jump), U8(8),
B(Star), R(13),
- B(LdaSmi), I8(1),
+ B(LdaSmi), I8(2),
B(Star), R(12),
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(14),
B(LdaTrue),
B(Star), R(16),
- B(Mov), R(11), R(15),
+ B(Mov), R(0), R(15),
B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
B(Ldar), R(14),
B(SetPendingMessage),
B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
- B(Jump), U8(8),
+ B(SwitchOnSmiNoFeedback), U8(9), U8(3), I8(0),
+ B(Jump), U8(21),
+ B(Mov), R(0), R(15),
+ B(Mov), R(13), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(15), U8(2),
+ B(Ldar), R(0),
+ /* 54 S> */ B(Return),
B(Ldar), R(13),
/* 54 S> */ B(Return),
B(Ldar), R(13),
@@ -1312,11 +1317,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
Smi [6],
- Smi [9],
+ Smi [19],
+ Smi [22],
]
handlers: [
- [26, 367, 375],
- [29, 328, 330],
+ [26, 358, 366],
+ [29, 318, 320],
[35, 188, 196],
[38, 151, 153],
[256, 266, 268],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index 641a2b2eb0..df054bd5b2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -203,7 +203,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(153),
+ B(Wide), B(LdaSmi), I16(154),
B(Star), R(14),
B(LdaConstant), U8(13),
B(Star), R(15),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
index f2653a6ed1..a9f03b2c28 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
@@ -19,9 +19,9 @@ snippet: "
})();
"
-frame size: 6
+frame size: 3
parameter count: 1
-bytecode array length: 82
+bytecode array length: 40
bytecodes: [
B(CreateMappedArguments),
B(Star), R(0),
@@ -31,32 +31,14 @@ bytecodes: [
/* 45 S> */ B(LdaGlobal), U8(0), U8(2),
B(Star), R(1),
B(LdaSmi), I8(2),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(LdaZero),
- B(Star), R(5),
- B(Mov), R(1), R(2),
- /* 50 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
+ /* 50 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(1), U8(0),
/* 63 S> */ B(LdaGlobal), U8(0), U8(2),
B(Star), R(1),
/* 70 E> */ B(LdaGlobal), U8(0), U8(2),
B(Star), R(2),
- B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(2), R(3),
- /* 72 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(3), U8(2),
- B(Star), R(4),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(LdaZero),
- B(Star), R(5),
- B(Mov), R(1), R(2),
- /* 68 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
- /* 84 S> */ B(LdaConstant), U8(3),
- B(Star), R(3),
- B(Mov), R(0), R(2),
- /* 101 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 72 E> */ B(LdaNamedPropertyNoFeedback), R(2), U8(1),
+ /* 68 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
+ /* 101 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(3),
/* 108 S> */ B(Return),
]
constant pool: [
@@ -81,9 +63,9 @@ snippet: "
})();
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 77
+bytecode array length: 69
bytecodes: [
B(CreateMappedArguments),
B(Star), R(0),
@@ -112,10 +94,7 @@ bytecodes: [
B(Inc), U8(15),
/* 66 E> */ B(StaGlobal), U8(1), U8(2),
B(JumpLoop), U8(50), I8(0),
- /* 132 S> */ B(LdaConstant), U8(4),
- B(Star), R(3),
- B(Mov), R(0), R(2),
- /* 149 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 149 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
/* 156 S> */ B(Return),
]
constant pool: [
@@ -143,9 +122,9 @@ snippet: "
})();
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 78
+bytecode array length: 70
bytecodes: [
B(CreateMappedArguments),
B(Star), R(0),
@@ -174,10 +153,7 @@ bytecodes: [
B(Dec), U8(15),
/* 129 E> */ B(StaGlobal), U8(1), U8(2),
B(JumpLoop), U8(50), I8(0),
- /* 151 S> */ B(LdaConstant), U8(4),
- B(Star), R(3),
- B(Mov), R(0), R(2),
- /* 168 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 168 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
/* 175 S> */ B(Return),
]
constant pool: [
@@ -205,9 +181,9 @@ snippet: "
})();
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 78
+bytecode array length: 70
bytecodes: [
B(CreateMappedArguments),
B(Star), R(0),
@@ -236,10 +212,7 @@ bytecodes: [
/* 141 E> */ B(TestGreaterThan), R(1), U8(15),
B(JumpIfFalse), U8(5),
B(JumpLoop), U8(50), I8(0),
- /* 154 S> */ B(LdaConstant), U8(4),
- B(Star), R(3),
- B(Mov), R(0), R(2),
- /* 171 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 171 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
/* 178 S> */ B(Return),
]
constant pool: [
@@ -269,9 +242,9 @@ snippet: "
})();
"
-frame size: 6
+frame size: 4
parameter count: 1
-bytecode array length: 121
+bytecode array length: 71
bytecodes: [
B(CreateMappedArguments),
B(Star), R(0),
@@ -284,44 +257,23 @@ bytecodes: [
/* 31 E> */ B(StaGlobal), U8(1), U8(0),
/* 95 S> */ B(LdaGlobal), U8(1), U8(2),
B(Star), R(1),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(Mov), R(1), R(2),
- /* 101 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 101 E> */ B(LdaNamedPropertyNoFeedback), R(1), U8(2),
B(Star), R(1),
B(LdaSmi), I8(3),
/* 104 E> */ B(TestLessThan), R(1), U8(4),
- B(JumpIfFalse), U8(28),
+ B(JumpIfFalse), U8(15),
/* 121 S> */ B(LdaGlobal), U8(1), U8(2),
B(Star), R(1),
B(LdaSmi), I8(3),
- B(Star), R(4),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(LdaZero),
- B(Star), R(5),
- B(Mov), R(1), R(2),
- /* 126 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
- B(Jump), U8(40),
+ /* 126 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
+ B(Jump), U8(19),
/* 158 S> */ B(LdaGlobal), U8(1), U8(2),
B(Star), R(1),
/* 165 E> */ B(LdaGlobal), U8(1), U8(2),
B(Star), R(2),
- B(LdaConstant), U8(3),
- B(Star), R(4),
- B(Mov), R(2), R(3),
- /* 167 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(3), U8(2),
- B(Star), R(4),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(LdaZero),
- B(Star), R(5),
- B(Mov), R(1), R(2),
- /* 163 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
- /* 189 S> */ B(LdaConstant), U8(4),
- B(Star), R(3),
- B(Mov), R(0), R(2),
- /* 206 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 167 E> */ B(LdaNamedPropertyNoFeedback), R(2), U8(3),
+ /* 163 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
+ /* 206 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
/* 213 S> */ B(Return),
]
constant pool: [
@@ -343,23 +295,20 @@ snippet: "
})();
"
-frame size: 4
+frame size: 5
parameter count: 1
-bytecode array length: 32
+bytecode array length: 24
bytecodes: [
B(CreateMappedArguments),
B(Star), R(0),
/* 16 E> */ B(StackCheck),
/* 29 S> */ B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star), R(3),
B(LdaSmi), I8(4),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(1), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(3), U8(2),
/* 31 E> */ B(StaGlobal), U8(1), U8(0),
- /* 60 S> */ B(LdaConstant), U8(2),
- B(Star), R(3),
- B(Mov), R(0), R(2),
- /* 77 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 77 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(2),
/* 84 S> */ B(Return),
]
constant pool: [
@@ -379,23 +328,20 @@ snippet: "
})();
"
-frame size: 4
+frame size: 5
parameter count: 1
-bytecode array length: 32
+bytecode array length: 24
bytecodes: [
B(CreateMappedArguments),
B(Star), R(0),
/* 16 E> */ B(StackCheck),
/* 29 S> */ B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star), R(3),
B(LdaSmi), I8(37),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(1), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(3), U8(2),
/* 31 E> */ B(StaGlobal), U8(1), U8(0),
- /* 45 S> */ B(LdaConstant), U8(2),
- B(Star), R(3),
- B(Mov), R(0), R(2),
- /* 62 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 62 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(2),
/* 69 S> */ B(Return),
]
constant pool: [
@@ -406,3 +352,300 @@ constant pool: [
handlers: [
]
+---
+snippet: "
+
+ this.f0 = function() {};
+ this.f1 = function(a) {};
+ this.f2 = function(a, b) {};
+ this.f3 = function(a, b, c) {};
+ this.f4 = function(a, b, c, d) {};
+ this.f5 = function(a, b, c, d, e) {};
+ (function() {
+ this.f0();
+ this.f1(1);
+ this.f2(1, 2);
+ this.f3(1, 2, 3);
+ this.f4(1, 2, 3, 4);
+ this.f5(1, 2, 3, 4, 5);
+ return arguments.callee;
+ })();
+
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 137
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 237 E> */ B(StackCheck),
+ /* 255 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(0),
+ B(Star), R(1),
+ /* 255 E> */ B(CallNoFeedback), R(1), R(this), U8(1),
+ /* 274 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(Mov), R(this), R(2),
+ /* 274 E> */ B(CallNoFeedback), R(1), R(2), U8(2),
+ /* 294 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(Mov), R(this), R(2),
+ /* 294 E> */ B(CallNoFeedback), R(1), R(2), U8(3),
+ /* 317 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(3),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(LdaSmi), I8(3),
+ B(Star), R(5),
+ B(Mov), R(this), R(2),
+ /* 317 E> */ B(CallNoFeedback), R(1), R(2), U8(4),
+ /* 343 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(LdaSmi), I8(3),
+ B(Star), R(5),
+ B(LdaSmi), I8(4),
+ B(Star), R(6),
+ B(Mov), R(this), R(2),
+ /* 343 E> */ B(CallNoFeedback), R(1), R(2), U8(5),
+ /* 372 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(5),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(LdaSmi), I8(3),
+ B(Star), R(5),
+ B(LdaSmi), I8(4),
+ B(Star), R(6),
+ B(LdaSmi), I8(5),
+ B(Star), R(7),
+ B(Mov), R(this), R(2),
+ /* 372 E> */ B(CallNoFeedback), R(1), R(2), U8(6),
+ /* 416 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(6),
+ /* 423 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f0"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f1"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f2"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f3"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f4"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f5"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ function f0() {}
+ function f1(a) {}
+ function f2(a, b) {}
+ function f3(a, b, c) {}
+ function f4(a, b, c, d) {}
+ function f5(a, b, c, d, e) {}
+ (function() {
+ f0();
+ f1(1);
+ f2(1, 2);
+ f3(1, 2, 3);
+ f4(1, 2, 3, 4);
+ f5(1, 2, 3, 4, 5);
+ return arguments.callee;
+ })();
+
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 140
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 189 E> */ B(StackCheck),
+ /* 202 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(0), U8(0),
+ B(Star), R(1),
+ /* 202 E> */ B(CallNoFeedback), R(1), R(2), U8(1),
+ /* 216 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(1), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ /* 216 E> */ B(CallNoFeedback), R(1), R(2), U8(2),
+ /* 231 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(2), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ /* 231 E> */ B(CallNoFeedback), R(1), R(2), U8(3),
+ /* 249 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(3), U8(6),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(LdaSmi), I8(3),
+ B(Star), R(5),
+ /* 249 E> */ B(CallNoFeedback), R(1), R(2), U8(4),
+ /* 270 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(4), U8(8),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(LdaSmi), I8(3),
+ B(Star), R(5),
+ B(LdaSmi), I8(4),
+ B(Star), R(6),
+ /* 270 E> */ B(CallNoFeedback), R(1), R(2), U8(5),
+ /* 294 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(5), U8(10),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(LdaSmi), I8(3),
+ B(Star), R(5),
+ B(LdaSmi), I8(4),
+ B(Star), R(6),
+ B(LdaSmi), I8(5),
+ B(Star), R(7),
+ /* 294 E> */ B(CallNoFeedback), R(1), R(2), U8(6),
+ /* 338 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(6),
+ /* 345 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f0"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f1"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f2"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f3"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f4"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f5"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ var t = 0;
+ function f2() {};
+ if (t == 0) {
+ (function(){
+ l = {};
+ l.a = 3;
+ l.b = 4;
+ f2();
+ return arguments.callee;
+ })();
+ }
+
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 46
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 79 E> */ B(StackCheck),
+ /* 93 S> */ B(CreateEmptyObjectLiteral),
+ /* 95 E> */ B(StaGlobal), U8(0), U8(0),
+ /* 111 S> */ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ /* 115 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(1), U8(0),
+ /* 130 S> */ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(4),
+ /* 134 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
+ /* 149 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(3), U8(4),
+ B(Star), R(1),
+ /* 149 E> */ B(CallNoFeedback), R(1), R(2), U8(1),
+ /* 182 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
+ /* 189 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f2"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ function f2() {};
+ function f() {
+ return (function(){
+ l = {};
+ l.a = 3;
+ l.b = 4;
+ f2();
+ return arguments.callee;
+ })();
+ }
+ f();
+
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 43
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 76 E> */ B(StackCheck),
+ /* 92 S> */ B(CreateEmptyObjectLiteral),
+ /* 94 E> */ B(StaGlobal), U8(0), U8(0),
+ /* 112 S> */ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ /* 116 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
+ /* 133 S> */ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(4),
+ /* 137 E> */ B(StaNamedProperty), R(1), U8(2), U8(6),
+ /* 154 S> */ B(LdaGlobal), U8(3), U8(8),
+ B(Star), R(1),
+ /* 154 E> */ B(CallUndefinedReceiver0), R(1), U8(10),
+ /* 189 S> */ B(LdaNamedProperty), R(0), U8(4), U8(12),
+ /* 196 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f2"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
index f116bdc68f..efe9078eea 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
@@ -107,3 +107,186 @@ constant pool: [
handlers: [
]
+---
+snippet: "
+
+ this.f0 = function() {};
+ this.f1 = function(a) {};
+ this.f2 = function(a, b) {};
+ this.f3 = function(a, b, c) {};
+ this.f4 = function(a, b, c, d) {};
+ this.f5 = function(a, b, c, d, e) {};
+ (function() {
+ this.f0();
+ this.f1(1);
+ this.f2(1, 2);
+ this.f3(1, 2, 3);
+ this.f4(1, 2, 3, 4);
+ this.f5(1, 2, 3, 4, 5);
+ return arguments.callee;
+ })();
+
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 144
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 237 E> */ B(StackCheck),
+ /* 255 S> */ B(LdaNamedProperty), R(this), U8(0), U8(0),
+ B(Star), R(1),
+ /* 255 E> */ B(CallProperty0), R(1), R(this), U8(2),
+ /* 274 S> */ B(LdaNamedProperty), R(this), U8(1), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ /* 274 E> */ B(CallProperty1), R(1), R(this), R(3), U8(6),
+ /* 294 S> */ B(LdaNamedProperty), R(this), U8(2), U8(8),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ /* 294 E> */ B(CallProperty2), R(1), R(this), R(3), R(4), U8(10),
+ /* 317 S> */ B(LdaNamedProperty), R(this), U8(3), U8(12),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(LdaSmi), I8(3),
+ B(Star), R(5),
+ B(Mov), R(this), R(2),
+ /* 317 E> */ B(CallProperty), R(1), R(2), U8(4), U8(14),
+ /* 343 S> */ B(LdaNamedProperty), R(this), U8(4), U8(16),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(LdaSmi), I8(3),
+ B(Star), R(5),
+ B(LdaSmi), I8(4),
+ B(Star), R(6),
+ B(Mov), R(this), R(2),
+ /* 343 E> */ B(CallProperty), R(1), R(2), U8(5), U8(18),
+ /* 372 S> */ B(LdaNamedProperty), R(this), U8(5), U8(20),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(LdaSmi), I8(3),
+ B(Star), R(5),
+ B(LdaSmi), I8(4),
+ B(Star), R(6),
+ B(LdaSmi), I8(5),
+ B(Star), R(7),
+ B(Mov), R(this), R(2),
+ /* 372 E> */ B(CallProperty), R(1), R(2), U8(6), U8(22),
+ /* 416 S> */ B(LdaNamedProperty), R(0), U8(6), U8(24),
+ /* 423 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f0"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f1"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f2"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f3"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f4"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f5"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ function f0() {}
+ function f1(a) {}
+ function f2(a, b) {}
+ function f3(a, b, c) {}
+ function f4(a, b, c, d) {}
+ function f5(a, b, c, d, e) {}
+ (function() {
+ f0();
+ f1(1);
+ f2(1, 2);
+ f3(1, 2, 3);
+ f4(1, 2, 3, 4);
+ f5(1, 2, 3, 4, 5);
+ return arguments.callee;
+ })();
+
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 126
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 189 E> */ B(StackCheck),
+ /* 202 S> */ B(LdaGlobal), U8(0), U8(0),
+ B(Star), R(1),
+ /* 202 E> */ B(CallUndefinedReceiver0), R(1), U8(2),
+ /* 216 S> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(2),
+ /* 216 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(6),
+ /* 231 S> */ B(LdaGlobal), U8(2), U8(8),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(2),
+ B(LdaSmi), I8(2),
+ B(Star), R(3),
+ /* 231 E> */ B(CallUndefinedReceiver2), R(1), R(2), R(3), U8(10),
+ /* 249 S> */ B(LdaGlobal), U8(3), U8(12),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(2),
+ B(LdaSmi), I8(2),
+ B(Star), R(3),
+ B(LdaSmi), I8(3),
+ B(Star), R(4),
+ /* 249 E> */ B(CallUndefinedReceiver), R(1), R(2), U8(3), U8(14),
+ /* 270 S> */ B(LdaGlobal), U8(4), U8(16),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(2),
+ B(LdaSmi), I8(2),
+ B(Star), R(3),
+ B(LdaSmi), I8(3),
+ B(Star), R(4),
+ B(LdaSmi), I8(4),
+ B(Star), R(5),
+ /* 270 E> */ B(CallUndefinedReceiver), R(1), R(2), U8(4), U8(18),
+ /* 294 S> */ B(LdaGlobal), U8(5), U8(20),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(2),
+ B(LdaSmi), I8(2),
+ B(Star), R(3),
+ B(LdaSmi), I8(3),
+ B(Star), R(4),
+ B(LdaSmi), I8(4),
+ B(Star), R(5),
+ B(LdaSmi), I8(5),
+ B(Star), R(6),
+ /* 294 E> */ B(CallUndefinedReceiver), R(1), R(2), U8(5), U8(22),
+ /* 338 S> */ B(LdaNamedProperty), R(0), U8(6), U8(24),
+ /* 345 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f0"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f1"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f2"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f3"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f4"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f5"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index bdfb35c70b..056f9d7b84 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -86,7 +86,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 127
+bytecode array length: 124
bytecodes: [
/* 30 E> */ B(StackCheck),
B(LdaTheHole),
@@ -101,37 +101,35 @@ bytecodes: [
B(Mov), R(4), R(0),
B(Mov), R(0), R(1),
/* 89 S> */ B(CreateArrayLiteral), U8(2), U8(1), U8(37),
- B(Star), R(3),
- B(LdaConstant), U8(3),
B(Star), R(4),
- /* 101 S> */ B(CreateArrayLiteral), U8(4), U8(7), U8(37),
+ B(LdaConstant), U8(3),
+ B(Star), R(3),
+ /* 101 S> */ B(CreateArrayLiteral), U8(4), U8(5), U8(37),
B(Star), R(8),
- B(LdaNamedProperty), R(8), U8(5), U8(8),
+ B(LdaNamedProperty), R(8), U8(5), U8(6),
B(Star), R(9),
- B(CallProperty0), R(9), R(8), U8(10),
+ B(CallProperty0), R(9), R(8), U8(8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(7),
- B(LdaNamedProperty), R(7), U8(6), U8(12),
+ B(LdaNamedProperty), R(7), U8(6), U8(10),
B(Star), R(6),
- B(CallProperty0), R(6), R(7), U8(14),
+ B(CallProperty0), R(6), R(7), U8(12),
B(Star), R(5),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(7), U8(16),
+ B(LdaNamedProperty), R(5), U8(7), U8(14),
B(JumpIfToBooleanTrue), U8(21),
- B(LdaNamedProperty), R(5), U8(8), U8(18),
+ B(LdaNamedProperty), R(5), U8(8), U8(16),
B(Star), R(5),
- B(StaInArrayLiteral), R(3), R(4), U8(2),
- B(Ldar), R(4),
- B(Inc), U8(4),
- B(Star), R(4),
+ B(StaInArrayLiteral), R(4), R(3), U8(3),
+ B(Ldar), R(3),
+ B(Inc), U8(2),
+ B(Star), R(3),
B(JumpLoop), U8(35), I8(0),
B(LdaSmi), I8(4),
- B(StaInArrayLiteral), R(3), R(4), U8(2),
- B(Ldar), R(4),
- B(Inc), U8(4),
- B(Star), R(4),
+ B(StaInArrayLiteral), R(4), R(3), U8(3),
+ B(Mov), R(4), R(3),
B(CallJSRuntime), U8(%reflect_construct), R(2), U8(2),
B(LdaUndefined),
/* 116 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index fa64ffa2a4..a1f4d78f7c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -246,7 +246,7 @@ bytecodes: [
B(Star), R(5),
B(Mov), R(1), R(2),
B(Mov), R(0), R(4),
- /* 57 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
+ /* 57 E> */ B(CallRuntime), U16(Runtime::kSetKeyedProperty), R(2), U8(4),
B(Ldar), R(2),
/* 61 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
index 3bc175b7da..e6eacf6fd6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
@@ -20,9 +20,9 @@ snippet: "
l['a'] = l['b'];
"
-frame size: 7
+frame size: 4
parameter count: 1
-bytecode array length: 128
+bytecode array length: 77
bytecodes: [
/* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
@@ -33,46 +33,25 @@ bytecodes: [
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
/* 60 S> */ B(LdaGlobal), U8(1), U8(3),
B(Star), R(1),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(Mov), R(1), R(2),
- /* 65 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 65 E> */ B(LdaNamedPropertyNoFeedback), R(1), U8(2),
B(Star), R(1),
/* 73 E> */ B(LdaGlobal), U8(1), U8(3),
B(Star), R(2),
- B(LdaConstant), U8(3),
- B(Star), R(4),
- B(Mov), R(2), R(3),
- /* 74 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(3), U8(2),
+ /* 74 E> */ B(LdaNamedPropertyNoFeedback), R(2), U8(3),
/* 71 E> */ B(Add), R(1), U8(2),
/* 62 E> */ B(StaGlobal), U8(4), U8(5),
/* 87 S> */ B(LdaGlobal), U8(1), U8(3),
B(Star), R(1),
B(LdaSmi), I8(7),
- B(Star), R(4),
- B(LdaConstant), U8(3),
- B(Star), R(3),
- B(LdaZero),
- B(Star), R(5),
- B(Mov), R(1), R(2),
- /* 94 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
+ /* 94 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(3), U8(0),
/* 105 S> */ B(LdaGlobal), U8(1), U8(3),
B(Star), R(1),
/* 114 E> */ B(LdaGlobal), U8(1), U8(3),
B(Star), R(2),
- B(LdaConstant), U8(3),
- B(Star), R(4),
- B(Mov), R(2), R(3),
- /* 115 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(3), U8(2),
+ /* 115 E> */ B(LdaNamedPropertyNoFeedback), R(2), U8(3),
B(Star), R(2),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(6),
- B(Mov), R(1), R(3),
- B(Mov), R(2), R(5),
- /* 112 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(3), U8(4),
- B(Mov), R(5), R(0),
+ /* 112 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
+ B(Mov), R(2), R(0),
B(Ldar), R(0),
/* 128 S> */ B(Return),
]
@@ -307,9 +286,9 @@ snippet: "
}
"
-frame size: 7
+frame size: 4
parameter count: 1
-bytecode array length: 111
+bytecode array length: 75
bytecodes: [
/* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
@@ -320,40 +299,25 @@ bytecodes: [
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
/* 63 S> */ B(LdaGlobal), U8(1), U8(2),
B(Star), R(1),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(Mov), R(1), R(2),
- /* 68 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 68 E> */ B(LdaNamedPropertyNoFeedback), R(1), U8(2),
B(Star), R(1),
B(LdaSmi), I8(3),
/* 74 E> */ B(TestLessThan), R(1), U8(4),
- B(JumpIfFalse), U8(36),
+ B(JumpIfFalse), U8(22),
/* 89 S> */ B(LdaGlobal), U8(1), U8(2),
B(Star), R(1),
B(LdaSmi), I8(3),
B(Star), R(2),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(6),
- B(Mov), R(1), R(3),
- B(Mov), R(2), R(5),
- /* 96 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(3), U8(4),
- B(Mov), R(5), R(0),
+ /* 96 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
+ B(Mov), R(2), R(0),
B(Ldar), R(2),
- B(Jump), U8(34),
+ B(Jump), U8(20),
/* 124 S> */ B(LdaGlobal), U8(1), U8(2),
B(Star), R(1),
B(LdaSmi), I8(3),
B(Star), R(2),
- B(LdaConstant), U8(3),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(6),
- B(Mov), R(1), R(3),
- B(Mov), R(2), R(5),
- /* 131 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(3), U8(4),
- B(Mov), R(5), R(0),
+ /* 131 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(3), U8(0),
+ B(Mov), R(2), R(0),
B(Ldar), R(2),
B(Ldar), R(0),
/* 150 S> */ B(Return),
@@ -373,16 +337,16 @@ snippet: "
a = [1.1, [2.2, 4.5]];
"
-frame size: 3
+frame size: 5
parameter count: 1
bytecode array length: 20
bytecodes: [
/* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star), R(3),
B(LdaSmi), I8(4),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(1), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(3), U8(2),
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
B(Star), R(0),
/* 36 S> */ B(Return),
@@ -400,16 +364,16 @@ snippet: "
b = [];
"
-frame size: 3
+frame size: 5
parameter count: 1
bytecode array length: 20
bytecodes: [
/* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star), R(3),
B(LdaSmi), I8(37),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(1), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(3), U8(2),
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
B(Star), R(0),
/* 21 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
index 4607d37d4b..cb09c45b1a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
@@ -37,7 +37,7 @@ bytecodes: [
B(Star), R(5),
B(LdaConstant), U8(1),
B(Star), R(6),
- B(LdaConstant), U8(3),
+ /* 60 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(4),
B(Star), R(9),
B(Mov), R(5), R(7),
@@ -59,7 +59,7 @@ bytecodes: [
B(Star), R(5),
B(LdaConstant), U8(7),
B(Star), R(6),
- B(LdaConstant), U8(3),
+ /* 99 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(4),
B(Star), R(9),
B(Mov), R(5), R(7),
@@ -145,7 +145,7 @@ bytecodes: [
B(Star), R(7),
B(LdaConstant), U8(1),
B(Star), R(8),
- B(LdaConstant), U8(5),
+ /* 77 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
B(Star), R(11),
B(Mov), R(7), R(9),
@@ -175,7 +175,7 @@ bytecodes: [
B(Star), R(7),
B(LdaConstant), U8(9),
B(Star), R(8),
- B(LdaConstant), U8(5),
+ /* 133 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
B(Star), R(11),
B(CreateClosure), U8(13), U8(7), U8(2),
@@ -198,7 +198,7 @@ bytecodes: [
B(Star), R(7),
B(LdaConstant), U8(16),
B(Star), R(8),
- B(LdaConstant), U8(5),
+ /* 256 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
B(Star), R(11),
B(Mov), R(7), R(9),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index d870c4362f..849f7beba3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -381,35 +381,32 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 140
+bytecode array length: 144
bytecodes: [
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(3),
+ B(Star), R(0),
B(Mov), R(context), R(6),
B(Mov), R(context), R(7),
/* 36 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star), R(2),
/* 41 S> */ B(LdaSmi), I8(10),
- /* 41 E> */ B(TestLessThan), R(1), U8(0),
+ /* 41 E> */ B(TestLessThan), R(2), U8(0),
B(JumpIfFalse), U8(15),
/* 23 E> */ B(StackCheck),
- /* 62 S> */ B(Mov), R(1), R(0),
- /* 49 S> */ B(Ldar), R(0),
+ /* 62 S> */ B(Mov), R(2), R(1),
+ /* 49 S> */ B(Ldar), R(1),
B(Inc), U8(1),
- B(Star), R(1),
+ B(Star), R(2),
B(JumpLoop), U8(17), I8(0),
B(LdaUndefined),
- B(Star), R(9),
- B(Mov), R(3), R(8),
- /* 49 E> */ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(8), U8(2),
+ B(Star), R(5),
B(LdaZero),
B(Star), R(4),
- B(Mov), R(3), R(5),
- B(Jump), U8(55),
- B(Jump), U8(39),
+ B(Jump), U8(56),
+ B(Jump), U8(40),
B(Star), R(8),
- B(CreateCatchContext), R(8), U8(0),
+ /* 49 E> */ B(CreateCatchContext), R(8), U8(0),
B(Star), R(7),
B(LdaTheHole),
B(SetPendingMessage),
@@ -419,32 +416,37 @@ bytecodes: [
B(Star), R(10),
B(LdaFalse),
B(Star), R(11),
- B(Mov), R(3), R(9),
+ B(Mov), R(0), R(9),
B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(9), U8(3),
B(PopContext), R(8),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(4),
- B(Mov), R(3), R(5),
+ B(Mov), R(0), R(5),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
B(Star), R(5),
B(Star), R(4),
B(Jump), U8(8),
B(Star), R(5),
- B(LdaSmi), I8(1),
+ B(LdaSmi), I8(2),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(6),
B(LdaFalse),
B(Star), R(8),
- B(Mov), R(3), R(7),
+ B(Mov), R(0), R(7),
B(CallJSRuntime), U8(%async_function_promise_release), R(7), U8(2),
B(Ldar), R(6),
B(SetPendingMessage),
B(Ldar), R(4),
- B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
- B(Jump), U8(8),
+ B(SwitchOnSmiNoFeedback), U8(1), U8(3), I8(0),
+ B(Jump), U8(21),
+ B(Mov), R(0), R(7),
+ B(Mov), R(5), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(7), U8(2),
+ B(Ldar), R(0),
+ /* 67 S> */ B(Return),
B(Ldar), R(5),
/* 67 S> */ B(Return),
B(Ldar), R(5),
@@ -455,11 +457,12 @@ bytecodes: [
constant pool: [
SCOPE_INFO_TYPE,
Smi [6],
- Smi [9],
+ Smi [19],
+ Smi [22],
]
handlers: [
- [10, 93, 101],
- [13, 54, 56],
+ [10, 84, 92],
+ [13, 44, 46],
]
---
@@ -471,53 +474,50 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 191
+bytecode array length: 195
bytecodes: [
- B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(1),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(1),
+ B(Star), R(2),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(2),
+ B(Star), R(0),
B(Mov), R(context), R(5),
B(Mov), R(context), R(6),
/* 36 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star), R(1),
/* 41 S> */ B(LdaSmi), I8(10),
- /* 41 E> */ B(TestLessThan), R(0), U8(0),
+ /* 41 E> */ B(TestLessThan), R(1), U8(0),
B(JumpIfFalse), U8(50),
/* 23 E> */ B(StackCheck),
- /* 52 S> */ B(Mov), R(1), R(7),
- B(Mov), R(0), R(8),
- B(Mov), R(2), R(9),
+ /* 52 S> */ B(Mov), R(2), R(7),
+ B(Mov), R(1), R(8),
+ B(Mov), R(0), R(9),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(7), U8(3),
- /* 52 E> */ B(SuspendGenerator), R(1), R(0), U8(7), U8(0),
- B(ResumeGenerator), R(1), R(0), U8(7),
+ /* 52 E> */ B(SuspendGenerator), R(2), R(0), U8(7), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(7),
B(Star), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(Star), R(8),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
B(Ldar), R(7),
B(ReThrow),
- /* 49 S> */ B(Ldar), R(0),
+ /* 49 S> */ B(Ldar), R(1),
B(Inc), U8(1),
- B(Star), R(0),
+ B(Star), R(1),
B(JumpLoop), U8(52), I8(0),
B(LdaUndefined),
- B(Star), R(8),
- B(Mov), R(2), R(7),
- /* 49 E> */ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(7), U8(2),
+ B(Star), R(4),
B(LdaZero),
B(Star), R(3),
- B(Mov), R(2), R(4),
- B(Jump), U8(55),
- B(Jump), U8(39),
+ B(Jump), U8(56),
+ B(Jump), U8(40),
B(Star), R(7),
- B(CreateCatchContext), R(7), U8(1),
+ /* 49 E> */ B(CreateCatchContext), R(7), U8(1),
B(Star), R(6),
B(LdaTheHole),
B(SetPendingMessage),
@@ -527,32 +527,37 @@ bytecodes: [
B(Star), R(9),
B(LdaFalse),
B(Star), R(10),
- B(Mov), R(2), R(8),
+ B(Mov), R(0), R(8),
B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(8), U8(3),
B(PopContext), R(7),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(3),
- B(Mov), R(2), R(4),
+ B(Mov), R(0), R(4),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
B(Star), R(4),
B(Star), R(3),
B(Jump), U8(8),
B(Star), R(4),
- B(LdaSmi), I8(1),
+ B(LdaSmi), I8(2),
B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(5),
B(LdaTrue),
B(Star), R(7),
- B(Mov), R(2), R(6),
+ B(Mov), R(0), R(6),
B(CallJSRuntime), U8(%async_function_promise_release), R(6), U8(2),
B(Ldar), R(5),
B(SetPendingMessage),
B(Ldar), R(3),
- B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Jump), U8(8),
+ B(SwitchOnSmiNoFeedback), U8(2), U8(3), I8(0),
+ B(Jump), U8(21),
+ B(Mov), R(0), R(6),
+ B(Mov), R(4), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(6), U8(2),
+ B(Ldar), R(0),
+ /* 61 S> */ B(Return),
B(Ldar), R(4),
/* 61 S> */ B(Return),
B(Ldar), R(4),
@@ -564,10 +569,11 @@ constant pool: [
Smi [58],
SCOPE_INFO_TYPE,
Smi [6],
- Smi [9],
+ Smi [19],
+ Smi [22],
]
handlers: [
- [26, 144, 152],
- [29, 105, 107],
+ [26, 135, 143],
+ [29, 95, 97],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
index fcf5e9ae9a..da5c922456 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -44,10 +44,10 @@ bytecodes: [
B(Star), R(5),
B(LdaConstant), U8(1),
B(Star), R(6),
- B(LdaConstant), U8(3),
+ /* 60 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(4),
B(Star), R(9),
- B(LdaConstant), U8(4),
+ /* 92 S> */ B(LdaConstant), U8(4),
B(Star), R(10),
B(LdaConstant), U8(5),
B(TestEqualStrict), R(10), U8(1),
@@ -79,10 +79,10 @@ bytecodes: [
B(Star), R(5),
B(LdaConstant), U8(10),
B(Star), R(6),
- B(LdaConstant), U8(3),
+ /* 131 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(4),
B(Star), R(9),
- B(LdaConstant), U8(4),
+ /* 176 S> */ B(LdaConstant), U8(4),
B(Star), R(10),
B(LdaConstant), U8(5),
B(TestEqualStrict), R(10), U8(1),
@@ -188,10 +188,10 @@ bytecodes: [
B(Star), R(7),
B(LdaConstant), U8(1),
B(Star), R(8),
- B(LdaConstant), U8(5),
+ /* 77 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
B(Star), R(11),
- B(LdaConstant), U8(6),
+ /* 109 S> */ B(LdaConstant), U8(6),
B(Star), R(12),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(12), U8(2),
@@ -231,10 +231,10 @@ bytecodes: [
B(Star), R(7),
B(LdaConstant), U8(12),
B(Star), R(8),
- B(LdaConstant), U8(5),
+ /* 165 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
B(Star), R(11),
- B(LdaConstant), U8(6),
+ /* 210 S> */ B(LdaConstant), U8(6),
B(Star), R(12),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(12), U8(2),
@@ -267,10 +267,10 @@ bytecodes: [
B(Star), R(7),
B(LdaConstant), U8(20),
B(Star), R(8),
- B(LdaConstant), U8(5),
+ /* 333 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(4),
B(Star), R(11),
- B(LdaConstant), U8(6),
+ /* 378 S> */ B(LdaConstant), U8(6),
B(Star), R(12),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(12), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index 39e41739e1..42238ac049 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -93,7 +93,7 @@ snippet: "
"
frame size: 13
parameter count: 1
-bytecode array length: 137
+bytecode array length: 130
bytecodes: [
B(CreateRestParameter),
B(Star), R(2),
@@ -103,55 +103,51 @@ bytecodes: [
/* 140 S> */ B(Ldar), R(closure),
B(GetSuperConstructor), R(5),
B(CreateEmptyArrayLiteral), U8(0),
- B(Star), R(6),
- B(LdaZero),
B(Star), R(7),
+ B(LdaZero),
+ B(Star), R(6),
B(LdaSmi), I8(1),
- B(StaKeyedProperty), R(6), R(7), U8(1),
- B(LdaConstant), U8(0),
- /* 152 S> */ B(Star), R(7),
- B(LdaNamedProperty), R(2), U8(1), U8(8),
+ B(StaInArrayLiteral), R(7), R(6), U8(1),
+ B(Ldar), R(6),
+ B(Inc), U8(3),
+ /* 152 S> */ B(Star), R(6),
+ B(LdaNamedProperty), R(2), U8(0), U8(4),
B(Star), R(12),
- B(CallProperty0), R(12), R(2), U8(10),
+ B(CallProperty0), R(12), R(2), U8(6),
B(Mov), R(2), R(11),
B(Mov), R(1), R(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(10),
- B(LdaNamedProperty), R(10), U8(2), U8(12),
+ B(LdaNamedProperty), R(10), U8(1), U8(8),
B(Star), R(9),
- B(CallProperty0), R(9), R(10), U8(14),
+ B(CallProperty0), R(9), R(10), U8(10),
B(Star), R(8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(LdaNamedProperty), R(8), U8(3), U8(16),
+ B(LdaNamedProperty), R(8), U8(2), U8(12),
B(JumpIfToBooleanTrue), U8(21),
- B(LdaNamedProperty), R(8), U8(4), U8(18),
+ B(LdaNamedProperty), R(8), U8(3), U8(14),
B(Star), R(8),
- B(StaInArrayLiteral), R(6), R(7), U8(3),
- B(Ldar), R(7),
- B(Inc), U8(5),
- B(Star), R(7),
+ B(StaInArrayLiteral), R(7), R(6), U8(1),
+ B(Ldar), R(6),
+ B(Inc), U8(3),
+ B(Star), R(6),
B(JumpLoop), U8(35), I8(0),
B(LdaSmi), I8(1),
- B(StaInArrayLiteral), R(6), R(7), U8(3),
- B(Ldar), R(7),
- B(Inc), U8(5),
- B(Star), R(7),
- B(Mov), R(5), R(8),
- B(Mov), R(6), R(9),
- B(Mov), R(0), R(10),
- /* 140 E> */ B(CallJSRuntime), U8(%reflect_construct), R(8), U8(3),
- B(Star), R(11),
+ B(StaInArrayLiteral), R(7), R(6), U8(1),
+ B(Mov), R(5), R(6),
+ B(Mov), R(0), R(8),
+ /* 140 E> */ B(CallJSRuntime), U8(%reflect_construct), R(6), U8(3),
+ B(Star), R(9),
B(Ldar), R(this),
B(ThrowSuperAlreadyCalledIfNotHole),
- B(Mov), R(11), R(this),
+ B(Mov), R(9), R(this),
B(Ldar), R(this),
B(ThrowSuperNotCalledIfHole),
/* 162 S> */ B(Return),
]
constant pool: [
- Smi [1],
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.cc b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
index acb06f2d8a..855e01e786 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.cc
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
@@ -38,7 +38,7 @@ InterpreterTester::InterpreterTester(Isolate* isolate, const char* source,
: InterpreterTester(isolate, source, MaybeHandle<BytecodeArray>(),
MaybeHandle<FeedbackMetadata>(), filter) {}
-InterpreterTester::~InterpreterTester() {}
+InterpreterTester::~InterpreterTester() = default;
Local<Message> InterpreterTester::CheckThrowsReturnMessage() {
TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate_));
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index d795b7ffcd..d670252242 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -36,7 +36,7 @@ class InterpreterCallable {
public:
InterpreterCallable(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate), function_(function) {}
- virtual ~InterpreterCallable() {}
+ virtual ~InterpreterCallable() = default;
MaybeHandle<Object> operator()(A... args) {
return CallInterpreter(isolate_, function_, args...);
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 0ec28d3653..e81b0cf981 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -627,6 +627,69 @@ TEST(IIFEWithOneshotOpt) {
return arguments.callee;
})();
)",
+ // CallNoFeedback instead of CallProperty
+ R"(
+ this.f0 = function() {};
+ this.f1 = function(a) {};
+ this.f2 = function(a, b) {};
+ this.f3 = function(a, b, c) {};
+ this.f4 = function(a, b, c, d) {};
+ this.f5 = function(a, b, c, d, e) {};
+ (function() {
+ this.f0();
+ this.f1(1);
+ this.f2(1, 2);
+ this.f3(1, 2, 3);
+ this.f4(1, 2, 3, 4);
+ this.f5(1, 2, 3, 4, 5);
+ return arguments.callee;
+ })();
+ )",
+ // CallNoFeedback instead of CallUndefinedReceiver
+ R"(
+ function f0() {}
+ function f1(a) {}
+ function f2(a, b) {}
+ function f3(a, b, c) {}
+ function f4(a, b, c, d) {}
+ function f5(a, b, c, d, e) {}
+ (function() {
+ f0();
+ f1(1);
+ f2(1, 2);
+ f3(1, 2, 3);
+ f4(1, 2, 3, 4);
+ f5(1, 2, 3, 4, 5);
+ return arguments.callee;
+ })();
+ )",
+ R"(
+ var t = 0;
+ function f2() {};
+ if (t == 0) {
+ (function(){
+ l = {};
+ l.a = 3;
+ l.b = 4;
+ f2();
+ return arguments.callee;
+ })();
+ }
+ )",
+      // No one-shot opt for IIFEs within a function
+ R"(
+ function f2() {};
+ function f() {
+ return (function(){
+ l = {};
+ l.a = 3;
+ l.b = 4;
+ f2();
+ return arguments.callee;
+ })();
+ }
+ f();
+ )",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("IIFEWithOneshotOpt.golden")));
@@ -663,6 +726,40 @@ TEST(IIFEWithoutOneshotOpt) {
return arguments.callee;
})();
)",
+ R"(
+ this.f0 = function() {};
+ this.f1 = function(a) {};
+ this.f2 = function(a, b) {};
+ this.f3 = function(a, b, c) {};
+ this.f4 = function(a, b, c, d) {};
+ this.f5 = function(a, b, c, d, e) {};
+ (function() {
+ this.f0();
+ this.f1(1);
+ this.f2(1, 2);
+ this.f3(1, 2, 3);
+ this.f4(1, 2, 3, 4);
+ this.f5(1, 2, 3, 4, 5);
+ return arguments.callee;
+ })();
+ )",
+ R"(
+ function f0() {}
+ function f1(a) {}
+ function f2(a, b) {}
+ function f3(a, b, c) {}
+ function f4(a, b, c, d) {}
+ function f5(a, b, c, d, e) {}
+ (function() {
+ f0();
+ f1(1);
+ f2(1, 2);
+ f3(1, 2, 3);
+ f4(1, 2, 3, 4);
+ f5(1, 2, 3, 4, 5);
+ return arguments.callee;
+ })();
+ )",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("IIFEWithoutOneshotOpt.golden")));
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index 57d42e2a83..bfc42aa540 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -27,7 +27,7 @@ class InvokeIntrinsicHelper {
template <class... A>
Handle<Object> Invoke(A... args) {
CHECK(IntrinsicsHelper::IsSupported(function_id_));
- BytecodeArrayBuilder builder(zone_, sizeof...(args), 0, 0);
+ BytecodeArrayBuilder builder(zone_, sizeof...(args), 0, nullptr);
RegisterList reg_list = InterpreterTester::NewRegisterList(
builder.Receiver().index(), sizeof...(args));
builder.CallRuntime(function_id_, reg_list).Return();
@@ -94,29 +94,6 @@ TEST(IsArray) {
CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42")));
}
-TEST(IsJSProxy) {
- HandleAndZoneScope handles;
-
- InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
- Runtime::kInlineIsJSProxy);
- Factory* factory = handles.main_isolate()->factory();
-
- CHECK_EQ(*factory->false_value(),
- *helper.Invoke(helper.NewObject("new Date()")));
- CHECK_EQ(*factory->false_value(),
- *helper.Invoke(helper.NewObject("(function() {})")));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("([1])")));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("({})")));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("(/x/)")));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Undefined()));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Null()));
- CHECK_EQ(*factory->false_value(),
- *helper.Invoke(helper.NewObject("'string'")));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42")));
- CHECK_EQ(*factory->true_value(),
- *helper.Invoke(helper.NewObject("new Proxy({},{})")));
-}
-
TEST(IsTypedArray) {
HandleAndZoneScope handles;
@@ -198,15 +175,6 @@ TEST(IntrinsicAsStubCall) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- InvokeIntrinsicHelper to_number_helper(isolate, handles.main_zone(),
- Runtime::kInlineToNumber);
- CHECK_EQ(Smi::FromInt(46),
- *to_number_helper.Invoke(to_number_helper.NewObject("'46'")));
-
- InvokeIntrinsicHelper to_integer_helper(isolate, handles.main_zone(),
- Runtime::kInlineToInteger);
- CHECK_EQ(Smi::FromInt(502),
- *to_integer_helper.Invoke(to_integer_helper.NewObject("502.67")));
InvokeIntrinsicHelper has_property_helper(isolate, handles.main_zone(),
Runtime::kInlineHasProperty);
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index c1898adf4e..65eee6f778 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -423,7 +423,8 @@ TEST(InterpreterBinaryOpsBigInt) {
CHECK(return_value->IsBigInt());
MaybeObject* feedback = callable.vector()->Get(slot);
CHECK(feedback->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kBigInt, feedback->ToSmi()->value());
+ CHECK_EQ(BinaryOperationFeedback::kBigInt,
+ feedback->cast<Smi>()->value());
}
}
}
@@ -543,7 +544,7 @@ TEST(InterpreterStringAdd) {
MaybeObject* feedback = callable.vector()->Get(slot);
CHECK(feedback->IsSmi());
- CHECK_EQ(test_cases[i].expected_feedback, feedback->ToSmi()->value());
+ CHECK_EQ(test_cases[i].expected_feedback, feedback->cast<Smi>()->value());
}
}
@@ -748,7 +749,7 @@ TEST(InterpreterBinaryOpTypeFeedback) {
Handle<Object> return_val = callable().ToHandleChecked();
MaybeObject* feedback0 = callable.vector()->Get(slot0);
CHECK(feedback0->IsSmi());
- CHECK_EQ(test_case.feedback, feedback0->ToSmi()->value());
+ CHECK_EQ(test_case.feedback, feedback0->cast<Smi>()->value());
CHECK(Object::Equals(isolate, test_case.result, return_val).ToChecked());
}
}
@@ -854,7 +855,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
Handle<Object> return_val = callable().ToHandleChecked();
MaybeObject* feedback0 = callable.vector()->Get(slot0);
CHECK(feedback0->IsSmi());
- CHECK_EQ(test_case.feedback, feedback0->ToSmi()->value());
+ CHECK_EQ(test_case.feedback, feedback0->cast<Smi>()->value());
CHECK(Object::Equals(isolate, test_case.result, return_val).ToChecked());
}
}
@@ -926,23 +927,23 @@ TEST(InterpreterUnaryOpFeedback) {
MaybeObject* feedback0 = callable.vector()->Get(slot0);
CHECK(feedback0->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kSignedSmall,
- feedback0->ToSmi()->value());
+ feedback0->cast<Smi>()->value());
MaybeObject* feedback1 = callable.vector()->Get(slot1);
CHECK(feedback1->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kNumber, feedback1->ToSmi()->value());
+ CHECK_EQ(BinaryOperationFeedback::kNumber, feedback1->cast<Smi>()->value());
MaybeObject* feedback2 = callable.vector()->Get(slot2);
CHECK(feedback2->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kNumber, feedback2->ToSmi()->value());
+ CHECK_EQ(BinaryOperationFeedback::kNumber, feedback2->cast<Smi>()->value());
MaybeObject* feedback3 = callable.vector()->Get(slot3);
CHECK(feedback3->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kBigInt, feedback3->ToSmi()->value());
+ CHECK_EQ(BinaryOperationFeedback::kBigInt, feedback3->cast<Smi>()->value());
MaybeObject* feedback4 = callable.vector()->Get(slot4);
CHECK(feedback4->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kAny, feedback4->ToSmi()->value());
+ CHECK_EQ(BinaryOperationFeedback::kAny, feedback4->cast<Smi>()->value());
}
}
@@ -988,15 +989,15 @@ TEST(InterpreterBitwiseTypeFeedback) {
MaybeObject* feedback0 = callable.vector()->Get(slot0);
CHECK(feedback0->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kSignedSmall,
- feedback0->ToSmi()->value());
+ feedback0->cast<Smi>()->value());
MaybeObject* feedback1 = callable.vector()->Get(slot1);
CHECK(feedback1->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kNumber, feedback1->ToSmi()->value());
+ CHECK_EQ(BinaryOperationFeedback::kNumber, feedback1->cast<Smi>()->value());
MaybeObject* feedback2 = callable.vector()->Get(slot2);
CHECK(feedback2->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kAny, feedback2->ToSmi()->value());
+ CHECK_EQ(BinaryOperationFeedback::kAny, feedback2->cast<Smi>()->value());
}
}
@@ -1818,7 +1819,7 @@ TEST(InterpreterSmiComparisons) {
MaybeObject* feedback = callable.vector()->Get(slot);
CHECK(feedback->IsSmi());
CHECK_EQ(CompareOperationFeedback::kSignedSmall,
- feedback->ToSmi()->value());
+ feedback->cast<Smi>()->value());
}
}
}
@@ -1866,7 +1867,8 @@ TEST(InterpreterHeapNumberComparisons) {
CompareC(comparison, inputs[i], inputs[j]));
MaybeObject* feedback = callable.vector()->Get(slot);
CHECK(feedback->IsSmi());
- CHECK_EQ(CompareOperationFeedback::kNumber, feedback->ToSmi()->value());
+ CHECK_EQ(CompareOperationFeedback::kNumber,
+ feedback->cast<Smi>()->value());
}
}
}
@@ -1908,7 +1910,8 @@ TEST(InterpreterBigIntComparisons) {
CHECK(return_value->IsBoolean());
MaybeObject* feedback = callable.vector()->Get(slot);
CHECK(feedback->IsSmi());
- CHECK_EQ(CompareOperationFeedback::kBigInt, feedback->ToSmi()->value());
+ CHECK_EQ(CompareOperationFeedback::kBigInt,
+ feedback->cast<Smi>()->value());
}
}
}
@@ -1959,7 +1962,7 @@ TEST(InterpreterStringComparisons) {
Token::IsOrderedRelationalCompareOp(comparison)
? CompareOperationFeedback::kString
: CompareOperationFeedback::kInternalizedString;
- CHECK_EQ(expected_feedback, feedback->ToSmi()->value());
+ CHECK_EQ(expected_feedback, feedback->cast<Smi>()->value());
}
}
}
@@ -2072,7 +2075,7 @@ TEST(InterpreterMixedComparisons) {
CHECK(feedback->IsSmi());
// Comparison with a number and string collects kAny feedback.
CHECK_EQ(CompareOperationFeedback::kAny,
- feedback->ToSmi()->value());
+ feedback->cast<Smi>()->value());
}
}
}
@@ -5046,6 +5049,35 @@ TEST(InterpreterWithNativeStack) {
interpreter_entry_trampoline->InstructionStart());
}
+TEST(InterpreterGetAndMaybeDeserializeBytecodeHandler) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Interpreter* interpreter = isolate->interpreter();
+
+  // Test that single-width bytecode handlers deserialize correctly.
+ Code* wide_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
+ Bytecode::kWide, OperandScale::kSingle);
+
+ CHECK_EQ(wide_handler->builtin_index(), Builtins::kWideHandler);
+
+ Code* add_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
+ Bytecode::kAdd, OperandScale::kSingle);
+
+ CHECK_EQ(add_handler->builtin_index(), Builtins::kAddHandler);
+
+  // Test that double-width bytecode handlers deserialize correctly, including
+ // an illegal bytecode handler since there is no Wide.Wide handler.
+ Code* wide_wide_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
+ Bytecode::kWide, OperandScale::kDouble);
+
+ CHECK_EQ(wide_wide_handler->builtin_index(), Builtins::kIllegalHandler);
+
+ Code* add_wide_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
+ Bytecode::kAdd, OperandScale::kDouble);
+
+ CHECK_EQ(add_wide_handler->builtin_index(), Builtins::kAddWideHandler);
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-source-positions.cc b/deps/v8/test/cctest/interpreter/test-source-positions.cc
index 8f2aae7e0b..2a8e354e54 100644
--- a/deps/v8/test/cctest/interpreter/test-source-positions.cc
+++ b/deps/v8/test/cctest/interpreter/test-source-positions.cc
@@ -49,7 +49,7 @@ struct TestCaseData {
const char* arguments() const { return arguments_; }
private:
- TestCaseData();
+ TestCaseData() = delete;
const char* const script_;
const char* const declaration_parameters_;
diff --git a/deps/v8/test/cctest/libsampler/test-sampler.cc b/deps/v8/test/cctest/libsampler/test-sampler.cc
index 2ec3b870df..462da988e4 100644
--- a/deps/v8/test/cctest/libsampler/test-sampler.cc
+++ b/deps/v8/test/cctest/libsampler/test-sampler.cc
@@ -55,7 +55,7 @@ class TestSampler : public Sampler {
class TestApiCallbacks {
public:
- TestApiCallbacks() {}
+ TestApiCallbacks() = default;
static void Getter(v8::Local<v8::String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index 473debec40..ecea6f6134 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -8,6 +8,7 @@
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
+#include "src/parsing/preparsed-scope-data-impl.h"
#include "src/parsing/preparsed-scope-data.h"
#include "test/cctest/cctest.h"
@@ -747,8 +748,8 @@ TEST(PreParserScopeAnalysis) {
// Parse the lazy function using the scope data.
i::ParseInfo using_scope_data(isolate, shared);
using_scope_data.set_lazy_compile();
- using_scope_data.consumed_preparsed_scope_data()->SetData(
- isolate, produced_data_on_heap);
+ using_scope_data.set_consumed_preparsed_scope_data(
+ i::ConsumedPreParsedScopeData::For(isolate, produced_data_on_heap));
CHECK(i::parsing::ParseFunction(&using_scope_data, shared, isolate));
// Verify that we skipped at least one function inside that scope.
@@ -814,7 +815,7 @@ TEST(ProducingAndConsumingByteData) {
LocalContext env;
i::Zone zone(isolate->allocator(), ZONE_NAME);
- i::ProducedPreParsedScopeData::ByteData bytes(&zone);
+ i::PreParsedScopeDataBuilder::ByteData bytes(&zone);
// Write some data.
bytes.WriteUint32(1983); // This will be overwritten.
bytes.WriteUint32(2147483647);
@@ -841,32 +842,67 @@ TEST(ProducingAndConsumingByteData) {
// End with a lonely quarter.
bytes.WriteQuarter(2);
- i::Handle<i::PodArray<uint8_t>> data_on_heap = bytes.Serialize(isolate);
- i::ConsumedPreParsedScopeData::ByteData bytes_for_reading;
- i::ConsumedPreParsedScopeData::ByteData::ReadingScope reading_scope(
- &bytes_for_reading, *data_on_heap);
+ {
+ // Serialize as a ZoneConsumedPreParsedScopeData, and read back data.
+ i::ZonePreParsedScopeData zone_serialized(&zone, bytes.begin(), bytes.end(),
+ 0);
+ i::ZoneConsumedPreParsedScopeData::ByteData bytes_for_reading;
+ i::ZoneVectorWrapper wrapper(zone_serialized.byte_data());
+ i::ZoneConsumedPreParsedScopeData::ByteData::ReadingScope reading_scope(
+ &bytes_for_reading, &wrapper);
- // Read the data back.
#ifdef DEBUG
- CHECK_EQ(bytes_for_reading.ReadUint32(), 2017);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 2017);
#else
- CHECK_EQ(bytes_for_reading.ReadUint32(), 1983);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 1983);
#endif
- CHECK_EQ(bytes_for_reading.ReadUint32(), 2147483647);
- CHECK_EQ(bytes_for_reading.ReadUint8(), 4);
- CHECK_EQ(bytes_for_reading.ReadUint8(), 255);
- CHECK_EQ(bytes_for_reading.ReadUint32(), 0);
- CHECK_EQ(bytes_for_reading.ReadUint8(), 0);
- CHECK_EQ(bytes_for_reading.ReadUint8(), 100);
- CHECK_EQ(bytes_for_reading.ReadQuarter(), 3);
- CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
- CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
- CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
- CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
- CHECK_EQ(bytes_for_reading.ReadUint8(), 50);
- CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
- CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
- CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
- CHECK_EQ(bytes_for_reading.ReadUint32(), 50);
- CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 2147483647);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 4);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 255);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 0);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 0);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 100);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 3);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 50);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 50);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ }
+
+ {
+ // Serialize as an OnHeapConsumedPreParsedScopeData, and read back data.
+ i::Handle<i::PodArray<uint8_t>> data_on_heap = bytes.Serialize(isolate);
+ i::OnHeapConsumedPreParsedScopeData::ByteData bytes_for_reading;
+ i::OnHeapConsumedPreParsedScopeData::ByteData::ReadingScope reading_scope(
+ &bytes_for_reading, *data_on_heap);
+
+#ifdef DEBUG
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 2017);
+#else
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 1983);
+#endif
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 2147483647);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 4);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 255);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 0);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 0);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 100);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 3);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadUint8(), 50);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), 50);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ }
}
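For readers unfamiliar with the WriteQuarter/ReadQuarter calls checked above: a "quarter" is a 2-bit value, four of which share one byte. The sketch below is a hypothetical illustration of such packing (most-significant bit pair first); it is not the V8 ByteData implementation, whose exact layout this patch does not show:

#include <cstdint>
#include <vector>

class QuarterWriter {
 public:
  // Appends a 2-bit value (0..3), packing four per byte from the
  // most-significant bit pair downwards.
  void WriteQuarter(uint8_t value) {
    if (bytes_.empty() || shift_ == 8) {
      bytes_.push_back(0);
      shift_ = 0;
    }
    shift_ += 2;
    bytes_.back() |= static_cast<uint8_t>((value & 3) << (8 - shift_));
  }
  const std::vector<uint8_t>& bytes() const { return bytes_; }

 private:
  std::vector<uint8_t> bytes_;
  int shift_ = 0;  // Bits consumed in the current byte.
};

Under this assumed layout, writing the quarters 3, 0, 2, 1 (as the test does) yields the single byte 0b11001001.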
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index a9dc4482ef..bb05231f08 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -40,7 +40,7 @@ class ChunkSource : public v8::ScriptCompiler::ExternalSourceStream {
}
chunks_.push_back({nullptr, 0});
}
- ~ChunkSource() {}
+ ~ChunkSource() override = default;
bool SetBookmark() override { return false; }
void ResetToBookmark() override {}
size_t GetMoreData(const uint8_t** src) override {
@@ -61,15 +61,43 @@ class ChunkSource : public v8::ScriptCompiler::ExternalSourceStream {
size_t current_;
};
-class TestExternalResource : public v8::String::ExternalStringResource {
+// Checks that Lock() / Unlock() pairs are balanced. Not thread-safe.
+class LockChecker {
+ public:
+ LockChecker() : lock_depth_(0) {}
+ ~LockChecker() { CHECK_EQ(0, lock_depth_); }
+
+ void Lock() const { lock_depth_++; }
+
+ void Unlock() const {
+ CHECK_GT(lock_depth_, 0);
+ lock_depth_--;
+ }
+
+ bool IsLocked() const { return lock_depth_ > 0; }
+
+ int LockDepth() const { return lock_depth_; }
+
+ protected:
+ mutable int lock_depth_;
+};
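The LockChecker destructor asserts that every Lock() was matched by an Unlock(); a minimal RAII sketch (hypothetical, not part of the patch) keeps the pairs balanced by construction:

class ScopedLockCheck {
 public:
  explicit ScopedLockCheck(const LockChecker* checker) : checker_(checker) {
    checker_->Lock();
  }
  ~ScopedLockCheck() { checker_->Unlock(); }
  // Copying would unbalance the count, so forbid it.
  ScopedLockCheck(const ScopedLockCheck&) = delete;
  ScopedLockCheck& operator=(const ScopedLockCheck&) = delete;

 private:
  const LockChecker* checker_;
};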
+
+class TestExternalResource : public v8::String::ExternalStringResource,
+ public LockChecker {
public:
explicit TestExternalResource(uint16_t* data, int length)
- : data_(data), length_(static_cast<size_t>(length)) {}
+ : LockChecker(), data_(data), length_(static_cast<size_t>(length)) {}
+
+ const uint16_t* data() const override {
+ CHECK(IsLocked());
+ return data_;
+ }
- ~TestExternalResource() {}
+ size_t length() const override { return length_; }
- const uint16_t* data() const { return data_; }
- size_t length() const { return length_; }
+ bool IsCacheable() const override { return false; }
+ void Lock() const override { LockChecker::Lock(); }
+ void Unlock() const override { LockChecker::Unlock(); }
private:
uint16_t* data_;
@@ -77,13 +105,21 @@ class TestExternalResource : public v8::String::ExternalStringResource {
};
class TestExternalOneByteResource
- : public v8::String::ExternalOneByteStringResource {
+ : public v8::String::ExternalOneByteStringResource,
+ public LockChecker {
public:
TestExternalOneByteResource(const char* data, size_t length)
: data_(data), length_(length) {}
- const char* data() const { return data_; }
- size_t length() const { return length_; }
+ const char* data() const override {
+ CHECK(IsLocked());
+ return data_;
+ }
+ size_t length() const override { return length_; }
+
+ bool IsCacheable() const override { return false; }
+ void Lock() const override { LockChecker::Lock(); }
+ void Unlock() const override { LockChecker::Unlock(); }
private:
const char* data_;
@@ -101,6 +137,17 @@ const char unicode_utf8[] =
const uint16_t unicode_ucs2[] = {97, 98, 99, 228, 10784, 55357,
56489, 100, 101, 102, 0};
+i::Handle<i::String> NewExternalTwoByteStringFromResource(
+ i::Isolate* isolate, TestExternalResource* resource) {
+ i::Factory* factory = isolate->factory();
+ // String creation accesses the resource.
+ resource->Lock();
+ i::Handle<i::String> uc16_string(
+ factory->NewExternalStringFromTwoByte(resource).ToHandleChecked());
+ resource->Unlock();
+ return uc16_string;
+}
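The helper wraps string creation in a Lock()/Unlock() pair because the factory reads the characters while constructing the external string. A sketch of typical usage (the buffer contents are illustrative):

  uint16_t buffer[] = {'h', 'i'};
  TestExternalResource resource(buffer, 2);
  i::Handle<i::String> str =
      NewExternalTwoByteStringFromResource(CcTest::i_isolate(), &resource);
  // Creation should leave the lock balanced again.
  CHECK_EQ(0, resource.LockDepth());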
+
} // anonymous namespace
TEST(Utf8StreamAsciiOnly) {
@@ -108,7 +155,7 @@ TEST(Utf8StreamAsciiOnly) {
ChunkSource chunk_source(chunks);
std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
v8::internal::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
// Read the data without dying.
v8::internal::uc32 c;
@@ -126,7 +173,7 @@ TEST(Utf8StreamBOM) {
ChunkSource chunk_source(chunks);
std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
v8::internal::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
// Read the data without tripping over the BOM.
for (size_t i = 0; unicode_ucs2[i]; i++) {
@@ -160,7 +207,7 @@ TEST(Utf8SplitBOM) {
ChunkSource chunk_source(chunks);
std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
v8::internal::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
// Read the data without tripping over the BOM.
for (size_t i = 0; unicode_ucs2[i]; i++) {
@@ -176,7 +223,7 @@ TEST(Utf8SplitBOM) {
ChunkSource chunk_source(chunks);
std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
v8::internal::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
// Read the data without tripping over the BOM.
for (size_t i = 0; unicode_ucs2[i]; i++) {
@@ -191,7 +238,7 @@ TEST(Utf8SplitMultiBOM) {
ChunkSource chunk_source(chunks);
std::unique_ptr<i::Utf16CharacterStream> stream(
v8::internal::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
// Read the data, ensuring we get exactly one of the two BOMs back.
CHECK_EQ(0xFEFF, stream->Advance());
@@ -213,7 +260,7 @@ TEST(Utf8AdvanceUntil) {
ChunkSource chunk_source(chunks);
std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
v8::internal::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
int32_t res = stream->AdvanceUntil(
[](int32_t c0_) { return unibrow::IsLineTerminator(c0_); });
@@ -232,14 +279,12 @@ TEST(AdvanceMatchAdvanceUntil) {
std::unique_ptr<v8::internal::Utf16CharacterStream> stream_advance(
v8::internal::ScannerStream::For(
- &chunk_source_a, v8::ScriptCompiler::StreamedSource::UTF8,
- nullptr));
+ &chunk_source_a, v8::ScriptCompiler::StreamedSource::UTF8));
ChunkSource chunk_source_au(chunks);
std::unique_ptr<v8::internal::Utf16CharacterStream> stream_advance_until(
v8::internal::ScannerStream::For(
- &chunk_source_au, v8::ScriptCompiler::StreamedSource::UTF8,
- nullptr));
+ &chunk_source_au, v8::ScriptCompiler::StreamedSource::UTF8));
int32_t au_c0_ = stream_advance_until->AdvanceUntil(
[](int32_t c0_) { return unibrow::IsLineTerminator(c0_); });
@@ -281,7 +326,7 @@ TEST(Utf8AdvanceUntilOverChunkBoundaries) {
ChunkSource chunk_source(chunks);
std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
v8::internal::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
int32_t res = stream->AdvanceUntil(
[](int32_t c0_) { return unibrow::IsLineTerminator(c0_); });
@@ -309,7 +354,7 @@ TEST(Utf8ChunkBoundaries) {
ChunkSource chunk_source(chunks);
std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
v8::internal::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
for (size_t i = 0; unicode_ucs2[i]; i++) {
CHECK_EQ(unicode_ucs2[i], stream->Advance());
@@ -338,7 +383,7 @@ TEST(Utf8SingleByteChunks) {
ChunkSource chunk_source(chunks);
std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
v8::internal::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
for (size_t j = 0; unicode_ucs2[j]; j++) {
CHECK_EQ(unicode_ucs2[j], stream->Advance());
@@ -400,6 +445,26 @@ void TestCharacterStream(const char* reference, i::Utf16CharacterStream* stream,
CHECK_LT(stream->Advance(), 0);
}
+void TestCloneCharacterStream(const char* reference,
+ i::Utf16CharacterStream* stream,
+ unsigned length) {
+ std::unique_ptr<i::Utf16CharacterStream> clone = stream->Clone();
+
+ unsigned i;
+ unsigned halfway = length / 2;
+ // Advance the original stream halfway.

+ for (i = 0; i < halfway; i++) {
+ CHECK_EQU(i, stream->pos());
+ CHECK_EQU(reference[i], stream->Advance());
+ }
+
+ // Test that advancing the original stream didn't affect the clone.
+ TestCharacterStream(reference, clone.get(), length, 0, length);
+
+ // Test that advancing the clone didn't affect the original stream.
+ TestCharacterStream(reference, stream, length, i, length);
+}
+
#undef CHECK_EQU
void TestCharacterStreams(const char* one_byte_source, unsigned length,
@@ -419,7 +484,7 @@ void TestCharacterStreams(const char* one_byte_source, unsigned length,
}
TestExternalResource resource(uc16_buffer.get(), length);
i::Handle<i::String> uc16_string(
- factory->NewExternalStringFromTwoByte(&resource).ToHandleChecked());
+ NewExternalTwoByteStringFromResource(isolate, &resource));
std::unique_ptr<i::Utf16CharacterStream> uc16_stream(
i::ScannerStream::For(isolate, uc16_string, start, end));
TestCharacterStream(one_byte_source, uc16_stream.get(), length, start, end);
@@ -480,14 +545,13 @@ void TestCharacterStreams(const char* one_byte_source, unsigned length,
ChunkSource single_chunk(data, 1, data_end - data, false);
std::unique_ptr<i::Utf16CharacterStream> one_byte_streaming_stream(
i::ScannerStream::For(&single_chunk,
- v8::ScriptCompiler::StreamedSource::ONE_BYTE,
- nullptr));
+ v8::ScriptCompiler::StreamedSource::ONE_BYTE));
TestCharacterStream(one_byte_source, one_byte_streaming_stream.get(),
length, start, end);
ChunkSource many_chunks(data, 1, data_end - data, true);
one_byte_streaming_stream.reset(i::ScannerStream::For(
- &many_chunks, v8::ScriptCompiler::StreamedSource::ONE_BYTE, nullptr));
+ &many_chunks, v8::ScriptCompiler::StreamedSource::ONE_BYTE));
TestCharacterStream(one_byte_source, one_byte_streaming_stream.get(),
length, start, end);
}
@@ -498,14 +562,14 @@ void TestCharacterStreams(const char* one_byte_source, unsigned length,
const uint8_t* data_end = one_byte_vector.end();
ChunkSource chunks(data, 1, data_end - data, false);
std::unique_ptr<i::Utf16CharacterStream> utf8_streaming_stream(
- i::ScannerStream::For(&chunks, v8::ScriptCompiler::StreamedSource::UTF8,
- nullptr));
+ i::ScannerStream::For(&chunks,
+ v8::ScriptCompiler::StreamedSource::UTF8));
TestCharacterStream(one_byte_source, utf8_streaming_stream.get(), length,
start, end);
ChunkSource many_chunks(data, 1, data_end - data, true);
utf8_streaming_stream.reset(i::ScannerStream::For(
- &many_chunks, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &many_chunks, v8::ScriptCompiler::StreamedSource::UTF8));
TestCharacterStream(one_byte_source, utf8_streaming_stream.get(), length,
start, end);
}
@@ -518,14 +582,14 @@ void TestCharacterStreams(const char* one_byte_source, unsigned length,
reinterpret_cast<const uint8_t*>(two_byte_vector.end());
ChunkSource chunks(data, 2, data_end - data, false);
std::unique_ptr<i::Utf16CharacterStream> two_byte_streaming_stream(
- i::ScannerStream::For(
- &chunks, v8::ScriptCompiler::StreamedSource::TWO_BYTE, nullptr));
+ i::ScannerStream::For(&chunks,
+ v8::ScriptCompiler::StreamedSource::TWO_BYTE));
TestCharacterStream(one_byte_source, two_byte_streaming_stream.get(),
length, start, end);
ChunkSource many_chunks(data, 2, data_end - data, true);
two_byte_streaming_stream.reset(i::ScannerStream::For(
- &many_chunks, v8::ScriptCompiler::StreamedSource::TWO_BYTE, nullptr));
+ &many_chunks, v8::ScriptCompiler::StreamedSource::TWO_BYTE));
TestCharacterStream(one_byte_source, two_byte_streaming_stream.get(),
length, start, end);
}
@@ -567,7 +631,7 @@ TEST(Regress651333) {
// 65533) instead of the incorrectly coded Latin1 char.
ChunkSource chunks(bytes, 1, len, false);
std::unique_ptr<i::Utf16CharacterStream> stream(i::ScannerStream::For(
- &chunks, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunks, v8::ScriptCompiler::StreamedSource::UTF8));
for (size_t i = 0; i < len; i++) {
CHECK_EQ(unicode[i], stream->Advance());
}
@@ -581,7 +645,7 @@ void TestChunkStreamAgainstReference(
for (size_t c = 0; c < unicode_expected.size(); ++c) {
ChunkSource chunk_source(cases[c]);
std::unique_ptr<i::Utf16CharacterStream> stream(i::ScannerStream::For(
- &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
for (size_t i = 0; i < unicode_expected[c].size(); i++) {
CHECK_EQ(unicode_expected[c][i], stream->Advance());
}
@@ -695,3 +759,98 @@ TEST(RelocatingCharacterStream) {
CHECK_EQ('c', two_byte_string_stream->Advance());
CHECK_EQ('d', two_byte_string_stream->Advance());
}
+
+TEST(CloneCharacterStreams) {
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ const char* one_byte_source = "abcdefghi";
+ unsigned length = static_cast<unsigned>(strlen(one_byte_source));
+
+ // Check that cloning a character stream does not affect the original, and
+ // that external-string resource locks stay balanced.
+
+ // 2-byte external string
+ std::unique_ptr<i::uc16[]> uc16_buffer(new i::uc16[length]);
+ i::Vector<const i::uc16> two_byte_vector(uc16_buffer.get(),
+ static_cast<int>(length));
+ {
+ for (unsigned i = 0; i < length; i++) {
+ uc16_buffer[i] = static_cast<i::uc16>(one_byte_source[i]);
+ }
+ TestExternalResource resource(uc16_buffer.get(), length);
+ i::Handle<i::String> uc16_string(
+ NewExternalTwoByteStringFromResource(isolate, &resource));
+ std::unique_ptr<i::Utf16CharacterStream> uc16_stream(
+ i::ScannerStream::For(isolate, uc16_string, 0, length));
+
+ CHECK(resource.IsLocked());
+ CHECK_EQ(1, resource.LockDepth());
+ std::unique_ptr<i::Utf16CharacterStream> cloned = uc16_stream->Clone();
+ CHECK_EQ(2, resource.LockDepth());
+ uc16_stream = std::move(cloned);
+ CHECK_EQ(1, resource.LockDepth());
+
+ TestCloneCharacterStream(one_byte_source, uc16_stream.get(), length);
+
+ // This prevents the GC from trying to free a stack-allocated resource.
+ if (uc16_string->IsExternalString())
+ i::Handle<i::ExternalTwoByteString>::cast(uc16_string)
+ ->SetResource(isolate, nullptr);
+ }
+
+ // 1-byte external string
+ i::Vector<const uint8_t> one_byte_vector =
+ i::OneByteVector(one_byte_source, static_cast<int>(length));
+ i::Handle<i::String> one_byte_string =
+ factory->NewStringFromOneByte(one_byte_vector).ToHandleChecked();
+ {
+ TestExternalOneByteResource one_byte_resource(one_byte_source, length);
+ i::Handle<i::String> ext_one_byte_string(
+ factory->NewExternalStringFromOneByte(&one_byte_resource)
+ .ToHandleChecked());
+ std::unique_ptr<i::Utf16CharacterStream> one_byte_stream(
+ i::ScannerStream::For(isolate, ext_one_byte_string, 0, length));
+ TestCloneCharacterStream(one_byte_source, one_byte_stream.get(), length);
+ // This prevents the GC from trying to free a stack-allocated resource.
+ if (ext_one_byte_string->IsExternalString())
+ i::Handle<i::ExternalOneByteString>::cast(ext_one_byte_string)
+ ->SetResource(isolate, nullptr);
+ }
+
+ // Relocatable streams aren't cloneable.
+ {
+ std::unique_ptr<i::Utf16CharacterStream> string_stream(
+ i::ScannerStream::For(isolate, one_byte_string, 0, length));
+ CHECK(!string_stream->can_be_cloned());
+
+ i::Handle<i::String> two_byte_string =
+ factory->NewStringFromTwoByte(two_byte_vector).ToHandleChecked();
+ std::unique_ptr<i::Utf16CharacterStream> two_byte_string_stream(
+ i::ScannerStream::For(isolate, two_byte_string, 0, length));
+ CHECK(!two_byte_string_stream->can_be_cloned());
+ }
+
+ // Chunk sources are currently not cloneable.
+ {
+ const char* chunks[] = {"1234", "\0"};
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<i::Utf16CharacterStream> one_byte_streaming_stream(
+ i::ScannerStream::For(&chunk_source,
+ v8::ScriptCompiler::StreamedSource::ONE_BYTE));
+ CHECK(!one_byte_streaming_stream->can_be_cloned());
+
+ std::unique_ptr<i::Utf16CharacterStream> utf8_streaming_stream(
+ i::ScannerStream::For(&chunk_source,
+ v8::ScriptCompiler::StreamedSource::UTF8));
+ CHECK(!utf8_streaming_stream->can_be_cloned());
+
+ std::unique_ptr<i::Utf16CharacterStream> two_byte_streaming_stream(
+ i::ScannerStream::For(&chunk_source,
+ v8::ScriptCompiler::StreamedSource::TWO_BYTE));
+ CHECK(!two_byte_streaming_stream->can_be_cloned());
+ }
+}
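A hedged sketch of the guard pattern the can_be_cloned() checks above imply for callers that want a clone but must tolerate non-cloneable streams (chunked and relocatable streams currently return false and have to be re-created from their source instead):

std::unique_ptr<i::Utf16CharacterStream> MaybeClone(
    i::Utf16CharacterStream* stream) {
  // Only streams that advertise cloneability may be cloned.
  if (stream->can_be_cloned()) return stream->Clone();
  return nullptr;
}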
diff --git a/deps/v8/test/cctest/print-extension.h b/deps/v8/test/cctest/print-extension.h
index 922d116efd..a2d237d667 100644
--- a/deps/v8/test/cctest/print-extension.h
+++ b/deps/v8/test/cctest/print-extension.h
@@ -36,8 +36,8 @@ namespace internal {
class PrintExtension : public v8::Extension {
public:
PrintExtension() : v8::Extension("v8/print", "native function print();") { }
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name);
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
};
diff --git a/deps/v8/test/cctest/profiler-extension.h b/deps/v8/test/cctest/profiler-extension.h
index dbc12f47a0..f2be3a1334 100644
--- a/deps/v8/test/cctest/profiler-extension.h
+++ b/deps/v8/test/cctest/profiler-extension.h
@@ -41,8 +41,8 @@ class ProfilerExtension : public v8::Extension {
public:
ProfilerExtension() : v8::Extension("v8/profiler", kSource) { }
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name);
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
static void set_profiler(v8::CpuProfiler* profiler) { profiler_ = profiler; }
static void set_profiler(CpuProfiler* profiler) {
diff --git a/deps/v8/test/cctest/scope-test-helper.h b/deps/v8/test/cctest/scope-test-helper.h
index 8c69307d3e..8dd49970a1 100644
--- a/deps/v8/test/cctest/scope-test-helper.h
+++ b/deps/v8/test/cctest/scope-test-helper.h
@@ -24,7 +24,7 @@ class ScopeTestHelper {
baseline->AsDeclarationScope()->function_kind() ==
scope->AsDeclarationScope()->function_kind());
- if (!ProducedPreParsedScopeData::ScopeNeedsData(baseline)) {
+ if (!PreParsedScopeDataBuilder::ScopeNeedsData(baseline)) {
return;
}
diff --git a/deps/v8/test/cctest/setup-isolate-for-tests.cc b/deps/v8/test/cctest/setup-isolate-for-tests.cc
index ba9c4fb488..8aae2de769 100644
--- a/deps/v8/test/cctest/setup-isolate-for-tests.cc
+++ b/deps/v8/test/cctest/setup-isolate-for-tests.cc
@@ -4,8 +4,6 @@
#include "test/cctest/setup-isolate-for-tests.h"
-#include "src/interpreter/setup-interpreter.h"
-
namespace v8 {
namespace internal {
@@ -15,13 +13,6 @@ void SetupIsolateDelegateForTests::SetupBuiltins(Isolate* isolate) {
}
}
-void SetupIsolateDelegateForTests::SetupInterpreter(
- interpreter::Interpreter* interpreter) {
- if (create_heap_objects_) {
- interpreter::SetupInterpreter::InstallBytecodeHandlers(interpreter);
- }
-}
-
bool SetupIsolateDelegateForTests::SetupHeap(Heap* heap) {
if (create_heap_objects_) {
return SetupHeapInternal(heap);
diff --git a/deps/v8/test/cctest/setup-isolate-for-tests.h b/deps/v8/test/cctest/setup-isolate-for-tests.h
index e3d34725f0..c026c04afd 100644
--- a/deps/v8/test/cctest/setup-isolate-for-tests.h
+++ b/deps/v8/test/cctest/setup-isolate-for-tests.h
@@ -14,12 +14,10 @@ class SetupIsolateDelegateForTests : public SetupIsolateDelegate {
public:
explicit SetupIsolateDelegateForTests(bool create_heap_objects)
: SetupIsolateDelegate(create_heap_objects) {}
- virtual ~SetupIsolateDelegateForTests() {}
+ ~SetupIsolateDelegateForTests() override = default;
void SetupBuiltins(Isolate* isolate) override;
- void SetupInterpreter(interpreter::Interpreter* interpreter) override;
-
bool SetupHeap(Heap* heap) override;
};
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index de1901b6d3..a6a02ba762 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -181,7 +181,7 @@ THREADED_TEST(GlobalVariableAccess) {
templ->InstanceTemplate()->SetAccessor(
v8_str("baz"), GetIntValue, SetIntValue,
v8::External::New(isolate, &baz));
- LocalContext env(0, templ->InstanceTemplate());
+ LocalContext env(nullptr, templ->InstanceTemplate());
v8_compile("foo = (++bar) + baz")->Run(env.local()).ToLocalChecked();
CHECK_EQ(-3, bar);
CHECK_EQ(7, foo);
diff --git a/deps/v8/test/cctest/test-allocation.cc b/deps/v8/test/cctest/test-allocation.cc
index 139829dd2b..d5ba49c537 100644
--- a/deps/v8/test/cctest/test-allocation.cc
+++ b/deps/v8/test/cctest/test-allocation.cc
@@ -37,7 +37,7 @@ class AllocationPlatform : public TestPlatform {
// Now that it's completely constructed, make this the current platform.
i::V8::SetPlatformForTesting(this);
}
- virtual ~AllocationPlatform() = default;
+ ~AllocationPlatform() override = default;
void OnCriticalMemoryPressure() override { oom_callback_called = true; }
@@ -141,24 +141,20 @@ TEST(AlignedAllocOOM) {
TEST(AllocVirtualMemoryOOM) {
AllocationPlatform platform;
CHECK(!platform.oom_callback_called);
- v8::internal::VirtualMemory result;
- bool success =
- v8::internal::AllocVirtualMemory(GetHugeMemoryAmount(), nullptr, &result);
+ v8::internal::VirtualMemory result(v8::internal::GetPlatformPageAllocator(),
+ GetHugeMemoryAmount(), nullptr);
// On a few systems, allocation somehow succeeds.
- CHECK_IMPLIES(success, result.IsReserved());
- CHECK_IMPLIES(!success, !result.IsReserved() && platform.oom_callback_called);
+ CHECK_IMPLIES(!result.IsReserved(), platform.oom_callback_called);
}
TEST(AlignedAllocVirtualMemoryOOM) {
AllocationPlatform platform;
CHECK(!platform.oom_callback_called);
- v8::internal::VirtualMemory result;
- bool success = v8::internal::AlignedAllocVirtualMemory(
- GetHugeMemoryAmount(), v8::internal::AllocatePageSize(), nullptr,
- &result);
+ v8::internal::VirtualMemory result(v8::internal::GetPlatformPageAllocator(),
+ GetHugeMemoryAmount(), nullptr,
+ v8::internal::AllocatePageSize());
// On a few systems, allocation somehow succeeds.
- CHECK_IMPLIES(success, result.IsReserved());
- CHECK_IMPLIES(!success, !result.IsReserved() && platform.oom_callback_called);
+ CHECK_IMPLIES(!result.IsReserved(), platform.oom_callback_called);
}
#endif // !defined(V8_USE_ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) &&
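Both OOM tests now use the reserving VirtualMemory constructor instead of the removed bool-returning AllocVirtualMemory()/AlignedAllocVirtualMemory() helpers, so success is queried on the object itself. A condensed sketch of the two constructor shapes used above (the size is illustrative):

const size_t size = 1u << 20;  // 1 MiB, for illustration only.
// Unaligned reservation; the hint pointer is nullptr here.
v8::internal::VirtualMemory plain(v8::internal::GetPlatformPageAllocator(),
                                  size, nullptr);
// Aligned reservation: the alignment is the trailing argument.
v8::internal::VirtualMemory aligned(v8::internal::GetPlatformPageAllocator(),
                                    size, nullptr,
                                    v8::internal::AllocatePageSize());
if (!plain.IsReserved()) {
  // Reservation failed; with the test platform installed this is where
  // OnCriticalMemoryPressure() would have fired.
}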
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index 5bda0432ea..7c0a7ee8cb 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -240,8 +240,12 @@ static void Getter(v8::Local<v8::Name> name,
static void StringGetter(v8::Local<v8::String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {}
-static void Setter(v8::Local<v8::String> name, v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {}
+static int set_accessor_call_count = 0;
+
+static void Setter(v8::Local<v8::Name> name, v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ set_accessor_call_count++;
+}
} // namespace
// Re-declaration of non-configurable accessors should throw.
@@ -281,7 +285,7 @@ TEST(AccessorSetHasNoSideEffect) {
obj->SetAccessor(context, v8_str("foo"), Getter).ToChecked();
CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true).IsEmpty());
- obj->SetAccessor(context, v8_str("foo"), Getter, 0,
+ obj->SetAccessor(context, v8_str("foo"), Getter, nullptr,
v8::MaybeLocal<v8::Value>(), v8::AccessControl::DEFAULT,
v8::PropertyAttribute::None,
v8::SideEffectType::kHasNoSideEffect)
@@ -297,6 +301,65 @@ TEST(AccessorSetHasNoSideEffect) {
.ToLocalChecked()
->Int32Value(env.local())
.FromJust());
+ CHECK_EQ(0, set_accessor_call_count);
+}
+
+// Set accessors can be whitelisted via SetAccessor; here the setter's side
+// effects are scoped to the receiver.
+TEST(SetAccessorSetSideEffectReceiverCheck1) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+ obj->SetAccessor(env.local(), v8_str("foo"), Getter, Setter,
+ v8::MaybeLocal<v8::Value>(), v8::AccessControl::DEFAULT,
+ v8::PropertyAttribute::None,
+ v8::SideEffectType::kHasNoSideEffect,
+ v8::SideEffectType::kHasSideEffectToReceiver)
+ .ToChecked();
+ CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo"), true)
+ .ToLocalChecked()
+ ->Equals(env.local(), v8_str("return value"))
+ .FromJust());
+ v8::TryCatch try_catch(isolate);
+ CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj.foo = 1"), true)
+ .IsEmpty());
+ CHECK(try_catch.HasCaught());
+ CHECK_EQ(0, set_accessor_call_count);
+}
+
+static void ConstructCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+}
+
+TEST(SetAccessorSetSideEffectReceiverCheck2) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ i::FLAG_enable_one_shot_optimization = false;
+
+ v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(
+ isolate, ConstructCallback, v8::Local<v8::Value>(),
+ v8::Local<v8::Signature>(), 0, v8::ConstructorBehavior::kAllow,
+ v8::SideEffectType::kHasNoSideEffect);
+ templ->InstanceTemplate()->SetAccessor(
+ v8_str("bar"), Getter, Setter, v8::Local<v8::Value>(),
+ v8::AccessControl::DEFAULT, v8::PropertyAttribute::None,
+ v8::Local<v8::AccessorSignature>(),
+ v8::SideEffectType::kHasSideEffectToReceiver,
+ v8::SideEffectType::kHasSideEffectToReceiver);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("f"),
+ templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+ CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("new f().bar"), true)
+ .ToLocalChecked()
+ ->Equals(env.local(), v8_str("return value"))
+ .FromJust());
+ v8::debug::EvaluateGlobal(isolate, v8_str("new f().bar = 1"), true)
+ .ToLocalChecked();
+ CHECK_EQ(1, set_accessor_call_count);
}
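A condensed restatement of the contract the two tests above exercise: the getter is declared side-effect-free while the setter's effects are scoped to the receiver, so side-effect-checking debug evaluation rejects writes to objects that already escaped (test 1) but permits them on a receiver created within the evaluated expression itself (test 2):

obj->SetAccessor(env.local(), v8_str("foo"), Getter, Setter,
                 v8::MaybeLocal<v8::Value>(), v8::AccessControl::DEFAULT,
                 v8::PropertyAttribute::None,
                 v8::SideEffectType::kHasNoSideEffect,          // getter
                 v8::SideEffectType::kHasSideEffectToReceiver)  // setter
    .ToChecked();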
// Accessors can be whitelisted as side-effect-free via SetNativeDataProperty.
@@ -366,10 +429,10 @@ TEST(ObjectTemplateSetAccessorHasNoSideEffect) {
v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("foo"), StringGetter);
- templ->SetAccessor(v8_str("foo2"), StringGetter, 0, v8::Local<v8::Value>(),
- v8::AccessControl::DEFAULT, v8::PropertyAttribute::None,
- v8::Local<v8::AccessorSignature>(),
- v8::SideEffectType::kHasNoSideEffect);
+ templ->SetAccessor(
+ v8_str("foo2"), StringGetter, nullptr, v8::Local<v8::Value>(),
+ v8::AccessControl::DEFAULT, v8::PropertyAttribute::None,
+ v8::Local<v8::AccessorSignature>(), v8::SideEffectType::kHasNoSideEffect);
v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
@@ -395,7 +458,7 @@ TEST(ObjectTemplateSetNativePropertyHasNoSideEffect) {
v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
templ->SetNativeDataProperty(v8_str("foo"), Getter);
templ->SetNativeDataProperty(
- v8_str("foo2"), Getter, 0, v8::Local<v8::Value>(),
+ v8_str("foo2"), Getter, nullptr, v8::Local<v8::Value>(),
v8::PropertyAttribute::None, v8::Local<v8::AccessorSignature>(),
v8::AccessControl::DEFAULT, v8::SideEffectType::kHasNoSideEffect);
v8::Local<v8::Object> obj = templ->NewInstance(env.local()).ToLocalChecked();
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 9d9138670e..3604af020f 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -393,11 +393,11 @@ void QueryCallback(Local<Name> property,
// Examples that show when the query callback is triggered.
THREADED_TEST(QueryInterceptor) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::FunctionTemplate> templ =
- v8::FunctionTemplate::New(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetHandler(
- v8::NamedPropertyHandlerConfiguration(0, 0, QueryCallback));
+ v8::NamedPropertyHandlerConfiguration(nullptr, nullptr, QueryCallback));
LocalContext env;
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
@@ -430,43 +430,37 @@ THREADED_TEST(QueryInterceptor) {
CHECK(v8_compile("obj.propertyIsEnumerable('enum');")
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK_EQ(4, query_counter_int);
CHECK(!v8_compile("obj.propertyIsEnumerable('not_enum');")
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK_EQ(5, query_counter_int);
CHECK(v8_compile("obj.hasOwnProperty('enum');")
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK_EQ(5, query_counter_int);
CHECK(v8_compile("obj.hasOwnProperty('not_enum');")
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK_EQ(5, query_counter_int);
CHECK(!v8_compile("obj.hasOwnProperty('x');")
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK_EQ(6, query_counter_int);
CHECK(!v8_compile("obj.propertyIsEnumerable('undef');")
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK_EQ(7, query_counter_int);
v8_compile("Object.defineProperty(obj, 'enum', {value: 42});")
@@ -737,7 +731,8 @@ THREADED_TEST(DefinerCallbackGetAndDefine) {
v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- GetterCallbackOrder, SetterCallback, 0, 0, 0, DefinerCallbackOrder));
+ GetterCallbackOrder, SetterCallback, nullptr, nullptr, nullptr,
+ DefinerCallbackOrder));
LocalContext env;
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
@@ -834,15 +829,15 @@ THREADED_TEST(InterceptorHasOwnProperty) {
v8::Local<Value> value = CompileRun(
"var o = new constructor();"
"o.hasOwnProperty('ostehaps');");
- CHECK(!value->BooleanValue(context.local()).FromJust());
+ CHECK(!value->BooleanValue(isolate));
value = CompileRun(
"o.ostehaps = 42;"
"o.hasOwnProperty('ostehaps');");
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
value = CompileRun(
"var p = new constructor();"
"p.hasOwnProperty('ostehaps');");
- CHECK(!value->BooleanValue(context.local()).FromJust());
+ CHECK(!value->BooleanValue(isolate));
}
@@ -876,7 +871,7 @@ THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
"var o = new constructor();"
"o.__proto__ = new String(x);"
"o.hasOwnProperty('ostehaps');");
- CHECK(!value->BooleanValue(context.local()).FromJust());
+ CHECK(!value->BooleanValue(isolate));
}
@@ -886,8 +881,8 @@ static void CheckInterceptorLoadIC(
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(getter, 0, 0, 0, 0,
- v8_str("data")));
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ getter, nullptr, nullptr, nullptr, nullptr, v8_str("data")));
LocalContext context;
context->Global()
->Set(context.local(), v8_str("o"),
@@ -1353,7 +1348,7 @@ THREADED_TEST(InterceptorLoadGlobalICGlobalWithInterceptor) {
" f();"
"};"
"f();");
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
value = CompileRun(
"var f = function() { "
@@ -1368,7 +1363,7 @@ THREADED_TEST(InterceptorLoadGlobalICGlobalWithInterceptor) {
" f();"
"};"
"f();");
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
value = CompileRun(
"var f = function() { "
@@ -1383,7 +1378,7 @@ THREADED_TEST(InterceptorLoadGlobalICGlobalWithInterceptor) {
" f();"
"};"
"f();");
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
}
// Test load of a non-existing global through prototype chain when a global
@@ -1454,8 +1449,8 @@ THREADED_TEST(InterceptorStoreIC) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorLoadICGetter, InterceptorStoreICSetter, 0, 0, 0,
- v8_str("data")));
+ InterceptorLoadICGetter, InterceptorStoreICSetter, nullptr, nullptr,
+ nullptr, v8_str("data")));
LocalContext context;
context->Global()
->Set(context.local(), v8_str("o"),
@@ -1574,11 +1569,11 @@ THREADED_TEST(GenericInterceptorDoesSeeSymbols) {
THREADED_TEST(NamedPropertyHandlerGetter) {
echo_named_call_count = 0;
- v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::FunctionTemplate> templ =
- v8::FunctionTemplate::New(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- EchoNamedProperty, 0, 0, 0, 0, v8_str("data")));
+ EchoNamedProperty, nullptr, nullptr, nullptr, nullptr, v8_str("data")));
LocalContext env;
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
@@ -1591,7 +1586,7 @@ THREADED_TEST(NamedPropertyHandlerGetter) {
CHECK_EQ(1, echo_named_call_count);
const char* code = "var str = 'oddle'; obj[str] + obj.poddle;";
v8::Local<Value> str = CompileRun(code);
- String::Utf8Value value(CcTest::isolate(), str);
+ String::Utf8Value value(isolate, str);
CHECK_EQ(0, strcmp(*value, "oddlepoddle"));
// Check default behavior
CHECK_EQ(10, v8_compile("obj.flob = 10;")
@@ -1602,13 +1597,11 @@ THREADED_TEST(NamedPropertyHandlerGetter) {
CHECK(v8_compile("'myProperty' in obj")
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK(v8_compile("delete obj.myProperty")
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
}
namespace {
@@ -1650,7 +1643,8 @@ THREADED_TEST(PropertyDefinerCallback) {
v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, NotInterceptingPropertyDefineCallback));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ NotInterceptingPropertyDefineCallback));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -1672,7 +1666,8 @@ THREADED_TEST(PropertyDefinerCallback) {
v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, CheckDescriptorInDefineCallback));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ CheckDescriptorInDefineCallback));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -1699,7 +1694,8 @@ THREADED_TEST(PropertyDefinerCallback) {
v8::FunctionTemplate::New(CcTest::isolate());
templ2->InstanceTemplate()->SetHandler(
v8::NamedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, InterceptingPropertyDefineCallback));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ InterceptingPropertyDefineCallback));
env->Global()
->Set(env.local(), v8_str("obj"), templ2->GetFunction(env.local())
.ToLocalChecked()
@@ -1759,7 +1755,8 @@ THREADED_TEST(PropertyDefinerCallbackIndexed) {
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(
v8::IndexedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, NotInterceptingPropertyDefineCallbackIndexed));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ NotInterceptingPropertyDefineCallbackIndexed));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -1782,7 +1779,8 @@ THREADED_TEST(PropertyDefinerCallbackIndexed) {
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(
v8::IndexedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, CheckDescriptorInDefineCallbackIndexed));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ CheckDescriptorInDefineCallbackIndexed));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -1809,7 +1807,8 @@ THREADED_TEST(PropertyDefinerCallbackIndexed) {
v8::FunctionTemplate::New(CcTest::isolate());
templ2->InstanceTemplate()->SetHandler(
v8::IndexedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, InterceptingPropertyDefineCallbackIndexed));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ InterceptingPropertyDefineCallbackIndexed));
env->Global()
->Set(env.local(), v8_str("obj"), templ2->GetFunction(env.local())
.ToLocalChecked()
@@ -1831,12 +1830,13 @@ THREADED_TEST(PropertyDefinerCallbackIndexed) {
// Test that freeze() is intercepted.
THREADED_TEST(PropertyDefinerCallbackForFreeze) {
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
LocalContext env;
- v8::Local<v8::FunctionTemplate> templ =
- v8::FunctionTemplate::New(CcTest::isolate());
+ v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, InterceptingPropertyDefineCallback));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ InterceptingPropertyDefineCallback));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -1851,8 +1851,7 @@ THREADED_TEST(PropertyDefinerCallbackForFreeze) {
CHECK(v8_compile(code)
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
}
// Check that the descriptor passed to the callback is enumerable.
@@ -1878,7 +1877,8 @@ THREADED_TEST(PropertyDefinerCallbackEnumerable) {
v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, CheckEnumerablePropertyDefineCallback));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ CheckEnumerablePropertyDefineCallback));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -1918,7 +1918,8 @@ THREADED_TEST(PropertyDefinerCallbackConfigurable) {
v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, CheckConfigurablePropertyDefineCallback));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ CheckConfigurablePropertyDefineCallback));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -1954,7 +1955,8 @@ THREADED_TEST(PropertyDefinerCallbackWritable) {
v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, CheckWritablePropertyDefineCallback));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ CheckWritablePropertyDefineCallback));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -1989,7 +1991,8 @@ THREADED_TEST(PropertyDefinerCallbackWithGetter) {
v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, CheckGetterPropertyDefineCallback));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ CheckGetterPropertyDefineCallback));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -2024,7 +2027,8 @@ THREADED_TEST(PropertyDefinerCallbackWithSetter) {
v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, 0, 0, 0, CheckSetterPropertyDefineCallback));
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ CheckSetterPropertyDefineCallback));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -2068,7 +2072,8 @@ THREADED_TEST(PropertyDescriptorCallback) {
v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, EmptyPropertyDescriptorCallback, 0, 0, 0));
+ nullptr, nullptr, EmptyPropertyDescriptorCallback, nullptr, nullptr,
+ nullptr));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -2090,7 +2095,8 @@ THREADED_TEST(PropertyDescriptorCallback) {
v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, InterceptingPropertyDescriptorCallback, 0, 0, 0));
+ nullptr, nullptr, InterceptingPropertyDescriptorCallback, nullptr,
+ nullptr, nullptr));
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
.ToLocalChecked()
@@ -2129,7 +2135,7 @@ THREADED_TEST(IndexedPropertyHandlerGetter) {
v8::HandleScope scope(isolate);
v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- EchoIndexedProperty, 0, 0, 0, 0, v8_num(637)));
+ EchoIndexedProperty, nullptr, nullptr, nullptr, nullptr, v8_num(637)));
LocalContext env;
env->Global()
->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
@@ -2285,7 +2291,7 @@ THREADED_TEST(PrePropertyHandler) {
v8::HandleScope scope(isolate);
v8::Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
desc->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- PrePropertyHandlerGet, 0, PrePropertyHandlerQuery));
+ PrePropertyHandlerGet, nullptr, PrePropertyHandlerQuery));
is_bootstrapping = true;
LocalContext env(nullptr, desc->InstanceTemplate());
is_bootstrapping = false;
@@ -2909,8 +2915,8 @@ THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- UnboxedDoubleIndexedPropertyGetter, UnboxedDoubleIndexedPropertySetter, 0,
- 0, UnboxedDoubleIndexedPropertyEnumerator));
+ UnboxedDoubleIndexedPropertyGetter, UnboxedDoubleIndexedPropertySetter,
+ nullptr, nullptr, UnboxedDoubleIndexedPropertyEnumerator));
LocalContext context;
context->Global()
->Set(context.local(), v8_str("obj"),
@@ -2971,7 +2977,7 @@ THREADED_TEST(IndexedInterceptorSloppyArgsWithIndexedAccessor) {
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- SloppyIndexedPropertyGetter, 0, 0, 0,
+ SloppyIndexedPropertyGetter, nullptr, nullptr, nullptr,
SloppyArgsIndexedPropertyEnumerator));
LocalContext context;
context->Global()
@@ -4113,7 +4119,7 @@ THREADED_TEST(InterceptorICReferenceErrors) {
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorICRefErrorGetter));
is_bootstrapping = true;
- LocalContext context(0, templ, v8::Local<Value>());
+ LocalContext context(nullptr, templ, v8::Local<Value>());
is_bootstrapping = false;
call_ic_function2 = v8_compile("function h(x) { return x; }; h")
->Run(context.local())
@@ -4126,7 +4132,7 @@ THREADED_TEST(InterceptorICReferenceErrors) {
" return false;"
"};"
"f();");
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
interceptor_call_count = 0;
value = CompileRun(
"function g() {"
@@ -4136,7 +4142,7 @@ THREADED_TEST(InterceptorICReferenceErrors) {
" return false;"
"};"
"g();");
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
}
@@ -4169,7 +4175,7 @@ THREADED_TEST(InterceptorICGetterExceptions) {
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorICExceptionGetter));
is_bootstrapping = true;
- LocalContext context(0, templ, v8::Local<Value>());
+ LocalContext context(nullptr, templ, v8::Local<Value>());
is_bootstrapping = false;
call_ic_function3 = v8_compile("function h(x) { return x; }; h")
->Run(context.local())
@@ -4182,7 +4188,7 @@ THREADED_TEST(InterceptorICGetterExceptions) {
" return false;"
"};"
"f();");
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
interceptor_ic_exception_get_count = 0;
value = CompileRun(
"function f() {"
@@ -4192,7 +4198,7 @@ THREADED_TEST(InterceptorICGetterExceptions) {
" return false;"
"};"
"f();");
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
}
@@ -4215,9 +4221,9 @@ THREADED_TEST(InterceptorICSetterExceptions) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(0, InterceptorICExceptionSetter));
- LocalContext context(0, templ, v8::Local<Value>());
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ nullptr, InterceptorICExceptionSetter));
+ LocalContext context(nullptr, templ, v8::Local<Value>());
v8::Local<Value> value = CompileRun(
"function f() {"
" for (var i = 0; i < 100; i++) {"
@@ -4226,7 +4232,7 @@ THREADED_TEST(InterceptorICSetterExceptions) {
" return false;"
"};"
"f();");
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
}
@@ -4236,7 +4242,7 @@ THREADED_TEST(NullNamedInterceptor) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- static_cast<v8::GenericNamedPropertyGetterCallback>(0)));
+ static_cast<v8::GenericNamedPropertyGetterCallback>(nullptr)));
LocalContext context;
templ->Set(CcTest::isolate(), "x", v8_num(42));
v8::Local<v8::Object> obj =
@@ -4254,7 +4260,7 @@ THREADED_TEST(NullIndexedInterceptor) {
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- static_cast<v8::IndexedPropertyGetterCallback>(0)));
+ static_cast<v8::IndexedPropertyGetterCallback>(nullptr)));
LocalContext context;
templ->Set(CcTest::isolate(), "42", v8_num(42));
v8::Local<v8::Object> obj =
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 1b74ecfd70..9eb73fab7e 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -445,18 +445,14 @@ class TestResource: public String::ExternalStringResource {
while (data[length_]) ++length_;
}
- ~TestResource() {
+ ~TestResource() override {
if (owning_data_) i::DeleteArray(data_);
if (counter_ != nullptr) ++*counter_;
}
- const uint16_t* data() const {
- return data_;
- }
+ const uint16_t* data() const override { return data_; }
- size_t length() const {
- return length_;
- }
+ size_t length() const override { return length_; }
private:
uint16_t* data_;
@@ -475,18 +471,14 @@ class TestOneByteResource : public String::ExternalOneByteStringResource {
length_(strlen(data) - offset),
counter_(counter) {}
- ~TestOneByteResource() {
+ ~TestOneByteResource() override {
i::DeleteArray(orig_data_);
if (counter_ != nullptr) ++*counter_;
}
- const char* data() const {
- return data_;
- }
+ const char* data() const override { return data_; }
- size_t length() const {
- return length_;
- }
+ size_t length() const override { return length_; }
private:
const char* orig_data_;
@@ -692,10 +684,10 @@ TEST(MakingExternalUnalignedOneByteString) {
// Trigger GCs and force evacuation.
CcTest::CollectAllGarbage();
- CcTest::CollectAllGarbage(i::Heap::kReduceMemoryFootprintMask);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kReduceMemoryFootprintMask,
+ i::GarbageCollectionReason::kTesting);
}
-
THREADED_TEST(UsingExternalString) {
i::Factory* factory = CcTest::i_isolate()->factory();
{
@@ -744,8 +736,8 @@ THREADED_TEST(UsingExternalOneByteString) {
class RandomLengthResource : public v8::String::ExternalStringResource {
public:
explicit RandomLengthResource(int length) : length_(length) {}
- virtual const uint16_t* data() const { return string_; }
- virtual size_t length() const { return length_; }
+ const uint16_t* data() const override { return string_; }
+ size_t length() const override { return length_; }
private:
uint16_t string_[10];
@@ -757,8 +749,8 @@ class RandomLengthOneByteResource
: public v8::String::ExternalOneByteStringResource {
public:
explicit RandomLengthOneByteResource(int length) : length_(length) {}
- virtual const char* data() const { return string_; }
- virtual size_t length() const { return length_; }
+ const char* data() const override { return string_; }
+ size_t length() const override { return length_; }
private:
char string_[10];
@@ -847,7 +839,7 @@ class TestOneByteResourceWithDisposeControl : public TestOneByteResource {
TestOneByteResourceWithDisposeControl(const char* data, bool dispose)
: TestOneByteResource(data, &dispose_count), dispose_(dispose) {}
- void Dispose() {
+ void Dispose() override {
++dispose_calls;
if (dispose_) delete this;
}
@@ -1296,8 +1288,7 @@ THREADED_PROFILED_TEST(FastReturnValues) {
fast_return_value_bool = i == 0;
value = TestFastReturnValues<bool>();
CHECK(value->IsBoolean());
- CHECK_EQ(fast_return_value_bool,
- value->ToBoolean(env.local()).ToLocalChecked()->Value());
+ CHECK_EQ(fast_return_value_bool, value->BooleanValue(isolate));
}
// check oddballs
ReturnValueOddball oddballs[] = {
@@ -1392,8 +1383,7 @@ static void TestExternalPointerWrapping() {
" for (var i = 0; i < 13; i++) obj.func();\n"
"}\n"
"foo(), true")
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
}
@@ -1782,7 +1772,6 @@ THREADED_TEST(NumberObject) {
}
THREADED_TEST(BigIntObject) {
- v8::internal::FLAG_harmony_bigint = true;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -1803,7 +1792,7 @@ THREADED_TEST(BigIntObject) {
CHECK(new_unboxed_bigint->IsBigInt());
// Test functionality inherited from v8::Value.
- CHECK(unboxed_bigint->BooleanValue(context).ToChecked());
+ CHECK(unboxed_bigint->BooleanValue(isolate));
v8::Local<v8::String> string =
unboxed_bigint->ToString(context).ToLocalChecked();
CHECK_EQ(0, strcmp("42", *v8::String::Utf8Value(isolate, string)));
@@ -1840,48 +1829,49 @@ THREADED_TEST(BooleanObject) {
THREADED_TEST(PrimitiveAndWrappedBooleans) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- Local<Value> primitive_false = Boolean::New(env->GetIsolate(), false);
+ Local<Value> primitive_false = Boolean::New(isolate, false);
CHECK(primitive_false->IsBoolean());
CHECK(!primitive_false->IsBooleanObject());
- CHECK(!primitive_false->BooleanValue(env.local()).FromJust());
+ CHECK(!primitive_false->BooleanValue(isolate));
CHECK(!primitive_false->IsTrue());
CHECK(primitive_false->IsFalse());
- Local<Value> false_value = BooleanObject::New(env->GetIsolate(), false);
+ Local<Value> false_value = BooleanObject::New(isolate, false);
CHECK(!false_value->IsBoolean());
CHECK(false_value->IsBooleanObject());
- CHECK(false_value->BooleanValue(env.local()).FromJust());
+ CHECK(false_value->BooleanValue(isolate));
CHECK(!false_value->IsTrue());
CHECK(!false_value->IsFalse());
Local<BooleanObject> false_boolean_object = false_value.As<BooleanObject>();
CHECK(!false_boolean_object->IsBoolean());
CHECK(false_boolean_object->IsBooleanObject());
- CHECK(false_boolean_object->BooleanValue(env.local()).FromJust());
+ CHECK(false_boolean_object->BooleanValue(isolate));
CHECK(!false_boolean_object->ValueOf());
CHECK(!false_boolean_object->IsTrue());
CHECK(!false_boolean_object->IsFalse());
- Local<Value> primitive_true = Boolean::New(env->GetIsolate(), true);
+ Local<Value> primitive_true = Boolean::New(isolate, true);
CHECK(primitive_true->IsBoolean());
CHECK(!primitive_true->IsBooleanObject());
- CHECK(primitive_true->BooleanValue(env.local()).FromJust());
+ CHECK(primitive_true->BooleanValue(isolate));
CHECK(primitive_true->IsTrue());
CHECK(!primitive_true->IsFalse());
- Local<Value> true_value = BooleanObject::New(env->GetIsolate(), true);
+ Local<Value> true_value = BooleanObject::New(isolate, true);
CHECK(!true_value->IsBoolean());
CHECK(true_value->IsBooleanObject());
- CHECK(true_value->BooleanValue(env.local()).FromJust());
+ CHECK(true_value->BooleanValue(isolate));
CHECK(!true_value->IsTrue());
CHECK(!true_value->IsFalse());
Local<BooleanObject> true_boolean_object = true_value.As<BooleanObject>();
CHECK(!true_boolean_object->IsBoolean());
CHECK(true_boolean_object->IsBooleanObject());
- CHECK(true_boolean_object->BooleanValue(env.local()).FromJust());
+ CHECK(true_boolean_object->BooleanValue(isolate));
CHECK(true_boolean_object->ValueOf());
CHECK(!true_boolean_object->IsTrue());
CHECK(!true_boolean_object->IsFalse());
@@ -1937,22 +1927,21 @@ THREADED_TEST(Boolean) {
v8::Local<v8::Boolean> f = v8::False(isolate);
CHECK(!f->Value());
v8::Local<v8::Primitive> u = v8::Undefined(isolate);
- CHECK(!u->BooleanValue(env.local()).FromJust());
+ CHECK(!u->BooleanValue(isolate));
v8::Local<v8::Primitive> n = v8::Null(isolate);
- CHECK(!n->BooleanValue(env.local()).FromJust());
+ CHECK(!n->BooleanValue(isolate));
v8::Local<String> str1 = v8_str("");
- CHECK(!str1->BooleanValue(env.local()).FromJust());
+ CHECK(!str1->BooleanValue(isolate));
v8::Local<String> str2 = v8_str("x");
- CHECK(str2->BooleanValue(env.local()).FromJust());
- CHECK(!v8::Number::New(isolate, 0)->BooleanValue(env.local()).FromJust());
- CHECK(v8::Number::New(isolate, -1)->BooleanValue(env.local()).FromJust());
- CHECK(v8::Number::New(isolate, 1)->BooleanValue(env.local()).FromJust());
- CHECK(v8::Number::New(isolate, 42)->BooleanValue(env.local()).FromJust());
+ CHECK(str2->BooleanValue(isolate));
+ CHECK(!v8::Number::New(isolate, 0)->BooleanValue(isolate));
+ CHECK(v8::Number::New(isolate, -1)->BooleanValue(isolate));
+ CHECK(v8::Number::New(isolate, 1)->BooleanValue(isolate));
+ CHECK(v8::Number::New(isolate, 42)->BooleanValue(isolate));
CHECK(!v8_compile("NaN")
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
}
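The hunks above apply one mechanical migration: ToBoolean cannot throw, so v8::Value::BooleanValue drops the fallible context-taking overload in favor of an infallible isolate-taking one. A minimal before/after sketch, assuming a live isolate and the CompileRun helper used throughout this file:

// Old API: returned Maybe<bool>, so every call site unwrapped via FromJust():
//   bool b = value->BooleanValue(env.local()).FromJust();
// New API: plain bool, since ToBoolean never fails.
v8::Local<v8::Value> value = CompileRun("42");
bool b = value->BooleanValue(isolate);
CHECK(b);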
@@ -1979,7 +1968,7 @@ THREADED_TEST(GlobalPrototype) {
v8::Local<ObjectTemplate> templ = func_templ->InstanceTemplate();
templ->Set(isolate, "x", v8_num(200));
templ->SetAccessor(v8_str("m"), GetM);
- LocalContext env(0, templ);
+ LocalContext env(nullptr, templ);
v8::Local<Script> script(v8_compile("dummy()"));
v8::Local<Value> result(script->Run(env.local()).ToLocalChecked());
CHECK_EQ(13.4, result->NumberValue(env.local()).FromJust());
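Many hunks in this file make the companion change of spelling null pointer arguments as nullptr instead of 0. The first LocalContext parameter is a v8::ExtensionConfiguration* in the cctest helper, so the no-extensions case now reads (sketch, same setup as in these tests):

v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
LocalContext env(nullptr, templ);  // was: LocalContext env(0, templ);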
@@ -2010,10 +1999,10 @@ THREADED_TEST(ObjectTemplate) {
templ1->NewInstance(env.local()).ToLocalChecked();
CHECK(class_name->StrictEquals(instance1->GetConstructorName()));
CHECK(env->Global()->Set(env.local(), v8_str("p"), instance1).FromJust());
- CHECK(CompileRun("(p.x == 10)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(p.y == 13)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(p.foo() == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(p.foo == acc)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(p.x == 10)")->BooleanValue(isolate));
+ CHECK(CompileRun("(p.y == 13)")->BooleanValue(isolate));
+ CHECK(CompileRun("(p.foo() == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(p.foo == acc)")->BooleanValue(isolate));
// Ensure that foo becomes a data field.
CompileRun("p.foo = function() {}");
Local<v8::FunctionTemplate> fun2 = v8::FunctionTemplate::New(isolate);
@@ -2026,41 +2015,37 @@ THREADED_TEST(ObjectTemplate) {
Local<v8::Object> instance2 =
templ2->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("q"), instance2).FromJust());
- CHECK(CompileRun("(q.nirk == 123)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q.a == 12)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q.b.x == 10)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q.b.y == 13)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q.b.foo() == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q.b.foo === acc)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q.b !== p)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q.acc == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q.bar() == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q.bar == acc)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q.nirk == 123)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q.a == 12)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q.b.x == 10)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q.b.y == 13)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q.b.foo() == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q.b.foo === acc)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q.b !== p)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q.acc == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q.bar() == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q.bar == acc)")->BooleanValue(isolate));
instance2 = templ2->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("q2"), instance2).FromJust());
- CHECK(CompileRun("(q2.nirk == 123)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q2.a == 12)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q2.b.x == 10)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q2.b.y == 13)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q2.b.foo() == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q2.b.foo === acc)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q2.acc == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q2.bar() == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(q2.bar === acc)")->BooleanValue(env.local()).FromJust());
-
- CHECK(CompileRun("(q.b !== q2.b)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("q.b.x = 17; (q2.b.x == 10)")
- ->BooleanValue(env.local())
- .FromJust());
+ CHECK(CompileRun("(q2.nirk == 123)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q2.a == 12)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q2.b.x == 10)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q2.b.y == 13)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q2.b.foo() == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q2.b.foo === acc)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q2.acc == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q2.bar() == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(q2.bar === acc)")->BooleanValue(isolate));
+
+ CHECK(CompileRun("(q.b !== q2.b)")->BooleanValue(isolate));
+ CHECK(CompileRun("q.b.x = 17; (q2.b.x == 10)")->BooleanValue(isolate));
CHECK(CompileRun("desc1 = Object.getOwnPropertyDescriptor(q, 'acc');"
"(desc1.get === acc)")
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK(CompileRun("desc2 = Object.getOwnPropertyDescriptor(q2, 'acc');"
"(desc2.get === acc)")
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
}
THREADED_TEST(IntegerValue) {
@@ -2380,27 +2365,20 @@ THREADED_TEST(DescriptorInheritance) {
// Checks that the __proto__ chain is set up correctly.
CHECK(CompileRun("base1.prototype.__proto__ == s.prototype")
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK(CompileRun("base2.prototype.__proto__ == s.prototype")
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK(v8_compile("s.prototype.PI == 3.14")
->Run(env.local())
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
// Instance accessors should not be visible on the function object or its prototype.
+ CHECK(CompileRun("s.knurd == undefined")->BooleanValue(isolate));
+ CHECK(CompileRun("s.prototype.knurd == undefined")->BooleanValue(isolate));
CHECK(
- CompileRun("s.knurd == undefined")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("s.prototype.knurd == undefined")
- ->BooleanValue(env.local())
- .FromJust());
- CHECK(CompileRun("base1.prototype.knurd == undefined")
- ->BooleanValue(env.local())
- .FromJust());
+ CompileRun("base1.prototype.knurd == undefined")->BooleanValue(isolate));
CHECK(env->Global()
->Set(env.local(), v8_str("obj"), base1->GetFunction(env.local())
@@ -2410,9 +2388,9 @@ THREADED_TEST(DescriptorInheritance) {
.FromJust());
CHECK_EQ(17.2,
CompileRun("obj.flabby()")->NumberValue(env.local()).FromJust());
- CHECK(CompileRun("'flabby' in obj")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("'flabby' in obj")->BooleanValue(isolate));
CHECK_EQ(15.2, CompileRun("obj.knurd")->NumberValue(env.local()).FromJust());
- CHECK(CompileRun("'knurd' in obj")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("'knurd' in obj")->BooleanValue(isolate));
CHECK_EQ(20.1, CompileRun("obj.v1")->NumberValue(env.local()).FromJust());
CHECK(env->Global()
@@ -2423,9 +2401,9 @@ THREADED_TEST(DescriptorInheritance) {
.FromJust());
CHECK_EQ(17.2,
CompileRun("obj2.flabby()")->NumberValue(env.local()).FromJust());
- CHECK(CompileRun("'flabby' in obj2")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("'flabby' in obj2")->BooleanValue(isolate));
CHECK_EQ(15.2, CompileRun("obj2.knurd")->NumberValue(env.local()).FromJust());
- CHECK(CompileRun("'knurd' in obj2")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("'knurd' in obj2")->BooleanValue(isolate));
CHECK_EQ(10.1, CompileRun("obj2.v2")->NumberValue(env.local()).FromJust());
// base1 and base2 cannot cross-reference each other's prototypes.
@@ -4643,7 +4621,7 @@ void TestGlobalValueMap() {
}
CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
if (map.IsWeak()) {
- CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
} else {
map.Clear();
}
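Several hunks replace CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask) with the new CcTest::PreciseCollectAllGarbage() helper. Judging by the inline call it supersedes in the WeakCallbackApi hunk further down, the helper presumably amounts to:

// Assumed expansion (the real definition lives in the cctest harness):
// abort any in-flight incremental marking so a single pass observes every
// dead object, then collect all spaces.
CcTest::i_isolate()->heap()->CollectAllGarbage(
    i::Heap::kAbortIncrementalMarkingMask,
    i::GarbageCollectionReason::kTesting);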
@@ -5247,22 +5225,6 @@ THREADED_TEST(Array) {
CHECK_EQ(27u, array->Length());
array = v8::Array::New(context->GetIsolate(), -27);
CHECK_EQ(0u, array->Length());
-
- std::vector<Local<Value>> vector = {v8_num(1), v8_num(2), v8_num(3)};
- array = v8::Array::New(context->GetIsolate(), vector.data(), vector.size());
- CHECK_EQ(vector.size(), array->Length());
- CHECK_EQ(1, arr->Get(context.local(), 0)
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(2, arr->Get(context.local(), 1)
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(3, arr->Get(context.local(), 2)
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
}
@@ -5283,7 +5245,7 @@ THREADED_TEST(Vector) {
v8::HandleScope scope(isolate);
Local<ObjectTemplate> global = ObjectTemplate::New(isolate);
global->Set(v8_str("f"), v8::FunctionTemplate::New(isolate, HandleF));
- LocalContext context(0, global);
+ LocalContext context(nullptr, global);
const char* fun = "f()";
Local<v8::Array> a0 = CompileRun(fun).As<v8::Array>();
@@ -5672,13 +5634,44 @@ THREADED_TEST(isNumberType) {
obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(obj->IsInt32());
CHECK(obj->IsUint32());
- // Positive zero
+ // Negative zero
CompileRun("var obj = -0.0;");
obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(!obj->IsInt32());
CHECK(!obj->IsUint32());
}
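The corrected comment matches what the assertions check: -0.0 has to stay a heap-number double because int32/uint32 have no signed zero, so the fast-integer predicates reject it while IsNumber() still holds. The behavior in isolation:

v8::Local<v8::Value> nz = CompileRun("-0.0;");
CHECK(nz->IsNumber());   // still a Number...
CHECK(!nz->IsInt32());   // ...but not representable as an int32
CHECK(!nz->IsUint32());  // nor as a uint32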
+THREADED_TEST(IntegerType) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+
+ // Small positive integer
+ result = CompileRun("42;");
+ CHECK(result->IsNumber());
+ CHECK_EQ(42, result.As<v8::Integer>()->Value());
+ // Small negative integer
+ result = CompileRun("-42;");
+ CHECK(result->IsNumber());
+ CHECK_EQ(-42, result.As<v8::Integer>()->Value());
+ // Positive non-int32 integer
+ result = CompileRun("1099511627776;");
+ CHECK(result->IsNumber());
+ CHECK_EQ(1099511627776, result.As<v8::Integer>()->Value());
+ // Negative non-int32 integer
+ result = CompileRun("-1099511627776;");
+ CHECK(result->IsNumber());
+ CHECK_EQ(-1099511627776, result.As<v8::Integer>()->Value());
+ // Positive non-integer
+ result = CompileRun("3.14;");
+ CHECK(result->IsNumber());
+ CHECK_EQ(3, result.As<v8::Integer>()->Value());
+ // Negative non-integer
+ result = CompileRun("-3.14;");
+ CHECK(result->IsNumber());
+ CHECK_EQ(-3, result.As<v8::Integer>()->Value());
+}
+
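The new IntegerType test relies on v8::Integer::Value() returning int64_t: reading a non-integral Number through the Integer view truncates toward zero, which is why 3.14 and -3.14 are checked against 3 and -3 above. The same truncation, stated directly:

// Value() truncates toward zero for non-integral Numbers.
CHECK_EQ(3, CompileRun("3.14;").As<v8::Integer>()->Value());
CHECK_EQ(-3, CompileRun("-3.14;").As<v8::Integer>()->Value());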
static void CheckUncle(v8::Isolate* isolate, v8::TryCatch* try_catch) {
CHECK(try_catch->HasCaught());
String::Utf8Value str_value(isolate, try_catch->Exception());
@@ -5760,7 +5753,7 @@ THREADED_TEST(APICatch) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
v8::FunctionTemplate::New(isolate, ThrowFromC));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
CompileRun(
"var thrown = false;"
"try {"
@@ -5771,7 +5764,7 @@ THREADED_TEST(APICatch) {
Local<Value> thrown = context->Global()
->Get(context.local(), v8_str("thrown"))
.ToLocalChecked();
- CHECK(thrown->BooleanValue(context.local()).FromJust());
+ CHECK(thrown->BooleanValue(isolate));
}
@@ -5781,7 +5774,7 @@ THREADED_TEST(APIThrowTryCatch) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
v8::FunctionTemplate::New(isolate, ThrowFromC));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
v8::TryCatch try_catch(isolate);
CompileRun("ThrowFromC();");
CHECK(try_catch.HasCaught());
@@ -5800,7 +5793,7 @@ TEST(TryCatchInTryFinally) {
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("CCatcher"), v8::FunctionTemplate::New(isolate, CCatcher));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
Local<Value> result = CompileRun(
"try {"
" try {"
@@ -5966,7 +5959,7 @@ TEST(APIThrowMessage) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
v8::FunctionTemplate::New(isolate, ThrowFromC));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
CompileRun("ThrowFromC();");
CHECK(message_received);
isolate->RemoveMessageListeners(receive_message);
@@ -5981,7 +5974,7 @@ TEST(APIThrowMessageAndVerboseTryCatch) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
v8::FunctionTemplate::New(isolate, ThrowFromC));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
v8::TryCatch try_catch(isolate);
try_catch.SetVerbose(true);
Local<Value> result = CompileRun("ThrowFromC();");
@@ -6013,7 +6006,7 @@ THREADED_TEST(ExternalScriptException) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
v8::FunctionTemplate::New(isolate, ThrowFromC));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
v8::TryCatch try_catch(isolate);
Local<Value> result = CompileRun("ThrowFromC(); throw 'panama';");
@@ -6066,8 +6059,9 @@ void CThrowCountDown(const v8::FunctionCallbackInfo<v8::Value>& args) {
void JSCheck(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
CHECK_EQ(3, args.Length());
- v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- bool equality = args[0]->BooleanValue(context).FromJust();
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ bool equality = args[0]->BooleanValue(isolate);
int count = args[1]->Int32Value(context).FromJust();
int expected = args[2]->Int32Value(context).FromJust();
if (equality) {
@@ -6121,7 +6115,7 @@ TEST(ExceptionOrder) {
templ->Set(v8_str("check"), v8::FunctionTemplate::New(isolate, JSCheck));
templ->Set(v8_str("CThrowCountDown"),
v8::FunctionTemplate::New(isolate, CThrowCountDown));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
CompileRun(
"function JSThrowCountDown(count, jsInterval, cInterval, expected) {"
" if (count == 0) throw 'FromJS';"
@@ -6186,7 +6180,7 @@ THREADED_TEST(ThrowValues) {
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("Throw"), v8::FunctionTemplate::New(isolate, ThrowValue));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
v8::Local<v8::Array> result = v8::Local<v8::Array>::Cast(
CompileRun("function Run(obj) {"
" try {"
@@ -6380,7 +6374,7 @@ TEST(TryCatchMixedNesting) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("TryCatchMixedNestingHelper"),
v8::FunctionTemplate::New(isolate, TryCatchMixedNestingHelper));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
CompileRunWithOrigin("TryCatchMixedNestingHelper();\n", "outer", 1, 1);
TryCatchMixedNestingCheck(&try_catch);
}
@@ -6402,7 +6396,7 @@ TEST(TryCatchNative) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("TryCatchNativeHelper"),
v8::FunctionTemplate::New(isolate, TryCatchNativeHelper));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
CompileRun("TryCatchNativeHelper();");
CHECK(!try_catch.HasCaught());
}
@@ -6427,7 +6421,7 @@ TEST(TryCatchNativeReset) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("TryCatchNativeResetHelper"),
v8::FunctionTemplate::New(isolate, TryCatchNativeResetHelper));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
CompileRun("TryCatchNativeResetHelper();");
CHECK(!try_catch.HasCaught());
}
@@ -6630,7 +6624,7 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
"obj, 'x');"
"prop.configurable;");
Local<Value> result = script_desc->Run(context.local()).ToLocalChecked();
- CHECK(result->BooleanValue(context.local()).FromJust());
+ CHECK(result->BooleanValue(isolate));
// Redefine get - but still configurable
Local<Script> script_define = v8_compile(
@@ -6643,7 +6637,7 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
// Check that the accessor is still configurable
result = script_desc->Run(context.local()).ToLocalChecked();
- CHECK(result->BooleanValue(context.local()).FromJust());
+ CHECK(result->BooleanValue(isolate));
// Redefine to a non-configurable
script_define = v8_compile(
@@ -6654,7 +6648,7 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
result = script_define->Run(context.local()).ToLocalChecked();
CHECK(result->Equals(context.local(), v8_num(43)).FromJust());
result = script_desc->Run(context.local()).ToLocalChecked();
- CHECK(!result->BooleanValue(context.local()).FromJust());
+ CHECK(!result->BooleanValue(isolate));
// Make sure that it is not possible to redefine again
v8::TryCatch try_catch(isolate);
@@ -6683,7 +6677,7 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
"obj, 'x');"
"prop.configurable;");
Local<Value> result = script_desc->Run(context.local()).ToLocalChecked();
- CHECK(result->BooleanValue(context.local()).FromJust());
+ CHECK(result->BooleanValue(isolate));
Local<Script> script_define = v8_compile(
"var desc = {get: function(){return 42; },"
@@ -6694,7 +6688,7 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
CHECK(result->Equals(context.local(), v8_num(42)).FromJust());
result = script_desc->Run(context.local()).ToLocalChecked();
- CHECK(result->BooleanValue(context.local()).FromJust());
+ CHECK(result->BooleanValue(isolate));
script_define = v8_compile(
"var desc = {get: function(){return 43; },"
@@ -6705,7 +6699,7 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
CHECK(result->Equals(context.local(), v8_num(43)).FromJust());
result = script_desc->Run(context.local()).ToLocalChecked();
- CHECK(!result->BooleanValue(context.local()).FromJust());
+ CHECK(!result->BooleanValue(isolate));
v8::TryCatch try_catch(isolate);
CHECK(script_define->Run(context.local()).IsEmpty());
@@ -7007,7 +7001,7 @@ THREADED_TEST(MultiContexts) {
Local<String> password = v8_str("Password");
// Create an environment
- LocalContext context0(0, templ);
+ LocalContext context0(nullptr, templ);
context0->SetSecurityToken(password);
v8::Local<v8::Object> global0 = context0->Global();
CHECK(global0->Set(context0.local(), v8_str("custom"), v8_num(1234))
@@ -7018,7 +7012,7 @@ THREADED_TEST(MultiContexts) {
.FromJust());
// Create an independent environment
- LocalContext context1(0, templ);
+ LocalContext context1(nullptr, templ);
context1->SetSecurityToken(password);
v8::Local<v8::Object> global1 = context1->Global();
CHECK(global1->Set(context1.local(), v8_str("custom"), v8_num(1234))
@@ -7034,7 +7028,7 @@ THREADED_TEST(MultiContexts) {
.FromJust());
// Now create a new context with the old global
- LocalContext context2(0, templ, global1);
+ LocalContext context2(nullptr, templ, global1);
context2->SetSecurityToken(password);
v8::Local<v8::Object> global2 = context2->Global();
CHECK(global1->Equals(context2.local(), global2).FromJust());
@@ -7290,7 +7284,7 @@ THREADED_TEST(GlobalObjectTemplate) {
Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
global_template->Set(v8_str("JSNI_Log"),
v8::FunctionTemplate::New(isolate, HandleLogDelegator));
- v8::Local<Context> context = Context::New(isolate, 0, global_template);
+ v8::Local<Context> context = Context::New(isolate, nullptr, global_template);
Context::Scope context_scope(context);
CompileRun("JSNI_Log('LOG')");
}
@@ -7377,8 +7371,9 @@ TEST(ExtensionWithSourceLength) {
v8::HandleScope handle_scope(CcTest::isolate());
i::ScopedVector<char> extension_name(32);
i::SNPrintF(extension_name, "ext #%d", source_len);
- v8::RegisterExtension(new Extension(
- extension_name.start(), kEmbeddedExtensionSource, 0, 0, source_len));
+ v8::RegisterExtension(new Extension(extension_name.start(),
+ kEmbeddedExtensionSource, 0, nullptr,
+ source_len));
const char* extension_names[1] = {extension_name.start()};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7537,8 +7532,8 @@ class NativeFunctionExtension : public Extension {
v8::FunctionCallback fun = &Echo)
: Extension(name, source), function_(fun) {}
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name) {
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override {
return v8::FunctionTemplate::New(isolate, function_);
}
@@ -7668,8 +7663,8 @@ static void CallFun(const v8::FunctionCallbackInfo<v8::Value>& args) {
class FunctionExtension : public Extension {
public:
FunctionExtension() : Extension("functiontest", kExtensionTestScript) {}
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<String> name);
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<String> name) override;
};
@@ -8013,7 +8008,7 @@ void v8::internal::heap::HeapTester::ResetWeakHandle(bool global_gc) {
object_a.handle.Reset(iso, a);
object_b.handle.Reset(iso, b);
if (global_gc) {
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
} else {
CcTest::CollectGarbage(i::NEW_SPACE);
}
@@ -8039,7 +8034,7 @@ void v8::internal::heap::HeapTester::ResetWeakHandle(bool global_gc) {
#endif
}
if (global_gc) {
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
} else {
CcTest::CollectGarbage(i::NEW_SPACE);
}
@@ -8639,8 +8634,8 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(0, str->WriteOneByte(isolate, nullptr, 0, 0,
String::NO_NULL_TERMINATION));
- CHECK_EQ(0,
- str->WriteUtf8(isolate, nullptr, 0, 0, String::NO_NULL_TERMINATION));
+ CHECK_EQ(0, str->WriteUtf8(isolate, nullptr, 0, nullptr,
+ String::NO_NULL_TERMINATION));
CHECK_EQ(0, str->Write(isolate, nullptr, 0, 0, String::NO_NULL_TERMINATION));
}
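This WriteUtf8 overload takes an int* nchars_ref out-parameter in fourth position, hence the 0 becoming nullptr. A plausible non-null use, assuming a live isolate and the str handle from this test:

char buffer[64];
int nchars = 0;  // receives the number of characters written
int bytes = str->WriteUtf8(isolate, buffer, static_cast<int>(sizeof(buffer)),
                           &nchars);
CHECK_LE(0, bytes);  // return value is the number of bytes written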
@@ -8946,6 +8941,49 @@ THREADED_TEST(ToArrayIndex) {
CHECK(index.IsEmpty());
}
+static v8::MaybeLocal<Value> PrepareStackTrace42(v8::Local<Context> context,
+ v8::Local<Value> error,
+ v8::Local<StackTrace> trace) {
+ return v8::Number::New(context->GetIsolate(), 42);
+}
+
+static v8::MaybeLocal<Value> PrepareStackTraceThrow(
+ v8::Local<Context> context, v8::Local<Value> error,
+ v8::Local<StackTrace> trace) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<String> message = v8_str("42");
+ isolate->ThrowException(v8::Exception::Error(message));
+ return v8::MaybeLocal<Value>();
+}
+
+THREADED_TEST(IsolatePrepareStackTrace) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ isolate->SetPrepareStackTraceCallback(PrepareStackTrace42);
+
+ v8::Local<Value> v = CompileRun("new Error().stack");
+
+ CHECK(v->IsNumber());
+ CHECK_EQ(v.As<v8::Number>()->Int32Value(context.local()).FromJust(), 42);
+}
+
+THREADED_TEST(IsolatePrepareStackTraceThrow) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ isolate->SetPrepareStackTraceCallback(PrepareStackTraceThrow);
+
+ v8::Local<Value> v = CompileRun("try { new Error().stack } catch (e) { e }");
+
+ CHECK(v->IsNativeError());
+
+ v8::Local<String> message = v8::Exception::CreateMessage(isolate, v)->Get();
+
+ CHECK(message->StrictEquals(v8_str("Uncaught Error: 42")));
+}
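The two new tests cover Isolate::SetPrepareStackTraceCallback, an embedder-level analogue of Error.prepareStackTrace: whatever the callback returns becomes the value of error.stack, and an exception thrown inside it propagates to the script reading .stack. A minimal hook in the same shape as the callbacks above (a captureless lambda converts to the callback's function-pointer type; the replacement string is arbitrary):

isolate->SetPrepareStackTraceCallback(
    [](v8::Local<v8::Context> context, v8::Local<v8::Value> error,
       v8::Local<v8::StackTrace> trace) -> v8::MaybeLocal<v8::Value> {
      // Replace the default stack string wholesale.
      return v8_str("<redacted>");
    });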
THREADED_TEST(ErrorConstruction) {
LocalContext context;
@@ -9297,7 +9335,7 @@ TEST(TryCatchFinallyStoresMessageUsingTryCatchHandler) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("CEvaluate"),
v8::FunctionTemplate::New(isolate, CEvaluate));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
v8::TryCatch try_catch(isolate);
CompileRun("try {"
" CEvaluate('throw 1;');"
@@ -9741,7 +9779,7 @@ TEST(ContextDetachGlobal) {
env2->DetachGlobal();
v8::Local<Context> env3 = Context::New(
- env1->GetIsolate(), 0, v8::Local<v8::ObjectTemplate>(), global2);
+ env1->GetIsolate(), nullptr, v8::Local<v8::ObjectTemplate>(), global2);
env3->SetSecurityToken(v8_str("bar"));
env3->Enter();
@@ -9821,7 +9859,7 @@ TEST(DetachGlobal) {
// Reuse global2 for env3.
v8::Local<Context> env3 = Context::New(
- env1->GetIsolate(), 0, v8::Local<v8::ObjectTemplate>(), global2);
+ env1->GetIsolate(), nullptr, v8::Local<v8::ObjectTemplate>(), global2);
CHECK(global2->Equals(env1.local(), env3->Global()).FromJust());
// Start by using the same security token for env3 as for env1 and env2.
@@ -9907,8 +9945,8 @@ TEST(DetachedAccesses) {
CHECK(v8_str("env2_x")->Equals(env1.local(), result).FromJust());
// Reattach env2's proxy
- env2 = Context::New(env1->GetIsolate(), 0, v8::Local<v8::ObjectTemplate>(),
- env2_global);
+ env2 = Context::New(env1->GetIsolate(), nullptr,
+ v8::Local<v8::ObjectTemplate>(), env2_global);
env2->SetSecurityToken(foo);
{
v8::Context::Scope scope(env2);
@@ -10259,12 +10297,9 @@ TEST(AccessControlES5) {
CHECK(global1->Set(context1, v8_str("other"), global0).FromJust());
// Regression test for issue 1154.
- CHECK(CompileRun("Object.keys(other).length == 1")
- ->BooleanValue(context1)
- .FromJust());
+ CHECK(CompileRun("Object.keys(other).length == 1")->BooleanValue(isolate));
CHECK(CompileRun("Object.keys(other)[0] == 'accessible_prop'")
- ->BooleanValue(context1)
- .FromJust());
+ ->BooleanValue(isolate));
CHECK(CompileRun("other.blocked_prop").IsEmpty());
// Regression test for issue 1027.
@@ -10354,12 +10389,12 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
value = CompileRun(
"var names = Object.getOwnPropertyNames(other);"
"names.length == 1 && names[0] == 'accessible_prop';");
- CHECK(value->BooleanValue(context1).FromJust());
+ CHECK(value->BooleanValue(isolate));
value = CompileRun(
"var names = Object.getOwnPropertyNames(object);"
"names.length == 1 && names[0] == 'accessible_prop';");
- CHECK(value->BooleanValue(context1).FromJust());
+ CHECK(value->BooleanValue(isolate));
context1->Exit();
context0->Exit();
@@ -10412,12 +10447,12 @@ THREADED_TEST(CrossDomainAccessors) {
func_template->PrototypeTemplate();
// Add an accessor to proto that's accessible by cross-domain JS code.
- proto_template->SetAccessor(v8_str("accessible"), ConstTenGetter, 0,
+ proto_template->SetAccessor(v8_str("accessible"), ConstTenGetter, nullptr,
v8::Local<Value>(), v8::ALL_CAN_READ);
// Add an accessor that is not accessible by cross-domain JS code.
- global_template->SetAccessor(v8_str("unreachable"), UnreachableGetter, 0,
- v8::Local<Value>(), v8::DEFAULT);
+ global_template->SetAccessor(v8_str("unreachable"), UnreachableGetter,
+ nullptr, v8::Local<Value>(), v8::DEFAULT);
v8::Local<Context> context0 = Context::New(isolate, nullptr, global_template);
context0->Enter();
@@ -10555,7 +10590,7 @@ TEST(AccessControlIC) {
// Force obj into slow case.
value = CompileRun("delete obj.prop");
- CHECK(value->BooleanValue(context1).FromJust());
+ CHECK(value->BooleanValue(isolate));
// Force inline caches into dictionary probing mode.
CompileRun("var o = { x: 0 }; delete o.x; testProp(o);");
// Test that the named access check is called.
@@ -10873,7 +10908,7 @@ THREADED_TEST(ShadowObject) {
Local<Value> value =
CompileRun("this.propertyIsEnumerable(0)");
CHECK(value->IsBoolean());
- CHECK(!value->BooleanValue(context.local()).FromJust());
+ CHECK(!value->BooleanValue(isolate));
value = CompileRun("x");
CHECK_EQ(12, value->Int32Value(context.local()).FromJust());
@@ -11335,8 +11370,7 @@ THREADED_TEST(FunctionReadOnlyPrototype) {
" descriptor = Object.getOwnPropertyDescriptor(func1, 'prototype');"
" return (descriptor['writable'] == false);"
"})()")
- ->BooleanValue(context.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK_EQ(
42,
CompileRun("func1.prototype.x")->Int32Value(context.local()).FromJust());
@@ -11357,8 +11391,7 @@ THREADED_TEST(FunctionReadOnlyPrototype) {
" descriptor = Object.getOwnPropertyDescriptor(func2, 'prototype');"
" return (descriptor['writable'] == true);"
"})()")
- ->BooleanValue(context.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK_EQ(
42,
CompileRun("func2.prototype.x")->Int32Value(context.local()).FromJust());
@@ -11405,9 +11438,7 @@ THREADED_TEST(FunctionRemovePrototype) {
Local<v8::Function> fun = t1->GetFunction(context.local()).ToLocalChecked();
CHECK(!fun->IsConstructor());
CHECK(context->Global()->Set(context.local(), v8_str("fun"), fun).FromJust());
- CHECK(!CompileRun("'prototype' in fun")
- ->BooleanValue(context.local())
- .FromJust());
+ CHECK(!CompileRun("'prototype' in fun")->BooleanValue(isolate));
v8::TryCatch try_catch(isolate);
CompileRun("new fun()");
@@ -11460,7 +11491,7 @@ THREADED_TEST(Constructor) {
i::Handle<i::JSReceiver> obj(v8::Utils::OpenHandle(*inst));
CHECK(obj->IsJSObject());
Local<Value> value = CompileRun("(new Fun()).constructor === Fun");
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
}
@@ -13022,17 +13053,16 @@ THREADED_TEST(Overriding) {
// Add 'h' as an accessor to the proto template with ReadOnly attributes
// so 'h' can be shadowed on the instance object.
Local<ObjectTemplate> child_proto_templ = child_templ->PrototypeTemplate();
- child_proto_templ->SetAccessor(v8_str("h"), ParentGetter, 0,
+ child_proto_templ->SetAccessor(v8_str("h"), ParentGetter, nullptr,
v8::Local<Value>(), v8::DEFAULT, v8::ReadOnly);
// Add 'i' as an accessor to the instance template with ReadOnly attributes,
// but the attribute has no effect because it is redundant with the nullptr
// setter.
- child_instance_templ->SetAccessor(v8_str("i"), ChildGetter, 0,
+ child_instance_templ->SetAccessor(v8_str("i"), ChildGetter, nullptr,
v8::Local<Value>(), v8::DEFAULT,
v8::ReadOnly);
-
// Instantiate the child template.
Local<v8::Object> instance = child_templ->GetFunction(context.local())
.ToLocalChecked()
@@ -13475,9 +13505,9 @@ THREADED_TEST(IsConstructCall) {
templ->GetFunction(context.local()).ToLocalChecked())
.FromJust());
Local<Value> value = v8_compile("f()")->Run(context.local()).ToLocalChecked();
- CHECK(!value->BooleanValue(context.local()).FromJust());
+ CHECK(!value->BooleanValue(isolate));
value = v8_compile("new f()")->Run(context.local()).ToLocalChecked();
- CHECK(value->BooleanValue(context.local()).FromJust());
+ CHECK(value->BooleanValue(isolate));
}
static void NewTargetHandler(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -13625,7 +13655,7 @@ TEST(ObjectProtoToStringES6) {
CHECK(value->IsString() && \
value->Equals(context.local(), v8_str("[object " #expected "]")) \
.FromJust()); \
- } while (0)
+ } while (false)
TEST_TOSTRINGTAG(Array, Object, Object);
TEST_TOSTRINGTAG(Object, Arguments, Arguments);
@@ -13664,7 +13694,7 @@ TEST(ObjectProtoToStringES6) {
CHECK(value->IsString() && \
value->Equals(context.local(), v8_str("[object " #expected "]")) \
.FromJust()); \
- } while (0)
+ } while (false)
#define TEST_TOSTRINGTAG_TYPES(tagValue) \
TEST_TOSTRINGTAG(Array, tagValue, Array); \
@@ -13701,7 +13731,7 @@ TEST(ObjectProtoToStringES6) {
obj = v8::Object::New(isolate);
obj.As<v8::Object>()
->SetAccessor(context.local(), toStringTag,
- SymbolAccessorGetterReturnsDefault, 0, v8_str("Test"))
+ SymbolAccessorGetterReturnsDefault, nullptr, v8_str("Test"))
.FromJust();
{
TryCatch try_catch(isolate);
@@ -14163,7 +14193,7 @@ static void CheckSurvivingGlobalObjectsCount(int expected) {
// been marked at that point. Therefore some of the maps are not
// collected until the second garbage collection.
CcTest::CollectAllGarbage();
- CcTest::CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ CcTest::CollectAllGarbage();
int count = GetGlobalObjectsCount();
CHECK_EQ(expected, count);
}
@@ -14260,9 +14290,7 @@ TEST(WeakCallbackApi) {
handle->SetWeak<v8::Persistent<v8::Object>>(
handle, WeakApiCallback, v8::WeakCallbackType::kParameter);
}
- reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectAllGarbage(
- i::Heap::kAbortIncrementalMarkingMask,
- i::GarbageCollectionReason::kTesting);
+ CcTest::PreciseCollectAllGarbage();
// Verify disposed.
CHECK_EQ(initial_handles, globals->global_handles_count());
}
@@ -15170,7 +15198,7 @@ THREADED_TEST(TryCatchSourceInfoForEOSError) {
CHECK(v8::Script::Compile(context.local(), v8_str("!\n")).IsEmpty());
CHECK(try_catch.HasCaught());
v8::Local<v8::Message> message = try_catch.Message();
- CHECK_EQ(1, message->GetLineNumber(context.local()).FromJust());
+ CHECK_EQ(2, message->GetLineNumber(context.local()).FromJust());
CHECK_EQ(0, message->GetStartColumn(context.local()).FromJust());
}
@@ -15776,10 +15804,10 @@ class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
public:
explicit OneByteVectorResource(i::Vector<const char> vector)
: data_(vector) {}
- virtual ~OneByteVectorResource() {}
- virtual size_t length() const { return data_.length(); }
- virtual const char* data() const { return data_.start(); }
- virtual void Dispose() {}
+ ~OneByteVectorResource() override = default;
+ size_t length() const override { return data_.length(); }
+ const char* data() const override { return data_.start(); }
+ void Dispose() override {}
private:
i::Vector<const char> data_;
@@ -15790,10 +15818,10 @@ class UC16VectorResource : public v8::String::ExternalStringResource {
public:
explicit UC16VectorResource(i::Vector<const i::uc16> vector)
: data_(vector) {}
- virtual ~UC16VectorResource() {}
- virtual size_t length() const { return data_.length(); }
- virtual const i::uc16* data() const { return data_.start(); }
- virtual void Dispose() {}
+ ~UC16VectorResource() override = default;
+ size_t length() const override { return data_.length(); }
+ const i::uc16* data() const override { return data_.start(); }
+ void Dispose() override {}
private:
i::Vector<const i::uc16> data_;
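Both resource classes get the same C++11 cleanup seen throughout this file: override in place of a repeated virtual, and = default for empty destructors, so signature drift against the ExternalStringResource base now fails to compile. The resulting shape, as a hypothetical ExampleResource:

class ExampleResource : public v8::String::ExternalOneByteStringResource {
 public:
  explicit ExampleResource(i::Vector<const char> v) : data_(v) {}
  ~ExampleResource() override = default;
  size_t length() const override { return data_.length(); }
  const char* data() const override { return data_.start(); }

 private:
  i::Vector<const char> data_;
};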
@@ -15962,7 +15990,7 @@ class RegExpInterruptionThread : public v8::base::Thread {
explicit RegExpInterruptionThread(v8::Isolate* isolate)
: Thread(Options("TimeoutThread")), isolate_(isolate) {}
- virtual void Run() {
+ void Run() override {
for (v8::base::Relaxed_Store(&regexp_interruption_data.loop_count, 0);
v8::base::Relaxed_Load(&regexp_interruption_data.loop_count) < 7;
v8::base::Relaxed_AtomicIncrement(&regexp_interruption_data.loop_count,
@@ -16034,7 +16062,7 @@ TEST(ReadOnlyPropertyInGlobalProto) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
v8::Local<v8::Object> global = context->Global();
v8::Local<v8::Object> global_proto = v8::Local<v8::Object>::Cast(
global->Get(context.local(), v8_str("__proto__")).ToLocalChecked());
@@ -16629,9 +16657,9 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
i::ExternalArrayType array_type,
int64_t low, int64_t high) {
i::Handle<i::JSReceiver> jsobj = v8::Utils::OpenHandle(*obj);
- i::Isolate* isolate = jsobj->GetIsolate();
- obj->Set(context, v8_str("field"),
- v8::Int32::New(reinterpret_cast<v8::Isolate*>(isolate), 1503))
+ v8::Isolate* v8_isolate = context->GetIsolate();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ obj->Set(context, v8_str("field"), v8::Int32::New(v8_isolate, 1503))
.FromJust();
CHECK(context->Global()->Set(context, v8_str("ext_array"), obj).FromJust());
v8::Local<v8::Value> result = CompileRun("ext_array.field");
@@ -16751,7 +16779,7 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
"caught_exception;",
element_count);
result = CompileRun(test_buf.start());
- CHECK(!result->BooleanValue(context).FromJust());
+ CHECK(!result->BooleanValue(v8_isolate));
// Make sure out-of-range stores do not throw.
i::SNPrintF(test_buf,
@@ -16764,7 +16792,7 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
"caught_exception;",
element_count);
result = CompileRun(test_buf.start());
- CHECK(!result->BooleanValue(context).FromJust());
+ CHECK(!result->BooleanValue(v8_isolate));
// Check other boundary conditions, values and operations.
result = CompileRun("for (var i = 0; i < 8; i++) {"
@@ -16856,7 +16884,7 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
unsigned_data :
(is_pixel_data ? pixel_data : signed_data)));
result = CompileRun(test_buf.start());
- CHECK(result->BooleanValue(context).FromJust());
+ CHECK(result->BooleanValue(v8_isolate));
}
i::Handle<ExternalArrayClass> array(
@@ -17485,7 +17513,7 @@ TEST(CaptureStackTrace) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("AnalyzeStackInNativeCode"),
v8::FunctionTemplate::New(isolate, AnalyzeStackInNativeCode));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
// Test getting OVERVIEW information. Should ignore information that is not
// script name, function name, line number, and column offset.
@@ -17899,6 +17927,7 @@ int promise_reject_msg_column_number = -1;
int promise_reject_line_number = -1;
int promise_reject_column_number = -1;
int promise_reject_frame_count = -1;
+bool promise_reject_is_shared_cross_origin = false;
void PromiseRejectCallback(v8::PromiseRejectMessage reject_message) {
v8::Local<v8::Object> global = CcTest::global();
@@ -17920,6 +17949,8 @@ void PromiseRejectCallback(v8::PromiseRejectMessage reject_message) {
message->GetLineNumber(context).FromJust();
promise_reject_msg_column_number =
message->GetStartColumn(context).FromJust() + 1;
+ promise_reject_is_shared_cross_origin =
+ message->IsSharedCrossOrigin();
if (!stack_trace.IsEmpty()) {
promise_reject_frame_count = stack_trace->GetFrameCount();
@@ -18350,6 +18381,67 @@ TEST(PromiseRejectCallback) {
CHECK_EQ(7, promise_reject_msg_column_number);
}
+TEST(PromiseRejectIsSharedCrossOrigin) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ isolate->SetPromiseRejectCallback(PromiseRejectCallback);
+
+ ResetPromiseStates();
+
+ // Create promise p0.
+ CompileRun(
+ "var reject; \n"
+ "var p0 = new Promise( \n"
+ " function(res, rej) { \n"
+ " reject = rej; \n"
+ " } \n"
+ "); \n");
+ CHECK(!GetPromise("p0")->HasHandler());
+ CHECK_EQ(0, promise_reject_counter);
+ CHECK_EQ(0, promise_revoke_counter);
+ // Not set because it's not yet rejected.
+ CHECK(!promise_reject_is_shared_cross_origin);
+
+ // Reject p0.
+ CompileRun("reject('ppp');");
+ CHECK_EQ(1, promise_reject_counter);
+ CHECK_EQ(0, promise_revoke_counter);
+ // Not set because the ScriptOriginOptions come from the script.
+ CHECK(!promise_reject_is_shared_cross_origin);
+
+ ResetPromiseStates();
+
+ // Create promise p1
+ CompileRun(
+ "var reject; \n"
+ "var p1 = new Promise( \n"
+ " function(res, rej) { \n"
+ " reject = rej; \n"
+ " } \n"
+ "); \n");
+ CHECK(!GetPromise("p1")->HasHandler());
+ CHECK_EQ(0, promise_reject_counter);
+ CHECK_EQ(0, promise_revoke_counter);
+ // Not set because it's not yet rejected.
+ CHECK(!promise_reject_is_shared_cross_origin);
+
+ // Add resolve handler (and default reject handler) to p1.
+ CompileRun("var p2 = p1.then(function(){});");
+ CHECK(GetPromise("p1")->HasHandler());
+ CHECK(!GetPromise("p2")->HasHandler());
+ CHECK_EQ(0, promise_reject_counter);
+ CHECK_EQ(0, promise_revoke_counter);
+
+ // Reject p1.
+ CompileRun("reject('ppp');");
+ CHECK_EQ(1, promise_reject_counter);
+ CHECK_EQ(0, promise_revoke_counter);
+ // Set because the event is from an empty script.
+ CHECK(promise_reject_is_shared_cross_origin);
+}
+
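The new test pins down when a rejection's message is flagged shared cross-origin: a rejection raised by a plain script is not, while the rejection event delivered for a promise whose handler was attached via then() originates from an internal (empty) script and is. The read the callback performs, sketched with the reject_message argument passed to PromiseRejectCallback:

v8::Local<v8::Message> message =
    v8::Exception::CreateMessage(isolate, reject_message.GetValue());
bool shared_cross_origin = message->IsSharedCrossOrigin();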
void PromiseRejectCallbackConstructError(
v8::PromiseRejectMessage reject_message) {
v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
@@ -18403,7 +18495,7 @@ TEST(SourceURLInStackTrace) {
templ->Set(v8_str("AnalyzeStackOfEvalWithSourceURL"),
v8::FunctionTemplate::New(isolate,
AnalyzeStackOfEvalWithSourceURL));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
const char *source =
"function outer() {\n"
@@ -18447,7 +18539,7 @@ TEST(ScriptIdInStackTrace) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("AnalyzeScriptIdInStack"),
v8::FunctionTemplate::New(isolate, AnalyzeScriptIdInStack));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
v8::Local<v8::String> scriptSource = v8_str(
"function foo() {\n"
@@ -18486,7 +18578,7 @@ TEST(InlineScriptWithSourceURLInStackTrace) {
templ->Set(v8_str("AnalyzeStackOfInlineScriptWithSourceURL"),
v8::FunctionTemplate::New(
CcTest::isolate(), AnalyzeStackOfInlineScriptWithSourceURL));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
const char *source =
"function outer() {\n"
@@ -18901,7 +18993,7 @@ TEST(DynamicWithSourceURLInStackTrace) {
templ->Set(v8_str("AnalyzeStackOfDynamicScriptWithSourceURL"),
v8::FunctionTemplate::New(
CcTest::isolate(), AnalyzeStackOfDynamicScriptWithSourceURL));
- LocalContext context(0, templ);
+ LocalContext context(nullptr, templ);
const char *source =
"function outer() {\n"
@@ -19202,7 +19294,7 @@ TEST(NumberOfNativeContexts) {
}
for (size_t i = 0; i < kNumTestContexts; i++) {
context[i].Reset();
- CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
CcTest::isolate()->GetHeapStatistics(&heap_statistics);
CHECK_EQ(kNumTestContexts - i - 1u,
heap_statistics.number_of_native_contexts());
@@ -19228,7 +19320,7 @@ TEST(NumberOfDetachedContexts) {
}
for (size_t i = 0; i < kNumTestContexts; i++) {
context[i].Reset();
- CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
CcTest::isolate()->GetHeapStatistics(&heap_statistics);
CHECK_EQ(kNumTestContexts - i - 1u,
heap_statistics.number_of_detached_contexts());
@@ -19243,8 +19335,8 @@ class VisitorImpl : public v8::ExternalResourceVisitor {
found_resource_[i] = false;
}
}
- virtual ~VisitorImpl() {}
- virtual void VisitExternalString(v8::Local<v8::String> string) {
+ ~VisitorImpl() override = default;
+ void VisitExternalString(v8::Local<v8::String> string) override {
if (!string->IsExternal()) {
CHECK(string->IsExternalOneByte());
return;
@@ -19772,7 +19864,8 @@ THREADED_TEST(FunctionGetInferredName) {
THREADED_TEST(FunctionGetDebugName) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
const char* code =
"var error = false;"
"function a() { this.x = 1; };"
@@ -19823,7 +19916,7 @@ THREADED_TEST(FunctionGetDebugName) {
.ToLocalChecked();
v8::Local<v8::Value> error =
env->Global()->Get(env.local(), v8_str("error")).ToLocalChecked();
- CHECK(!error->BooleanValue(env.local()).FromJust());
+ CHECK(!error->BooleanValue(isolate));
const char* functions[] = {"a", "display_a",
"b", "display_b",
"c", "c",
@@ -19840,20 +19933,20 @@ THREADED_TEST(FunctionGetDebugName) {
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
env->Global()
->Get(env.local(),
- v8::String::NewFromUtf8(env->GetIsolate(), functions[i * 2],
+ v8::String::NewFromUtf8(isolate, functions[i * 2],
v8::NewStringType::kNormal)
.ToLocalChecked())
.ToLocalChecked());
CHECK_EQ(0, strcmp(functions[i * 2 + 1],
- *v8::String::Utf8Value(env->GetIsolate(),
- f->GetDebugName())));
+ *v8::String::Utf8Value(isolate, f->GetDebugName())));
}
}
THREADED_TEST(FunctionGetDisplayName) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
const char* code = "var error = false;"
"function a() { this.x = 1; };"
"a.displayName = 'display_a';"
@@ -19907,18 +20000,17 @@ THREADED_TEST(FunctionGetDisplayName) {
env->Global()->Get(env.local(), v8_str("f")).ToLocalChecked());
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
env->Global()->Get(env.local(), v8_str("g")).ToLocalChecked());
- CHECK(!error->BooleanValue(env.local()).FromJust());
- CHECK_EQ(0, strcmp("display_a", *v8::String::Utf8Value(env->GetIsolate(),
- a->GetDisplayName())));
- CHECK_EQ(0, strcmp("display_b", *v8::String::Utf8Value(env->GetIsolate(),
- b->GetDisplayName())));
+ CHECK(!error->BooleanValue(isolate));
+ CHECK_EQ(0, strcmp("display_a",
+ *v8::String::Utf8Value(isolate, a->GetDisplayName())));
+ CHECK_EQ(0, strcmp("display_b",
+ *v8::String::Utf8Value(isolate, b->GetDisplayName())));
CHECK(c->GetDisplayName()->IsUndefined());
CHECK(d->GetDisplayName()->IsUndefined());
CHECK(e->GetDisplayName()->IsUndefined());
CHECK(f->GetDisplayName()->IsUndefined());
- CHECK_EQ(
- 0, strcmp("set_in_runtime", *v8::String::Utf8Value(env->GetIsolate(),
- g->GetDisplayName())));
+ CHECK_EQ(0, strcmp("set_in_runtime",
+ *v8::String::Utf8Value(isolate, g->GetDisplayName())));
}
@@ -20313,7 +20405,7 @@ void PrologueCallbackAlloc(v8::Isolate* isolate,
Local<Object> obj = Object::New(isolate);
CHECK(!obj.IsEmpty());
- CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
}
@@ -20332,7 +20424,7 @@ void EpilogueCallbackAlloc(v8::Isolate* isolate,
Local<Object> obj = Object::New(isolate);
CHECK(!obj.IsEmpty());
- CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
}
@@ -20455,7 +20547,7 @@ TEST(GCCallbacks) {
CHECK_EQ(0, epilogue_call_count_alloc);
isolate->AddGCPrologueCallback(PrologueCallbackAlloc);
isolate->AddGCEpilogueCallback(EpilogueCallbackAlloc);
- CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
CHECK_EQ(1, prologue_call_count_alloc);
CHECK_EQ(1, epilogue_call_count_alloc);
isolate->RemoveGCPrologueCallback(PrologueCallbackAlloc);
@@ -20998,7 +21090,7 @@ class IsolateThread : public v8::base::Thread {
explicit IsolateThread(int fib_limit)
: Thread(Options("IsolateThread")), fib_limit_(fib_limit), result_(0) {}
- void Run() {
+ void Run() override {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -21078,7 +21170,7 @@ class InitDefaultIsolateThread : public v8::base::Thread {
testCase_(testCase),
result_(false) {}
- void Run() {
+ void Run() override {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
switch (testCase_) {
@@ -21268,8 +21360,8 @@ class Visitor42 : public v8::PersistentHandleVisitor {
explicit Visitor42(v8::Persistent<v8::Object>* object)
: counter_(0), object_(object) { }
- virtual void VisitPersistentHandle(Persistent<Value>* value,
- uint16_t class_id) {
+ void VisitPersistentHandle(Persistent<Value>* value,
+ uint16_t class_id) override {
if (class_id != 42) return;
CHECK_EQ(42, value->WrapperClassId());
v8::Isolate* isolate = CcTest::isolate();
@@ -21765,7 +21857,7 @@ TEST(HasOwnProperty) {
{ // Check named query interceptors.
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- 0, 0, HasOwnPropertyNamedPropertyQuery));
+ nullptr, nullptr, HasOwnPropertyNamedPropertyQuery));
Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(instance->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
CHECK(!instance->HasOwnProperty(env.local(), v8_str("bar")).FromJust());
@@ -21773,7 +21865,7 @@ TEST(HasOwnProperty) {
{ // Check indexed query interceptors.
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- 0, 0, HasOwnPropertyIndexedPropertyQuery));
+ nullptr, nullptr, HasOwnPropertyIndexedPropertyQuery));
Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(instance->HasOwnProperty(env.local(), v8_str("42")).FromJust());
CHECK(instance->HasOwnProperty(env.local(), 42).FromJust());
@@ -21790,7 +21882,7 @@ TEST(HasOwnProperty) {
{ // Check that query wins on disagreement.
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- HasOwnPropertyNamedPropertyGetter, 0,
+ HasOwnPropertyNamedPropertyGetter, nullptr,
HasOwnPropertyNamedPropertyQuery2));
Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(!instance->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
@@ -21806,7 +21898,7 @@ TEST(HasOwnProperty) {
"var dyn_string = 'this string ';"
"dyn_string += 'does not exist elsewhere';"
"({}).hasOwnProperty.call(obj, dyn_string)";
- CHECK(CompileRun(src)->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun(src)->BooleanValue(isolate));
}
}
@@ -21824,14 +21916,14 @@ TEST(IndexedInterceptorWithStringProto) {
.FromJust());
CompileRun("var s = new String('foobar'); obj.__proto__ = s;");
// These should be intercepted.
- CHECK(CompileRun("42 in obj")->BooleanValue(context.local()).FromJust());
- CHECK(CompileRun("'42' in obj")->BooleanValue(context.local()).FromJust());
+ CHECK(CompileRun("42 in obj")->BooleanValue(isolate));
+ CHECK(CompileRun("'42' in obj")->BooleanValue(isolate));
// These should fall through to the String prototype.
- CHECK(CompileRun("0 in obj")->BooleanValue(context.local()).FromJust());
- CHECK(CompileRun("'0' in obj")->BooleanValue(context.local()).FromJust());
+ CHECK(CompileRun("0 in obj")->BooleanValue(isolate));
+ CHECK(CompileRun("'0' in obj")->BooleanValue(isolate));
// And these should both fail.
- CHECK(!CompileRun("32 in obj")->BooleanValue(context.local()).FromJust());
- CHECK(!CompileRun("'32' in obj")->BooleanValue(context.local()).FromJust());
+ CHECK(!CompileRun("32 in obj")->BooleanValue(isolate));
+ CHECK(!CompileRun("'32' in obj")->BooleanValue(isolate));
}
@@ -22008,7 +22100,7 @@ static int CountLiveMapsInMapCache(i::Context* context) {
int length = map_cache->length();
int count = 0;
for (int i = 0; i < length; i++) {
- if (map_cache->Get(i)->IsWeakHeapObject()) count++;
+ if (map_cache->Get(i)->IsWeak()) count++;
}
return count;
}
@@ -22034,7 +22126,7 @@ THREADED_TEST(Regress1516) {
CHECK_LE(1, elements);
// We have to abort incremental marking here to abandon black pages.
- CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
CHECK_GT(elements, CountLiveMapsInMapCache(CcTest::i_isolate()->context()));
}
@@ -23585,7 +23677,7 @@ THREADED_TEST(JSONStringifyObjectWithGap) {
class ThreadInterruptTest {
public:
ThreadInterruptTest() : sem_(0), sem_value_(0) { }
- ~ThreadInterruptTest() {}
+ ~ThreadInterruptTest() = default;
void RunTest() {
InterruptThread i_thread(this);
@@ -23603,7 +23695,7 @@ class ThreadInterruptTest {
explicit InterruptThread(ThreadInterruptTest* test)
: Thread(Options("InterruptThread")), test_(test) {}
- virtual void Run() {
+ void Run() override {
struct sigaction action;
// Ensure that we'll enter the waiting condition.
@@ -23798,8 +23890,9 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("JSON.stringify(other)");
CheckCorrectThrow("has_own_property(other, 'x')");
CheckCorrectThrow("%GetProperty(other, 'x')");
- CheckCorrectThrow("%SetProperty(other, 'x', 'foo', 0)");
+ CheckCorrectThrow("%SetKeyedProperty(other, 'x', 'foo', 0)");
CheckCorrectThrow("%AddNamedProperty(other, 'x', 'foo', 1)");
+ CheckCorrectThrow("%SetNamedProperty(other, 'y', 'foo', 1)");
STATIC_ASSERT(static_cast<int>(i::LanguageMode::kSloppy) == 0);
STATIC_ASSERT(static_cast<int>(i::LanguageMode::kStrict) == 1);
CheckCorrectThrow("%DeleteProperty(other, 'x', 0)"); // 0 == SLOPPY
@@ -23932,7 +24025,7 @@ class RequestInterruptTestBase {
should_continue_(true) {
}
- virtual ~RequestInterruptTestBase() { }
+ virtual ~RequestInterruptTestBase() = default;
virtual void StartInterruptThread() = 0;
@@ -23987,9 +24080,7 @@ class RequestInterruptTestBaseWithSimpleInterrupt
public:
RequestInterruptTestBaseWithSimpleInterrupt() : i_thread(this) { }
- virtual void StartInterruptThread() {
- i_thread.Start();
- }
+ void StartInterruptThread() override { i_thread.Start(); }
private:
class InterruptThread : public v8::base::Thread {
@@ -23997,7 +24088,7 @@ class RequestInterruptTestBaseWithSimpleInterrupt
explicit InterruptThread(RequestInterruptTestBase* test)
: Thread(Options("RequestInterruptTest")), test_(test) {}
- virtual void Run() {
+ void Run() override {
test_->sem_.Wait();
test_->isolate_->RequestInterrupt(&OnInterrupt, test_);
}
@@ -24018,7 +24109,7 @@ class RequestInterruptTestBaseWithSimpleInterrupt
class RequestInterruptTestWithFunctionCall
: public RequestInterruptTestBaseWithSimpleInterrupt {
public:
- virtual void TestBody() {
+ void TestBody() override {
Local<Function> func = Function::New(env_.local(), ShouldContinueCallback,
v8::External::New(isolate_, this))
.ToLocalChecked();
@@ -24034,7 +24125,7 @@ class RequestInterruptTestWithFunctionCall
class RequestInterruptTestWithMethodCall
: public RequestInterruptTestBaseWithSimpleInterrupt {
public:
- virtual void TestBody() {
+ void TestBody() override {
v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
v8::Local<v8::Template> proto = t->PrototypeTemplate();
proto->Set(v8_str("shouldContinue"),
@@ -24053,7 +24144,7 @@ class RequestInterruptTestWithMethodCall
class RequestInterruptTestWithAccessor
: public RequestInterruptTestBaseWithSimpleInterrupt {
public:
- virtual void TestBody() {
+ void TestBody() override {
v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
v8::Local<v8::Template> proto = t->PrototypeTemplate();
proto->SetAccessorProperty(v8_str("shouldContinue"), FunctionTemplate::New(
@@ -24071,7 +24162,7 @@ class RequestInterruptTestWithAccessor
class RequestInterruptTestWithNativeAccessor
: public RequestInterruptTestBaseWithSimpleInterrupt {
public:
- virtual void TestBody() {
+ void TestBody() override {
v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
t->InstanceTemplate()->SetNativeDataProperty(
v8_str("shouldContinue"), &ShouldContinueNativeGetter, nullptr,
@@ -24099,7 +24190,7 @@ class RequestInterruptTestWithNativeAccessor
class RequestInterruptTestWithMethodCallAndInterceptor
: public RequestInterruptTestBaseWithSimpleInterrupt {
public:
- virtual void TestBody() {
+ void TestBody() override {
v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
v8::Local<v8::Template> proto = t->PrototypeTemplate();
proto->Set(v8_str("shouldContinue"),
@@ -24126,7 +24217,7 @@ class RequestInterruptTestWithMethodCallAndInterceptor
class RequestInterruptTestWithMathAbs
: public RequestInterruptTestBaseWithSimpleInterrupt {
public:
- virtual void TestBody() {
+ void TestBody() override {
env_->Global()
->Set(env_.local(), v8_str("WakeUpInterruptor"),
Function::New(env_.local(), WakeUpInterruptorCallback,
@@ -24164,9 +24255,7 @@ class RequestInterruptTestWithMathAbs
private:
static void WakeUpInterruptorCallback(
const v8::FunctionCallbackInfo<Value>& info) {
- if (!info[0]
- ->BooleanValue(info.GetIsolate()->GetCurrentContext())
- .FromJust()) {
+ if (!info[0]->BooleanValue(info.GetIsolate())) {
return;
}
@@ -24220,11 +24309,9 @@ class RequestMultipleInterrupts : public RequestInterruptTestBase {
public:
RequestMultipleInterrupts() : i_thread(this), counter_(0) {}
- virtual void StartInterruptThread() {
- i_thread.Start();
- }
+ void StartInterruptThread() override { i_thread.Start(); }
- virtual void TestBody() {
+ void TestBody() override {
Local<Function> func = Function::New(env_.local(), ShouldContinueCallback,
v8::External::New(isolate_, this))
.ToLocalChecked();
@@ -24242,7 +24329,7 @@ class RequestMultipleInterrupts : public RequestInterruptTestBase {
explicit InterruptThread(RequestMultipleInterrupts* test)
: Thread(Options("RequestInterruptTest")), test_(test) {}
- virtual void Run() {
+ void Run() override {
test_->sem_.Wait();
for (int i = 0; i < NUM_INTERRUPTS; i++) {
test_->isolate_->RequestInterrupt(&OnInterrupt, test_);
@@ -24369,7 +24456,8 @@ TEST(Regress239669) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetAccessor(v8_str("x"), 0, SetterWhichExpectsThisAndHolderToDiffer);
+ templ->SetAccessor(v8_str("x"), nullptr,
+ SetterWhichExpectsThisAndHolderToDiffer);
CHECK(context->Global()
->Set(context.local(), v8_str("P"),
templ->NewInstance(context.local()).ToLocalChecked())
@@ -25591,7 +25679,7 @@ class TestSourceStream : public v8::ScriptCompiler::ExternalSourceStream {
public:
explicit TestSourceStream(const char** chunks) : chunks_(chunks), index_(0) {}
- virtual size_t GetMoreData(const uint8_t** src) {
+ size_t GetMoreData(const uint8_t** src) override {
// Unlike in real use cases, this function will never block.
if (chunks_[index_] == nullptr) {
return 0;
@@ -26466,58 +26554,6 @@ TEST(TurboAsmDisablesNeuter) {
CHECK(!result->IsNeuterable());
}
-TEST(GetPrototypeAccessControl) {
- i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- LocalContext env;
-
- v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
- obj_template->SetAccessCheckCallback(AccessAlwaysBlocked);
-
- CHECK(env->Global()
- ->Set(env.local(), v8_str("prohibited"),
- obj_template->NewInstance(env.local()).ToLocalChecked())
- .FromJust());
-
- CHECK(CompileRun(
- "function f() { return %_GetPrototype(prohibited); }"
- "%OptimizeFunctionOnNextCall(f);"
- "f();")->IsNull());
-}
-
-
-TEST(GetPrototypeHidden) {
- i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- LocalContext env;
-
- Local<FunctionTemplate> t = FunctionTemplate::New(isolate);
- t->SetHiddenPrototype(true);
- Local<Object> proto = t->GetFunction(env.local())
- .ToLocalChecked()
- ->NewInstance(env.local())
- .ToLocalChecked();
- Local<Object> object = Object::New(isolate);
- Local<Object> proto2 = Object::New(isolate);
- object->SetPrototype(env.local(), proto).FromJust();
- proto->SetPrototype(env.local(), proto2).FromJust();
-
- CHECK(env->Global()->Set(env.local(), v8_str("object"), object).FromJust());
- CHECK(env->Global()->Set(env.local(), v8_str("proto"), proto).FromJust());
- CHECK(env->Global()->Set(env.local(), v8_str("proto2"), proto2).FromJust());
-
- v8::Local<v8::Value> result = CompileRun("%_GetPrototype(object)");
- CHECK(result->Equals(env.local(), proto2).FromJust());
-
- result = CompileRun(
- "function f() { return %_GetPrototype(object); }"
- "%OptimizeFunctionOnNextCall(f);"
- "f()");
- CHECK(result->Equals(env.local(), proto2).FromJust());
-}
-
TEST(ClassPrototypeCreationContext) {
v8::Isolate* isolate = CcTest::isolate();
@@ -27146,35 +27182,32 @@ THREADED_TEST(ReceiverConversionForAccessors) {
Local<v8::Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("p"), instance).FromJust());
- CHECK(CompileRun("(p.acc == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(p.acc = 7) == 7")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(p.acc == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(p.acc = 7) == 7")->BooleanValue(isolate));
CHECK(!CompileRun("Number.prototype.__proto__ = p;"
"var a = 1;")
.IsEmpty());
- CHECK(CompileRun("(a.acc == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(a.acc = 7) == 7")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(a.acc == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(a.acc = 7) == 7")->BooleanValue(isolate));
CHECK(!CompileRun("Boolean.prototype.__proto__ = p;"
"var a = true;")
.IsEmpty());
- CHECK(CompileRun("(a.acc == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(a.acc = 7) == 7")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(a.acc == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(a.acc = 7) == 7")->BooleanValue(isolate));
CHECK(!CompileRun("String.prototype.__proto__ = p;"
"var a = 'foo';")
.IsEmpty());
- CHECK(CompileRun("(a.acc == 42)")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("(a.acc = 7) == 7")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(a.acc == 42)")->BooleanValue(isolate));
+ CHECK(CompileRun("(a.acc = 7) == 7")->BooleanValue(isolate));
- CHECK(CompileRun("acc.call(1) == 42")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("acc.call(true)==42")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("acc.call('aa')==42")->BooleanValue(env.local()).FromJust());
- CHECK(
- CompileRun("acc.call(null) == 42")->BooleanValue(env.local()).FromJust());
- CHECK(CompileRun("acc.call(undefined) == 42")
- ->BooleanValue(env.local())
- .FromJust());
+ CHECK(CompileRun("acc.call(1) == 42")->BooleanValue(isolate));
+ CHECK(CompileRun("acc.call(true)==42")->BooleanValue(isolate));
+ CHECK(CompileRun("acc.call('aa')==42")->BooleanValue(isolate));
+ CHECK(CompileRun("acc.call(null) == 42")->BooleanValue(isolate));
+ CHECK(CompileRun("acc.call(undefined) == 42")->BooleanValue(isolate));
}
class FutexInterruptionThread : public v8::base::Thread {
@@ -27182,7 +27215,7 @@ class FutexInterruptionThread : public v8::base::Thread {
explicit FutexInterruptionThread(v8::Isolate* isolate)
: Thread(Options("FutexInterruptionThread")), isolate_(isolate) {}
- virtual void Run() {
+ void Run() override {
// Wait a bit before terminating.
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(100));
isolate_->TerminateExecution();
@@ -27622,7 +27655,7 @@ class MemoryPressureThread : public v8::base::Thread {
isolate_(isolate),
level_(level) {}
- virtual void Run() { isolate_->MemoryPressureNotification(level_); }
+ void Run() override { isolate_->MemoryPressureNotification(level_); }
private:
v8::Isolate* isolate_;
@@ -27679,13 +27712,13 @@ TEST(SetIntegrityLevel) {
CHECK(context->Global()->Set(context.local(), v8_str("o"), obj).FromJust());
v8::Local<v8::Value> is_frozen = CompileRun("Object.isFrozen(o)");
- CHECK(!is_frozen->BooleanValue(context.local()).FromJust());
+ CHECK(!is_frozen->BooleanValue(isolate));
CHECK(obj->SetIntegrityLevel(context.local(), v8::IntegrityLevel::kFrozen)
.FromJust());
is_frozen = CompileRun("Object.isFrozen(o)");
- CHECK(is_frozen->BooleanValue(context.local()).FromJust());
+ CHECK(is_frozen->BooleanValue(isolate));
}
TEST(PrivateForApiIsNumber) {
@@ -27851,7 +27884,7 @@ THREADED_TEST(ImmutableProtoGlobal) {
v8::HandleScope handle_scope(isolate);
Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
global_template->SetImmutableProto();
- v8::Local<Context> context = Context::New(isolate, 0, global_template);
+ v8::Local<Context> context = Context::New(isolate, nullptr, global_template);
Context::Scope context_scope(context);
v8::Local<Value> result = CompileRun(
"global = this;"
@@ -27871,7 +27904,7 @@ THREADED_TEST(MutableProtoGlobal) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
- v8::Local<Context> context = Context::New(isolate, 0, global_template);
+ v8::Local<Context> context = Context::New(isolate, nullptr, global_template);
Context::Scope context_scope(context);
v8::Local<Value> result = CompileRun(
"global = this;"
@@ -28445,7 +28478,7 @@ class StopAtomicsWaitThread : public v8::base::Thread {
explicit StopAtomicsWaitThread(AtomicsWaitCallbackInfo* info)
: Thread(Options("StopAtomicsWaitThread")), info_(info) {}
- virtual void Run() {
+ void Run() override {
CHECK_NOT_NULL(info_->wake_handle);
info_->wake_handle->Wake();
}
@@ -28778,216 +28811,28 @@ TEST(TestSetWasmThreadsEnabledCallback) {
CHECK(i_isolate->AreWasmThreadsEnabled(i_context));
}
-TEST(PreviewSetIteratorEntriesWithDeleted) {
+TEST(TestGetEmbeddedCodeRange) {
LocalContext env;
- v8::HandleScope handle_scope(env->GetIsolate());
- v8::Local<v8::Context> context = env.local();
+ v8::Isolate* isolate = env->GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- {
- // Create set, delete entry, create iterator, preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); set.delete(1); set.keys()")
- ->ToObject(context)
- .ToLocalChecked();
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(2, entries->Length());
- CHECK_EQ(2, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- CHECK_EQ(3, entries->Get(context, 1)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create set, create iterator, delete entry, preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); set.keys()")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("set.delete(1);");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(2, entries->Length());
- CHECK_EQ(2, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- CHECK_EQ(3, entries->Get(context, 1)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create set, create iterator, delete entry, iterate, preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); var it = set.keys(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("set.delete(1); it.next();");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(1, entries->Length());
- CHECK_EQ(3, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create set, create iterator, delete entry, iterate until empty, preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); var it = set.keys(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("set.delete(1); it.next(); it.next();");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(0, entries->Length());
- }
- {
- // Create set, create iterator, delete entry, iterate, trigger rehash,
- // preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); var it = set.keys(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("set.delete(1); it.next();");
- CompileRun("for (var i = 4; i < 20; i++) set.add(i);");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(17, entries->Length());
- for (uint32_t i = 0; i < 17; i++) {
- CHECK_EQ(i + 3, entries->Get(context, i)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- }
-}
+ v8::MemoryRange builtins_range = isolate->GetEmbeddedCodeRange();
-TEST(PreviewMapIteratorEntriesWithDeleted) {
- LocalContext env;
- v8::HandleScope handle_scope(env->GetIsolate());
- v8::Local<v8::Context> context = env.local();
+ // Check that each off-heap builtin is within the builtins code range.
+ if (i::FLAG_embedded_builtins) {
+ for (int id = 0; id < i::Builtins::builtin_count; id++) {
+ if (!i::Builtins::IsIsolateIndependent(id)) continue;
+ i::Code* builtin = i_isolate->builtins()->builtin(id);
+ i::Address start = builtin->InstructionStart();
+ i::Address end = start + builtin->InstructionSize();
- {
- // Create map, delete entry, create iterator, preview.
- v8::Local<v8::Object> iterator = CompileRun(
- "var map = new Map();"
- "var key = {}; map.set(key, 1);"
- "map.set({}, 2); map.set({}, 3);"
- "map.delete(key);"
- "map.values()")
- ->ToObject(context)
- .ToLocalChecked();
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(2, entries->Length());
- CHECK_EQ(2, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- CHECK_EQ(3, entries->Get(context, 1)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create map, create iterator, delete entry, preview.
- v8::Local<v8::Object> iterator = CompileRun(
- "var map = new Map();"
- "var key = {}; map.set(key, 1);"
- "map.set({}, 2); map.set({}, 3);"
- "map.values()")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("map.delete(key);");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(2, entries->Length());
- CHECK_EQ(2, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- CHECK_EQ(3, entries->Get(context, 1)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create map, create iterator, delete entry, iterate, preview.
- v8::Local<v8::Object> iterator = CompileRun(
- "var map = new Map();"
- "var key = {}; map.set(key, 1);"
- "map.set({}, 2); map.set({}, 3);"
- "var it = map.values(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("map.delete(key); it.next();");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(1, entries->Length());
- CHECK_EQ(3, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create map, create iterator, delete entry, iterate until empty, preview.
- v8::Local<v8::Object> iterator = CompileRun(
- "var map = new Map();"
- "var key = {}; map.set(key, 1);"
- "map.set({}, 2); map.set({}, 3);"
- "var it = map.values(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("map.delete(key); it.next(); it.next();");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(0, entries->Length());
- }
- {
- // Create map, create iterator, delete entry, iterate, trigger rehash,
- // preview.
- v8::Local<v8::Object> iterator = CompileRun(
- "var map = new Map();"
- "var key = {}; map.set(key, 1);"
- "map.set({}, 2); map.set({}, 3);"
- "var it = map.values(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("map.delete(key); it.next();");
- CompileRun("for (var i = 4; i < 20; i++) map.set({}, i);");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(17, entries->Length());
- for (uint32_t i = 0; i < 17; i++) {
- CHECK_EQ(i + 3, entries->Get(context, i)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
+ i::Address builtins_start =
+ reinterpret_cast<i::Address>(builtins_range.start);
+ CHECK(start >= builtins_start &&
+ end < builtins_start + builtins_range.length_in_bytes);
}
+ } else {
+ CHECK_EQ(nullptr, builtins_range.start);
+ CHECK_EQ(0, builtins_range.length_in_bytes);
}
}
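
The test-api.cc hunks above converge on one V8 7.1 API change: Value::BooleanValue(Local<Context>), which returned Maybe<bool>, gives way to Value::BooleanValue(Isolate*), which returns a plain bool because ToBoolean conversion cannot fail. A minimal before/after sketch (the value/context/isolate names are placeholders, not from the diff):

  // Before: context overload, the Maybe<bool> must be unwrapped.
  bool b_old = value->BooleanValue(context).FromJust();
  // After: isolate overload, no Maybe since ToBoolean never throws.
  bool b_new = value->BooleanValue(isolate);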
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index c0f8e171c7..f2ca5c01e5 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -209,7 +209,8 @@ static void InitializeVM() {
__ Ret(); \
__ GetCode(masm.isolate(), nullptr);
-#define TEARDOWN() CHECK(v8::internal::FreePages(buf, allocated));
+#define TEARDOWN() \
+ CHECK(v8::internal::FreePages(GetPlatformPageAllocator(), buf, allocated));
#endif // ifdef USE_SIMULATOR.
@@ -15068,9 +15069,6 @@ TEST(default_nan_double) {
TEST(call_no_relocation) {
- Address call_start;
- Address return_address;
-
INIT_V8();
SETUP();
@@ -15091,9 +15089,7 @@ TEST(call_no_relocation) {
__ Push(lr, xzr);
{
Assembler::BlockConstPoolScope scope(&masm);
- call_start = buf_addr + __ pc_offset();
__ Call(buf_addr + function.pos(), RelocInfo::NONE);
- return_address = buf_addr + __ pc_offset();
}
__ Pop(xzr, lr);
END();
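
The TEARDOWN hunk reflects v8::internal::FreePages now taking its allocator explicitly; page management appears to be routed through a PageAllocator object rather than implicit global state (inferred from the call site, not verified against the header):

  // Old: CHECK(v8::internal::FreePages(buf, allocated));
  // New: the platform page allocator is named at the call site.
  CHECK(v8::internal::FreePages(GetPlatformPageAllocator(), buf, allocated));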
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 785ffa2fa3..ebae2e9ed5 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -85,7 +85,7 @@ TEST(MIPS1) {
Label L, C;
__ mov(a1, a0);
- __ li(v0, 0);
+ __ li(v0, 0l);
__ b(&C);
__ nop();
@@ -2316,7 +2316,7 @@ TEST(movt_movd) {
__ Lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)));
__ cfc1(t0, FCSR);
__ ctc1(t1, FCSR);
- __ li(t2, 0x0);
+ __ li(t2, 0x0l);
__ mtc1(t2, f12);
__ mtc1(t2, f10);
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold)));
@@ -5421,7 +5421,7 @@ uint64_t run_jic(int16_t offset) {
Label get_program_counter, stop_execution;
__ push(ra);
- __ li(v0, 0);
+ __ li(v0, 0l);
__ li(t1, 0x66);
__ addiu(v0, v0, 0x1); // <-- offset = -32
@@ -5496,7 +5496,7 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
v8::internal::CodeObjectRequired::kYes);
Label stop_execution;
- __ li(v0, 0);
+ __ li(v0, 0l);
__ li(t1, 0x66);
__ addiu(v0, v0, 0x1); // <-- offset = -8
@@ -5755,7 +5755,7 @@ uint64_t run_jialc(int16_t offset) {
Label main_block, get_program_counter;
__ push(ra);
- __ li(v0, 0);
+ __ li(v0, 0l);
__ beq(v0, v0, &main_block);
__ nop();
@@ -5980,8 +5980,8 @@ int64_t run_bc(int32_t offset) {
Label continue_1, stop_execution;
__ push(ra);
- __ li(v0, 0);
- __ li(t8, 0);
+ __ li(v0, 0l);
+ __ li(t8, 0l);
__ li(t9, 2); // Condition for stopping execution.
for (int32_t i = -100; i <= -11; ++i) {
@@ -6060,8 +6060,8 @@ int64_t run_balc(int32_t offset) {
Label continue_1, stop_execution;
__ push(ra);
- __ li(v0, 0);
- __ li(t8, 0);
+ __ li(v0, 0l);
+ __ li(t8, 0l);
__ li(t9, 2); // Condition for stopping execution.
__ beq(t8, t8, &continue_1);
@@ -7072,7 +7072,7 @@ void run_msa_ctc_cfc(uint64_t value) {
MSAControlRegister msareg = {kMSACSRRegister};
__ li(t0, value);
- __ li(t2, 0);
+ __ li(t2, 0l);
__ cfcmsa(t1, msareg);
__ ctcmsa(msareg, t0);
__ cfcmsa(t2, msareg);
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index 7ecef4429c..a340322bd6 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -1068,7 +1068,7 @@ TEST(AssemblerX64FMA_sd) {
// - xmm0 * xmm1 + xmm2
__ movaps(xmm3, xmm0);
__ mulsd(xmm3, xmm1);
- __ Move(xmm4, (uint64_t)1 << 63);
+ __ Move(xmm4, static_cast<uint64_t>(1) << 63);
__ xorpd(xmm3, xmm4);
__ addsd(xmm3, xmm2); // Expected result in xmm3
@@ -1117,7 +1117,7 @@ TEST(AssemblerX64FMA_sd) {
// - xmm0 * xmm1 - xmm2
__ movaps(xmm3, xmm0);
__ mulsd(xmm3, xmm1);
- __ Move(xmm4, (uint64_t)1 << 63);
+ __ Move(xmm4, static_cast<uint64_t>(1) << 63);
__ xorpd(xmm3, xmm4);
__ subsd(xmm3, xmm2); // Expected result in xmm3
@@ -1294,7 +1294,7 @@ TEST(AssemblerX64FMA_ss) {
// - xmm0 * xmm1 + xmm2
__ movaps(xmm3, xmm0);
__ mulss(xmm3, xmm1);
- __ Move(xmm4, (uint32_t)1 << 31);
+ __ Move(xmm4, static_cast<uint32_t>(1) << 31);
__ xorps(xmm3, xmm4);
__ addss(xmm3, xmm2); // Expected result in xmm3
@@ -1343,7 +1343,7 @@ TEST(AssemblerX64FMA_ss) {
// - xmm0 * xmm1 - xmm2
__ movaps(xmm3, xmm0);
__ mulss(xmm3, xmm1);
- __ Move(xmm4, (uint32_t)1 << 31);
+ __ Move(xmm4, static_cast<uint32_t>(1) << 31);
__ xorps(xmm3, xmm4);
__ subss(xmm3, xmm2); // Expected result in xmm3
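
The x64 FMA hunks swap C-style casts for static_cast when building sign-bit masks. Both spellings cast the literal before the shift, so neither is undefined behavior; the rewrite simply prefers the explicit, greppable C++ cast. Standalone illustration (not from the diff):

  // Widen first, then shift: yields the IEEE double/float sign bit.
  uint64_t sign64 = static_cast<uint64_t>(1) << 63;  // 0x8000000000000000
  uint32_t sign32 = static_cast<uint32_t>(1) << 31;  // 0x80000000
  // (uint64_t)1 << 63 computes the same value; the change is stylistic.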
diff --git a/deps/v8/test/cctest/test-circular-queue.cc b/deps/v8/test/cctest/test-circular-queue.cc
index a5bcb486af..85ab4c4fad 100644
--- a/deps/v8/test/cctest/test-circular-queue.cc
+++ b/deps/v8/test/cctest/test-circular-queue.cc
@@ -113,7 +113,7 @@ class ProducerThread: public v8::base::Thread {
value_(value),
finished_(finished) {}
- virtual void Run() {
+ void Run() override {
for (Record i = value_; i < value_ + records_per_chunk_; ++i) {
Record* rec = reinterpret_cast<Record*>(scq_->StartEnqueue());
CHECK(rec);
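
The recurring `virtual void Run()` to `void Run() override` rewrite throughout these test helpers lets the compiler verify that each method really overrides a base-class virtual; a signature mismatch then fails to compile instead of silently introducing a new, never-called method. Sketch with hypothetical names:

  class Base {
   public:
    virtual ~Base() = default;
    virtual void Run() {}
  };
  class Worker : public Base {
   public:
    void Run() override {}     // OK: matches Base::Run exactly.
    // void Run(int) override; // Error: overrides nothing in Base.
  };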
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 2036e13450..ffe9200eee 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -907,9 +907,12 @@ TEST(TransitionLookup) {
}
}
- CHECK(root_map->raw_transitions()->ToStrongHeapObject()->IsTransitionArray());
+ CHECK(root_map->raw_transitions()
+ ->GetHeapObjectAssumeStrong()
+ ->IsTransitionArray());
Handle<TransitionArray> transitions(
- TransitionArray::cast(root_map->raw_transitions()->ToStrongHeapObject()),
+ TransitionArray::cast(
+ root_map->raw_transitions()->GetHeapObjectAssumeStrong()),
isolate);
DCHECK(transitions->IsSortedNoDuplicates());
@@ -2495,7 +2498,7 @@ TEST(CreatePromiseGetCapabilitiesExecutorContext) {
Node* const context = m.Parameter(kNumParams + 2);
Node* const native_context = m.LoadNativeContext(context);
- Node* const map = m.LoadRoot(Heap::kPromiseCapabilityMapRootIndex);
+ Node* const map = m.LoadRoot(RootIndex::kPromiseCapabilityMap);
Node* const capability = m.AllocateStruct(map);
m.StoreObjectFieldNoWriteBarrier(
capability, PromiseCapability::kPromiseOffset, m.UndefinedConstant());
@@ -3495,6 +3498,37 @@ TEST(TestCallBuiltinIndirectLoad) {
Handle<String>::cast(result.ToHandleChecked())));
}
+TEST(TestGotoIfDebugExecutionModeChecksSideEffects) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ Label is_true(&m), is_false(&m);
+ m.GotoIfDebugExecutionModeChecksSideEffects(&is_true);
+ m.Goto(&is_false);
+ m.BIND(&is_false);
+ m.Return(m.BooleanConstant(false));
+
+ m.BIND(&is_true);
+ m.Return(m.BooleanConstant(true));
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+
+ CHECK(isolate->debug_execution_mode() != DebugInfo::kSideEffects);
+
+ Handle<Object> result = ft.Call().ToHandleChecked();
+ CHECK(result->IsBoolean());
+ CHECK_EQ(false, result->BooleanValue(isolate));
+
+ isolate->debug()->StartSideEffectCheckMode();
+ CHECK(isolate->debug_execution_mode() == DebugInfo::kSideEffects);
+
+ result = ft.Call().ToHandleChecked();
+ CHECK(result->IsBoolean());
+ CHECK_EQ(true, result->BooleanValue(isolate));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
index 54f53e57c3..d9bfe9bb17 100644
--- a/deps/v8/test/cctest/test-code-stubs-ia32.cc
+++ b/deps/v8/test/cctest/test-code-stubs-ia32.cc
@@ -67,7 +67,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers make sure they don't get clobbered.
int reg_num = 0;
for (; reg_num < Register::kNumRegisters; ++reg_num) {
- if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
+ if (GetRegConfig()->IsAllocatableGeneralCode(reg_num)) {
Register reg = Register::from_code(reg_num);
if (reg != esp && reg != ebp && reg != destination_reg) {
__ push(reg);
@@ -88,7 +88,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
- if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
+ if (GetRegConfig()->IsAllocatableGeneralCode(reg_num)) {
Register reg = Register::from_code(reg_num);
if (reg != esp && reg != ebp && reg != destination_reg) {
__ cmp(reg, MemOperand(esp, 0));
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 63904e086f..76ce276c06 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -55,7 +55,7 @@ static void SetGlobalProperty(const char* name, Object* value) {
isolate->factory()->InternalizeUtf8String(name);
Handle<JSObject> global(isolate->context()->global_object(), isolate);
Runtime::SetObjectProperty(isolate, global, internalized_name, object,
- LanguageMode::kSloppy)
+ LanguageMode::kSloppy, StoreOrigin::kMaybeKeyed)
.Check();
}
@@ -318,7 +318,7 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
MaybeObject* object = feedback_vector->Get(slot_for_a);
{
HeapObject* heap_object;
- CHECK(object->ToWeakHeapObject(&heap_object));
+ CHECK(object->GetHeapObjectIfWeak(&heap_object));
CHECK(heap_object->IsJSFunction());
}
@@ -330,7 +330,7 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
object = f->feedback_vector()->Get(slot_for_a);
{
HeapObject* heap_object;
- CHECK(object->ToWeakHeapObject(&heap_object));
+ CHECK(object->GetHeapObjectIfWeak(&heap_object));
CHECK(heap_object->IsJSFunction());
}
}
@@ -676,7 +676,7 @@ void TestCompileFunctionInContextToStringImpl() {
V8_Fatal(__FILE__, __LINE__, \
"Unexpected exception thrown during %s:\n\t%s\n", op, *error); \
} \
- } while (0)
+ } while (false)
{ // NOLINT
CcTest::InitializeVM();
@@ -766,11 +766,6 @@ void TestCompileFunctionInContextToStringImpl() {
#undef CHECK_NOT_CAUGHT
}
-TEST(CompileFunctionInContextHarmonyFunctionToString) {
- v8::internal::FLAG_harmony_function_tostring = true;
- TestCompileFunctionInContextToStringImpl();
-}
-
TEST(CompileFunctionInContextFunctionToString) {
TestCompileFunctionInContextToStringImpl();
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index e08bec375e..75af3f6d98 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -40,7 +40,6 @@
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-listener.h"
-#include "src/source-position-table.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
@@ -2545,61 +2544,6 @@ TEST(MultipleProfilers) {
profiler2->StopProfiling("2");
}
-int GetSourcePositionEntryCount(i::Isolate* isolate, const char* source) {
- i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(
- v8::Utils::OpenHandle(*CompileRun(source)));
- if (function->IsInterpreted()) return -1;
- i::Handle<i::Code> code(function->code(), isolate);
- i::SourcePositionTableIterator iterator(
- ByteArray::cast(code->source_position_table()));
- int count = 0;
- while (!iterator.done()) {
- count++;
- iterator.Advance();
- }
- return count;
-}
-
-UNINITIALIZED_TEST(DetailedSourcePositionAPI) {
- i::FLAG_detailed_line_info = false;
- i::FLAG_allow_natives_syntax = true;
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
-
- const char* source =
- "function fib(i) {"
- " if (i <= 1) return 1; "
- " return fib(i - 1) +"
- " fib(i - 2);"
- "}"
- "fib(5);"
- "%OptimizeFunctionOnNextCall(fib);"
- "fib(5);"
- "fib";
- {
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
- CHECK(!i_isolate->NeedsDetailedOptimizedCodeLineInfo());
-
- int non_detailed_positions = GetSourcePositionEntryCount(i_isolate, source);
-
- v8::CpuProfiler::UseDetailedSourcePositionsForProfiling(isolate);
- CHECK(i_isolate->NeedsDetailedOptimizedCodeLineInfo());
-
- int detailed_positions = GetSourcePositionEntryCount(i_isolate, source);
-
- CHECK((non_detailed_positions == -1 && detailed_positions == -1) ||
- non_detailed_positions < detailed_positions);
- }
-
- isolate->Dispose();
-}
-
} // namespace test_cpu_profiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index 61545d7859..d1d8efe26c 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -44,7 +44,7 @@ class DateCacheMock: public DateCache {
: local_offset_(local_offset), rules_(rules), rules_count_(rules_count) {}
protected:
- virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
+ int GetDaylightSavingsOffsetFromOS(int64_t time_sec) override {
int days = DaysFromTime(time_sec * 1000);
int time_in_day_sec = TimeInDay(time_sec * 1000, days) / 1000;
int year, month, day;
@@ -53,7 +53,7 @@ class DateCacheMock: public DateCache {
return rule == nullptr ? 0 : rule->offset_sec * 1000;
}
- virtual int GetLocalOffsetFromOS(int64_t time_sec, bool is_utc) {
+ int GetLocalOffsetFromOS(int64_t time_sec, bool is_utc) override {
return local_offset_ + GetDaylightSavingsOffsetFromOS(time_sec);
}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index f678b8ca6f..2e2128e50b 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -161,7 +161,7 @@ void CheckDebuggerUnloaded() {
// Collect garbage to ensure weak handles are cleared.
CcTest::CollectAllGarbage();
- CcTest::CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ CcTest::CollectAllGarbage();
// Iterate the heap and check that there are no debugger related objects left.
HeapIterator iterator(CcTest::heap());
@@ -189,8 +189,9 @@ int break_point_hit_count = 0;
int break_point_hit_count_deoptimize = 0;
class DebugEventCounter : public v8::debug::DebugDelegate {
public:
- void BreakProgramRequested(v8::Local<v8::Context>,
- const std::vector<v8::debug::BreakpointId>&) {
+ void BreakProgramRequested(
+ v8::Local<v8::Context>,
+ const std::vector<v8::debug::BreakpointId>&) override {
break_point_hit_count++;
// Perform a full deoptimization when the specified number of
// breaks have been hit.
@@ -211,8 +212,9 @@ class DebugEventCounter : public v8::debug::DebugDelegate {
// Debug event handler which performs a garbage collection.
class DebugEventBreakPointCollectGarbage : public v8::debug::DebugDelegate {
public:
- void BreakProgramRequested(v8::Local<v8::Context>,
- const std::vector<v8::debug::BreakpointId>&) {
+ void BreakProgramRequested(
+ v8::Local<v8::Context>,
+ const std::vector<v8::debug::BreakpointId>&) override {
// Perform a garbage collection when break point is hit and continue. Based
// on the number of break points hit either scavenge or mark compact
// collector is used.
@@ -231,8 +233,9 @@ class DebugEventBreakPointCollectGarbage : public v8::debug::DebugDelegate {
// collector to have the heap verified.
class DebugEventBreak : public v8::debug::DebugDelegate {
public:
- void BreakProgramRequested(v8::Local<v8::Context>,
- const std::vector<v8::debug::BreakpointId>&) {
+ void BreakProgramRequested(
+ v8::Local<v8::Context>,
+ const std::vector<v8::debug::BreakpointId>&) override {
// Count the number of breaks.
break_point_hit_count++;
@@ -255,8 +258,9 @@ int max_break_point_hit_count = 0;
bool terminate_after_max_break_point_hit = false;
class DebugEventBreakMax : public v8::debug::DebugDelegate {
public:
- void BreakProgramRequested(v8::Local<v8::Context>,
- const std::vector<v8::debug::BreakpointId>&) {
+ void BreakProgramRequested(
+ v8::Local<v8::Context>,
+ const std::vector<v8::debug::BreakpointId>&) override {
v8::Isolate* v8_isolate = CcTest::isolate();
v8::internal::Isolate* isolate = CcTest::i_isolate();
if (break_point_hit_count < max_break_point_hit_count) {
@@ -2999,9 +3003,9 @@ int event_listener_hit_count = 0;
class EmptyExternalStringResource : public v8::String::ExternalStringResource {
public:
EmptyExternalStringResource() { empty_[0] = 0; }
- virtual ~EmptyExternalStringResource() {}
- virtual size_t length() const { return empty_.length(); }
- virtual const uint16_t* data() const { return empty_.start(); }
+ ~EmptyExternalStringResource() override = default;
+ size_t length() const override { return empty_.length(); }
+ const uint16_t* data() const override { return empty_.start(); }
private:
::v8::internal::EmbeddedVector<uint16_t, 1> empty_;
@@ -3064,8 +3068,8 @@ class ContextCheckEventListener : public v8::debug::DebugDelegate {
}
void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Value> exception,
- v8::Local<v8::Value> promise,
- bool is_uncaught) override {
+ v8::Local<v8::Value> promise, bool is_uncaught,
+ v8::debug::ExceptionType) override {
CheckContext();
}
bool IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
@@ -3677,7 +3681,7 @@ class TerminationThread : public v8::base::Thread {
explicit TerminationThread(v8::Isolate* isolate)
: Thread(Options("terminator")), isolate_(isolate) {}
- virtual void Run() {
+ void Run() override {
terminate_requested_semaphore.Wait();
isolate_->TerminateExecution();
terminate_fired_semaphore.Signal();
@@ -3712,7 +3716,7 @@ class ArchiveRestoreThread : public v8::base::Thread,
spawn_count_(spawn_count),
break_count_(0) {}
- virtual void Run() {
+ void Run() override {
v8::Locker locker(isolate_);
isolate_->Enter();
@@ -3743,8 +3747,9 @@ class ArchiveRestoreThread : public v8::base::Thread,
isolate_->Exit();
}
- void BreakProgramRequested(v8::Local<v8::Context> context,
- const std::vector<v8::debug::BreakpointId>&) {
+ void BreakProgramRequested(
+ v8::Local<v8::Context> context,
+ const std::vector<v8::debug::BreakpointId>&) override {
auto stack_traces = v8::debug::StackTraceIterator::Create(isolate_);
if (!stack_traces->Done()) {
v8::debug::Location location = stack_traces->GetSourceLocation();
@@ -3833,8 +3838,8 @@ class DebugEventExpectNoException : public v8::debug::DebugDelegate {
public:
void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Value> exception,
- v8::Local<v8::Value> promise,
- bool is_uncaught) override {
+ v8::Local<v8::Value> promise, bool is_uncaught,
+ v8::debug::ExceptionType) override {
CHECK(false);
}
};
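
The test-debug.cc hunks adapt delegates to two v8::debug::DebugDelegate interface changes visible above: the BreakProgramRequested overrides are now marked `override` (previously the unmarked methods merely happened to match), and ExceptionThrown takes a trailing v8::debug::ExceptionType argument. A skeletal delegate against the updated signatures, as they appear in this diff:

  class CountingDelegate : public v8::debug::DebugDelegate {
   public:
    void BreakProgramRequested(
        v8::Local<v8::Context> paused_context,
        const std::vector<v8::debug::BreakpointId>& hit_breakpoints) override {
      // React to the break, e.g. bump a counter as the tests above do.
    }
    void ExceptionThrown(v8::Local<v8::Context> paused_context,
                         v8::Local<v8::Value> exception,
                         v8::Local<v8::Value> promise, bool is_uncaught,
                         v8::debug::ExceptionType exception_type) override {}
  };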
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index f0e8080275..8b59fe5960 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -123,11 +123,9 @@ void DeclarationContext::InitializeIfNeeded() {
Local<FunctionTemplate> function = FunctionTemplate::New(isolate);
Local<Value> data = External::New(CcTest::isolate(), this);
GetHolder(function)->SetHandler(v8::NamedPropertyHandlerConfiguration(
- &HandleGet, &HandleSet, &HandleQuery, 0, 0, data));
- Local<Context> context = Context::New(isolate,
- 0,
- function->InstanceTemplate(),
- Local<Value>());
+ &HandleGet, &HandleSet, &HandleQuery, nullptr, nullptr, data));
+ Local<Context> context = Context::New(
+ isolate, nullptr, function->InstanceTemplate(), Local<Value>());
context_.Reset(isolate, context);
context->Enter();
is_initialized_ = true;
@@ -256,7 +254,7 @@ TEST(Unknown) {
class AbsentPropertyContext: public DeclarationContext {
protected:
- virtual v8::Local<Integer> Query(Local<Name> key) {
+ v8::Local<Integer> Query(Local<Name> key) override {
return v8::Local<Integer>();
}
};
@@ -306,7 +304,7 @@ class AppearingPropertyContext: public DeclarationContext {
AppearingPropertyContext() : state_(DECLARE) { }
protected:
- virtual v8::Local<Integer> Query(Local<Name> key) {
+ v8::Local<Integer> Query(Local<Name> key) override {
switch (state_) {
case DECLARE:
// Force declaration by returning that the
@@ -361,13 +359,13 @@ class ExistsInPrototypeContext: public DeclarationContext {
public:
ExistsInPrototypeContext() { InitializeIfNeeded(); }
protected:
- virtual v8::Local<Integer> Query(Local<Name> key) {
+ v8::Local<Integer> Query(Local<Name> key) override {
// Let it seem that the property exists in the prototype object.
return Integer::New(isolate(), v8::None);
}
// Use the prototype as the holder for the interceptors.
- virtual Local<ObjectTemplate> GetHolder(Local<FunctionTemplate> function) {
+ Local<ObjectTemplate> GetHolder(Local<FunctionTemplate> function) override {
return function->PrototypeTemplate();
}
};
@@ -404,13 +402,13 @@ TEST(ExistsInPrototype) {
class AbsentInPrototypeContext: public DeclarationContext {
protected:
- virtual v8::Local<Integer> Query(Local<Name> key) {
+ v8::Local<Integer> Query(Local<Name> key) override {
// Let it seem that the property is absent in the prototype object.
return Local<Integer>();
}
// Use the prototype as the holder for the interceptors.
- virtual Local<ObjectTemplate> GetHolder(Local<FunctionTemplate> function) {
+ Local<ObjectTemplate> GetHolder(Local<FunctionTemplate> function) override {
return function->PrototypeTemplate();
}
};
@@ -439,13 +437,13 @@ class ExistsInHiddenPrototypeContext: public DeclarationContext {
}
protected:
- virtual v8::Local<Integer> Query(Local<Name> key) {
+ v8::Local<Integer> Query(Local<Name> key) override {
// Let it seem that the property exists in the hidden prototype object.
return Integer::New(isolate(), v8::None);
}
// Install the hidden prototype after the global object has been created.
- virtual void PostInitializeContext(Local<Context> context) {
+ void PostInitializeContext(Local<Context> context) override {
Local<Object> global_object = context->Global();
Local<Object> hidden_proto = hidden_proto_->GetFunction(context)
.ToLocalChecked()
@@ -457,7 +455,7 @@ class ExistsInHiddenPrototypeContext: public DeclarationContext {
}
// Use the hidden prototype as the holder for the interceptors.
- virtual Local<ObjectTemplate> GetHolder(Local<FunctionTemplate> function) {
+ Local<ObjectTemplate> GetHolder(Local<FunctionTemplate> function) override {
return hidden_proto_->InstanceTemplate();
}
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 0d86f135ea..544a0f587d 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -95,15 +95,6 @@ class AllowNativesSyntaxNoInlining {
bool turbo_inlining_;
};
-
-// Abort any ongoing incremental marking to make sure that all weak global
-// handle callbacks are processed.
-static void NonIncrementalGC(i::Isolate* isolate) {
- isolate->heap()->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
- i::GarbageCollectionReason::kTesting);
-}
-
-
static Handle<JSFunction> GetJSFunction(v8::Local<v8::Context> context,
const char* property_name) {
v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(
@@ -127,7 +118,7 @@ TEST(DeoptimizeSimple) {
"function f() { g(); };"
"f();");
}
- NonIncrementalGC(CcTest::i_isolate());
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, env->Global()
->Get(env.local(), v8_str("count"))
@@ -147,7 +138,7 @@ TEST(DeoptimizeSimple) {
"function f(x) { if (x) { g(); } else { return } };"
"f(true);");
}
- NonIncrementalGC(CcTest::i_isolate());
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, env->Global()
->Get(env.local(), v8_str("count"))
@@ -174,7 +165,7 @@ TEST(DeoptimizeSimpleWithArguments) {
"function f(x, y, z) { g(1,x); y+z; };"
"f(1, \"2\", false);");
}
- NonIncrementalGC(CcTest::i_isolate());
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, env->Global()
->Get(env.local(), v8_str("count"))
@@ -195,7 +186,7 @@ TEST(DeoptimizeSimpleWithArguments) {
"function f(x, y, z) { if (x) { g(x, y); } else { return y + z; } };"
"f(true, 1, \"2\");");
}
- NonIncrementalGC(CcTest::i_isolate());
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, env->Global()
->Get(env.local(), v8_str("count"))
@@ -223,7 +214,7 @@ TEST(DeoptimizeSimpleNested) {
"function g(z) { count++; %DeoptimizeFunction(f); return z;}"
"function f(x,y,z) { return h(x, y, g(z)); };"
"result = f(1, 2, 3);");
- NonIncrementalGC(CcTest::i_isolate());
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, env->Global()
->Get(env.local(), v8_str("count"))
@@ -257,7 +248,7 @@ TEST(DeoptimizeRecursive) {
"function f(x) { calls++; if (x > 0) { f(x - 1); } else { g(); } };"
"f(10);");
}
- NonIncrementalGC(CcTest::i_isolate());
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, env->Global()
->Get(env.local(), v8_str("count"))
@@ -300,7 +291,7 @@ TEST(DeoptimizeMultiple) {
"function f1(x) { return f2(x + 1, x + 1) + x; };"
"result = f1(1);");
}
- NonIncrementalGC(CcTest::i_isolate());
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, env->Global()
->Get(env.local(), v8_str("count"))
@@ -330,7 +321,7 @@ TEST(DeoptimizeConstructor) {
"function f() { g(); };"
"result = new f() instanceof f;");
}
- NonIncrementalGC(CcTest::i_isolate());
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, env->Global()
->Get(env.local(), v8_str("count"))
@@ -354,7 +345,7 @@ TEST(DeoptimizeConstructor) {
"result = new f(1, 2);"
"result = result.x + result.y;");
}
- NonIncrementalGC(CcTest::i_isolate());
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, env->Global()
->Get(env.local(), v8_str("count"))
@@ -392,7 +383,7 @@ TEST(DeoptimizeConstructorMultiple) {
"function f1(x) { this.result = new f2(x + 1, x + 1).result + x; };"
"result = new f1(1).result;");
}
- NonIncrementalGC(CcTest::i_isolate());
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, env->Global()
->Get(env.local(), v8_str("count"))
@@ -453,7 +444,7 @@ UNINITIALIZED_TEST(DeoptimizeBinaryOperationADDString) {
"deopt = true;"
"var result = f('a+', new X());");
}
- NonIncrementalGC(i_isolate);
+ CcTest::CollectAllGarbage(i_isolate);
CHECK(!GetJSFunction(env.local(), "f")->IsOptimized());
CHECK_EQ(1, env->Global()
@@ -513,7 +504,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
// Call f and force deoptimization while processing the binary operation.
CompileRun("deopt = true;"
"var result = f(7, new X());");
- NonIncrementalGC(i_isolate);
+ CcTest::CollectAllGarbage(i_isolate);
CHECK(!GetJSFunction((*env).local(), "f")->IsOptimized());
}
@@ -718,7 +709,7 @@ UNINITIALIZED_TEST(DeoptimizeCompare) {
"deopt = true;"
"var result = f('a', new X());");
}
- NonIncrementalGC(i_isolate);
+ CcTest::CollectAllGarbage(i_isolate);
CHECK(!GetJSFunction(env.local(), "f")->IsOptimized());
CHECK_EQ(1, env->Global()
@@ -729,8 +720,7 @@ UNINITIALIZED_TEST(DeoptimizeCompare) {
CHECK_EQ(true, env->Global()
->Get(env.local(), v8_str("result"))
.ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ ->BooleanValue(isolate));
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
}
isolate->Exit();
@@ -817,7 +807,7 @@ UNINITIALIZED_TEST(DeoptimizeLoadICStoreIC) {
"f2(new X(), 'z');"
"g2(new X(), 'z');");
}
- NonIncrementalGC(i_isolate);
+ CcTest::CollectAllGarbage(i_isolate);
CHECK(!GetJSFunction(env.local(), "f1")->IsOptimized());
CHECK(!GetJSFunction(env.local(), "g1")->IsOptimized());
@@ -919,7 +909,7 @@ UNINITIALIZED_TEST(DeoptimizeLoadICStoreICNested) {
"deopt = true;"
"var result = f1(new X());");
}
- NonIncrementalGC(i_isolate);
+ CcTest::CollectAllGarbage(i_isolate);
CHECK(!GetJSFunction(env.local(), "f1")->IsOptimized());
CHECK(!GetJSFunction(env.local(), "g1")->IsOptimized());
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index 29497fd9d6..1b0bdcc270 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -1177,19 +1177,6 @@ TEST(Type3) {
COMPARE_PC_REL_COMPACT(bgtz(a0, 32767), "1c807fff bgtz a0, 32767",
32767);
- int64_t pc_region;
- GET_PC_REGION(pc_region);
-
- int64_t target = pc_region | 0x4;
- COMPARE_PC_JUMP(j(target), "08000001 j ", target);
- target = pc_region | 0xFFFFFFC;
- COMPARE_PC_JUMP(j(target), "0bffffff j ", target);
-
- target = pc_region | 0x4;
- COMPARE_PC_JUMP(jal(target), "0c000001 jal ", target);
- target = pc_region | 0xFFFFFFC;
- COMPARE_PC_JUMP(jal(target), "0fffffff jal ", target);
-
VERIFY_RUN();
}
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index c42606485c..1e530c3ce2 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -142,8 +142,11 @@ TEST(DisasmX64) {
__ shll_cl(Operand(rdi, rax, times_4, 100));
__ shll(rdx, Immediate(1));
__ shll(rdx, Immediate(6));
- __ bts(Operand(rdx, 0), rcx);
- __ bts(Operand(rbx, rcx, times_4, 0), rcx);
+ __ btq(Operand(rdx, 0), rcx);
+ __ btsq(Operand(rdx, 0), rcx);
+ __ btsq(Operand(rbx, rcx, times_4, 0), rcx);
+ __ btsq(rcx, Immediate(13));
+ __ btrq(rcx, Immediate(13));
__ nop();
__ pushq(Immediate(12));
__ pushq(Immediate(23456));
@@ -267,7 +270,6 @@ TEST(DisasmX64) {
__ xorq(rdx, Immediate(12345));
__ xorq(rdx, Operand(rbx, rcx, times_8, 10000));
- __ bts(Operand(rbx, rcx, times_8, 10000), rdx);
__ pshufw(xmm5, xmm1, 3);
__ hlt();
__ int3();
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index b809854270..80ae82d799 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -96,7 +96,7 @@ TEST(VectorStructure) {
FeedbackMetadata::GetSlotSize(FeedbackSlotKind::kCreateClosure));
FeedbackSlot slot = helper.slot(1);
FeedbackCell* cell =
- FeedbackCell::cast(vector->Get(slot)->ToStrongHeapObject());
+ FeedbackCell::cast(vector->Get(slot)->GetHeapObjectAssumeStrong());
CHECK_EQ(cell->value(), *factory->undefined_value());
}
}
@@ -203,7 +203,7 @@ TEST(VectorCallFeedback) {
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
HeapObject* heap_object;
- CHECK(nexus.GetFeedback()->ToWeakHeapObject(&heap_object));
+ CHECK(nexus.GetFeedback()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(*foo, heap_object);
CcTest::CollectAllGarbage();
@@ -228,7 +228,7 @@ TEST(VectorCallFeedbackForArray) {
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
HeapObject* heap_object;
- CHECK(nexus.GetFeedback()->ToWeakHeapObject(&heap_object));
+ CHECK(nexus.GetFeedback()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(*isolate->array_function(), heap_object);
CcTest::CollectAllGarbage();
@@ -236,6 +236,68 @@ TEST(VectorCallFeedbackForArray) {
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
}
+size_t GetFeedbackVectorLength(Isolate* isolate, const char* src,
+ bool with_oneshot_opt) {
+ i::FLAG_enable_one_shot_optimization = with_oneshot_opt;
+ i::Handle<i::Object> i_object = v8::Utils::OpenHandle(*CompileRun(src));
+ i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(i_object);
+ Handle<FeedbackVector> feedback_vector =
+ Handle<FeedbackVector>(f->feedback_vector(), isolate);
+ return feedback_vector->length();
+}
+
+TEST(OneShotCallICSlotCount) {
+ if (i::FLAG_always_opt) return;
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+ i::FLAG_compilation_cache = false;
+
+ const char* no_call = R"(
+ function f1() {};
+ function f2() {};
+ (function() {
+ return arguments.callee;
+ })();
+ )";
+ // len = 2 * 1 ldaNamed property
+ CHECK_EQ(GetFeedbackVectorLength(isolate, no_call, false), 2);
+ // no slots of named property loads/stores in one shot
+ CHECK_EQ(GetFeedbackVectorLength(isolate, no_call, true), 0);
+
+ const char* single_call = R"(
+ function f1() {};
+ function f2() {};
+ (function() {
+ f1();
+ return arguments.callee;
+ })();
+ )";
+ // len = 2 * 1 ldaNamed Slot + 2 * 1 CachedGlobalSlot + 2 * 1 CallICSlot
+ CHECK_EQ(GetFeedbackVectorLength(isolate, single_call, false), 6);
+ // len = 2 * 1 CachedGlobalSlot
+ CHECK_EQ(GetFeedbackVectorLength(isolate, single_call, true), 2);
+
+ const char* multiple_calls = R"(
+ function f1() {};
+ function f2() {};
+ (function() {
+ f1();
+ f2();
+ f1();
+ f2();
+ return arguments.callee;
+ })();
+ )";
+ // len = 2 * 1 ldaNamedSlot + 2 * 2 CachedGlobalSlot (one for each unique
+ // function) + 2 * 4 CallICSlot (one for each function call)
+ CHECK_EQ(GetFeedbackVectorLength(isolate, multiple_calls, false), 14);
+ // CachedGlobalSlot (one for each unique function)
+ // len = 2 * 2 CachedGlobalSlot (one for each unique function)
+ CHECK_EQ(GetFeedbackVectorLength(isolate, multiple_calls, true), 4);
+}
+
TEST(VectorCallCounts) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -284,7 +346,7 @@ TEST(VectorConstructCounts) {
FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
- CHECK(feedback_vector->Get(slot)->IsWeakHeapObject());
+ CHECK(feedback_vector->Get(slot)->IsWeak());
CompileRun("f(Foo); f(Foo);");
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
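
These hunks track MaybeObject accessor renames: ToStrongHeapObject() becomes GetHeapObjectAssumeStrong(), ToWeakHeapObject(&obj) becomes GetHeapObjectIfWeak(&obj), and IsWeakHeapObject() becomes IsWeak(). Judging by the names used in this diff, the new scheme separates asserting accessors (Assume...) from testing ones (If...). Usage as it appears above:

  HeapObject* heap_object;
  // Testing form: true (and fills heap_object) only for a weak reference.
  if (maybe_object->GetHeapObjectIfWeak(&heap_object)) { /* ... */ }
  // Asserting form: the caller already knows the reference is strong.
  HeapObject* strong = maybe_object->GetHeapObjectAssumeStrong();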
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 4e37103558..f40dbe83bd 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -370,9 +370,8 @@ class Expectations {
heap_type);
Handle<String> name = MakeName("prop", property_index);
- return Map::TransitionToDataProperty(
- isolate_, map, name, value, attributes, constness,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ return Map::TransitionToDataProperty(isolate_, map, name, value, attributes,
+ constness, StoreOrigin::kNamed);
}
Handle<Map> TransitionToDataConstant(Handle<Map> map,
@@ -383,9 +382,9 @@ class Expectations {
SetDataConstant(property_index, attributes, value);
Handle<String> name = MakeName("prop", property_index);
- return Map::TransitionToDataProperty(
- isolate_, map, name, value, attributes, PropertyConstness::kConst,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ return Map::TransitionToDataProperty(isolate_, map, name, value, attributes,
+ PropertyConstness::kConst,
+ StoreOrigin::kNamed);
}
Handle<Map> FollowDataTransition(Handle<Map> map,
@@ -657,7 +656,9 @@ static void TestGeneralizeField(int detach_property_at_index,
CanonicalHandleScope canonical(isolate);
JSHeapBroker broker(isolate, &zone);
CompilationDependencies dependencies(isolate, &zone);
- dependencies.DependOnFieldType(MapRef(&broker, map), property_index);
+ MapRef map_ref(&broker, map);
+ map_ref.SerializeOwnDescriptors();
+ dependencies.DependOnFieldType(map_ref, property_index);
Handle<Map> field_owner(map->FindFieldOwner(isolate, property_index),
isolate);
@@ -1029,7 +1030,9 @@ static void TestReconfigureDataFieldAttribute_GeneralizeField(
CanonicalHandleScope canonical(isolate);
JSHeapBroker broker(isolate, &zone);
CompilationDependencies dependencies(isolate, &zone);
- dependencies.DependOnFieldType(MapRef(&broker, map), kSplitProp);
+ MapRef map_ref(&broker, map);
+ map_ref.SerializeOwnDescriptors();
+ dependencies.DependOnFieldType(map_ref, kSplitProp);
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
@@ -1113,7 +1116,9 @@ static void TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
CanonicalHandleScope canonical(isolate);
JSHeapBroker broker(isolate, &zone);
CompilationDependencies dependencies(isolate, &zone);
- dependencies.DependOnFieldType(MapRef(&broker, map), kSplitProp);
+ MapRef map_ref(&broker, map);
+ map_ref.SerializeOwnDescriptors();
+ dependencies.DependOnFieldType(map_ref, kSplitProp);
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
@@ -1794,7 +1799,9 @@ static void TestReconfigureElementsKind_GeneralizeField(
CanonicalHandleScope canonical(isolate);
JSHeapBroker broker(isolate, &zone);
CompilationDependencies dependencies(isolate, &zone);
- dependencies.DependOnFieldType(MapRef(&broker, map), kDiffProp);
+ MapRef map_ref(&broker, map);
+ map_ref.SerializeOwnDescriptors();
+ dependencies.DependOnFieldType(map_ref, kDiffProp);
// Reconfigure elements kinds of |map2|, which should generalize
// representations in |map|.
@@ -1889,7 +1896,9 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
CanonicalHandleScope canonical(isolate);
JSHeapBroker broker(isolate, &zone);
CompilationDependencies dependencies(isolate, &zone);
- dependencies.DependOnFieldType(MapRef(&broker, map), kDiffProp);
+ MapRef map_ref(&broker, map);
+ map_ref.SerializeOwnDescriptors();
+ dependencies.DependOnFieldType(map_ref, kDiffProp);
// Reconfigure elements kinds of |map2|, which should generalize
// representations in |map|.
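
Two mechanical changes repeat through test-field-type-tracking.cc: the old store-from-keyed constants (e.g. Object::CERTAINLY_NOT_STORE_FROM_KEYED) collapse into a StoreOrigin enum with kNamed and kMaybeKeyed, and a MapRef must serialize its own descriptors before a field-type dependency can be recorded. The pattern, lifted from the hunks above:

  MapRef map_ref(&broker, map);
  map_ref.SerializeOwnDescriptors();  // Now required before the next call.
  dependencies.DependOnFieldType(map_ref, property_index);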
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index e4e5f4c8dc..257ef1c723 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -157,12 +157,9 @@ static Optional<SourceLocation> GetLocation(const v8::HeapSnapshot* s,
const v8::HeapGraphNode* node) {
const i::HeapSnapshot* snapshot = reinterpret_cast<const i::HeapSnapshot*>(s);
const std::vector<SourceLocation>& locations = snapshot->locations();
- const int index =
- const_cast<i::HeapEntry*>(reinterpret_cast<const i::HeapEntry*>(node))
- ->index();
-
+ const i::HeapEntry* entry = reinterpret_cast<const i::HeapEntry*>(node);
for (const auto& loc : locations) {
- if (loc.entry_index == index) {
+ if (loc.entry_index == entry->index()) {
return Optional<SourceLocation>(loc);
}
}
@@ -223,7 +220,7 @@ static bool ValidateSnapshot(const v8::HeapSnapshot* snapshot, int depth = 3) {
entry->value = reinterpret_cast<void*>(ref_count + 1);
}
uint32_t unretained_entries_count = 0;
- std::vector<i::HeapEntry>& entries = heap_snapshot->entries();
+ std::deque<i::HeapEntry>& entries = heap_snapshot->entries();
for (i::HeapEntry& entry : entries) {
v8::base::HashMap::Entry* map_entry = visited.Lookup(
reinterpret_cast<void*>(&entry),
@@ -506,9 +503,6 @@ TEST(HeapSnapshotHeapNumbers) {
}
TEST(HeapSnapshotHeapBigInts) {
- // TODO(luoe): remove flag when it is on by default.
- v8::internal::FLAG_harmony_bigint = true;
-
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -686,7 +680,7 @@ TEST(HeapSnapshotWeakCollection) {
++weak_entries;
}
}
- CHECK_EQ(1, weak_entries); // Key is the only weak.
+ CHECK_EQ(2, weak_entries); // Key and value are weak.
const v8::HeapGraphNode* wm_s =
GetProperty(env->GetIsolate(), wm, v8::HeapGraphEdge::kProperty, "str");
CHECK(wm_s);
@@ -1003,21 +997,6 @@ TEST(HeapEntryIdsAndGC) {
CHECK_EQ(b1->GetId(), b2->GetId());
}
-
-TEST(HeapSnapshotRootPreservedAfterSorting) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
- const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
- CHECK(ValidateSnapshot(snapshot));
- const v8::HeapGraphNode* root1 = snapshot->GetRoot();
- const_cast<i::HeapSnapshot*>(reinterpret_cast<const i::HeapSnapshot*>(
- snapshot))->GetSortedEntriesList();
- const v8::HeapGraphNode* root2 = snapshot->GetRoot();
- CHECK_EQ(root1, root2);
-}
-
-
namespace {
class TestJSONStream : public v8::OutputStream {
@@ -1025,9 +1004,9 @@ class TestJSONStream : public v8::OutputStream {
TestJSONStream() : eos_signaled_(0), abort_countdown_(-1) {}
explicit TestJSONStream(int abort_countdown)
: eos_signaled_(0), abort_countdown_(abort_countdown) {}
- virtual ~TestJSONStream() {}
- virtual void EndOfStream() { ++eos_signaled_; }
- virtual WriteResult WriteAsciiChunk(char* buffer, int chars_written) {
+ ~TestJSONStream() override = default;
+ void EndOfStream() override { ++eos_signaled_; }
+ WriteResult WriteAsciiChunk(char* buffer, int chars_written) override {
if (abort_countdown_ > 0) --abort_countdown_;
if (abort_countdown_ == 0) return kAbort;
CHECK_GT(chars_written, 0);
@@ -1053,8 +1032,9 @@ class OneByteResource : public v8::String::ExternalOneByteStringResource {
explicit OneByteResource(i::Vector<char> string) : data_(string.start()) {
length_ = string.length();
}
- virtual const char* data() const { return data_; }
- virtual size_t length() const { return length_; }
+ const char* data() const override { return data_; }
+ size_t length() const override { return length_; }
+
private:
const char* data_;
size_t length_;
@@ -1215,20 +1195,15 @@ class TestStatsStream : public v8::OutputStream {
intervals_count_(0),
first_interval_index_(-1) { }
TestStatsStream(const TestStatsStream& stream)
- : v8::OutputStream(stream),
- eos_signaled_(stream.eos_signaled_),
- updates_written_(stream.updates_written_),
- entries_count_(stream.entries_count_),
- entries_size_(stream.entries_size_),
- intervals_count_(stream.intervals_count_),
- first_interval_index_(stream.first_interval_index_) { }
- virtual ~TestStatsStream() {}
- virtual void EndOfStream() { ++eos_signaled_; }
- virtual WriteResult WriteAsciiChunk(char* buffer, int chars_written) {
+
+ = default;
+ ~TestStatsStream() override = default;
+ void EndOfStream() override { ++eos_signaled_; }
+ WriteResult WriteAsciiChunk(char* buffer, int chars_written) override {
UNREACHABLE();
}
- virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* buffer,
- int updates_written) {
+ WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* buffer,
+ int updates_written) override {
++intervals_count_;
CHECK(updates_written);
updates_written_ += updates_written;
@@ -1533,7 +1508,7 @@ class TestActivityControl : public v8::ActivityControl {
total_(0),
abort_count_(abort_count),
reported_finish_(false) {}
- ControlOption ReportProgressValue(int done, int total) {
+ ControlOption ReportProgressValue(int done, int total) override {
done_ = done;
total_ = total;
CHECK_LE(done_, total_);
@@ -1610,7 +1585,7 @@ class EmbedderGraphBuilder : public v8::PersistentHandleVisitor {
public:
explicit Group(const char* name) : Node(name, 0) {}
// v8::EmbedderGraph::EmbedderNode
- bool IsRootNode() { return true; }
+ bool IsRootNode() override { return true; }
};
EmbedderGraphBuilder(v8::Isolate* isolate, v8::EmbedderGraph* graph)
@@ -1784,7 +1759,7 @@ TEST(DeleteHeapSnapshot) {
class NameResolver : public v8::HeapProfiler::ObjectNameResolver {
public:
- virtual const char* GetName(v8::Local<v8::Object> object) {
+ const char* GetName(v8::Local<v8::Object> object) override {
return "Global object name";
}
};
@@ -3062,7 +3037,7 @@ class EmbedderRootNode : public EmbedderNode {
public:
explicit EmbedderRootNode(const char* name) : EmbedderNode(name, 0) {}
// Graph::Node override.
- bool IsRootNode() { return true; }
+ bool IsRootNode() override { return true; }
};
// Used to pass the global object to the BuildEmbedderGraph callback.
@@ -3900,45 +3875,3 @@ TEST(WeakReference) {
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
}
-
-TEST(Bug8373_1) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
-
- heap_profiler->StartSamplingHeapProfiler(100);
-
- heap_profiler->TakeHeapSnapshot();
- // Causes the StringsStorage to be deleted.
- heap_profiler->DeleteAllHeapSnapshots();
-
- // Triggers an allocation sample that tries to use the StringsStorage.
- for (int i = 0; i < 2 * 1024; ++i) {
- CompileRun(
- "new Array(64);"
- "new Uint8Array(16);");
- }
-
- heap_profiler->StopSamplingHeapProfiler();
-}
-
-TEST(Bug8373_2) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
-
- heap_profiler->StartTrackingHeapObjects(true);
-
- heap_profiler->TakeHeapSnapshot();
- // Causes the StringsStorage to be deleted.
- heap_profiler->DeleteAllHeapSnapshots();
-
- // Triggers allocations that try to use the StringsStorage.
- for (int i = 0; i < 2 * 1024; ++i) {
- CompileRun(
- "new Array(64);"
- "new Uint8Array(16);");
- }
-
- heap_profiler->StopTrackingHeapObjects();
-}
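
The hunks above are mechanical modernizations: 'virtual' on overriding members gives way to 'override', and empty user-written destructors become '= default'. A minimal sketch of the pattern, using illustrative class names rather than the real V8 interfaces:

    #include <iostream>

    class Stream {
     public:
      virtual ~Stream() = default;
      virtual void EndOfStream() = 0;
    };

    class CountingStream : public Stream {
     public:
      // 'override' makes the compiler reject signature mismatches with the
      // base class; '= default' replaces the empty destructor body.
      ~CountingStream() override = default;
      void EndOfStream() override { ++eos_signaled_; }
      int eos_signaled() const { return eos_signaled_; }

     private:
      int eos_signaled_ = 0;
    };

    int main() {
      CountingStream s;
      s.EndOfStream();
      std::cout << s.eos_signaled() << "\n";  // prints 1
    }
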
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 0e850b1682..9255dc04b0 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -1143,8 +1143,6 @@ TEST(SubclassArrayBuiltinNoInlineNew) {
TEST(SubclassTypedArrayBuiltin) {
// Avoid eventual completion of in-object slack tracking.
FLAG_always_opt = false;
- // Make BigInt64Array/BigUint64Array available for testing.
- FLAG_harmony_bigint = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-inspector.cc b/deps/v8/test/cctest/test-inspector.cc
index 8986c3c488..6dd2aefb9e 100644
--- a/deps/v8/test/cctest/test-inspector.cc
+++ b/deps/v8/test/cctest/test-inspector.cc
@@ -20,7 +20,7 @@ namespace {
class NoopChannel : public V8Inspector::Channel {
public:
- virtual ~NoopChannel() {}
+ ~NoopChannel() override = default;
void sendResponse(int callId,
std::unique_ptr<StringBuffer> message) override {}
void sendNotification(std::unique_ptr<StringBuffer> message) override {}
diff --git a/deps/v8/test/cctest/test-intl.cc b/deps/v8/test/cctest/test-intl.cc
index 950fbe3d30..3359a3878b 100644
--- a/deps/v8/test/cctest/test-intl.cc
+++ b/deps/v8/test/cctest/test-intl.cc
@@ -4,10 +4,10 @@
#ifdef V8_INTL_SUPPORT
-#include "src/builtins/builtins-intl.h"
#include "src/lookup.h"
#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
+#include "src/objects/js-number-format.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -125,8 +125,7 @@ TEST(GetStringOption) {
Handle<String> key = isolate->factory()->NewStringFromAsciiChecked("foo");
v8::internal::LookupIterator it(isolate, options, key);
CHECK(Object::SetProperty(&it, Handle<Smi>(Smi::FromInt(42), isolate),
- LanguageMode::kStrict,
- AllocationMemento::MAY_BE_STORE_FROM_KEYED)
+ LanguageMode::kStrict, StoreOrigin::kMaybeKeyed)
.FromJust());
{
@@ -209,52 +208,26 @@ TEST(GetBoolOption) {
}
}
-bool ScriptTagWasRemoved(std::string locale, std::string expected) {
- std::string without_script_tag;
- bool didShorten = Intl::RemoveLocaleScriptTag(locale, &without_script_tag);
- return didShorten && expected == without_script_tag;
-}
-
-bool ScriptTagWasNotRemoved(std::string locale) {
- std::string without_script_tag;
- bool didShorten = Intl::RemoveLocaleScriptTag(locale, &without_script_tag);
- return !didShorten && without_script_tag.empty();
-}
-
-TEST(RemoveLocaleScriptTag) {
- CHECK(ScriptTagWasRemoved("aa_Bbbb_CC", "aa_CC"));
- CHECK(ScriptTagWasRemoved("aaa_Bbbb_CC", "aaa_CC"));
-
- CHECK(ScriptTagWasNotRemoved("aa"));
- CHECK(ScriptTagWasNotRemoved("aaa"));
- CHECK(ScriptTagWasNotRemoved("aa_CC"));
- CHECK(ScriptTagWasNotRemoved("aa_Bbb_CC"));
- CHECK(ScriptTagWasNotRemoved("aa_1bbb_CC"));
-}
-
TEST(GetAvailableLocales) {
std::set<std::string> locales;
- locales = Intl::GetAvailableLocales(IcuService::kBreakIterator);
+ locales = Intl::GetAvailableLocales(ICUService::kBreakIterator);
CHECK(locales.count("en-US"));
CHECK(!locales.count("abcdefg"));
- locales = Intl::GetAvailableLocales(IcuService::kCollator);
- CHECK(locales.count("en-US"));
-
- locales = Intl::GetAvailableLocales(IcuService::kDateFormat);
+ locales = Intl::GetAvailableLocales(ICUService::kCollator);
CHECK(locales.count("en-US"));
- locales = Intl::GetAvailableLocales(IcuService::kNumberFormat);
+ locales = Intl::GetAvailableLocales(ICUService::kDateFormat);
CHECK(locales.count("en-US"));
- locales = Intl::GetAvailableLocales(IcuService::kPluralRules);
+ locales = Intl::GetAvailableLocales(ICUService::kNumberFormat);
CHECK(locales.count("en-US"));
- locales = Intl::GetAvailableLocales(IcuService::kResourceBundle);
+ locales = Intl::GetAvailableLocales(ICUService::kPluralRules);
CHECK(locales.count("en-US"));
- locales = Intl::GetAvailableLocales(IcuService::kRelativeDateTimeFormatter);
+ locales = Intl::GetAvailableLocales(ICUService::kRelativeDateTimeFormatter);
CHECK(locales.count("en-US"));
}
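
The test-intl hunk is mostly a rename (IcuService becomes ICUService) plus a header swap. A self-contained sketch of the call pattern the test exercises; the enum values and the fixed locale list here are assumptions for illustration, not the real V8 implementation:

    #include <iostream>
    #include <set>
    #include <string>

    enum class ICUService { kBreakIterator, kCollator, kDateFormat };

    std::set<std::string> GetAvailableLocales(ICUService service) {
      (void)service;  // a real implementation would query ICU per service
      return {"en-US", "de-DE"};
    }

    int main() {
      std::set<std::string> locales =
          GetAvailableLocales(ICUService::kCollator);
      std::cout << locales.count("en-US") << "\n";  // prints 1
    }
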
diff --git a/deps/v8/test/cctest/test-javascript-arm64.cc b/deps/v8/test/cctest/test-javascript-arm64.cc
index 3b1f1a1d12..428726fdc7 100644
--- a/deps/v8/test/cctest/test-javascript-arm64.cc
+++ b/deps/v8/test/cctest/test-javascript-arm64.cc
@@ -46,7 +46,7 @@ namespace test_javascript_arm64 {
static void ExpectBoolean(Local<v8::Context> context, bool expected,
Local<Value> result) {
CHECK(result->IsBoolean());
- CHECK_EQ(expected, result->BooleanValue(context).FromJust());
+ CHECK_EQ(expected, result->BooleanValue(context->GetIsolate()));
}
static void ExpectInt32(Local<v8::Context> context, int32_t expected,
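
The BooleanValue change tracks a V8 API shift: conversion to boolean cannot throw, so the accessor now takes an isolate and returns a plain bool instead of a context-keyed Maybe<bool>. A fragment showing the new shape, assuming the V8 7.1 headers:

    #include <v8.h>

    bool ToBool(v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
      // No Maybe<> to unwrap: the boolean conversion is infallible.
      return value->BooleanValue(context->GetIsolate());
    }
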
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index dec279e781..5a4dcd588e 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -51,7 +51,7 @@ class DeoptimizeCodeThread : public v8::base::Thread {
context_(isolate, context),
source_(trigger) {}
- void Run() {
+ void Run() override {
v8::Locker locker(isolate_);
isolate_->Enter();
v8::HandleScope handle_scope(isolate_);
@@ -290,7 +290,7 @@ class KangarooThread : public v8::base::Thread {
isolate_(isolate),
context_(isolate, context) {}
- void Run() {
+ void Run() override {
{
v8::Locker locker(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
@@ -362,7 +362,7 @@ class JoinableThread {
thread_(this) {
}
- virtual ~JoinableThread() {}
+ virtual ~JoinableThread() = default;
void Start() {
thread_.Start();
@@ -382,7 +382,7 @@ class JoinableThread {
: Thread(Options(joinable_thread->name_)),
joinable_thread_(joinable_thread) {}
- virtual void Run() {
+ void Run() override {
joinable_thread_->Run();
joinable_thread_->semaphore_.Signal();
}
@@ -408,7 +408,7 @@ class IsolateLockingThreadWithLocalContext : public JoinableThread {
isolate_(isolate) {
}
- virtual void Run() {
+ void Run() override {
v8::Locker locker(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
@@ -460,7 +460,7 @@ class IsolateNestedLockingThread : public JoinableThread {
explicit IsolateNestedLockingThread(v8::Isolate* isolate)
: JoinableThread("IsolateNestedLocking"), isolate_(isolate) {
}
- virtual void Run() {
+ void Run() override {
v8::Locker lock(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
@@ -508,7 +508,7 @@ class SeparateIsolatesLocksNonexclusiveThread : public JoinableThread {
isolate1_(isolate1), isolate2_(isolate2) {
}
- virtual void Run() {
+ void Run() override {
v8::Locker lock(isolate1_);
v8::Isolate::Scope isolate_scope(isolate1_);
v8::HandleScope handle_scope(isolate1_);
@@ -556,7 +556,7 @@ class LockIsolateAndCalculateFibSharedContextThread : public JoinableThread {
isolate_(isolate),
context_(isolate, context) {}
- virtual void Run() {
+ void Run() override {
v8::Locker lock(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
@@ -577,7 +577,7 @@ class LockerUnlockerThread : public JoinableThread {
isolate_(isolate) {
}
- virtual void Run() {
+ void Run() override {
isolate_->DiscardThreadSpecificMetadata(); // No-op
{
v8::Locker lock(isolate_);
@@ -637,7 +637,7 @@ class LockTwiceAndUnlockThread : public JoinableThread {
isolate_(isolate) {
}
- virtual void Run() {
+ void Run() override {
v8::Locker lock(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
@@ -697,7 +697,7 @@ class LockAndUnlockDifferentIsolatesThread : public JoinableThread {
isolate2_(isolate2) {
}
- virtual void Run() {
+ void Run() override {
std::unique_ptr<LockIsolateAndCalculateFibSharedContextThread> thread;
v8::Locker lock1(isolate1_);
CHECK(v8::Locker::IsLocked(isolate1_));
@@ -760,7 +760,7 @@ class LockUnlockLockThread : public JoinableThread {
isolate_(isolate),
context_(isolate, context) {}
- virtual void Run() {
+ void Run() override {
v8::Locker lock1(isolate_);
CHECK(v8::Locker::IsLocked(isolate_));
CHECK(!v8::Locker::IsLocked(CcTest::isolate()));
@@ -827,7 +827,7 @@ class LockUnlockLockDefaultIsolateThread : public JoinableThread {
: JoinableThread("LockUnlockLockDefaultIsolateThread"),
context_(CcTest::isolate(), context) {}
- virtual void Run() {
+ void Run() override {
v8::Locker lock1(CcTest::isolate());
{
v8::Isolate::Scope isolate_scope(CcTest::isolate());
@@ -914,7 +914,7 @@ class IsolateGenesisThread : public JoinableThread {
extension_names_(extension_names)
{}
- virtual void Run() {
+ void Run() override {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
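
All of the test-lockers hunks are the same override cleanup applied to JoinableThread subclasses. The underlying start/signal/join pattern can be sketched with the standard library alone; this analogue is an assumption for illustration, not the v8::base implementation:

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class JoinableTask {
     public:
      virtual ~JoinableTask() = default;
      virtual void Run() = 0;

      void Start() {
        thread_ = std::thread([this] {
          Run();  // executes on the spawned thread
          std::lock_guard<std::mutex> lock(mutex_);
          done_ = true;
          cv_.notify_one();
        });
      }

      void Join() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return done_; });
        lock.unlock();
        thread_.join();
      }

     private:
      std::thread thread_;
      std::mutex mutex_;
      std::condition_variable cv_;
      bool done_ = false;
    };

    struct NopTask : JoinableTask {
      void Run() override {}  // the real tests lock an isolate here
    };

    int main() {
      NopTask task;
      task.Start();
      task.Join();
    }
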
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 9ac73af3e5..f7774b7bda 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -155,12 +155,16 @@ class ScopedLoggerInitializer {
start = IndexOfLine({search_term}, start);
if (start == std::string::npos) break;
std::vector<std::string> columns = Split(log_.at(start), ',');
- CHECK_LT(address_column, columns.size());
+ ++start; // Skip the found line.
+ // TODO(crbug.com/v8/8084): These two 'continue' statements should really
+ // be errors. But on Windows the log is sometimes mysteriously cut off at
+ // the end. If the cut-off point happens to fall in the address field,
+ // these conditions are triggered.
+ if (address_column >= columns.size()) continue;
uintptr_t address =
strtoll(columns.at(address_column).c_str(), nullptr, 16);
- CHECK_GT(address, 0);
+ if (address == 0) continue;
result.insert(address);
- ++start; // Skip the found line.
}
return result;
}
@@ -259,9 +263,9 @@ class SimpleExternalString : public v8::String::ExternalStringResource {
for (int i = 0; i < utf_source_.length(); ++i)
utf_source_[i] = source[i];
}
- virtual ~SimpleExternalString() {}
- virtual size_t length() const { return utf_source_.length(); }
- virtual const uint16_t* data() const { return utf_source_.start(); }
+ ~SimpleExternalString() override = default;
+ size_t length() const override { return utf_source_.length(); }
+ const uint16_t* data() const override { return utf_source_.start(); }
private:
i::ScopedVector<uint16_t> utf_source_;
};
@@ -428,8 +432,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
" (function a(j) { return function b() { return j; } })(100);\n"
"})(this);");
logger.logger()->StopProfiler();
- reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectAllGarbage(
- i::Heap::kMakeHeapIterableMask, i::GarbageCollectionReason::kTesting);
+ CcTest::PreciseCollectAllGarbage();
logger.StringEvent("test-logging-done", "");
// Iterate heap to find compiled functions, will write to log.
@@ -555,6 +558,8 @@ TEST(Issue539892) {
TEST(LogAll) {
SETUP_FLAGS();
i::FLAG_log_all = true;
+ i::FLAG_turbo_inlining = false;
+ i::FLAG_enable_one_shot_optimization = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -580,7 +585,9 @@ TEST(LogAll) {
CHECK(logger.ContainsLine({"api,v8::Script::Run"}));
CHECK(logger.ContainsLine({"code-creation,LazyCompile,", "testAddFn"}));
if (i::FLAG_opt && !i::FLAG_always_opt) {
- CHECK(logger.ContainsLine({"code-deopt,", "soft"}));
+ CHECK(logger.ContainsLine({"code-deopt,", "not a Smi"}));
+ if (i::FLAG_enable_one_shot_optimization)
+ CHECK(logger.ContainsLine({"code-deopt,", "DeoptimizeNow"}));
CHECK(logger.ContainsLine({"timer-event-start", "V8.DeoptimizeCode"}));
CHECK(logger.ContainsLine({"timer-event-end", "V8.DeoptimizeCode"}));
}
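
The log-scanning change above replaces hard CHECKs with tolerant skips because Windows logs can be truncated mid-line. The split-parse-skip loop distills to the following self-contained sketch (the column layout and sample line are made up):

    #include <cstdint>
    #include <cstdlib>
    #include <set>
    #include <sstream>
    #include <string>
    #include <vector>

    std::set<uintptr_t> ExtractAddresses(const std::vector<std::string>& log,
                                         size_t address_column) {
      std::set<uintptr_t> result;
      for (const std::string& line : log) {
        std::vector<std::string> columns;
        std::stringstream stream(line);
        std::string column;
        while (std::getline(stream, column, ',')) columns.push_back(column);
        if (address_column >= columns.size()) continue;  // truncated line
        uintptr_t address =
            std::strtoull(columns[address_column].c_str(), nullptr, 16);
        if (address == 0) continue;  // unparsable address field
        result.insert(address);
      }
      return result;
    }

    int main() {
      std::set<uintptr_t> addresses =
          ExtractAddresses({"code-creation,LazyCompile,0x2a,64,f"}, 2);
      return addresses.count(0x2a) == 1 ? 0 : 1;
    }
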
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 18404d6629..97ddda12c5 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -703,7 +703,7 @@ TEST(min_max_nan) {
auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) {
__ bind(nan);
- __ LoadRoot(t8, Heap::kNanValueRootIndex);
+ __ LoadRoot(t8, RootIndex::kNanValue);
__ Ldc1(dst, FieldMemOperand(t8, HeapNumber::kValueOffset));
__ Branch(back);
};
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index 9a6e319363..b2aea23920 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -944,7 +944,7 @@ TEST(min_max_nan) {
auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) {
__ bind(nan);
- __ LoadRoot(t8, Heap::kNanValueRootIndex);
+ __ LoadRoot(t8, RootIndex::kNanValue);
__ Ldc1(dst, FieldMemOperand(t8, HeapNumber::kValueOffset));
__ Branch(back);
};
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index 59653ccd73..3690752f13 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -67,7 +67,7 @@ TEST(Regress340063) {
// Call GC to see if we can handle a poisonous memento right after the
// current new space top pointer.
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
}
@@ -84,7 +84,7 @@ TEST(Regress470390) {
// Call GC to see if we can handle a poisonous memento right after the
// current new space top pointer.
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
}
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 72e3711405..942d597ccc 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -71,6 +71,326 @@ void MockUseCounterCallback(v8::Isolate* isolate,
} // namespace
+TEST(IsContextualKeyword) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(Token::TypeForTesting(token) == 'C',
+ Token::IsContextualKeyword(token));
+ }
+}
+
+bool TokenIsAnyIdentifier(Token::Value token) {
+ switch (token) {
+ case Token::IDENTIFIER:
+ case Token::ASYNC:
+ case Token::AWAIT:
+ case Token::YIELD:
+ case Token::LET:
+ case Token::STATIC:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
+ case Token::ENUM:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(AnyIdentifierToken) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsAnyIdentifier(token), Token::IsAnyIdentifier(token));
+ }
+}
+
+bool TokenIsIdentifier(Token::Value token, LanguageMode language_mode,
+ bool is_generator, bool disallow_await) {
+ switch (token) {
+ case Token::IDENTIFIER:
+ case Token::ASYNC:
+ return true;
+ case Token::YIELD:
+ return !is_generator && is_sloppy(language_mode);
+ case Token::AWAIT:
+ return !disallow_await;
+ case Token::LET:
+ case Token::STATIC:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
+ return is_sloppy(language_mode);
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+TEST(IsIdentifierToken) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ for (size_t raw_language_mode = 0; raw_language_mode < LanguageModeSize;
+ raw_language_mode++) {
+ LanguageMode mode = static_cast<LanguageMode>(raw_language_mode);
+ for (int is_generator = 0; is_generator < 2; is_generator++) {
+ for (int disallow_await = 0; disallow_await < 2; disallow_await++) {
+ CHECK_EQ(
+ TokenIsIdentifier(token, mode, is_generator, disallow_await),
+ Token::IsIdentifier(token, mode, is_generator, disallow_await));
+ }
+ }
+ }
+ }
+}
+
+bool TokenIsStrictReservedWord(Token::Value token) {
+ switch (token) {
+ case Token::LET:
+ case Token::STATIC:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+TEST(IsStrictReservedWord) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsStrictReservedWord(token),
+ Token::IsStrictReservedWord(token));
+ }
+}
+
+bool TokenIsLiteral(Token::Value token) {
+ switch (token) {
+ case Token::NULL_LITERAL:
+ case Token::TRUE_LITERAL:
+ case Token::FALSE_LITERAL:
+ case Token::NUMBER:
+ case Token::SMI:
+ case Token::BIGINT:
+ case Token::STRING:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+TEST(IsLiteralToken) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsLiteral(token), Token::IsLiteral(token));
+ }
+}
+bool TokenIsAssignmentOp(Token::Value token) {
+ switch (token) {
+ case Token::INIT:
+ case Token::ASSIGN:
+#define T(name, string, precedence) case Token::name:
+ BINARY_OP_TOKEN_LIST(T, EXPAND_BINOP_ASSIGN_TOKEN)
+#undef T
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(AssignmentOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsAssignmentOp(token), Token::IsAssignmentOp(token));
+ }
+}
+
+bool TokenIsBinaryOp(Token::Value token) {
+ switch (token) {
+ case Token::COMMA:
+ case Token::OR:
+ case Token::AND:
+#define T(name, string, precedence) case Token::name:
+ BINARY_OP_TOKEN_LIST(T, EXPAND_BINOP_TOKEN)
+#undef T
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(BinaryOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsBinaryOp(token), Token::IsBinaryOp(token));
+ }
+}
+
+bool TokenIsCompareOp(Token::Value token) {
+ switch (token) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ case Token::NE:
+ case Token::NE_STRICT:
+ case Token::LT:
+ case Token::GT:
+ case Token::LTE:
+ case Token::GTE:
+ case Token::INSTANCEOF:
+ case Token::IN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(CompareOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsCompareOp(token), Token::IsCompareOp(token));
+ }
+}
+
+bool TokenIsOrderedRelationalCompareOp(Token::Value token) {
+ switch (token) {
+ case Token::LT:
+ case Token::GT:
+ case Token::LTE:
+ case Token::GTE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(IsOrderedRelationalCompareOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsOrderedRelationalCompareOp(token),
+ Token::IsOrderedRelationalCompareOp(token));
+ }
+}
+
+bool TokenIsEqualityOp(Token::Value token) {
+ switch (token) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(IsEqualityOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsEqualityOp(token), Token::IsEqualityOp(token));
+ }
+}
+
+bool TokenIsBitOp(Token::Value token) {
+ switch (token) {
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::BIT_NOT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(IsBitOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsBitOp(token), Token::IsBitOp(token));
+ }
+}
+
+bool TokenIsUnaryOp(Token::Value token) {
+ switch (token) {
+ case Token::NOT:
+ case Token::BIT_NOT:
+ case Token::DELETE:
+ case Token::TYPEOF:
+ case Token::VOID:
+ case Token::ADD:
+ case Token::SUB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(IsUnaryOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsUnaryOp(token), Token::IsUnaryOp(token));
+ }
+}
+
+bool TokenIsCountOp(Token::Value token) {
+ switch (token) {
+ case Token::INC:
+ case Token::DEC:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(IsCountOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsCountOp(token), Token::IsCountOp(token));
+ }
+}
+
+bool TokenIsShiftOp(Token::Value token) {
+ switch (token) {
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(IsShiftOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsShiftOp(token), Token::IsShiftOp(token));
+ }
+}
+
+bool TokenIsTrivialExpressionToken(Token::Value token) {
+ switch (token) {
+ case Token::SMI:
+ case Token::NUMBER:
+ case Token::BIGINT:
+ case Token::NULL_LITERAL:
+ case Token::TRUE_LITERAL:
+ case Token::FALSE_LITERAL:
+ case Token::STRING:
+ case Token::IDENTIFIER:
+ case Token::THIS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(IsTrivialExpressionToken) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsTrivialExpressionToken(token),
+ Token::IsTrivialExpressionToken(token));
+ }
+}
+
TEST(ScanKeywords) {
struct KeywordToken {
const char* keyword;
@@ -253,8 +573,8 @@ class ScriptResource : public v8::String::ExternalOneByteStringResource {
ScriptResource(const char* data, size_t length)
: data_(data), length_(length) { }
- const char* data() const { return data_; }
- size_t length() const { return length_; }
+ const char* data() const override { return data_; }
+ size_t length() const override { return length_; }
private:
const char* data_;
@@ -1230,8 +1550,10 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
"However, the preparser succeeded",
source->ToCString().get(), message_string->ToCString().get());
}
- // Check that preparser and parser produce the same error.
- if (test_preparser && !ignore_error_msg) {
+ // Check that preparser and parser produce the same error, except for cases
+ // where we do not track errors in the preparser.
+ if (test_preparser && !ignore_error_msg &&
+ !pending_error_handler.ErrorUnidentifiableByPreParser()) {
i::Handle<i::String> preparser_message =
pending_error_handler.FormatErrorMessageForTest(CcTest::i_isolate());
if (!i::String::Equals(isolate, message_string, preparser_message)) {
@@ -1721,7 +2043,7 @@ TEST(ErrorsFutureStrictReservedWords) {
{"() => {", "}"},
{nullptr, nullptr}};
const char* invalid_statements[] = {
- FUTURE_STRICT_RESERVED_LEX_BINDINGS("let") nullptr};
+ FUTURE_STRICT_RESERVED_LEX_BINDINGS(let) nullptr};
RunParserSyncTest(non_strict_contexts, invalid_statements, kError);
}
@@ -2280,7 +2602,7 @@ TEST(OptionalCatchBinding) {
{"try {", "} catch ({e}) { }"},
{"try {} catch ({e}) {", "}"},
{"function f() {", "}"},
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* statement_data[] = {
@@ -2288,7 +2610,7 @@ TEST(OptionalCatchBinding) {
"try { } catch { } finally { }",
"try { let e; } catch { let e; }",
"try { let e; } catch { let e; } finally { let e; }",
- NULL
+ nullptr
};
// clang-format on
@@ -2301,7 +2623,7 @@ TEST(OptionalCatchBindingInDoExpression) {
// clang-format off
const char* context_data[][2] = {
{"((x = (eval(''), do {", "}))=>{})()"},
- { NULL, NULL }
+ { nullptr, nullptr }
};
const char* statement_data[] = {
@@ -2309,12 +2631,12 @@ TEST(OptionalCatchBindingInDoExpression) {
"try { } catch { } finally { }",
"try { let e; } catch { let e; }",
"try { let e; } catch { let e; } finally { let e; }",
- NULL
+ nullptr
};
// clang-format on
static const ParserFlag do_and_catch_flags[] = {kAllowHarmonyDoExpressions};
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+ RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0,
do_and_catch_flags, arraysize(do_and_catch_flags));
}
@@ -3240,272 +3562,276 @@ TEST(MaybeAssignedInsideLoop) {
std::vector<unsigned> top; // Can't use {} in initializers below.
Input module_and_script_tests[] = {
- {1, "for (j=x; j<10; ++j) { foo = j }", top},
- {1, "for (j=x; j<10; ++j) { [foo] = [j] }", top},
- {1, "for (j=x; j<10; ++j) { var foo = j }", top},
- {1, "for (j=x; j<10; ++j) { var [foo] = [j] }", top},
- {0, "for (j=x; j<10; ++j) { let foo = j }", {0}},
- {0, "for (j=x; j<10; ++j) { let [foo] = [j] }", {0}},
- {0, "for (j=x; j<10; ++j) { const foo = j }", {0}},
- {0, "for (j=x; j<10; ++j) { const [foo] = [j] }", {0}},
- {0, "for (j=x; j<10; ++j) { function foo() {return j} }", {0}},
-
- {1, "for ({j}=x; j<10; ++j) { foo = j }", top},
- {1, "for ({j}=x; j<10; ++j) { [foo] = [j] }", top},
- {1, "for ({j}=x; j<10; ++j) { var foo = j }", top},
- {1, "for ({j}=x; j<10; ++j) { var [foo] = [j] }", top},
- {0, "for ({j}=x; j<10; ++j) { let foo = j }", {0}},
- {0, "for ({j}=x; j<10; ++j) { let [foo] = [j] }", {0}},
- {0, "for ({j}=x; j<10; ++j) { const foo = j }", {0}},
- {0, "for ({j}=x; j<10; ++j) { const [foo] = [j] }", {0}},
- {0, "for ({j}=x; j<10; ++j) { function foo() {return j} }", {0}},
-
- {1, "for (var j=x; j<10; ++j) { foo = j }", top},
- {1, "for (var j=x; j<10; ++j) { [foo] = [j] }", top},
- {1, "for (var j=x; j<10; ++j) { var foo = j }", top},
- {1, "for (var j=x; j<10; ++j) { var [foo] = [j] }", top},
- {0, "for (var j=x; j<10; ++j) { let foo = j }", {0}},
- {0, "for (var j=x; j<10; ++j) { let [foo] = [j] }", {0}},
- {0, "for (var j=x; j<10; ++j) { const foo = j }", {0}},
- {0, "for (var j=x; j<10; ++j) { const [foo] = [j] }", {0}},
- {0, "for (var j=x; j<10; ++j) { function foo() {return j} }", {0}},
-
- {1, "for (var {j}=x; j<10; ++j) { foo = j }", top},
- {1, "for (var {j}=x; j<10; ++j) { [foo] = [j] }", top},
- {1, "for (var {j}=x; j<10; ++j) { var foo = j }", top},
- {1, "for (var {j}=x; j<10; ++j) { var [foo] = [j] }", top},
- {0, "for (var {j}=x; j<10; ++j) { let foo = j }", {0}},
- {0, "for (var {j}=x; j<10; ++j) { let [foo] = [j] }", {0}},
- {0, "for (var {j}=x; j<10; ++j) { const foo = j }", {0}},
- {0, "for (var {j}=x; j<10; ++j) { const [foo] = [j] }", {0}},
- {0, "for (var {j}=x; j<10; ++j) { function foo() {return j} }", {0}},
-
- {1, "for (let j=x; j<10; ++j) { foo = j }", top},
- {1, "for (let j=x; j<10; ++j) { [foo] = [j] }", top},
- {1, "for (let j=x; j<10; ++j) { var foo = j }", top},
- {1, "for (let j=x; j<10; ++j) { var [foo] = [j] }", top},
- {0, "for (let j=x; j<10; ++j) { let foo = j }", {0, 0}},
- {0, "for (let j=x; j<10; ++j) { let [foo] = [j] }", {0, 0, 0}},
- {0, "for (let j=x; j<10; ++j) { const foo = j }", {0, 0}},
- {0, "for (let j=x; j<10; ++j) { const [foo] = [j] }", {0, 0, 0}},
- {0, "for (let j=x; j<10; ++j) { function foo() {return j} }", {0, 0, 0}},
-
- {1, "for (let {j}=x; j<10; ++j) { foo = j }", top},
- {1, "for (let {j}=x; j<10; ++j) { [foo] = [j] }", top},
- {1, "for (let {j}=x; j<10; ++j) { var foo = j }", top},
- {1, "for (let {j}=x; j<10; ++j) { var [foo] = [j] }", top},
- {0, "for (let {j}=x; j<10; ++j) { let foo = j }", {0, 0}},
- {0, "for (let {j}=x; j<10; ++j) { let [foo] = [j] }", {0, 0, 0}},
- {0, "for (let {j}=x; j<10; ++j) { const foo = j }", {0, 0}},
- {0, "for (let {j}=x; j<10; ++j) { const [foo] = [j] }", {0, 0, 0}},
- {0, "for (let {j}=x; j<10; ++j) { function foo(){return j} }", {0, 0, 0}},
-
- {1, "for (j of x) { foo = j }", top},
- {1, "for (j of x) { [foo] = [j] }", top},
- {1, "for (j of x) { var foo = j }", top},
- {1, "for (j of x) { var [foo] = [j] }", top},
- {0, "for (j of x) { let foo = j }", {1}},
- {0, "for (j of x) { let [foo] = [j] }", {1}},
- {0, "for (j of x) { const foo = j }", {1}},
- {0, "for (j of x) { const [foo] = [j] }", {1}},
- {0, "for (j of x) { function foo() {return j} }", {1}},
-
- {1, "for ({j} of x) { foo = j }", top},
- {1, "for ({j} of x) { [foo] = [j] }", top},
- {1, "for ({j} of x) { var foo = j }", top},
- {1, "for ({j} of x) { var [foo] = [j] }", top},
- {0, "for ({j} of x) { let foo = j }", {1}},
- {0, "for ({j} of x) { let [foo] = [j] }", {1}},
- {0, "for ({j} of x) { const foo = j }", {1}},
- {0, "for ({j} of x) { const [foo] = [j] }", {1}},
- {0, "for ({j} of x) { function foo() {return j} }", {1}},
-
- {1, "for (var j of x) { foo = j }", top},
- {1, "for (var j of x) { [foo] = [j] }", top},
- {1, "for (var j of x) { var foo = j }", top},
- {1, "for (var j of x) { var [foo] = [j] }", top},
- {0, "for (var j of x) { let foo = j }", {1}},
- {0, "for (var j of x) { let [foo] = [j] }", {1}},
- {0, "for (var j of x) { const foo = j }", {1}},
- {0, "for (var j of x) { const [foo] = [j] }", {1}},
- {0, "for (var j of x) { function foo() {return j} }", {1}},
-
- {1, "for (var {j} of x) { foo = j }", top},
- {1, "for (var {j} of x) { [foo] = [j] }", top},
- {1, "for (var {j} of x) { var foo = j }", top},
- {1, "for (var {j} of x) { var [foo] = [j] }", top},
- {0, "for (var {j} of x) { let foo = j }", {1}},
- {0, "for (var {j} of x) { let [foo] = [j] }", {1}},
- {0, "for (var {j} of x) { const foo = j }", {1}},
- {0, "for (var {j} of x) { const [foo] = [j] }", {1}},
- {0, "for (var {j} of x) { function foo() {return j} }", {1}},
-
- {1, "for (let j of x) { foo = j }", top},
- {1, "for (let j of x) { [foo] = [j] }", top},
- {1, "for (let j of x) { var foo = j }", top},
- {1, "for (let j of x) { var [foo] = [j] }", top},
- {0, "for (let j of x) { let foo = j }", {0, 1, 0}},
- {0, "for (let j of x) { let [foo] = [j] }", {0, 1, 0}},
- {0, "for (let j of x) { const foo = j }", {0, 1, 0}},
- {0, "for (let j of x) { const [foo] = [j] }", {0, 1, 0}},
- {0, "for (let j of x) { function foo() {return j} }", {0, 1, 0}},
-
- {1, "for (let {j} of x) { foo = j }", top},
- {1, "for (let {j} of x) { [foo] = [j] }", top},
- {1, "for (let {j} of x) { var foo = j }", top},
- {1, "for (let {j} of x) { var [foo] = [j] }", top},
- {0, "for (let {j} of x) { let foo = j }", {0, 1, 0}},
- {0, "for (let {j} of x) { let [foo] = [j] }", {0, 1, 0}},
- {0, "for (let {j} of x) { const foo = j }", {0, 1, 0}},
- {0, "for (let {j} of x) { const [foo] = [j] }", {0, 1, 0}},
- {0, "for (let {j} of x) { function foo() {return j} }", {0, 1, 0}},
-
- {1, "for (const j of x) { foo = j }", top},
- {1, "for (const j of x) { [foo] = [j] }", top},
- {1, "for (const j of x) { var foo = j }", top},
- {1, "for (const j of x) { var [foo] = [j] }", top},
- {0, "for (const j of x) { let foo = j }", {0, 1, 0}},
- {0, "for (const j of x) { let [foo] = [j] }", {0, 1, 0}},
- {0, "for (const j of x) { const foo = j }", {0, 1, 0}},
- {0, "for (const j of x) { const [foo] = [j] }", {0, 1, 0}},
- {0, "for (const j of x) { function foo() {return j} }", {0, 1, 0}},
-
- {1, "for (const {j} of x) { foo = j }", top},
- {1, "for (const {j} of x) { [foo] = [j] }", top},
- {1, "for (const {j} of x) { var foo = j }", top},
- {1, "for (const {j} of x) { var [foo] = [j] }", top},
- {0, "for (const {j} of x) { let foo = j }", {0, 1, 0}},
- {0, "for (const {j} of x) { let [foo] = [j] }", {0, 1, 0}},
- {0, "for (const {j} of x) { const foo = j }", {0, 1, 0}},
- {0, "for (const {j} of x) { const [foo] = [j] }", {0, 1, 0}},
- {0, "for (const {j} of x) { function foo() {return j} }", {0, 1, 0}},
-
- {1, "for (j in x) { foo = j }", top},
- {1, "for (j in x) { [foo] = [j] }", top},
- {1, "for (j in x) { var foo = j }", top},
- {1, "for (j in x) { var [foo] = [j] }", top},
- {0, "for (j in x) { let foo = j }", {0}},
- {0, "for (j in x) { let [foo] = [j] }", {0}},
- {0, "for (j in x) { const foo = j }", {0}},
- {0, "for (j in x) { const [foo] = [j] }", {0}},
- {0, "for (j in x) { function foo() {return j} }", {0}},
-
- {1, "for ({j} in x) { foo = j }", top},
- {1, "for ({j} in x) { [foo] = [j] }", top},
- {1, "for ({j} in x) { var foo = j }", top},
- {1, "for ({j} in x) { var [foo] = [j] }", top},
- {0, "for ({j} in x) { let foo = j }", {0}},
- {0, "for ({j} in x) { let [foo] = [j] }", {0}},
- {0, "for ({j} in x) { const foo = j }", {0}},
- {0, "for ({j} in x) { const [foo] = [j] }", {0}},
- {0, "for ({j} in x) { function foo() {return j} }", {0}},
-
- {1, "for (var j in x) { foo = j }", top},
- {1, "for (var j in x) { [foo] = [j] }", top},
- {1, "for (var j in x) { var foo = j }", top},
- {1, "for (var j in x) { var [foo] = [j] }", top},
- {0, "for (var j in x) { let foo = j }", {0}},
- {0, "for (var j in x) { let [foo] = [j] }", {0}},
- {0, "for (var j in x) { const foo = j }", {0}},
- {0, "for (var j in x) { const [foo] = [j] }", {0}},
- {0, "for (var j in x) { function foo() {return j} }", {0}},
-
- {1, "for (var {j} in x) { foo = j }", top},
- {1, "for (var {j} in x) { [foo] = [j] }", top},
- {1, "for (var {j} in x) { var foo = j }", top},
- {1, "for (var {j} in x) { var [foo] = [j] }", top},
- {0, "for (var {j} in x) { let foo = j }", {0}},
- {0, "for (var {j} in x) { let [foo] = [j] }", {0}},
- {0, "for (var {j} in x) { const foo = j }", {0}},
- {0, "for (var {j} in x) { const [foo] = [j] }", {0}},
- {0, "for (var {j} in x) { function foo() {return j} }", {0}},
-
- {1, "for (let j in x) { foo = j }", top},
- {1, "for (let j in x) { [foo] = [j] }", top},
- {1, "for (let j in x) { var foo = j }", top},
- {1, "for (let j in x) { var [foo] = [j] }", top},
- {0, "for (let j in x) { let foo = j }", {0, 0, 0}},
- {0, "for (let j in x) { let [foo] = [j] }", {0, 0, 0}},
- {0, "for (let j in x) { const foo = j }", {0, 0, 0}},
- {0, "for (let j in x) { const [foo] = [j] }", {0, 0, 0}},
- {0, "for (let j in x) { function foo() {return j} }", {0, 0, 0}},
-
- {1, "for (let {j} in x) { foo = j }", top},
- {1, "for (let {j} in x) { [foo] = [j] }", top},
- {1, "for (let {j} in x) { var foo = j }", top},
- {1, "for (let {j} in x) { var [foo] = [j] }", top},
- {0, "for (let {j} in x) { let foo = j }", {0, 0, 0}},
- {0, "for (let {j} in x) { let [foo] = [j] }", {0, 0, 0}},
- {0, "for (let {j} in x) { const foo = j }", {0, 0, 0}},
- {0, "for (let {j} in x) { const [foo] = [j] }", {0, 0, 0}},
- {0, "for (let {j} in x) { function foo() {return j} }", {0, 0, 0}},
-
- {1, "for (const j in x) { foo = j }", top},
- {1, "for (const j in x) { [foo] = [j] }", top},
- {1, "for (const j in x) { var foo = j }", top},
- {1, "for (const j in x) { var [foo] = [j] }", top},
- {0, "for (const j in x) { let foo = j }", {0, 0, 0}},
- {0, "for (const j in x) { let [foo] = [j] }", {0, 0, 0}},
- {0, "for (const j in x) { const foo = j }", {0, 0, 0}},
- {0, "for (const j in x) { const [foo] = [j] }", {0, 0, 0}},
- {0, "for (const j in x) { function foo() {return j} }", {0, 0, 0}},
-
- {1, "for (const {j} in x) { foo = j }", top},
- {1, "for (const {j} in x) { [foo] = [j] }", top},
- {1, "for (const {j} in x) { var foo = j }", top},
- {1, "for (const {j} in x) { var [foo] = [j] }", top},
- {0, "for (const {j} in x) { let foo = j }", {0, 0, 0}},
- {0, "for (const {j} in x) { let [foo] = [j] }", {0, 0, 0}},
- {0, "for (const {j} in x) { const foo = j }", {0, 0, 0}},
- {0, "for (const {j} in x) { const [foo] = [j] }", {0, 0, 0}},
- {0, "for (const {j} in x) { function foo() {return j} }", {0, 0, 0}},
-
- {1, "while (j) { foo = j }", top},
- {1, "while (j) { [foo] = [j] }", top},
- {1, "while (j) { var foo = j }", top},
- {1, "while (j) { var [foo] = [j] }", top},
- {0, "while (j) { let foo = j }", {0}},
- {0, "while (j) { let [foo] = [j] }", {0}},
- {0, "while (j) { const foo = j }", {0}},
- {0, "while (j) { const [foo] = [j] }", {0}},
- {0, "while (j) { function foo() {return j} }", {0}},
-
- {1, "do { foo = j } while (j)", top},
- {1, "do { [foo] = [j] } while (j)", top},
- {1, "do { var foo = j } while (j)", top},
- {1, "do { var [foo] = [j] } while (j)", top},
- {0, "do { let foo = j } while (j)", {0}},
- {0, "do { let [foo] = [j] } while (j)", {0}},
- {0, "do { const foo = j } while (j)", {0}},
- {0, "do { const [foo] = [j] } while (j)", {0}},
- {0, "do { function foo() {return j} } while (j)", {0}},
+ {true, "for (j=x; j<10; ++j) { foo = j }", top},
+ {true, "for (j=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for (j=x; j<10; ++j) { var foo = j }", top},
+ {true, "for (j=x; j<10; ++j) { var [foo] = [j] }", top},
+ {false, "for (j=x; j<10; ++j) { let foo = j }", {0}},
+ {false, "for (j=x; j<10; ++j) { let [foo] = [j] }", {0}},
+ {false, "for (j=x; j<10; ++j) { const foo = j }", {0}},
+ {false, "for (j=x; j<10; ++j) { const [foo] = [j] }", {0}},
+ {false, "for (j=x; j<10; ++j) { function foo() {return j} }", {0}},
+
+ {true, "for ({j}=x; j<10; ++j) { foo = j }", top},
+ {true, "for ({j}=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for ({j}=x; j<10; ++j) { var foo = j }", top},
+ {true, "for ({j}=x; j<10; ++j) { var [foo] = [j] }", top},
+ {false, "for ({j}=x; j<10; ++j) { let foo = j }", {0}},
+ {false, "for ({j}=x; j<10; ++j) { let [foo] = [j] }", {0}},
+ {false, "for ({j}=x; j<10; ++j) { const foo = j }", {0}},
+ {false, "for ({j}=x; j<10; ++j) { const [foo] = [j] }", {0}},
+ {false, "for ({j}=x; j<10; ++j) { function foo() {return j} }", {0}},
+
+ {true, "for (var j=x; j<10; ++j) { foo = j }", top},
+ {true, "for (var j=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for (var j=x; j<10; ++j) { var foo = j }", top},
+ {true, "for (var j=x; j<10; ++j) { var [foo] = [j] }", top},
+ {false, "for (var j=x; j<10; ++j) { let foo = j }", {0}},
+ {false, "for (var j=x; j<10; ++j) { let [foo] = [j] }", {0}},
+ {false, "for (var j=x; j<10; ++j) { const foo = j }", {0}},
+ {false, "for (var j=x; j<10; ++j) { const [foo] = [j] }", {0}},
+ {false, "for (var j=x; j<10; ++j) { function foo() {return j} }", {0}},
+
+ {true, "for (var {j}=x; j<10; ++j) { foo = j }", top},
+ {true, "for (var {j}=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for (var {j}=x; j<10; ++j) { var foo = j }", top},
+ {true, "for (var {j}=x; j<10; ++j) { var [foo] = [j] }", top},
+ {false, "for (var {j}=x; j<10; ++j) { let foo = j }", {0}},
+ {false, "for (var {j}=x; j<10; ++j) { let [foo] = [j] }", {0}},
+ {false, "for (var {j}=x; j<10; ++j) { const foo = j }", {0}},
+ {false, "for (var {j}=x; j<10; ++j) { const [foo] = [j] }", {0}},
+ {false, "for (var {j}=x; j<10; ++j) { function foo() {return j} }", {0}},
+
+ {true, "for (let j=x; j<10; ++j) { foo = j }", top},
+ {true, "for (let j=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for (let j=x; j<10; ++j) { var foo = j }", top},
+ {true, "for (let j=x; j<10; ++j) { var [foo] = [j] }", top},
+ {false, "for (let j=x; j<10; ++j) { let foo = j }", {0, 0}},
+ {false, "for (let j=x; j<10; ++j) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let j=x; j<10; ++j) { const foo = j }", {0, 0}},
+ {false, "for (let j=x; j<10; ++j) { const [foo] = [j] }", {0, 0, 0}},
+ {false,
+ "for (let j=x; j<10; ++j) { function foo() {return j} }",
+ {0, 0, 0}},
+
+ {true, "for (let {j}=x; j<10; ++j) { foo = j }", top},
+ {true, "for (let {j}=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for (let {j}=x; j<10; ++j) { var foo = j }", top},
+ {true, "for (let {j}=x; j<10; ++j) { var [foo] = [j] }", top},
+ {false, "for (let {j}=x; j<10; ++j) { let foo = j }", {0, 0}},
+ {false, "for (let {j}=x; j<10; ++j) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let {j}=x; j<10; ++j) { const foo = j }", {0, 0}},
+ {false, "for (let {j}=x; j<10; ++j) { const [foo] = [j] }", {0, 0, 0}},
+ {false,
+ "for (let {j}=x; j<10; ++j) { function foo(){return j} }",
+ {0, 0, 0}},
+
+ {true, "for (j of x) { foo = j }", top},
+ {true, "for (j of x) { [foo] = [j] }", top},
+ {true, "for (j of x) { var foo = j }", top},
+ {true, "for (j of x) { var [foo] = [j] }", top},
+ {false, "for (j of x) { let foo = j }", {1}},
+ {false, "for (j of x) { let [foo] = [j] }", {1}},
+ {false, "for (j of x) { const foo = j }", {1}},
+ {false, "for (j of x) { const [foo] = [j] }", {1}},
+ {false, "for (j of x) { function foo() {return j} }", {1}},
+
+ {true, "for ({j} of x) { foo = j }", top},
+ {true, "for ({j} of x) { [foo] = [j] }", top},
+ {true, "for ({j} of x) { var foo = j }", top},
+ {true, "for ({j} of x) { var [foo] = [j] }", top},
+ {false, "for ({j} of x) { let foo = j }", {1}},
+ {false, "for ({j} of x) { let [foo] = [j] }", {1}},
+ {false, "for ({j} of x) { const foo = j }", {1}},
+ {false, "for ({j} of x) { const [foo] = [j] }", {1}},
+ {false, "for ({j} of x) { function foo() {return j} }", {1}},
+
+ {true, "for (var j of x) { foo = j }", top},
+ {true, "for (var j of x) { [foo] = [j] }", top},
+ {true, "for (var j of x) { var foo = j }", top},
+ {true, "for (var j of x) { var [foo] = [j] }", top},
+ {false, "for (var j of x) { let foo = j }", {1}},
+ {false, "for (var j of x) { let [foo] = [j] }", {1}},
+ {false, "for (var j of x) { const foo = j }", {1}},
+ {false, "for (var j of x) { const [foo] = [j] }", {1}},
+ {false, "for (var j of x) { function foo() {return j} }", {1}},
+
+ {true, "for (var {j} of x) { foo = j }", top},
+ {true, "for (var {j} of x) { [foo] = [j] }", top},
+ {true, "for (var {j} of x) { var foo = j }", top},
+ {true, "for (var {j} of x) { var [foo] = [j] }", top},
+ {false, "for (var {j} of x) { let foo = j }", {1}},
+ {false, "for (var {j} of x) { let [foo] = [j] }", {1}},
+ {false, "for (var {j} of x) { const foo = j }", {1}},
+ {false, "for (var {j} of x) { const [foo] = [j] }", {1}},
+ {false, "for (var {j} of x) { function foo() {return j} }", {1}},
+
+ {true, "for (let j of x) { foo = j }", top},
+ {true, "for (let j of x) { [foo] = [j] }", top},
+ {true, "for (let j of x) { var foo = j }", top},
+ {true, "for (let j of x) { var [foo] = [j] }", top},
+ {false, "for (let j of x) { let foo = j }", {0, 1, 0}},
+ {false, "for (let j of x) { let [foo] = [j] }", {0, 1, 0}},
+ {false, "for (let j of x) { const foo = j }", {0, 1, 0}},
+ {false, "for (let j of x) { const [foo] = [j] }", {0, 1, 0}},
+ {false, "for (let j of x) { function foo() {return j} }", {0, 1, 0}},
+
+ {true, "for (let {j} of x) { foo = j }", top},
+ {true, "for (let {j} of x) { [foo] = [j] }", top},
+ {true, "for (let {j} of x) { var foo = j }", top},
+ {true, "for (let {j} of x) { var [foo] = [j] }", top},
+ {false, "for (let {j} of x) { let foo = j }", {0, 1, 0}},
+ {false, "for (let {j} of x) { let [foo] = [j] }", {0, 1, 0}},
+ {false, "for (let {j} of x) { const foo = j }", {0, 1, 0}},
+ {false, "for (let {j} of x) { const [foo] = [j] }", {0, 1, 0}},
+ {false, "for (let {j} of x) { function foo() {return j} }", {0, 1, 0}},
+
+ {true, "for (const j of x) { foo = j }", top},
+ {true, "for (const j of x) { [foo] = [j] }", top},
+ {true, "for (const j of x) { var foo = j }", top},
+ {true, "for (const j of x) { var [foo] = [j] }", top},
+ {false, "for (const j of x) { let foo = j }", {0, 1, 0}},
+ {false, "for (const j of x) { let [foo] = [j] }", {0, 1, 0}},
+ {false, "for (const j of x) { const foo = j }", {0, 1, 0}},
+ {false, "for (const j of x) { const [foo] = [j] }", {0, 1, 0}},
+ {false, "for (const j of x) { function foo() {return j} }", {0, 1, 0}},
+
+ {true, "for (const {j} of x) { foo = j }", top},
+ {true, "for (const {j} of x) { [foo] = [j] }", top},
+ {true, "for (const {j} of x) { var foo = j }", top},
+ {true, "for (const {j} of x) { var [foo] = [j] }", top},
+ {false, "for (const {j} of x) { let foo = j }", {0, 1, 0}},
+ {false, "for (const {j} of x) { let [foo] = [j] }", {0, 1, 0}},
+ {false, "for (const {j} of x) { const foo = j }", {0, 1, 0}},
+ {false, "for (const {j} of x) { const [foo] = [j] }", {0, 1, 0}},
+ {false, "for (const {j} of x) { function foo() {return j} }", {0, 1, 0}},
+
+ {true, "for (j in x) { foo = j }", top},
+ {true, "for (j in x) { [foo] = [j] }", top},
+ {true, "for (j in x) { var foo = j }", top},
+ {true, "for (j in x) { var [foo] = [j] }", top},
+ {false, "for (j in x) { let foo = j }", {0}},
+ {false, "for (j in x) { let [foo] = [j] }", {0}},
+ {false, "for (j in x) { const foo = j }", {0}},
+ {false, "for (j in x) { const [foo] = [j] }", {0}},
+ {false, "for (j in x) { function foo() {return j} }", {0}},
+
+ {true, "for ({j} in x) { foo = j }", top},
+ {true, "for ({j} in x) { [foo] = [j] }", top},
+ {true, "for ({j} in x) { var foo = j }", top},
+ {true, "for ({j} in x) { var [foo] = [j] }", top},
+ {false, "for ({j} in x) { let foo = j }", {0}},
+ {false, "for ({j} in x) { let [foo] = [j] }", {0}},
+ {false, "for ({j} in x) { const foo = j }", {0}},
+ {false, "for ({j} in x) { const [foo] = [j] }", {0}},
+ {false, "for ({j} in x) { function foo() {return j} }", {0}},
+
+ {true, "for (var j in x) { foo = j }", top},
+ {true, "for (var j in x) { [foo] = [j] }", top},
+ {true, "for (var j in x) { var foo = j }", top},
+ {true, "for (var j in x) { var [foo] = [j] }", top},
+ {false, "for (var j in x) { let foo = j }", {0}},
+ {false, "for (var j in x) { let [foo] = [j] }", {0}},
+ {false, "for (var j in x) { const foo = j }", {0}},
+ {false, "for (var j in x) { const [foo] = [j] }", {0}},
+ {false, "for (var j in x) { function foo() {return j} }", {0}},
+
+ {true, "for (var {j} in x) { foo = j }", top},
+ {true, "for (var {j} in x) { [foo] = [j] }", top},
+ {true, "for (var {j} in x) { var foo = j }", top},
+ {true, "for (var {j} in x) { var [foo] = [j] }", top},
+ {false, "for (var {j} in x) { let foo = j }", {0}},
+ {false, "for (var {j} in x) { let [foo] = [j] }", {0}},
+ {false, "for (var {j} in x) { const foo = j }", {0}},
+ {false, "for (var {j} in x) { const [foo] = [j] }", {0}},
+ {false, "for (var {j} in x) { function foo() {return j} }", {0}},
+
+ {true, "for (let j in x) { foo = j }", top},
+ {true, "for (let j in x) { [foo] = [j] }", top},
+ {true, "for (let j in x) { var foo = j }", top},
+ {true, "for (let j in x) { var [foo] = [j] }", top},
+ {false, "for (let j in x) { let foo = j }", {0, 0, 0}},
+ {false, "for (let j in x) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let j in x) { const foo = j }", {0, 0, 0}},
+ {false, "for (let j in x) { const [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let j in x) { function foo() {return j} }", {0, 0, 0}},
+
+ {true, "for (let {j} in x) { foo = j }", top},
+ {true, "for (let {j} in x) { [foo] = [j] }", top},
+ {true, "for (let {j} in x) { var foo = j }", top},
+ {true, "for (let {j} in x) { var [foo] = [j] }", top},
+ {false, "for (let {j} in x) { let foo = j }", {0, 0, 0}},
+ {false, "for (let {j} in x) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let {j} in x) { const foo = j }", {0, 0, 0}},
+ {false, "for (let {j} in x) { const [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let {j} in x) { function foo() {return j} }", {0, 0, 0}},
+
+ {true, "for (const j in x) { foo = j }", top},
+ {true, "for (const j in x) { [foo] = [j] }", top},
+ {true, "for (const j in x) { var foo = j }", top},
+ {true, "for (const j in x) { var [foo] = [j] }", top},
+ {false, "for (const j in x) { let foo = j }", {0, 0, 0}},
+ {false, "for (const j in x) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (const j in x) { const foo = j }", {0, 0, 0}},
+ {false, "for (const j in x) { const [foo] = [j] }", {0, 0, 0}},
+ {false, "for (const j in x) { function foo() {return j} }", {0, 0, 0}},
+
+ {true, "for (const {j} in x) { foo = j }", top},
+ {true, "for (const {j} in x) { [foo] = [j] }", top},
+ {true, "for (const {j} in x) { var foo = j }", top},
+ {true, "for (const {j} in x) { var [foo] = [j] }", top},
+ {false, "for (const {j} in x) { let foo = j }", {0, 0, 0}},
+ {false, "for (const {j} in x) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (const {j} in x) { const foo = j }", {0, 0, 0}},
+ {false, "for (const {j} in x) { const [foo] = [j] }", {0, 0, 0}},
+ {false, "for (const {j} in x) { function foo() {return j} }", {0, 0, 0}},
+
+ {true, "while (j) { foo = j }", top},
+ {true, "while (j) { [foo] = [j] }", top},
+ {true, "while (j) { var foo = j }", top},
+ {true, "while (j) { var [foo] = [j] }", top},
+ {false, "while (j) { let foo = j }", {0}},
+ {false, "while (j) { let [foo] = [j] }", {0}},
+ {false, "while (j) { const foo = j }", {0}},
+ {false, "while (j) { const [foo] = [j] }", {0}},
+ {false, "while (j) { function foo() {return j} }", {0}},
+
+ {true, "do { foo = j } while (j)", top},
+ {true, "do { [foo] = [j] } while (j)", top},
+ {true, "do { var foo = j } while (j)", top},
+ {true, "do { var [foo] = [j] } while (j)", top},
+ {false, "do { let foo = j } while (j)", {0}},
+ {false, "do { let [foo] = [j] } while (j)", {0}},
+ {false, "do { const foo = j } while (j)", {0}},
+ {false, "do { const [foo] = [j] } while (j)", {0}},
+ {false, "do { function foo() {return j} } while (j)", {0}},
};
Input script_only_tests[] = {
- {1, "for (j=x; j<10; ++j) { function foo() {return j} }", top},
- {1, "for ({j}=x; j<10; ++j) { function foo() {return j} }", top},
- {1, "for (var j=x; j<10; ++j) { function foo() {return j} }", top},
- {1, "for (var {j}=x; j<10; ++j) { function foo() {return j} }", top},
- {1, "for (let j=x; j<10; ++j) { function foo() {return j} }", top},
- {1, "for (let {j}=x; j<10; ++j) { function foo() {return j} }", top},
- {1, "for (j of x) { function foo() {return j} }", top},
- {1, "for ({j} of x) { function foo() {return j} }", top},
- {1, "for (var j of x) { function foo() {return j} }", top},
- {1, "for (var {j} of x) { function foo() {return j} }", top},
- {1, "for (let j of x) { function foo() {return j} }", top},
- {1, "for (let {j} of x) { function foo() {return j} }", top},
- {1, "for (const j of x) { function foo() {return j} }", top},
- {1, "for (const {j} of x) { function foo() {return j} }", top},
- {1, "for (j in x) { function foo() {return j} }", top},
- {1, "for ({j} in x) { function foo() {return j} }", top},
- {1, "for (var j in x) { function foo() {return j} }", top},
- {1, "for (var {j} in x) { function foo() {return j} }", top},
- {1, "for (let j in x) { function foo() {return j} }", top},
- {1, "for (let {j} in x) { function foo() {return j} }", top},
- {1, "for (const j in x) { function foo() {return j} }", top},
- {1, "for (const {j} in x) { function foo() {return j} }", top},
- {1, "while (j) { function foo() {return j} }", top},
- {1, "do { function foo() {return j} } while (j)", top},
+ {true, "for (j=x; j<10; ++j) { function foo() {return j} }", top},
+ {true, "for ({j}=x; j<10; ++j) { function foo() {return j} }", top},
+ {true, "for (var j=x; j<10; ++j) { function foo() {return j} }", top},
+ {true, "for (var {j}=x; j<10; ++j) { function foo() {return j} }", top},
+ {true, "for (let j=x; j<10; ++j) { function foo() {return j} }", top},
+ {true, "for (let {j}=x; j<10; ++j) { function foo() {return j} }", top},
+ {true, "for (j of x) { function foo() {return j} }", top},
+ {true, "for ({j} of x) { function foo() {return j} }", top},
+ {true, "for (var j of x) { function foo() {return j} }", top},
+ {true, "for (var {j} of x) { function foo() {return j} }", top},
+ {true, "for (let j of x) { function foo() {return j} }", top},
+ {true, "for (let {j} of x) { function foo() {return j} }", top},
+ {true, "for (const j of x) { function foo() {return j} }", top},
+ {true, "for (const {j} of x) { function foo() {return j} }", top},
+ {true, "for (j in x) { function foo() {return j} }", top},
+ {true, "for ({j} in x) { function foo() {return j} }", top},
+ {true, "for (var j in x) { function foo() {return j} }", top},
+ {true, "for (var {j} in x) { function foo() {return j} }", top},
+ {true, "for (let j in x) { function foo() {return j} }", top},
+ {true, "for (let {j} in x) { function foo() {return j} }", top},
+ {true, "for (const j in x) { function foo() {return j} }", top},
+ {true, "for (const {j} in x) { function foo() {return j} }", top},
+ {true, "while (j) { function foo() {return j} }", top},
+ {true, "do { function foo() {return j} } while (j)", top},
};
for (unsigned i = 0; i < arraysize(module_and_script_tests); ++i) {
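
The token-predicate tests added above all follow one scheme: enumerate every token and compare a straightforward switch-based reference against the production predicate, which may use table or range tricks. Distilled into a self-contained sketch with made-up token names:

    #include <cassert>

    enum Token { kAdd, kSub, kInc, kDec, kNumTokens };

    // Reference implementation: obviously correct, written out explicitly.
    bool ReferenceIsCountOp(Token t) { return t == kInc || t == kDec; }

    // Production-style implementation: relies on enum ordering.
    bool FastIsCountOp(Token t) { return t >= kInc && t <= kDec; }

    int main() {
      for (int i = 0; i < kNumTokens; ++i) {
        Token t = static_cast<Token>(i);
        assert(ReferenceIsCountOp(t) == FastIsCountOp(t));
      }
    }
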
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index e7ad2d84f1..e4ab19f05f 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -29,7 +29,7 @@ void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
#elif V8_HOST_ARCH_MIPS64
__asm__ __volatile__("sd $sp, %0" : "=g"(sp_addr));
#elif defined(__s390x__) || defined(_ARCH_S390X)
- __asm__ __volatile__("stg 15, %0" : "=m"(sp_addr));
+ __asm__ __volatile__("stg %%r15, %0" : "=m"(sp_addr));
#elif defined(__s390__) || defined(_ARCH_S390)
__asm__ __volatile__("st 15, %0" : "=m"(sp_addr));
#elif defined(__PPC64__) || defined(_ARCH_PPC64)
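
The s390x fix spells out the register name (%%r15) rather than the bare number 15, which the assembler can otherwise misread. The same store-the-stack-pointer idiom on x86-64, as an illustrative fragment that builds with GCC or Clang only:

    #include <cstdint>

    uintptr_t ReadStackPointer() {
      uintptr_t sp_addr;
      // Extended asm with an explicit register name, mirroring the fix.
      __asm__ __volatile__("mov %%rsp, %0" : "=r"(sp_addr));
      return sp_addr;
    }

    int main() { return ReadStackPointer() != 0 ? 0 : 1; }
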
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm.cc b/deps/v8/test/cctest/test-poison-disasm-arm.cc
new file mode 100644
index 0000000000..7a3238eea1
--- /dev/null
+++ b/deps/v8/test/cctest/test-poison-disasm-arm.cc
@@ -0,0 +1,123 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The C++ style guide recommends using <re2> instead of <regex>. However, the
+// former isn't available in V8.
+#include <regex> // NOLINT(build/c++11)
+
+#include "src/api-inl.h"
+#include "src/disassembler.h"
+#include "src/objects-inl.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+std::string DisassembleFunction(const char* function) {
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
+ CcTest::global()->Get(context, v8_str(function)).ToLocalChecked())));
+
+ Address begin = f->code()->raw_instruction_start();
+ Address end = f->code()->raw_instruction_end();
+ Isolate* isolate = CcTest::i_isolate();
+ std::ostringstream os;
+ Disassembler::Decode(isolate, &os, reinterpret_cast<byte*>(begin),
+ reinterpret_cast<byte*>(end),
+ CodeReference(handle(f->code(), isolate)));
+ return os.str();
+}
+
+TEST(DisasmPoisonMonomorphicLoad) {
+#ifdef ENABLE_DISASSEMBLER
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_untrusted_code_mitigations = true;
+
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "function mono(o) { return o.x; };"
+ "mono({ x : 1 });"
+ "mono({ x : 1 });"
+ "%OptimizeFunctionOnNextCall(mono);"
+ "mono({ x : 1 });");
+
+ std::string start("0x[0-9a-f]+ +[0-9a-f]+ +[0-9a-f]+ +");
+ std::regex map_load_re(start + "ldr r([0-9]+), \\[r([0-9]+), #-1\\]");
+ std::regex load_const_re(start + "ldr r([0-9]+), \\[pc, .*");
+ std::regex cmp_re(start + "cmp r([0-9]+), r([0-9]+)");
+ std::regex bne_re(start + "bne(.*)");
+ std::regex eorne_re(start + "eorne r([0-9]+), r([0-9]+), r([0-9]+)");
+ std::regex csdb_re(start + "csdb");
+ std::regex load_field_re(start +
+ "ldr r([0-9]+), \\[r([0-9]+), #\\+[0-9]+\\]");
+ std::regex mask_re(start + "and r([0-9]+), r([0-9]+), r([0-9]+)");
+
+ std::string poison_reg = "9";
+
+ std::smatch match;
+ std::string line;
+ std::istringstream reader(DisassembleFunction("mono"));
+ bool poisoning_sequence_found = false;
+ while (std::getline(reader, line)) {
+ if (std::regex_match(line, match, map_load_re)) {
+ std::string map_reg = match[1];
+ std::string object_reg = match[2];
+ // Checks that the property access sequence is instrumented with
+ // poisoning, by matching the following sequence:
+ //
+ // ldr r1, [r0, #-1] ; load map
+ // ldr r2, [pc, #+104] ; load expected map constant
+ // cmp r1, r2 ; compare maps
+ // bne ... ; deopt if different
+ // eorne r9, r9, r9 ; update the poison
+ // csdb ; speculation barrier
+ // ldr r0, [r0, #+11] ; load the field
+ // and r0, r0, r9 ; apply the poison
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, load_const_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, cmp_re));
+ CHECK_EQ(match[1], map_reg);
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, bne_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, eorne_re));
+ CHECK_EQ(match[1], poison_reg);
+ CHECK_EQ(match[2], poison_reg);
+ CHECK_EQ(match[3], poison_reg);
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, csdb_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, load_field_re));
+ CHECK_EQ(match[2], object_reg);
+ std::string field_reg = match[1];
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, mask_re));
+ CHECK_EQ(match[1], field_reg);
+ CHECK_EQ(match[2], field_reg);
+ CHECK_EQ(match[3], poison_reg);
+
+ poisoning_sequence_found = true;
+ break;
+ }
+ }
+
+ CHECK(poisoning_sequence_found);
+#endif // ENABLE_DISASSEMBLER
+}
+
+} // namespace internal
+} // namespace v8
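Editor's note: the instruction sequence checked above is easier to follow outside
assembly. Below is a minimal C++ sketch of the poisoning idea, assuming nothing
about V8's actual codegen helpers (function and parameter names are illustrative
only): the poison value is all-ones on the architecturally valid path, is zeroed
when the map check fails, and is AND-ed into every value loaded under the check,
so a load executed under branch misspeculation yields zero instead of secret data.

    #include <cstddef>
    #include <cstdint>

    // Illustrative only; not V8's API. Mirrors the disassembly pattern above:
    // cmp/bne decide the real path, 'eorne' zeroes the poison on a map
    // mismatch, and the final 'and' masks the speculatively loaded field.
    uint64_t LoadFieldWithPoison(const uint64_t* object, uint64_t actual_map,
                                 uint64_t expected_map, size_t field_index) {
      uint64_t poison = ~uint64_t{0};                // all ones: no masking
      if (actual_map != expected_map) poison = 0;    // deopt path in real code
      return object[field_index] & poison;           // apply the poison
    }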
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index c65714a930..301aaf2968 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -1968,7 +1968,7 @@ class UncachedExternalString
public:
const char* data() const override { return "abcdefghijklmnopqrstuvwxyz"; }
size_t length() const override { return 26; }
- bool IsCompressible() const override { return true; }
+ bool IsCacheable() const override { return false; }
};
TEST(UncachedExternalString) {
@@ -1978,9 +1978,9 @@ TEST(UncachedExternalString) {
v8::Local<v8::String> external =
v8::String::NewExternalOneByte(isolate, new UncachedExternalString())
.ToLocalChecked();
- CHECK(
- v8::Utils::OpenHandle(*external)->map() ==
- ReadOnlyRoots(CcTest::i_isolate()).short_external_one_byte_string_map());
+ CHECK(v8::Utils::OpenHandle(*external)->map() ==
+ ReadOnlyRoots(CcTest::i_isolate())
+ .uncached_external_one_byte_string_map());
v8::Local<v8::Object> global = env->Global();
global->Set(env.local(), v8_str("external"), external).FromJust();
CompileRun("var re = /y(.)/; re.test('ab');");
diff --git a/deps/v8/test/cctest/test-roots.cc b/deps/v8/test/cctest/test-roots.cc
index f99b9df399..7dcbe998cd 100644
--- a/deps/v8/test/cctest/test-roots.cc
+++ b/deps/v8/test/cctest/test-roots.cc
@@ -18,57 +18,16 @@ AllocationSpace GetSpaceFromObject(Object* object) {
}
} // namespace
-#define CHECK_IN_RO_SPACE(name) \
- HeapObject* name = roots.name(); \
+#define CHECK_IN_RO_SPACE(type, name, CamelName) \
+ HeapObject* name = roots.name(); \
CHECK_EQ(RO_SPACE, GetSpaceFromObject(name));
// The following tests check that all the roots accessible via ReadOnlyRoots are
// in RO_SPACE.
-TEST(TestStrongReadOnlyRoots) {
+TEST(TestReadOnlyRoots) {
ReadOnlyRoots roots(CcTest::i_isolate());
-#define TEST_ROOT(type, name, camel_name) CHECK_IN_RO_SPACE(name)
- STRONG_READ_ONLY_ROOT_LIST(TEST_ROOT)
-#undef TEST_ROOT
-}
-
-TEST(TestInternalizedStrings) {
- ReadOnlyRoots roots(CcTest::i_isolate());
-
-#define TEST_ROOT(name, str) CHECK_IN_RO_SPACE(name)
- INTERNALIZED_STRING_LIST(TEST_ROOT)
-#undef TEST_ROOT
-}
-
-TEST(TestPrivateSymbols) {
- ReadOnlyRoots roots(CcTest::i_isolate());
-
- PRIVATE_SYMBOL_LIST(CHECK_IN_RO_SPACE)
-}
-
-TEST(TestPublicSymbols) {
- ReadOnlyRoots roots(CcTest::i_isolate());
-
-#define TEST_ROOT(name, description) CHECK_IN_RO_SPACE(name)
- PUBLIC_SYMBOL_LIST(TEST_ROOT)
- WELL_KNOWN_SYMBOL_LIST(TEST_ROOT)
-#undef TEST_ROOT
-}
-
-TEST(TestStructMaps) {
- ReadOnlyRoots roots(CcTest::i_isolate());
-
-#define TEST_ROOT(NAME, Name, name) CHECK_IN_RO_SPACE(name##_map)
- STRUCT_LIST(TEST_ROOT)
-#undef TEST_ROOT
-}
-
-TEST(TestAllocationSiteMaps) {
- ReadOnlyRoots roots(CcTest::i_isolate());
-
-#define TEST_ROOT(NAME, Name, Size, name) CHECK_IN_RO_SPACE(name##_map)
- ALLOCATION_SITE_LIST(TEST_ROOT)
-#undef TEST_ROOT
+ READ_ONLY_ROOT_LIST(CHECK_IN_RO_SPACE)
}
#undef CHECK_IN_RO_SPACE
@@ -82,7 +41,6 @@ bool IsInitiallyMutable(Factory* factory, Address object_address) {
V(detached_contexts) \
V(feedback_vectors_for_profiling_tools) \
V(materialized_objects) \
- V(microtask_queue) \
V(noscript_shared_function_infos) \
V(retained_maps) \
V(retaining_path_targets) \
@@ -101,7 +59,7 @@ bool IsInitiallyMutable(Factory* factory, Address object_address) {
// The CHECK_EQ line is there just to ensure that the root is publicly
// accessible from Heap, but ultimately the factory is used as it provides
// handles that have the address in the root table.
-#define CHECK_NOT_IN_RO_SPACE(name) \
+#define CHECK_NOT_IN_RO_SPACE(type, name, CamelName) \
Handle<Object> name = factory->name(); \
CHECK_EQ(*name, heap->name()); \
if (name->IsHeapObject() && IsInitiallyMutable(factory, name.address())) \
@@ -115,18 +73,7 @@ TEST(TestHeapRootsNotReadOnly) {
Factory* factory = CcTest::i_isolate()->factory();
Heap* heap = CcTest::i_isolate()->heap();
-#define TEST_ROOT(type, name, camel_name) CHECK_NOT_IN_RO_SPACE(name)
- MUTABLE_ROOT_LIST(TEST_ROOT)
-#undef TEST_ROOT
-}
-
-TEST(TestAccessorInfosNotReadOnly) {
- Factory* factory = CcTest::i_isolate()->factory();
- Heap* heap = CcTest::i_isolate()->heap();
-
-#define TEST_ROOT(name, AccessorName) CHECK_NOT_IN_RO_SPACE(name##_accessor)
- ACCESSOR_INFO_LIST(TEST_ROOT)
-#undef TEST_ROOT
+ MUTABLE_ROOT_LIST(CHECK_NOT_IN_RO_SPACE)
}
#undef CHECK_NOT_IN_RO_SPACE
diff --git a/deps/v8/test/cctest/test-sampler-api.cc b/deps/v8/test/cctest/test-sampler-api.cc
index a73c9765df..eec773e0de 100644
--- a/deps/v8/test/cctest/test-sampler-api.cc
+++ b/deps/v8/test/cctest/test-sampler-api.cc
@@ -17,7 +17,7 @@ class Sample {
public:
enum { kFramesLimit = 255 };
- Sample() {}
+ Sample() = default;
typedef const void* const* const_iterator;
const_iterator begin() const { return data_.start(); }
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index d3fd665a66..848678d43f 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -785,6 +785,17 @@ TEST(CustomSnapshotDataBlob1) {
delete[] data1.data; // We can dispose of the snapshot blob now.
}
+TEST(SnapshotChecksum) {
+ DisableAlwaysOpt();
+ const char* source1 = "function f() { return 42; }";
+
+ v8::StartupData data1 = CreateSnapshotDataBlob(source1);
+ CHECK(i::Snapshot::VerifyChecksum(&data1));
+ const_cast<char*>(data1.data)[142] = data1.data[142] ^ 4; // Flip a bit.
+ CHECK(!i::Snapshot::VerifyChecksum(&data1));
+ delete[] data1.data; // We can dispose of the snapshot blob now.
+}
+
struct InternalFieldData {
uint32_t data;
};
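Editor's note: the new SnapshotChecksum test relies only on the contract that
flipping any single bit in the blob must invalidate the checksum. V8's actual
checksum routine is not shown in this diff; as a stand-in, any sound hash over
the bytes has that property. A sketch using FNV-1a, purely for illustration:

    #include <cstddef>
    #include <cstdint>

    // Stand-in checksum (FNV-1a); the real i::Snapshot::VerifyChecksum uses
    // its own algorithm, but the bit-flip test only needs the property that
    // a one-bit change in 'data' changes the result.
    uint32_t Checksum(const uint8_t* data, size_t size) {
      uint32_t h = 2166136261u;  // FNV offset basis
      for (size_t i = 0; i < size; ++i) {
        h ^= data[i];
        h *= 16777619u;  // FNV prime
      }
      return h;
    }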
@@ -1301,7 +1312,7 @@ TEST(CustomSnapshotDataBlobWithWarmup) {
CHECK(IsCompiled("Math.abs"));
CHECK(!IsCompiled("g"));
CHECK(IsCompiled("String.raw"));
- CHECK(!IsCompiled("Array.prototype.lastIndexOf"));
+ CHECK(IsCompiled("Array.prototype.lastIndexOf"));
CHECK_EQ(5, CompileRun("a")->Int32Value(context).FromJust());
}
isolate->Dispose();
@@ -1821,9 +1832,9 @@ class SerializerOneByteResource
public:
SerializerOneByteResource(const char* data, size_t length)
: data_(data), length_(length), dispose_count_(0) {}
- virtual const char* data() const { return data_; }
- virtual size_t length() const { return length_; }
- virtual void Dispose() { dispose_count_++; }
+ const char* data() const override { return data_; }
+ size_t length() const override { return length_; }
+ void Dispose() override { dispose_count_++; }
int dispose_count() { return dispose_count_; }
private:
@@ -1837,11 +1848,11 @@ class SerializerTwoByteResource : public v8::String::ExternalStringResource {
public:
SerializerTwoByteResource(const char* data, size_t length)
: data_(AsciiToTwoByteString(data)), length_(length), dispose_count_(0) {}
- ~SerializerTwoByteResource() { DeleteArray<const uint16_t>(data_); }
+ ~SerializerTwoByteResource() override { DeleteArray<const uint16_t>(data_); }
- virtual const uint16_t* data() const { return data_; }
- virtual size_t length() const { return length_; }
- virtual void Dispose() { dispose_count_++; }
+ const uint16_t* data() const override { return data_; }
+ size_t length() const override { return length_; }
+ void Dispose() override { dispose_count_++; }
int dispose_count() { return dispose_count_; }
private:
@@ -3582,11 +3593,10 @@ void CheckSFIsAreWeak(WeakFixedArray* sfis, Isolate* isolate) {
for (int i = 0; i < sfis->length(); ++i) {
MaybeObject* maybe_object = sfis->Get(i);
HeapObject* heap_object;
- CHECK(maybe_object->IsWeakHeapObject() ||
- maybe_object->IsClearedWeakHeapObject() ||
- (maybe_object->ToStrongHeapObject(&heap_object) &&
+ CHECK(maybe_object->IsWeakOrCleared() ||
+ (maybe_object->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsUndefined(isolate)));
- if (maybe_object->IsWeakHeapObject()) {
+ if (maybe_object->IsWeak()) {
++no_of_weak;
}
}
diff --git a/deps/v8/test/cctest/test-smi-lexicographic-compare.cc b/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
new file mode 100644
index 0000000000..58617fd5c2
--- /dev/null
+++ b/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
@@ -0,0 +1,79 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <set>
+
+#include "src/objects-inl.h"
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+void AddSigned(std::set<Smi*>& smis, int64_t x) {
+ if (!Smi::IsValid(x)) return;
+
+ smis.insert(Smi::FromInt(static_cast<int>(x)));
+ smis.insert(Smi::FromInt(static_cast<int>(-x)));
+}
+
+// Uses std::lexicographical_compare twice to convert the result to -1, 0 or 1.
+int ExpectedCompareResult(Smi* a, Smi* b) {
+ std::string str_a = std::to_string(a->value());
+ std::string str_b = std::to_string(b->value());
+ bool expected_a_lt_b = std::lexicographical_compare(
+ str_a.begin(), str_a.end(), str_b.begin(), str_b.end());
+ bool expected_b_lt_a = std::lexicographical_compare(
+ str_b.begin(), str_b.end(), str_a.begin(), str_a.end());
+
+ if (!expected_a_lt_b && !expected_b_lt_a) {
+ return 0;
+ } else if (expected_a_lt_b) {
+ return -1;
+ } else {
+ CHECK(expected_b_lt_a);
+ return 1;
+ }
+}
+
+bool Test(Isolate* isolate, Smi* a, Smi* b) {
+ int actual = Smi::LexicographicCompare(isolate, a, b)->value();
+ int expected = ExpectedCompareResult(a, b);
+
+ return actual == expected;
+}
+
+} // namespace
+
+TEST(TestSmiLexicographicCompare) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+
+ std::set<Smi*> smis;
+
+ for (int64_t xb = 1; xb <= Smi::kMaxValue; xb *= 10) {
+ for (int64_t xf = 0; xf <= 9; ++xf) {
+ for (int64_t xo = -1; xo <= 1; ++xo) {
+ AddSigned(smis, xb * xf + xo);
+ }
+ }
+ }
+
+ for (int64_t yb = 1; yb <= Smi::kMaxValue; yb *= 2) {
+ for (int64_t yo = -2; yo <= 2; ++yo) {
+ AddSigned(smis, yb + yo);
+ }
+ }
+
+ for (Smi* a : smis) {
+ for (Smi* b : smis) {
+ CHECK(Test(isolate, a, b));
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
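Editor's note: the reason the test stresses values around powers of ten is that
numeric order and lexicographic order disagree exactly at digit-count
boundaries. A two-line illustration of the property being verified:

    #include <cassert>
    #include <string>

    int main() {
      assert(10 > 9);                                 // numeric order
      assert(std::string("10") < std::string("9"));   // string order: '1' < '9'
      return 0;
    }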
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 2c66421831..9326c347ec 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -105,9 +105,9 @@ static const int SUPER_DEEP_DEPTH = 80 * 1024;
class Resource: public v8::String::ExternalStringResource {
public:
Resource(const uc16* data, size_t length): data_(data), length_(length) {}
- ~Resource() { i::DeleteArray(data_); }
- virtual const uint16_t* data() const { return data_; }
- virtual size_t length() const { return length_; }
+ ~Resource() override { i::DeleteArray(data_); }
+ const uint16_t* data() const override { return data_; }
+ size_t length() const override { return length_; }
private:
const uc16* data_;
@@ -119,9 +119,9 @@ class OneByteResource : public v8::String::ExternalOneByteStringResource {
public:
OneByteResource(const char* data, size_t length)
: data_(data), length_(length) {}
- ~OneByteResource() { i::DeleteArray(data_); }
- virtual const char* data() const { return data_; }
- virtual size_t length() const { return length_; }
+ ~OneByteResource() override { i::DeleteArray(data_); }
+ const char* data() const override { return data_; }
+ size_t length() const override { return length_; }
private:
const char* data_;
@@ -1108,6 +1108,98 @@ TEST(JSONStringifySliceMadeExternal) {
CompileRun("JSON.stringify(slice)"))));
}
+TEST(JSONStringifyWellFormed) {
+ FLAG_harmony_json_stringify = true;
+ CcTest::InitializeVM();
+ v8::HandleScope handle_scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+
+ // Test some leading surrogates (U+D800 to U+DBFF).
+ { // U+D800
+ CHECK_EQ(
+ 0, strcmp("\"\\ud800\"", *v8::String::Utf8Value(
+ CcTest::isolate(),
+ CompileRun("JSON.stringify('\\uD800')"))));
+ v8::Local<v8::String> json = v8_str("\"\\ud800\"");
+ v8::Local<v8::Value> parsed =
+ v8::JSON::Parse(context, json).ToLocalChecked();
+ CHECK(v8::JSON::Stringify(context, parsed)
+ .ToLocalChecked()
+ ->Equals(context, json)
+ .FromJust());
+ }
+
+ { // U+DAAA
+ CHECK_EQ(
+ 0, strcmp("\"\\udaaa\"", *v8::String::Utf8Value(
+ CcTest::isolate(),
+ CompileRun("JSON.stringify('\\uDAAA')"))));
+ v8::Local<v8::String> json = v8_str("\"\\udaaa\"");
+ v8::Local<v8::Value> parsed =
+ v8::JSON::Parse(context, json).ToLocalChecked();
+ CHECK(v8::JSON::Stringify(context, parsed)
+ .ToLocalChecked()
+ ->Equals(context, json)
+ .FromJust());
+ }
+
+ { // U+DBFF
+ CHECK_EQ(
+ 0, strcmp("\"\\udbff\"", *v8::String::Utf8Value(
+ CcTest::isolate(),
+ CompileRun("JSON.stringify('\\uDBFF')"))));
+ v8::Local<v8::String> json = v8_str("\"\\udbff\"");
+ v8::Local<v8::Value> parsed =
+ v8::JSON::Parse(context, json).ToLocalChecked();
+ CHECK(v8::JSON::Stringify(context, parsed)
+ .ToLocalChecked()
+ ->Equals(context, json)
+ .FromJust());
+ }
+
+ // Test some trailing surrogates (U+DC00 to U+DFFF).
+ { // U+DC00
+ CHECK_EQ(
+ 0, strcmp("\"\\udc00\"", *v8::String::Utf8Value(
+ CcTest::isolate(),
+ CompileRun("JSON.stringify('\\uDC00')"))));
+ v8::Local<v8::String> json = v8_str("\"\\udc00\"");
+ v8::Local<v8::Value> parsed =
+ v8::JSON::Parse(context, json).ToLocalChecked();
+ CHECK(v8::JSON::Stringify(context, parsed)
+ .ToLocalChecked()
+ ->Equals(context, json)
+ .FromJust());
+ }
+
+ { // U+DDDD
+ CHECK_EQ(
+ 0, strcmp("\"\\udddd\"", *v8::String::Utf8Value(
+ CcTest::isolate(),
+ CompileRun("JSON.stringify('\\uDDDD')"))));
+ v8::Local<v8::String> json = v8_str("\"\\udddd\"");
+ v8::Local<v8::Value> parsed =
+ v8::JSON::Parse(context, json).ToLocalChecked();
+ CHECK(v8::JSON::Stringify(context, parsed)
+ .ToLocalChecked()
+ ->Equals(context, json)
+ .FromJust());
+ }
+
+ { // U+DFFF
+ CHECK_EQ(
+ 0, strcmp("\"\\udfff\"", *v8::String::Utf8Value(
+ CcTest::isolate(),
+ CompileRun("JSON.stringify('\\uDFFF')"))));
+ v8::Local<v8::String> json = v8_str("\"\\udfff\"");
+ v8::Local<v8::Value> parsed =
+ v8::JSON::Parse(context, json).ToLocalChecked();
+ CHECK(v8::JSON::Stringify(context, parsed)
+ .ToLocalChecked()
+ ->Equals(context, json)
+ .FromJust());
+ }
+}
TEST(CachedHashOverflow) {
CcTest::InitializeVM();
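Editor's note: all six cases above exercise the same rule from the well-formed
JSON.stringify proposal: a surrogate code unit is escaped as \uXXXX unless it
participates in a valid lead/trail pair. A standalone sketch of that detection
rule (a hypothetical helper, not V8's implementation):

    #include <cstddef>
    #include <string>

    // True if the UTF-16 code unit at index i is a surrogate that is not
    // part of a valid lead+trail pair, i.e. one that well-formed
    // JSON.stringify must emit as a \uXXXX escape.
    bool IsUnpairedSurrogateAt(const std::u16string& s, size_t i) {
      char16_t c = s[i];
      if (c >= 0xD800 && c <= 0xDBFF) {  // lead: needs a trail right after
        return i + 1 >= s.size() || s[i + 1] < 0xDC00 || s[i + 1] > 0xDFFF;
      }
      if (c >= 0xDC00 && c <= 0xDFFF) {  // trail: needs a lead right before
        return i == 0 || s[i - 1] < 0xD800 || s[i - 1] > 0xDBFF;
      }
      return false;  // not a surrogate at all
    }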
@@ -1186,9 +1278,9 @@ class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
public:
explicit OneByteVectorResource(i::Vector<const char> vector)
: data_(vector) {}
- virtual ~OneByteVectorResource() {}
- virtual size_t length() const { return data_.length(); }
- virtual const char* data() const { return data_.start(); }
+ ~OneByteVectorResource() override = default;
+ size_t length() const override { return data_.length(); }
+ const char* data() const override { return data_.start(); }
private:
i::Vector<const char> data_;
};
@@ -1464,15 +1556,15 @@ TEST(Latin1IgnoreCase) {
class DummyResource: public v8::String::ExternalStringResource {
public:
- virtual const uint16_t* data() const { return nullptr; }
- virtual size_t length() const { return 1 << 30; }
+ const uint16_t* data() const override { return nullptr; }
+ size_t length() const override { return 1 << 30; }
};
class DummyOneByteResource: public v8::String::ExternalOneByteStringResource {
public:
- virtual const char* data() const { return nullptr; }
- virtual size_t length() const { return 1 << 30; }
+ const char* data() const override { return nullptr; }
+ size_t length() const override { return 1 << 30; }
};
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 902295447b..16a18c51d0 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -166,7 +166,7 @@ class TerminatorThread : public v8::base::Thread {
explicit TerminatorThread(i::Isolate* isolate)
: Thread(Options("TerminatorThread")),
isolate_(reinterpret_cast<v8::Isolate*>(isolate)) {}
- void Run() {
+ void Run() override {
semaphore->Wait();
CHECK(!isolate_->IsExecutionTerminating());
isolate_->TerminateExecution();
@@ -800,7 +800,7 @@ class TerminatorSleeperThread : public v8::base::Thread {
: Thread(Options("TerminatorSlepperThread")),
isolate_(isolate),
sleep_ms_(sleep_ms) {}
- void Run() {
+ void Run() override {
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(sleep_ms_));
CHECK(!isolate_->IsExecutionTerminating());
isolate_->TerminateExecution();
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index d5c94eff0d..aaecababd2 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -43,7 +43,7 @@ class ThreadIdValidationThread : public v8::base::Thread {
thread_to_start_(thread_to_start),
semaphore_(semaphore) {}
- void Run() {
+ void Run() override {
i::ThreadId thread_id = i::ThreadId::Current();
for (int i = 0; i < thread_no_; i++) {
CHECK(!(*refs_)[i].Equals(thread_id));
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 10b837aaed..f73641d9cf 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -39,7 +39,7 @@ typedef std::vector<MockTraceObject*> MockTraceObjectList;
class MockTracingController : public v8::TracingController {
public:
MockTracingController() = default;
- ~MockTracingController() {
+ ~MockTracingController() override {
for (size_t i = 0; i < trace_object_list_.size(); ++i) {
delete trace_object_list_[i];
}
@@ -98,7 +98,7 @@ class MockTracingPlatform : public TestPlatform {
// Now that it's completely constructed, make this the current platform.
i::V8::SetPlatformForTesting(this);
}
- virtual ~MockTracingPlatform() {}
+ ~MockTracingPlatform() override = default;
v8::TracingController* GetTracingController() override {
return &tracing_controller_;
@@ -289,7 +289,6 @@ TEST(BuiltinsIsTraceCategoryEnabled) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
LocalContext env;
v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
@@ -308,7 +307,7 @@ TEST(BuiltinsIsTraceCategoryEnabled) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(result->BooleanValue(context).ToChecked());
+ CHECK(result->BooleanValue(isolate));
}
{
@@ -318,7 +317,7 @@ TEST(BuiltinsIsTraceCategoryEnabled) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(!result->BooleanValue(context).ToChecked());
+ CHECK(!result->BooleanValue(isolate));
}
{
@@ -328,7 +327,7 @@ TEST(BuiltinsIsTraceCategoryEnabled) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(result->BooleanValue(context).ToChecked());
+ CHECK(result->BooleanValue(isolate));
}
}
@@ -362,7 +361,7 @@ TEST(BuiltinsTrace) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(!result->BooleanValue(context).ToChecked());
+ CHECK(!result->BooleanValue(isolate));
CHECK_EQ(0, GET_TRACE_OBJECTS_LIST->size());
}
@@ -381,7 +380,7 @@ TEST(BuiltinsTrace) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(result->BooleanValue(context).ToChecked());
+ CHECK(result->BooleanValue(isolate));
CHECK_EQ(1, GET_TRACE_OBJECTS_LIST->size());
CHECK_EQ(123, GET_TRACE_OBJECT(0)->id);
@@ -405,7 +404,7 @@ TEST(BuiltinsTrace) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(result->BooleanValue(context).ToChecked());
+ CHECK(result->BooleanValue(isolate));
CHECK_EQ(2, GET_TRACE_OBJECTS_LIST->size());
CHECK_EQ(123, GET_TRACE_OBJECT(1)->id);
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index b574fdd94a..a0f9385bf1 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -86,8 +86,6 @@ TEST(AllocateNotExternal) {
void TestSpeciesProtector(char* code,
bool invalidates_species_protector = true) {
- // Make BigInt64Array/BigUint64Array available for testing.
- FLAG_harmony_bigint = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
std::string typed_array_constructors[] = {
diff --git a/deps/v8/test/cctest/test-usecounters.cc b/deps/v8/test/cctest/test-usecounters.cc
index 5e37991252..4c6c72a28d 100644
--- a/deps/v8/test/cctest/test-usecounters.cc
+++ b/deps/v8/test/cctest/test-usecounters.cc
@@ -60,6 +60,27 @@ TEST(AssigmentExpressionLHSIsCall) {
use_counts[v8::Isolate::kAssigmentExpressionLHSIsCallInStrict] = 0;
}
+TEST(AtomicsWakeAndAtomicsNotify) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+ int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
+ global_use_counts = use_counts;
+ i::FLAG_harmony_sharedarraybuffer = true;
+ CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
+
+ CompileRun("Atomics.wake(new Int32Array(new SharedArrayBuffer(16)), 0);");
+ CHECK_EQ(1, use_counts[v8::Isolate::kAtomicsWake]);
+ CHECK_EQ(0, use_counts[v8::Isolate::kAtomicsNotify]);
+
+ use_counts[v8::Isolate::kAtomicsWake] = 0;
+ use_counts[v8::Isolate::kAtomicsNotify] = 0;
+
+ CompileRun("Atomics.notify(new Int32Array(new SharedArrayBuffer(16)), 0);");
+ CHECK_EQ(0, use_counts[v8::Isolate::kAtomicsWake]);
+ CHECK_EQ(1, use_counts[v8::Isolate::kAtomicsNotify]);
+}
+
} // namespace test_usecounters
} // namespace internal
} // namespace v8
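Editor's note: for context, the use counters here work by the embedder
registering a callback that V8 invokes with a feature enum whenever a counted
feature executes. The MockUseCounterCallback used above is defined earlier in
this file; a minimal sketch of such a callback follows (everything outside the
v8:: API names is an assumption):

    #include "v8.h"  // include path is an assumption; adjust to your build

    // Hypothetical counter storage, sized by the public enum bound.
    static int counts[v8::Isolate::kUseCounterFeatureCount];

    // Matches v8::Isolate::UseCounterCallback's signature; register it with
    // isolate->SetUseCounterCallback(CountFeature).
    void CountFeature(v8::Isolate* isolate,
                      v8::Isolate::UseCounterFeature feature) {
      ++counts[feature];
    }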
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 08d7bea874..b6fe4e7597 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -88,7 +88,7 @@ TEST(Weakness) {
CHECK_EQ(2, EphemeronHashTable::cast(weakmap->table())->NumberOfElements());
// Force a full GC.
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
CHECK_EQ(0, NumberOfWeakCalls);
CHECK_EQ(2, EphemeronHashTable::cast(weakmap->table())->NumberOfElements());
CHECK_EQ(
@@ -101,7 +101,7 @@ TEST(Weakness) {
&WeakPointerCallback, v8::WeakCallbackType::kParameter);
CHECK(global_handles->IsWeak(key.location()));
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
CHECK_EQ(1, NumberOfWeakCalls);
CHECK_EQ(0, EphemeronHashTable::cast(weakmap->table())->NumberOfElements());
CHECK_EQ(
@@ -138,7 +138,7 @@ TEST(Shrinking) {
CHECK_EQ(32, EphemeronHashTable::cast(weakmap->table())->NumberOfElements());
CHECK_EQ(
0, EphemeronHashTable::cast(weakmap->table())->NumberOfDeletedElements());
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
CHECK_EQ(0, EphemeronHashTable::cast(weakmap->table())->NumberOfElements());
CHECK_EQ(
32,
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 8a3c1323a3..763a809f87 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -99,7 +99,7 @@ TEST(WeakSet_Weakness) {
CHECK_EQ(1, EphemeronHashTable::cast(weakset->table())->NumberOfElements());
// Force a full GC.
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
CHECK_EQ(0, NumberOfWeakCalls);
CHECK_EQ(1, EphemeronHashTable::cast(weakset->table())->NumberOfElements());
CHECK_EQ(
@@ -112,7 +112,7 @@ TEST(WeakSet_Weakness) {
&WeakPointerCallback, v8::WeakCallbackType::kParameter);
CHECK(global_handles->IsWeak(key.location()));
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
CHECK_EQ(1, NumberOfWeakCalls);
CHECK_EQ(0, EphemeronHashTable::cast(weakset->table())->NumberOfElements());
CHECK_EQ(
@@ -149,7 +149,7 @@ TEST(WeakSet_Shrinking) {
CHECK_EQ(32, EphemeronHashTable::cast(weakset->table())->NumberOfElements());
CHECK_EQ(
0, EphemeronHashTable::cast(weakset->table())->NumberOfDeletedElements());
- CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::PreciseCollectAllGarbage();
CHECK_EQ(0, EphemeronHashTable::cast(weakset->table())->NumberOfElements());
CHECK_EQ(
32,
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index d79d6e4eb4..562f44098a 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -66,6 +66,18 @@ class TestCase(testcase.TestCase):
def _get_files_params(self):
return [self.path]
+ def _get_resources(self):
+ # Bytecode-generator tests are the only ones requiring extra files on
+ # Android.
+ parts = self.name.split('/')
+ if parts[0] == 'test-bytecode-generator':
+ expectation_file = os.path.join(
+ self.suite.root, 'interpreter', 'bytecode_expectations',
+ '%s.golden' % parts[1])
+ if os.path.exists(expectation_file):
+ return [expectation_file]
+ return []
+
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 439fe043b8..c339aa4134 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -259,6 +259,33 @@ TEST(TestGenericOverload) {
ft.Call();
}
+TEST(TestLogicalOperators) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ {
+ m.TestLogicalOperators();
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestOtherwiseAndLabels) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ {
+ m.TestOtherwiseWithCode1();
+ m.TestOtherwiseWithCode2();
+ m.TestOtherwiseWithCode3();
+ m.TestForwardLabel();
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/trace-extension.h b/deps/v8/test/cctest/trace-extension.h
index bfebd34c06..385f0c23c2 100644
--- a/deps/v8/test/cctest/trace-extension.h
+++ b/deps/v8/test/cctest/trace-extension.h
@@ -38,8 +38,8 @@ namespace internal {
class TraceExtension : public v8::Extension {
public:
TraceExtension() : v8::Extension("v8/trace", kSource) { }
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name);
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
static void Trace(const v8::FunctionCallbackInfo<v8::Value>& args);
static void JSTrace(const v8::FunctionCallbackInfo<v8::Value>& args);
static void JSEntrySP(const v8::FunctionCallbackInfo<v8::Value>& args);
diff --git a/deps/v8/test/cctest/unicode-helpers.cc b/deps/v8/test/cctest/unicode-helpers.cc
new file mode 100644
index 0000000000..524e5936fc
--- /dev/null
+++ b/deps/v8/test/cctest/unicode-helpers.cc
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/unicode-helpers.h"
+
+int Ucs2CharLength(unibrow::uchar c) {
+ if (c == unibrow::Utf8::kIncomplete || c == unibrow::Utf8::kBufferEmpty) {
+ return 0;
+ } else if (c < 0xFFFF) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+int Utf8LengthHelper(const char* s) {
+ unibrow::Utf8::Utf8IncrementalBuffer buffer(unibrow::Utf8::kBufferEmpty);
+ unibrow::Utf8::State state = unibrow::Utf8::State::kAccept;
+
+ int length = 0;
+ size_t i = 0;
+ while (s[i] != '\0') {
+ unibrow::uchar tmp =
+ unibrow::Utf8::ValueOfIncremental(s[i], &i, &state, &buffer);
+ length += Ucs2CharLength(tmp);
+ }
+ unibrow::uchar tmp = unibrow::Utf8::ValueOfIncrementalFinish(&state);
+ length += Ucs2CharLength(tmp);
+ return length;
+}
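Editor's note: concretely, Utf8LengthHelper returns the number of UTF-16 (UCS-2)
code units a UTF-8 input decodes to. A usage sketch, assuming the header added
above; expected values are shown inline:

    #include <cassert>
    #include "test/cctest/unicode-helpers.h"

    int main() {
      assert(Utf8LengthHelper("abc") == 3);               // ASCII: 1 unit each
      assert(Utf8LengthHelper("\xC3\xA9") == 1);          // U+00E9: 1 unit
      assert(Utf8LengthHelper("\xF0\x9F\x98\x80") == 2);  // U+1F600: surrogate pair
      return 0;
    }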
diff --git a/deps/v8/test/cctest/unicode-helpers.h b/deps/v8/test/cctest/unicode-helpers.h
index ca75fb65d7..06c3fcd8ea 100644
--- a/deps/v8/test/cctest/unicode-helpers.h
+++ b/deps/v8/test/cctest/unicode-helpers.h
@@ -7,30 +7,7 @@
#include "src/unicode.h"
-static int Ucs2CharLength(unibrow::uchar c) {
- if (c == unibrow::Utf8::kIncomplete || c == unibrow::Utf8::kBufferEmpty) {
- return 0;
- } else if (c < 0xFFFF) {
- return 1;
- } else {
- return 2;
- }
-}
-
-static int Utf8LengthHelper(const char* s) {
- unibrow::Utf8::Utf8IncrementalBuffer buffer(unibrow::Utf8::kBufferEmpty);
- unibrow::Utf8::State state = unibrow::Utf8::State::kAccept;
-
- int length = 0;
- size_t i = 0;
- while (s[i] != '\0') {
- unibrow::uchar tmp =
- unibrow::Utf8::ValueOfIncremental(s[i], &i, &state, &buffer);
- length += Ucs2CharLength(tmp);
- }
- unibrow::uchar tmp = unibrow::Utf8::ValueOfIncrementalFinish(&state);
- length += Ucs2CharLength(tmp);
- return length;
-}
+int Ucs2CharLength(unibrow::uchar c);
+int Utf8LengthHelper(const char* s);
#endif // V8_CCTEST_UNICODE_HELPERS_H_
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index e56060bdd9..ca6662c90c 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -62,10 +62,11 @@ class CWasmEntryArgTester {
Handle<Object> buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
isolate_);
CHECK(!buffer_obj->IsHeapObject());
- Handle<Object> call_args[]{
- Handle<Object>::cast(isolate_->factory()->NewForeign(
- wasm_code_->instruction_start(), TENURED)),
- runner_.builder().instance_object(), buffer_obj};
+ Handle<Object> code_entry_obj(
+ reinterpret_cast<Object*>(wasm_code_->instruction_start()), isolate_);
+ CHECK(!code_entry_obj->IsHeapObject());
+ Handle<Object> call_args[]{code_entry_obj,
+ runner_.builder().instance_object(), buffer_obj};
static_assert(
arraysize(call_args) == compiler::CWasmEntryParameters::kNumParameters,
"adapt this test");
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index 96877fd571..8eddaa0224 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -32,24 +32,12 @@ void RunU32BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
}
}
-WASM_EXEC_TEST(I32AtomicAdd) {
- RunU32BinOp(execution_tier, kExprI32AtomicAdd, Add);
-}
-WASM_EXEC_TEST(I32AtomicSub) {
- RunU32BinOp(execution_tier, kExprI32AtomicSub, Sub);
-}
-WASM_EXEC_TEST(I32AtomicAnd) {
- RunU32BinOp(execution_tier, kExprI32AtomicAnd, And);
-}
-WASM_EXEC_TEST(I32AtomicOr) {
- RunU32BinOp(execution_tier, kExprI32AtomicOr, Or);
-}
-WASM_EXEC_TEST(I32AtomicXor) {
- RunU32BinOp(execution_tier, kExprI32AtomicXor, Xor);
-}
-WASM_EXEC_TEST(I32AtomicExchange) {
- RunU32BinOp(execution_tier, kExprI32AtomicExchange, Exchange);
-}
+#define TEST_OPERATION(Name) \
+ WASM_EXEC_TEST(I32Atomic##Name) { \
+ RunU32BinOp(execution_tier, kExprI32Atomic##Name, Name); \
+ }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
Uint16BinOp expected_op) {
@@ -73,24 +61,12 @@ void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
}
}
-WASM_EXEC_TEST(I32AtomicAdd16U) {
- RunU16BinOp(execution_tier, kExprI32AtomicAdd16U, Add);
-}
-WASM_EXEC_TEST(I32AtomicSub16U) {
- RunU16BinOp(execution_tier, kExprI32AtomicSub16U, Sub);
-}
-WASM_EXEC_TEST(I32AtomicAnd16U) {
- RunU16BinOp(execution_tier, kExprI32AtomicAnd16U, And);
-}
-WASM_EXEC_TEST(I32AtomicOr16U) {
- RunU16BinOp(execution_tier, kExprI32AtomicOr16U, Or);
-}
-WASM_EXEC_TEST(I32AtomicXor16U) {
- RunU16BinOp(execution_tier, kExprI32AtomicXor16U, Xor);
-}
-WASM_EXEC_TEST(I32AtomicExchange16U) {
- RunU16BinOp(execution_tier, kExprI32AtomicExchange16U, Exchange);
-}
+#define TEST_OPERATION(Name) \
+ WASM_EXEC_TEST(I32Atomic##Name##16U) { \
+ RunU16BinOp(execution_tier, kExprI32Atomic##Name##16U, Name); \
+ }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
Uint8BinOp expected_op) {
@@ -113,24 +89,12 @@ void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
}
}
-WASM_EXEC_TEST(I32AtomicAdd8U) {
- RunU8BinOp(execution_tier, kExprI32AtomicAdd8U, Add);
-}
-WASM_EXEC_TEST(I32AtomicSub8U) {
- RunU8BinOp(execution_tier, kExprI32AtomicSub8U, Sub);
-}
-WASM_EXEC_TEST(I32AtomicAnd8U) {
- RunU8BinOp(execution_tier, kExprI32AtomicAnd8U, And);
-}
-WASM_EXEC_TEST(I32AtomicOr8U) {
- RunU8BinOp(execution_tier, kExprI32AtomicOr8U, Or);
-}
-WASM_EXEC_TEST(I32AtomicXor8U) {
- RunU8BinOp(execution_tier, kExprI32AtomicXor8U, Xor);
-}
-WASM_EXEC_TEST(I32AtomicExchange8U) {
- RunU8BinOp(execution_tier, kExprI32AtomicExchange8U, Exchange);
-}
+#define TEST_OPERATION(Name) \
+ WASM_EXEC_TEST(I32Atomic##Name##8U) { \
+ RunU8BinOp(execution_tier, kExprI32Atomic##Name##8U, Name); \
+ }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
WASM_EXEC_TEST(I32AtomicCompareExchange) {
EXPERIMENTAL_FLAG_SCOPE(threads);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
index 21b943595a..570c48d240 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
@@ -32,24 +32,12 @@ void RunU64BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
}
}
-WASM_EXEC_TEST(I64AtomicAdd) {
- RunU64BinOp(execution_tier, kExprI64AtomicAdd, Add);
-}
-WASM_EXEC_TEST(I64AtomicSub) {
- RunU64BinOp(execution_tier, kExprI64AtomicSub, Sub);
-}
-WASM_EXEC_TEST(I64AtomicAnd) {
- RunU64BinOp(execution_tier, kExprI64AtomicAnd, And);
-}
-WASM_EXEC_TEST(I64AtomicOr) {
- RunU64BinOp(execution_tier, kExprI64AtomicOr, Or);
-}
-WASM_EXEC_TEST(I64AtomicXor) {
- RunU64BinOp(execution_tier, kExprI64AtomicXor, Xor);
-}
-WASM_EXEC_TEST(I64AtomicExchange) {
- RunU64BinOp(execution_tier, kExprI64AtomicExchange, Exchange);
-}
+#define TEST_OPERATION(Name) \
+ WASM_EXEC_TEST(I64Atomic##Name) { \
+ RunU64BinOp(execution_tier, kExprI64Atomic##Name, Name); \
+ }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
void RunU32BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
Uint32BinOp expected_op) {
@@ -73,24 +61,12 @@ void RunU32BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
}
}
-WASM_EXEC_TEST(I64AtomicAdd32U) {
- RunU32BinOp(execution_tier, kExprI64AtomicAdd32U, Add);
-}
-WASM_EXEC_TEST(I64AtomicSub32U) {
- RunU32BinOp(execution_tier, kExprI64AtomicSub32U, Sub);
-}
-WASM_EXEC_TEST(I64AtomicAnd32U) {
- RunU32BinOp(execution_tier, kExprI64AtomicAnd32U, And);
-}
-WASM_EXEC_TEST(I64AtomicOr32U) {
- RunU32BinOp(execution_tier, kExprI64AtomicOr32U, Or);
-}
-WASM_EXEC_TEST(I64AtomicXor32U) {
- RunU32BinOp(execution_tier, kExprI64AtomicXor32U, Xor);
-}
-WASM_EXEC_TEST(I64AtomicExchange32U) {
- RunU32BinOp(execution_tier, kExprI64AtomicExchange32U, Exchange);
-}
+#define TEST_OPERATION(Name) \
+ WASM_EXEC_TEST(I64Atomic##Name##32U) { \
+ RunU32BinOp(execution_tier, kExprI64Atomic##Name##32U, Name); \
+ }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
Uint16BinOp expected_op) {
@@ -114,24 +90,12 @@ void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
}
}
-WASM_EXEC_TEST(I64AtomicAdd16U) {
- RunU16BinOp(execution_tier, kExprI64AtomicAdd16U, Add);
-}
-WASM_EXEC_TEST(I64AtomicSub16U) {
- RunU16BinOp(execution_tier, kExprI64AtomicSub16U, Sub);
-}
-WASM_EXEC_TEST(I64AtomicAnd16U) {
- RunU16BinOp(execution_tier, kExprI64AtomicAnd16U, And);
-}
-WASM_EXEC_TEST(I64AtomicOr16U) {
- RunU16BinOp(execution_tier, kExprI64AtomicOr16U, Or);
-}
-WASM_EXEC_TEST(I64AtomicXor16U) {
- RunU16BinOp(execution_tier, kExprI64AtomicXor16U, Xor);
-}
-WASM_EXEC_TEST(I64AtomicExchange16U) {
- RunU16BinOp(execution_tier, kExprI64AtomicExchange16U, Exchange);
-}
+#define TEST_OPERATION(Name) \
+ WASM_EXEC_TEST(I64Atomic##Name##16U) { \
+ RunU16BinOp(execution_tier, kExprI64Atomic##Name##16U, Name); \
+ }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
Uint8BinOp expected_op) {
@@ -154,24 +118,12 @@ void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
}
}
-WASM_EXEC_TEST(I64AtomicAdd8U) {
- RunU8BinOp(execution_tier, kExprI64AtomicAdd8U, Add);
-}
-WASM_EXEC_TEST(I64AtomicSub8U) {
- RunU8BinOp(execution_tier, kExprI64AtomicSub8U, Sub);
-}
-WASM_EXEC_TEST(I64AtomicAnd8U) {
- RunU8BinOp(execution_tier, kExprI64AtomicAnd8U, And);
-}
-WASM_EXEC_TEST(I64AtomicOr8U) {
- RunU8BinOp(execution_tier, kExprI64AtomicOr8U, Or);
-}
-WASM_EXEC_TEST(I64AtomicXor8U) {
- RunU8BinOp(execution_tier, kExprI64AtomicXor8U, Xor);
-}
-WASM_EXEC_TEST(I64AtomicExchange8U) {
- RunU8BinOp(execution_tier, kExprI64AtomicExchange8U, Exchange);
-}
+#define TEST_OPERATION(Name) \
+ WASM_EXEC_TEST(I64Atomic##Name##8U) { \
+ RunU8BinOp(execution_tier, kExprI64Atomic##Name##8U, Name); \
+ }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicCompareExchange) {
EXPERIMENTAL_FLAG_SCOPE(threads);
@@ -402,6 +354,172 @@ WASM_EXEC_TEST(I64AtomicStoreLoad8U) {
}
}
+// Drop tests verify that atomic operations run correctly when the
+// entire 64-bit output is optimized out.
+void RunDropTest(ExecutionTier execution_tier, WasmOpcode wasm_op,
+ Uint64BinOp op) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ r.builder().SetHasSharedMemory();
+
+ BUILD(r,
+ WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0),
+ MachineRepresentation::kWord64),
+ WASM_DROP, WASM_GET_LOCAL(0));
+
+ uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(local, r.Call(local));
+ uint64_t expected = op(initial, local);
+ CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
+}
+
+#define TEST_OPERATION(Name) \
+ WASM_EXEC_TEST(I64Atomic##Name##Drop) { \
+ RunDropTest(execution_tier, kExprI64Atomic##Name, Name); \
+ }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
+
+WASM_EXEC_TEST(I64AtomicSub16UDrop) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
+ uint16_t* memory =
+ r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
+ r.builder().SetHasSharedMemory();
+
+ BUILD(r,
+ WASM_ATOMICS_BINOP(kExprI64AtomicSub16U, WASM_I32V_1(0),
+ WASM_GET_LOCAL(0), MachineRepresentation::kWord16),
+ WASM_DROP, WASM_GET_LOCAL(0));
+
+ uint16_t initial = 0x7, local = 0xffe0;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(local, r.Call(local));
+ uint16_t expected = Sub(initial, local);
+ CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
+}
+
+WASM_EXEC_TEST(I64AtomicCompareExchangeDrop) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
+ r.builder().SetHasSharedMemory();
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ BUILD(r,
+ WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange, WASM_I32V_1(0),
+ WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ MachineRepresentation::kWord64),
+ WASM_DROP, WASM_GET_LOCAL(1));
+
+ uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(local, r.Call(initial, local));
+ uint64_t expected = CompareExchange(initial, initial, local);
+ CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
+}
+
+WASM_EXEC_TEST(I64AtomicStoreLoadDrop) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
+ r.builder().SetHasSharedMemory();
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+
+ BUILD(r,
+ WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_ZERO, WASM_GET_LOCAL(0),
+ MachineRepresentation::kWord64),
+ WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_ZERO,
+ MachineRepresentation::kWord64),
+ WASM_DROP, WASM_GET_LOCAL(1));
+
+ uint64_t store_value = 0x1111111111111111, expected = 0xC0DE;
+ CHECK_EQ(expected, r.Call(store_value, expected));
+ CHECK_EQ(store_value, r.builder().ReadMemory(&memory[0]));
+}
+
+WASM_EXEC_TEST(I64AtomicAddConvertDrop) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ r.builder().SetHasSharedMemory();
+
+ BUILD(r,
+ WASM_ATOMICS_BINOP(kExprI64AtomicAdd, WASM_I32V_1(0), WASM_GET_LOCAL(0),
+ MachineRepresentation::kWord64),
+ kExprI32ConvertI64, WASM_DROP, WASM_GET_LOCAL(0));
+
+ uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(local, r.Call(local));
+ uint64_t expected = Add(initial, local);
+ CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
+}
+
+WASM_EXEC_TEST(I64AtomicLoadConvertDrop) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t, uint64_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ r.builder().SetHasSharedMemory();
+
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_LOAD_OP(
+ kExprI64AtomicLoad, WASM_ZERO, MachineRepresentation::kWord64)));
+
+ uint64_t initial = 0x1111222233334444;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(static_cast<uint32_t>(initial), r.Call(initial));
+}
+
+// Convert tests verify that atomic operations run correctly when the
+// upper half of the 64-bit output is optimized out.
+void RunConvertTest(ExecutionTier execution_tier, WasmOpcode wasm_op,
+ Uint64BinOp op) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t, uint64_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ r.builder().SetHasSharedMemory();
+
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_BINOP(
+               wasm_op, WASM_ZERO, WASM_GET_LOCAL(0),
+ MachineRepresentation::kWord64)));
+
+ uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(static_cast<uint32_t>(initial), r.Call(local));
+  uint64_t expected = op(initial, local);
+ CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
+}
+
+#define TEST_OPERATION(Name) \
+ WASM_EXEC_TEST(I64AtomicConvert##Name) { \
+ RunConvertTest(execution_tier, kExprI64Atomic##Name, Name); \
+ }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
+
+WASM_EXEC_TEST(I64AtomicConvertCompareExchange) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t, uint64_t, uint64_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ r.builder().SetHasSharedMemory();
+
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I32V_1(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), MachineRepresentation::kWord64)));
+
+ uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(static_cast<uint32_t>(initial), r.Call(initial, local));
+ uint64_t expected = CompareExchange(initial, initial, local);
+ CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
+}
+
} // namespace test_run_wasm_atomics_64
} // namespace wasm
} // namespace internal
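Editor's note: the CompareExchange helper used in the expected-value
computations above mirrors the wasm operation's memory effect: it yields the
new memory contents given the old value, the expected value, and the
replacement. The real helper lives in wasm-atomics-utils.h; this one-liner is
an assumption about its shape, consistent with how the tests call it:

    #include <cstdint>

    // Returns what memory holds after the compare-exchange: 'replacement'
    // if the old value matched 'expected', otherwise the old value.
    uint64_t CompareExchange(uint64_t initial, uint64_t expected,
                             uint64_t replacement) {
      return (initial == expected) ? replacement : initial;
    }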
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index d25aeafa33..21d92cbada 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -198,8 +198,8 @@ TEST(Run_WasmModule_Global) {
TestSignatures sigs;
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint32_t global1 = builder->AddGlobal(kWasmI32, 0);
- uint32_t global2 = builder->AddGlobal(kWasmI32, 0);
+ uint32_t global1 = builder->AddGlobal(kWasmI32, false);
+ uint32_t global2 = builder->AddGlobal(kWasmI32, false);
WasmFunctionBuilder* f1 = builder->AddFunction(sigs.i_v());
byte code1[] = {
WASM_I32_ADD(WASM_GET_GLOBAL(global1), WASM_GET_GLOBAL(global2))};
@@ -284,7 +284,7 @@ class InterruptThread : public v8::base::Thread {
WriteLittleEndianValue<int32_t>(ptr, interrupt_value_);
}
- virtual void Run() {
+ void Run() override {
// Wait for the main thread to write the signal value.
int32_t val = 0;
do {
@@ -771,7 +771,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
uint32_t result = WasmMemoryObject::Grow(isolate, memory_object, 4);
CHECK_EQ(16, result);
CHECK(buffer1.buffer_->was_neutered()); // growing always neuters
- CHECK_EQ(0, buffer1.buffer_->byte_length()->Number());
+ CHECK_EQ(0, buffer1.buffer_->byte_length());
CHECK_NE(*buffer1.buffer_, memory_object->array_buffer());
@@ -782,7 +782,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
CHECK_EQ(26, result);
CHECK(buffer2.buffer_->was_neutered()); // growing always neuters
- CHECK_EQ(0, buffer2.buffer_->byte_length()->Number());
+ CHECK_EQ(0, buffer2.buffer_->byte_length());
CHECK_NE(*buffer2.buffer_, memory_object->array_buffer());
}
Cleanup();
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index f60c65b727..b0f3dcf8ce 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -1080,8 +1080,6 @@ WASM_SIMD_TEST(I32x4ShrU) {
LogicalShiftRight);
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Tests both signed and unsigned conversion from I8x16 (unpacking).
WASM_SIMD_TEST(I16x8ConvertI8x16) {
WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t> r(execution_tier,
@@ -1124,8 +1122,6 @@ WASM_SIMD_TEST(I16x8ConvertI8x16) {
CHECK_EQ(1, r.Call(*i, unpacked_signed, unpacked_unsigned, 0));
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
void RunI16x8UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int16UnOp expected_op) {
@@ -1144,8 +1140,6 @@ WASM_SIMD_TEST(I16x8Neg) {
RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Neg, Negate);
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Tests both signed and unsigned conversion from I32x4 (packing).
WASM_SIMD_TEST(I16x8ConvertI32x4) {
WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t> r(
@@ -1190,8 +1184,6 @@ WASM_SIMD_TEST(I16x8ConvertI32x4) {
}
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
void RunI16x8BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int16BinOp expected_op) {
@@ -1374,8 +1366,6 @@ WASM_SIMD_TEST(I8x16Neg) {
RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Neg, Negate);
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_SIMD_TEST(I8x16ConvertI16x8) {
WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t> r(
@@ -1422,8 +1412,6 @@ WASM_SIMD_TEST(I8x16ConvertI16x8) {
}
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
void RunI8x16BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int8BinOp expected_op) {
@@ -2012,6 +2000,8 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
}
}
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
// result. Use relational ops on numeric vectors to create the boolean vector
@@ -2099,8 +2089,6 @@ WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
WASM_I32V(1), WASM_I32V(0)));
CHECK_EQ(1, r.Call());
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index ec93639e17..26e98a1ba4 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -72,7 +72,7 @@ class BreakHandler : public debug::DebugDelegate {
: isolate_(isolate), expected_breaks_(expected_breaks) {
v8::debug::SetDebugDelegate(reinterpret_cast<v8::Isolate*>(isolate_), this);
}
- ~BreakHandler() {
+ ~BreakHandler() override {
// Check that all expected breakpoints have been hit.
CHECK_EQ(count_, expected_breaks_.size());
v8::debug::SetDebugDelegate(reinterpret_cast<v8::Isolate*>(isolate_),
@@ -181,7 +181,7 @@ class CollectValuesBreakHandler : public debug::DebugDelegate {
: isolate_(isolate), expected_values_(expected_values) {
v8::debug::SetDebugDelegate(reinterpret_cast<v8::Isolate*>(isolate_), this);
}
- ~CollectValuesBreakHandler() {
+ ~CollectValuesBreakHandler() override {
v8::debug::SetDebugDelegate(reinterpret_cast<v8::Isolate*>(isolate_),
nullptr);
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
index 5e70edf830..5d383bb9c5 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
@@ -56,7 +56,7 @@ class SharedEngineIsolate {
public:
explicit SharedEngineIsolate(SharedEngine* engine)
: isolate_(v8::Isolate::Allocate()) {
- isolate()->set_wasm_engine(engine->ExportEngineForSharing());
+ isolate()->SetWasmEngine(engine->ExportEngineForSharing());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate::Initialize(isolate_, create_params);
@@ -116,7 +116,7 @@ class SharedEngineThread : public v8::base::Thread {
engine_(engine),
callback_(callback) {}
- virtual void Run() {
+ void Run() override {
SharedEngineIsolate isolate(engine_);
callback_(isolate);
}
@@ -145,10 +145,10 @@ class MockInstantiationResolver : public InstantiationResultResolver {
public:
explicit MockInstantiationResolver(Handle<Object>* out_instance)
: out_instance_(out_instance) {}
- virtual void OnInstantiationSucceeded(Handle<WasmInstanceObject> result) {
+ void OnInstantiationSucceeded(Handle<WasmInstanceObject> result) override {
*out_instance_->location() = *result;
}
- virtual void OnInstantiationFailed(Handle<Object> error_reason) {
+ void OnInstantiationFailed(Handle<Object> error_reason) override {
UNREACHABLE();
}
@@ -161,13 +161,13 @@ class MockCompilationResolver : public CompilationResultResolver {
MockCompilationResolver(SharedEngineIsolate& isolate,
Handle<Object>* out_instance)
: isolate_(isolate), out_instance_(out_instance) {}
- virtual void OnCompilationSucceeded(Handle<WasmModuleObject> result) {
+ void OnCompilationSucceeded(Handle<WasmModuleObject> result) override {
isolate_.isolate()->wasm_engine()->AsyncInstantiate(
isolate_.isolate(),
base::make_unique<MockInstantiationResolver>(out_instance_), result,
{});
}
- virtual void OnCompilationFailed(Handle<Object> error_reason) {
+ void OnCompilationFailed(Handle<Object> error_reason) override {
UNREACHABLE();
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 2bed7e64db..303fb75878 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -30,7 +30,7 @@ namespace {
"Check failed: (%s) != (%s) ('%s' vs '%s').", #exp, #found, \
exp_, found_ ? found_ : "<null>"); \
} \
- } while (0)
+ } while (false)
void PrintStackTrace(v8::Isolate* isolate, v8::Local<v8::StackTrace> stack) {
printf("Stack Trace (length %d):\n", stack->GetFrameCount());
@@ -157,7 +157,8 @@ WASM_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
int unreachable_pos = 1 << (8 * pos_shift);
TestSignatures sigs;
// Create a WasmRunner with stack checks and traps enabled.
- WasmRunner<int> r(execution_tier, 0, "main", kRuntimeExceptionSupport);
+ WasmRunner<int> r(execution_tier, nullptr, "main",
+ kRuntimeExceptionSupport);
std::vector<byte> code(unreachable_pos + 1, kExprNop);
code[unreachable_pos] = kExprUnreachable;
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index ad9b6d3b56..7b34ed824b 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -31,7 +31,7 @@ namespace {
"Check failed: (%s) != (%s) ('%s' vs '%s').", #exp, #found, \
exp_, found_ ? found_ : "<null>"); \
} \
- } while (0)
+ } while (false)
struct ExceptionInfo {
const char* func_name;
@@ -69,7 +69,7 @@ void CheckExceptionInfos(v8::internal::Isolate* i_isolate, Handle<Object> exc,
// Trigger a trap for executing unreachable.
WASM_EXEC_TEST(Unreachable) {
// Create a WasmRunner with stack checks and traps enabled.
- WasmRunner<void> r(execution_tier, 0, "main", kRuntimeExceptionSupport);
+ WasmRunner<void> r(execution_tier, nullptr, "main", kRuntimeExceptionSupport);
TestSignatures sigs;
BUILD(r, WASM_UNREACHABLE);
@@ -103,7 +103,7 @@ WASM_EXEC_TEST(Unreachable) {
// Trigger a trap for loading from out-of-bounds.
WASM_EXEC_TEST(IllegalLoad) {
- WasmRunner<void> r(execution_tier, 0, "main", kRuntimeExceptionSupport);
+ WasmRunner<void> r(execution_tier, nullptr, "main", kRuntimeExceptionSupport);
TestSignatures sigs;
r.builder().AddMemory(0L);
diff --git a/deps/v8/test/cctest/wasm/wasm-atomics-utils.h b/deps/v8/test/cctest/wasm/wasm-atomics-utils.h
index 1a0dd345b7..615deab208 100644
--- a/deps/v8/test/cctest/wasm/wasm-atomics-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-atomics-utils.h
@@ -13,6 +13,14 @@ namespace v8 {
namespace internal {
namespace wasm {
+#define OPERATION_LIST(V) \
+ V(Add) \
+ V(Sub) \
+ V(And) \
+ V(Or) \
+ V(Xor) \
+ V(Exchange)
+
typedef uint64_t (*Uint64BinOp)(uint64_t, uint64_t);
typedef uint32_t (*Uint32BinOp)(uint32_t, uint32_t);
typedef uint16_t (*Uint16BinOp)(uint16_t, uint16_t);
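Editor's note: OPERATION_LIST is the classic C++ "X macro" pattern: the list
macro applies a caller-supplied macro to each entry, which is what lets the
atomics tests above collapse six near-identical TEST bodies into a single
TEST_OPERATION definition. A self-contained illustration of the pattern:

    // X-macro sketch: one list, many generated definitions.
    #define COLOR_LIST(V) V(Red) V(Green) V(Blue)

    #define DECLARE_ENUM(Name) k##Name,
    enum Color { COLOR_LIST(DECLARE_ENUM) kColorCount };  // kRed, kGreen, ...
    #undef DECLARE_ENUM

    #define DECLARE_NAME(Name) #Name,
    const char* kColorNames[] = {COLOR_LIST(DECLARE_NAME)};  // "Red", ...
    #undef DECLARE_NAME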
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 5f623a46cc..4ce07089e2 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -41,13 +41,15 @@ TestingModuleBuilder::TestingModuleBuilder(
if (maybe_import) {
// Manually compile a wasm to JS wrapper and insert it into the instance.
CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
- MaybeHandle<Code> code = compiler::CompileWasmToJSWrapper(
- isolate_, maybe_import->js_function, maybe_import->sig,
- maybe_import_index, test_module_->origin,
+ auto kind = compiler::GetWasmImportCallKind(maybe_import->js_function,
+ maybe_import->sig);
+ MaybeHandle<Code> code = compiler::CompileWasmImportCallWrapper(
+ isolate_, kind, maybe_import->sig, maybe_import_index,
+ test_module_->origin,
trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
: kNoTrapHandler);
- auto wasm_to_js_wrapper = native_module_->AddCodeCopy(
- code.ToHandleChecked(), WasmCode::kWasmToJsWrapper, maybe_import_index);
+ auto wasm_to_js_wrapper = native_module_->AddImportWrapper(
+ code.ToHandleChecked(), maybe_import_index);
ImportedFunctionEntry(instance_object_, maybe_import_index)
.set_wasm_to_js(*maybe_import->js_function, wasm_to_js_wrapper);
@@ -123,9 +125,8 @@ Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
// Wrap the code so it can be called as a JS function.
Link();
FunctionSig* sig = test_module_->functions[index].sig;
- MaybeHandle<Code> maybe_ret_code = compiler::CompileJSToWasmWrapper(
- isolate_, native_module_, sig, false,
- trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler);
+ MaybeHandle<Code> maybe_ret_code =
+ compiler::CompileJSToWasmWrapper(isolate_, sig, false);
Handle<Code> ret_code = maybe_ret_code.ToHandleChecked();
Handle<JSFunction> ret = WasmExportedFunction::New(
isolate_, instance_object(), MaybeHandle<String>(),
@@ -363,7 +364,7 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
Code::C_WASM_ENTRY);
code_ = compiler::Pipeline::GenerateCodeForTesting(
&info, isolate, call_descriptor, graph(),
- AssemblerOptions::Default(isolate), nullptr);
+ AssemblerOptions::Default(isolate));
code = code_.ToHandleChecked();
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
@@ -419,18 +420,13 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
ScopedVector<uint8_t> func_wire_bytes(function_->code.length());
memcpy(func_wire_bytes.start(), wire_bytes.start() + function_->code.offset(),
func_wire_bytes.length());
- WireBytesRef func_name_ref =
- module_env.module->LookupFunctionName(wire_bytes, function_->func_index);
- ScopedVector<char> func_name(func_name_ref.length());
- memcpy(func_name.start(), wire_bytes.start() + func_name_ref.offset(),
- func_name_ref.length());
FunctionBody func_body{function_->sig, function_->code.offset(),
func_wire_bytes.start(), func_wire_bytes.end()};
NativeModule* native_module =
builder_->instance_object()->module_object()->native_module();
WasmCompilationUnit unit(isolate()->wasm_engine(), &module_env, native_module,
- func_body, func_name, function_->func_index,
+ func_body, function_->func_index,
isolate()->counters(), tier);
WasmFeatures unused_detected_features;
unit.ExecuteCompilation(&unused_detected_features);
@@ -458,7 +454,7 @@ WasmFunctionCompiler::WasmFunctionCompiler(Zone* zone, FunctionSig* sig,
function_ = builder_->GetFunctionAt(index);
}
-WasmFunctionCompiler::~WasmFunctionCompiler() {}
+WasmFunctionCompiler::~WasmFunctionCompiler() = default;
FunctionSig* WasmRunnerBase::CreateSig(MachineType return_type,
Vector<MachineType> param_types) {
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 899dc06268..aba43f3a08 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -461,10 +461,12 @@ class WasmRunner : public WasmRunnerBase {
wrapper_code, wrapper_.signature());
int32_t result;
{
- trap_handler::ThreadInWasmScope scope;
+ trap_handler::SetThreadInWasm();
result = runner.Call(static_cast<void*>(&p)...,
static_cast<void*>(&return_value));
+
+ trap_handler::ClearThreadInWasm();
}
CHECK_EQ(WASM_WRAPPER_RETURN_VALUE, result);
return WasmRunnerBase::trap_happened
diff --git a/deps/v8/test/common/assembler-tester.h b/deps/v8/test/common/assembler-tester.h
index 0291e48efb..eca34c5521 100644
--- a/deps/v8/test/common/assembler-tester.h
+++ b/deps/v8/test/common/assembler-tester.h
@@ -17,7 +17,8 @@ static inline uint8_t* AllocateAssemblerBuffer(
size_t page_size = v8::internal::AllocatePageSize();
size_t alloc_size = RoundUp(requested, page_size);
void* result = v8::internal::AllocatePages(
- address, alloc_size, page_size, v8::PageAllocator::kReadWriteExecute);
+ GetPlatformPageAllocator(), address, alloc_size, page_size,
+ v8::PageAllocator::kReadWriteExecute);
CHECK(result);
*allocated = alloc_size;
return static_cast<uint8_t*>(result);
@@ -25,18 +26,24 @@ static inline uint8_t* AllocateAssemblerBuffer(
static inline void MakeAssemblerBufferExecutable(uint8_t* buffer,
size_t allocated) {
- bool result = v8::internal::SetPermissions(buffer, allocated,
- v8::PageAllocator::kReadExecute);
- CHECK(result);
-
// Flush the instruction cache as part of making the buffer executable.
+ // Note: we do this before setting permissions to ReadExecute because on
+ // some older Arm64 kernels there is a bug which causes cache flush
+ // instructions to trigger an access error on non-writable memory.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
Assembler::FlushICache(buffer, allocated);
+
+ bool result =
+ v8::internal::SetPermissions(GetPlatformPageAllocator(), buffer,
+ allocated, v8::PageAllocator::kReadExecute);
+ CHECK(result);
}
static inline void MakeAssemblerBufferWritable(uint8_t* buffer,
size_t allocated) {
- bool result = v8::internal::SetPermissions(buffer, allocated,
- v8::PageAllocator::kReadWrite);
+ bool result =
+ v8::internal::SetPermissions(GetPlatformPageAllocator(), buffer,
+ allocated, v8::PageAllocator::kReadWrite);
CHECK(result);
}
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 1015701e3b..f722062662 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -20,6 +20,7 @@
#define IMPORT_SIG_INDEX(v) U32V_1(v)
#define FUNC_INDEX(v) U32V_1(v)
#define TABLE_INDEX(v) U32V_1(v)
+#define EXCEPTION_INDEX(v) U32V_1(v)
#define NO_NAME U32V_1(0)
#define NAME_LENGTH(v) U32V_1(v)
#define ENTRY_COUNT(v) U32V_1(v)
diff --git a/deps/v8/test/debugger/debug/debug-bigint.js b/deps/v8/test/debugger/debug/debug-bigint.js
index 2abdc928d9..0ed09b04e8 100644
--- a/deps/v8/test/debugger/debug/debug-bigint.js
+++ b/deps/v8/test/debugger/debug/debug-bigint.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-bigint
-
Debug = debug.Debug
let exceptionThrown = false;
diff --git a/deps/v8/test/debugger/debug/debug-break-class-fields.js b/deps/v8/test/debugger/debug/debug-break-class-fields.js
new file mode 100644
index 0000000000..b6b9c93235
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-break-class-fields.js
@@ -0,0 +1,139 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-public-fields --harmony-static-fields --allow-natives-syntax
+
+Debug = debug.Debug
+
+Debug.setListener(function() {});
+
+class Y {
+ x = 1;
+ y = 2;
+ z = 3;
+}
+
+var initializer = %GetInitializerFunction(Y);
+var b1, b2, b3;
+
+// class Y {
+// x = [B0]1;
+// y = [B1]2;
+// z = [B2]3;
+// }
+b1 = Debug.setBreakPoint(initializer, 0, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("x = [B0]1;") === 0);
+Debug.clearBreakPoint(b1);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("x = [B0]1;") === -1);
+
+b2 = Debug.setBreakPoint(initializer, 1, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("y = [B0]2;") > 0);
+Debug.clearBreakPoint(b2);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("y = [B0]2;") === -1);
+
+b3 = Debug.setBreakPoint(initializer, 2, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("z = [B0]3") > 0);
+Debug.clearBreakPoint(b3);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("z = [B0]3") === -1);
+
+b1 = Debug.setBreakPoint(initializer, 0, 0);
+b2 = Debug.setBreakPoint(initializer, 1, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("x = [B0]1;") === 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("y = [B1]2;") > 0);
+Debug.clearBreakPoint(b1);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("x = [B0]1;") === -1);
+Debug.clearBreakPoint(b2);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("y = [B1]2;") === -1);
+
+b1 = Debug.setBreakPoint(initializer, 0, 0);
+b3 = Debug.setBreakPoint(initializer, 2, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("x = [B0]1;") === 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("z = [B1]3") > 0);
+Debug.clearBreakPoint(b1);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("x = [B0]1;") === -1);
+Debug.clearBreakPoint(b3);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("z = [B1]3") === -1);
+
+b2 = Debug.setBreakPoint(initializer, 1, 0);
+b3 = Debug.setBreakPoint(initializer, 2, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("y = [B0]2;") > 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("z = [B1]3") > 0);
+Debug.clearBreakPoint(b2);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("y = [B0]2;") === -1);
+Debug.clearBreakPoint(b3);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("z = [B1]3") === -1);
+
+function foo() {}
+var bar = "bar";
+
+class X {
+ [foo()] = 1;
+ [bar] = 2;
+ baz = foo();
+}
+
+// The computed property keys are evaluated during class definition,
+// not as part of the initializer function. As a consequence, they
+// aren't breakable here in the initializer function; instead, they
+// are part of the enclosing function.
+//
+// class X {
+// [foo()] = [B0]1;
+// [bar] = [B1]2;
+// baz = [B2]foo();
+// }
+
+initializer = %GetInitializerFunction(X);
+b1 = Debug.setBreakPoint(initializer, 0, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === 0);
+Debug.clearBreakPoint(b1);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === -1);
+
+b2 = Debug.setBreakPoint(initializer, 1, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") > 0);
+Debug.clearBreakPoint(b2);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") === -1);
+
+b3 = Debug.setBreakPoint(initializer, 2, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B0]foo()") > 0);
+Debug.clearBreakPoint(b3);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B0]foo()") === -1);
+
+b1 = Debug.setBreakPoint(initializer, 0, 0);
+b2 = Debug.setBreakPoint(initializer, 1, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B1]2;") > 0);
+Debug.clearBreakPoint(b1);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === -1);
+Debug.clearBreakPoint(b2);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B1]2;") === -1);
+
+b1 = Debug.setBreakPoint(initializer, 0, 0);
+b3 = Debug.setBreakPoint(initializer, 2, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") > 0);
+Debug.clearBreakPoint(b1);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === -1);
+Debug.clearBreakPoint(b3);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") === -1);
+
+b2 = Debug.setBreakPoint(initializer, 1, 0);
+b3 = Debug.setBreakPoint(initializer, 2, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") > 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") > 0);
+Debug.clearBreakPoint(b2);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") === -1);
+Debug.clearBreakPoint(b3);
+assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") === -1);
+
+function t() {
+ class X {
+ [foo()] = 1;
+ }
+}
+
+b1 = Debug.setBreakPoint(t, 0, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf("[[B0]foo()] = 1;") > 0);
+Debug.clearBreakPoint(b1);
+assertTrue(Debug.showBreakPoints(t).indexOf("[[B0]foo()] = 1;") === -1);
diff --git a/deps/v8/test/debugger/debug/debug-live-edit-recursion.js b/deps/v8/test/debugger/debug/debug-liveedit-recursion.js
index 6328a9b6de..6328a9b6de 100644
--- a/deps/v8/test/debugger/debug/debug-live-edit-recursion.js
+++ b/deps/v8/test/debugger/debug/debug-liveedit-recursion.js
diff --git a/deps/v8/test/debugger/debug/es6/generators-debug-scopes.js b/deps/v8/test/debugger/debug/es6/generators-debug-scopes.js
index a46dc8b22e..14752afc15 100644
--- a/deps/v8/test/debugger/debug/es6/generators-debug-scopes.js
+++ b/deps/v8/test/debugger/debug/es6/generators-debug-scopes.js
@@ -42,8 +42,7 @@ function RunTest(name, formals_and_body, args, handler, continuation) {
run(function () { return fun.apply(null, args) });
run(function () { return gen.apply(null, args).next().value });
- // TODO(wingo): Uncomment after bug 2838 is fixed.
- // Debug.setListener(null);
+ Debug.setListener(null);
}
// Check that two scope are the same.
diff --git a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
index 9a108be4a4..938461690e 100644
--- a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --no-enable-one-shot-optimization
+
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-runtime-check.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-runtime-check.js
index 7450cb2206..7a0f373be7 100644
--- a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-runtime-check.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-runtime-check.js
@@ -50,8 +50,7 @@ success([1], `return_array_use_spread([1])`);
// CallAccessorSetter
var array = [1,2,3];
fail(`array.length = 2`);
-// TODO(7515): this one should be side effect free
-fail(`[1,2,3].length = 2`);
+success(2, `[1,2,3].length = 2`);
// StaDataPropertyInLiteral
function return_literal_with_data_property(a) {
diff --git a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect.js
index fa14b4b862..5504cef16d 100644
--- a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect.js
@@ -9,6 +9,7 @@ let a = 1;
var object = { property : 2,
get getter() { return 3; }
};
+var string0 = new String("string");
var string1 = { toString() { return "x"; } };
var string2 = { toString() { print("x"); return "x"; } };
var array = [4, 5];
@@ -19,6 +20,9 @@ function set_a() { a = 2; }
function get_a() { return a; }
var bound = get_a.bind(0);
+function return_arg0() { return return_arg0.arguments[0]; }
+function return_caller_name() { return return_caller_name.caller.name; }
+
var global_eval = eval;
function listener(event, exec_state, event_data, data) {
@@ -32,6 +36,7 @@ function listener(event, exec_state, event_data, data) {
assertThrows(() => exec_state.frame(0).evaluate(source, true),
EvalError);
}
+
// Simple test.
success(3, "1 + 2");
// Dynamic load.
@@ -62,8 +67,9 @@ function listener(event, exec_state, event_data, data) {
success("set_a", "set_a.name");
success(0, "bound.length");
success("bound get_a", "bound.name");
+ success(1, "return_arg0(1)");
+ success("f", "(function f() { return return_caller_name() })()");
// Non-evaluated call.
- success("abc", "['abc'].join('foo')");
// Constructed literals.
success([1], "[1]");
success({x: 1}, "({x: 1})");
@@ -82,13 +88,25 @@ function listener(event, exec_state, event_data, data) {
fail("try { set_a() } catch (e) {}");
// Test that call to set accessor fails.
fail("array.length = 4");
- fail("'x'.length = 1");
fail("set_a.name = 'set_b'");
fail("set_a.length = 1");
fail("bound.name = 'bound'");
fail("bound.length = 1");
+ fail("set_a.prototype = null");
// Test that call to non-whitelisted get accessor fails.
fail("error.stack");
+ // Call to set accessors with receiver check.
+ success(1, "[].length = 1");
+ success(1, "'x'.length = 1");
+ fail("string0.length = 1");
+ success(1, "(new String('abc')).length = 1");
+ success("g", "(function(){}).name = 'g'");
+ success(1, "(function(){}).length = 1");
+ success("g", "get_a.bind(0).name = 'g'");
+ success(1, "get_a.bind(0).length = 1");
+ success(null, "(function(){}).prototype = null");
+ success(true, "(new Error()).stack.length > 1");
+ success("a", "(new Error()).stack = 'a'");
// Eval is not allowed.
fail("eval('Math.sin(1)')");
fail("eval('exception = 1')");
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index 8500344fb5..e85f1bef03 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -70,9 +70,28 @@
'debug/es8/async-debug-caught-exception-cases2': [SKIP],
'debug/es8/async-debug-caught-exception-cases3': [SKIP],
'debug/es8/async-function-debug-scopes': [SKIP],
+
+ # https://crbug.com/v8/8141
+ 'debug/debug-liveedit-1': [SKIP],
+ 'debug/debug-liveedit-double-call': [SKIP],
+ 'debug/es6/debug-liveedit-new-target-3': [SKIP],
+ 'debug/side-effect/debug-evaluate-no-side-effect-control': [SKIP],
}], # 'gc_stress == True'
##############################################################################
+['gc_fuzzer', {
+ # Slow tests.
+ 'regress/regress-2318': [SKIP],
+}], # 'gc_fuzzer'
+
+##############################################################################
+['predictable == True', {
+ # https://crbug.com/v8/8147
+ 'debug/debug-liveedit-*': [SKIP],
+ 'debug/debug-set-variable-value': [SKIP],
+}], # 'predictable == True'
+
+##############################################################################
['variant == no_wasm_traps', {
'*': [SKIP],
}], # variant == no_wasm_traps
diff --git a/deps/v8/test/debugger/testcfg.py b/deps/v8/test/debugger/testcfg.py
index da923ff63b..61893e9bbd 100644
--- a/deps/v8/test/debugger/testcfg.py
+++ b/deps/v8/test/debugger/testcfg.py
@@ -32,7 +32,7 @@ class TestSuite(testsuite.TestSuite):
return TestCase
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index 0be812c8dd..a7f4ca06ca 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -83,21 +83,6 @@ MachineType RandomType(InputProvider* input) {
return kTypes[input->NextInt8(kNumTypes)];
}
-int num_registers(MachineType type) {
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- switch (type.representation()) {
- case MachineRepresentation::kWord32:
- case MachineRepresentation::kWord64:
- return config->num_allocatable_general_registers();
- case MachineRepresentation::kFloat32:
- return config->num_allocatable_float_registers();
- case MachineRepresentation::kFloat64:
- return config->num_allocatable_double_registers();
- default:
- UNREACHABLE();
- }
-}
-
int index(MachineType type) { return static_cast<int>(type.representation()); }
Node* Constant(RawMachineAssembler& m, MachineType type, int value) {
@@ -264,9 +249,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
std::unique_ptr<wasm::NativeModule> module =
AllocateNativeModule(i_isolate, code->raw_instruction_size());
- byte* code_start = module->AddCodeCopy(code, wasm::WasmCode::kFunction, 0)
- ->instructions()
- .start();
+ byte* code_start = module->AddCodeForTesting(code)->instructions().start();
// Generate wrapper.
int expect = 0;
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index 4a86148ca7..17bc70e91c 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -31,7 +31,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
class WasmExecutionFuzzer {
public:
- virtual ~WasmExecutionFuzzer() {}
+ virtual ~WasmExecutionFuzzer() = default;
int FuzzWasmModule(Vector<const uint8_t> data, bool require_valid = false);
protected:
diff --git a/deps/v8/test/inspector/debugger/break-on-exception-compiler-errors-expected.txt b/deps/v8/test/inspector/debugger/break-on-exception-compiler-errors-expected.txt
index 467da2c2f8..43021bd29c 100644
--- a/deps/v8/test/inspector/debugger/break-on-exception-compiler-errors-expected.txt
+++ b/deps/v8/test/inspector/debugger/break-on-exception-compiler-errors-expected.txt
@@ -4,7 +4,7 @@ Running test: testUnexpectedEndOfInput
Runs '+++'
Runtime.evaluate exceptionDetails:
{
- columnNumber : 2
+ columnNumber : 3
exception : {
className : SyntaxError
description : SyntaxError: Unexpected end of input
@@ -49,7 +49,7 @@ paused on exception:
}
Runtime.evaluate exceptionDetails:
{
- columnNumber : 2
+ columnNumber : 3
exception : {
className : SyntaxError
description : SyntaxError: Unexpected end of input at <anonymous>:1:1
diff --git a/deps/v8/test/inspector/debugger/es6-module-liveedit-expected.txt b/deps/v8/test/inspector/debugger/es6-module-liveedit-expected.txt
new file mode 100644
index 0000000000..8ce34728a4
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/es6-module-liveedit-expected.txt
@@ -0,0 +1,17 @@
+Checks liveedit with ES6 modules.
+console.log message from function before patching:
+{
+ type : string
+ value : module1
+}
+Debugger.setScriptSource result:
+{
+ callFrames : [
+ ]
+ stackChanged : false
+}
+console.log message from function after patching:
+{
+ type : string
+ value : patched module1
+}
diff --git a/deps/v8/test/inspector/debugger/es6-module-liveedit.js b/deps/v8/test/inspector/debugger/es6-module-liveedit.js
new file mode 100644
index 0000000000..397a4303a7
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/es6-module-liveedit.js
@@ -0,0 +1,50 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Checks liveedit with ES6 modules.');
+
+const moduleSource = `
+export function foo() {
+ console.log('module1');
+ return 42;
+}
+foo()`;
+
+const newModuleSource = `
+export function foo() {
+ console.log('patched module1');
+ return 42;
+}
+foo()`;
+
+const callFooSource = `
+import { foo } from 'module';
+foo();`;
+
+(async function test() {
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ contextGroup.addModule(moduleSource, 'module');
+ const [{ params: { scriptId } }, { params: { args }}] = [
+ await Protocol.Debugger.onceScriptParsed(),
+ await Protocol.Runtime.onceConsoleAPICalled()
+ ];
+ InspectorTest.log('console.log message from function before patching:');
+ InspectorTest.logMessage(args[0]);
+
+ const {result} = await Protocol.Debugger.setScriptSource({
+ scriptId,
+ scriptSource: newModuleSource
+ });
+ InspectorTest.log('Debugger.setScriptSource result:');
+ InspectorTest.logMessage(result);
+
+ contextGroup.addModule(callFooSource, 'callFoo');
+ const { params: {args: patchedArgs } } =
+ await Protocol.Runtime.onceConsoleAPICalled();
+ InspectorTest.log('console.log message from function after patching:');
+ InspectorTest.logMessage(patchedArgs[0]);
+ InspectorTest.completeTest();
+})()
diff --git a/deps/v8/test/inspector/debugger/es6-module-set-script-source-expected.txt b/deps/v8/test/inspector/debugger/es6-module-set-script-source-expected.txt
deleted file mode 100644
index cd0ef1fa6e..0000000000
--- a/deps/v8/test/inspector/debugger/es6-module-set-script-source-expected.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Checks that Debugger.setScriptSource doesn't crash with modules
-{
- error : {
- code : -32000
- message : Editing module's script is not supported.
- }
- id : <messageId>
-}
diff --git a/deps/v8/test/inspector/debugger/es6-module-set-script-source.js b/deps/v8/test/inspector/debugger/es6-module-set-script-source.js
deleted file mode 100644
index 81d97b6d78..0000000000
--- a/deps/v8/test/inspector/debugger/es6-module-set-script-source.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-let {session, contextGroup, Protocol} = InspectorTest.start('Checks that Debugger.setScriptSource doesn\'t crash with modules');
-
-var module1 = `
-export function foo() {
- return 42;
-}`;
-
-var editedModule1 = `
-export function foo() {
- return 239;
-}`;
-
-var module2 = `
-import { foo } from 'module1';
-console.log(foo());
-`;
-
-var module1Id;
-Protocol.Debugger.onScriptParsed(message => {
- if (message.params.url === 'module1')
- module1Id = message.params.scriptId;
-});
-Protocol.Debugger.enable()
- .then(() => contextGroup.addModule(module1, 'module1'))
- .then(() => contextGroup.addModule(module2, 'module2'))
- .then(() => InspectorTest.waitForPendingTasks())
- .then(() => Protocol.Debugger.setScriptSource({ scriptId: module1Id, scriptSource: editedModule1 }))
- .then(InspectorTest.logMessage)
- .then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/debugger/eval-scopes-expected.txt b/deps/v8/test/inspector/debugger/eval-scopes-expected.txt
index 71d6618c8e..4c93498c68 100644
--- a/deps/v8/test/inspector/debugger/eval-scopes-expected.txt
+++ b/deps/v8/test/inspector/debugger/eval-scopes-expected.txt
@@ -2,6 +2,12 @@ Tests that variables introduced in eval scopes are accessible
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+ value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
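
The <StableObjectId> placeholder in these expectations is produced by a normalization step added to protocol-test.js later in this diff; the relevant lines are simply:

if (object && object.name === '[[StableObjectId]]')
  object.value = '<StableObjectId>';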
diff --git a/deps/v8/test/inspector/debugger/evaluate-at-first-module-line-expected.txt b/deps/v8/test/inspector/debugger/evaluate-at-first-module-line-expected.txt
new file mode 100644
index 0000000000..fe6e253104
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-at-first-module-line-expected.txt
@@ -0,0 +1,11 @@
+Evaluate at first line of module should not crash
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/evaluate-at-first-module-line.js b/deps/v8/test/inspector/debugger/evaluate-at-first-module-line.js
new file mode 100644
index 0000000000..9f21e9fc39
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-at-first-module-line.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Evaluate at first line of module should not crash');
+
+const utilsModule = `export function identity(value) {
+ return value;
+}`;
+
+const mainModule = `import {identity} from 'utils';
+console.log(identity(0));`;
+
+(async function test() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setBreakpointByUrl({
+ lineNumber: 1,
+ url: 'main'
+ });
+
+ contextGroup.addModule(utilsModule, 'utils');
+ contextGroup.addModule(mainModule, 'main');
+ const { params: { callFrames } } = await Protocol.Debugger.oncePaused();
+ const result = await Protocol.Debugger.evaluateOnCallFrame({
+ callFrameId: callFrames[0].callFrameId,
+ expression: 'identity(0)'
+ });
+ InspectorTest.logMessage(result);
+ InspectorTest.completeTest();
+})()
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-class-fields-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-class-fields-expected.txt
new file mode 100644
index 0000000000..8b6ff0d324
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-class-fields-expected.txt
@@ -0,0 +1,206 @@
+Checks Debugger.getPossibleBreakpoints for class fields
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let x = |R|class {}
+
+|_|x = |R|class {
+ x = |_|1;
+ y = |_|2|R|;
+}
+
+|_|x = |R|class {
+ x = |C|foo();
+ y = |_|2;
+ z = |C|bar()|R|;
+}
+
+|_|x = class {
+ x = |C|foo();
+ y = |_|2;
+ z = |C|bar()|R|;
+ constructor() {
+ this.|_|x;
+ |R|}
+}
+
+|_|x = class {
+ x = |C|foo();
+ y = |_|2;
+ constructor() {
+ this.|_|x;
+ |R|}
+ z = |C|bar()|R|;
+}
+
+|_|x = class {
+ x = |C|foo();
+ y = |_|2;
+ constructor() {
+ this.|_|x;
+ |R|}
+ z = |C|bar()|R|;
+}
+
+|_|x = |R|class {
+ x = |_|1;
+ foo() {|R|}
+ y = |_|2|R|;
+}
+
+|_|x = |R|class {
+ x = (function() {
+ |C|foo();
+ |R|})|C|();
+ y = (() => {
+ |C|bar();
+ |R|})|C|()|R|;
+}
+
+|_|x = |R|class {
+ x = |_|function() {
+ |C|foo();
+ |R|}|R|;
+}
+
+|_|x = |R|class {
+ x = |_|async function() {
+ |_|await |C|foo();
+ |R|}|R|;
+}
+
+|_|x = |R|class {
+ x = |_|() => {
+ |C|foo();
+ |R|};
+ y = |_|() => |C|bar()|R|;
+}
+
+|_|x = |R|class {
+ x = |_|async () => {
+ |_|await |C|foo();
+ |R|};
+ y = |_|async () => |_|await |C|bar()|R|;
+}
+
+|_|x = |R|class {
+ [|_|x] = |_|1;
+ [|C|foo()] = |_|2|R|;
+}
+
+|_|x = |R|class {
+ [|_|x] = |_|[...this]|R|;
+}
+
+|_|x = |R|class {
+ x;
+ [|C|foo()]|R|;
+}
+
+|_|x = |R|class {
+ x = |_|function*|_|() {
+ |_|yield 1;
+ |R|}|R|;
+}
+
+|_|x = |R|class {
+ static x = |_|1;
+ static y = |_|2|R|;
+}
+
+|_|x = |R|class {
+ static x = |C|foo();
+ static y = |_|2;
+ static z = |C|bar()|R|;
+}
+
+|_|x = class {
+ static x = |C|foo();
+ static y = |_|2;
+ static z = |C|bar()|R|;
+ constructor() {
+ this.|_|x;
+ |R|}
+}
+
+|_|x = class {
+ static x = |C|foo();
+ static y = |_|2;
+ constructor() {
+ this.|_|x;
+ |R|}
+ static z = |C|bar()|R|;
+}
+
+|_|x = |R|class {
+ static x = |_|1;
+ static foo() {|R|}
+ bar() {|R|}
+ static y = |_|2|R|;
+}
+
+|_|x = |R|class {
+ static x = (function() {
+ |C|foo();
+ |R|})|C|();
+ static y = (() => {
+ |C|bar();
+ |R|})|C|()|R|;
+}
+
+|_|x = |R|class {
+ static x = |_|function() {
+ |C|foo();
+ |R|}|R|;
+}
+
+|_|x = |R|class {
+ static x = |_|async function() {
+ |_|await |C|foo();
+ |R|}|R|;
+}
+
+|_|x = |R|class {
+ static x = |_|() => {
+ |C|foo();
+ |R|};
+ static y = |_|() => |C|bar()|R|;
+}
+
+|_|x = |R|class {
+ static x = |_|async () => {
+ |_|await |C|foo();
+ |R|};
+ static y = |_|async () => |_|await |C|bar()|R|;
+}
+
+|_|x = |R|class {
+ static [|_|x] = |_|1;
+ static [|C|foo()] = |_|2|R|;
+}
+
+|_|x = |R|class {
+ static [|_|x] = |_|[...this]|R|;
+}
+
+|_|x = |R|class {
+ static x;
+ static [|C|foo()]|R|;
+}
+
+|_|x = |R|class {
+ static x = |_|function*|_|() {
+ |_|yield 1;
+ |R|}|R|;
+}
+
+|_|x = |R|class {
+ static x = |_|1;
+ y = |_|2;
+ static [|_|z] = |_|3;
+ [|_|p] = |_|4;
+ static [|C|foo()] = |_|5|R|;
+ [|C|bar()] = |_|6|R|;
+}
+|R|
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-class-fields.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints-class-fields.js
new file mode 100644
index 0000000000..068fce2557
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-class-fields.js
@@ -0,0 +1,37 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-public-fields --harmony-static-fields
+
+let { session, contextGroup, Protocol } = InspectorTest.start(
+ "Checks Debugger.getPossibleBreakpoints for class fields"
+);
+
+(async function() {
+ session.setupScriptMap();
+ await Protocol.Debugger.enable();
+
+ const source = utils.read(
+ "test/inspector/debugger/resources/break-locations-class-fields.js"
+ );
+
+ contextGroup.addScript(source);
+
+ const {
+ params: { scriptId }
+ } = await Protocol.Debugger.onceScriptParsed();
+
+ const {
+ result: { locations }
+ } = await Protocol.Debugger.getPossibleBreakpoints({
+ start: {
+ lineNumber: 0,
+ columnNumber: 0,
+ scriptId
+ }
+ });
+
+ session.logBreakLocations(locations);
+ InspectorTest.completeTest();
+})();
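
The |_|, |C| and |R| markers in the expected output above are inlined by session.logBreakLocations. A plausible sketch of the mapping, assuming the standard Debugger.getPossibleBreakpoints location types ('call', 'return', or absent for plain statements):

function markerFor(location) {
  // Each break location carries an optional `type`; plain statement
  // locations have none and are rendered as |_|.
  if (location.type === 'call') return '|C|';
  if (location.type === 'return') return '|R|';
  return '|_|';
}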
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
index a8e6bef637..fc7dabac1a 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(luoe): remove flag when it is on by default.
-// Flags: --harmony-bigint
let {session, contextGroup, Protocol} = InspectorTest.start("Check internal properties reported in object preview.");
diff --git a/deps/v8/test/inspector/debugger/pause-on-promise-rejections-expected.txt b/deps/v8/test/inspector/debugger/pause-on-promise-rejections-expected.txt
new file mode 100644
index 0000000000..e046eb3147
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/pause-on-promise-rejections-expected.txt
@@ -0,0 +1,22 @@
+Test Debugger.paused reason for promise rejections
+Check Promise.reject in script:
+promiseRejection
+
+Check Promise.reject in Runtime.evaluate:
+promiseRejection
+
+Check Promise.reject in async function:
+promiseRejection
+
+Check throw in async function:
+promiseRejection
+
+Check reject from constructor:
+promiseRejection
+
+Check reject from thenable job:
+promiseRejection
+
+Check caught exception in async function (should be exception):
+exception
+
diff --git a/deps/v8/test/inspector/debugger/pause-on-promise-rejections.js b/deps/v8/test/inspector/debugger/pause-on-promise-rejections.js
new file mode 100644
index 0000000000..f5342db529
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/pause-on-promise-rejections.js
@@ -0,0 +1,68 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Test Debugger.paused reason for promise rejections');
+
+(async function test() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setPauseOnExceptions({state: 'all'});
+ InspectorTest.log('Check Promise.reject in script:');
+ contextGroup.addScript(`Promise.reject(new Error())`);
+ await logPausedReason();
+ await Protocol.Debugger.resume();
+
+ InspectorTest.log('Check Promise.reject in Runtime.evaluate:');
+ Protocol.Runtime.evaluate({expression: `Promise.reject(new Error())`});
+ await logPausedReason();
+ await Protocol.Debugger.resume();
+
+ InspectorTest.log('Check Promise.reject in async function:');
+ Protocol.Runtime.evaluate(
+ {expression: `(async function() { await Promise.reject(); })()`});
+ await logPausedReason();
+ await Protocol.Debugger.resume();
+
+ InspectorTest.log('Check throw in async function:');
+ Protocol.Runtime.evaluate({
+ expression: `(async function() { await Promise.resolve(); throw 42; })()`
+ });
+ await logPausedReason();
+ await Protocol.Debugger.resume();
+
+ InspectorTest.log('Check reject from constructor:');
+ Protocol.Runtime.evaluate({
+ expression: 'new Promise((_, reject) => reject(new Error())).catch(e => {})'
+ });
+ await logPausedReason();
+ await Protocol.Debugger.resume();
+
+ InspectorTest.log('Check reject from thenable job:');
+ Protocol.Runtime.evaluate({
+ expression:
+ `Promise.resolve().then(() => Promise.reject(new Error())).catch(e => 0)`
+ });
+ await logPausedReason();
+ await Protocol.Debugger.resume();
+
+ InspectorTest.log(
+ 'Check caught exception in async function (should be exception):');
+ Protocol.Runtime.evaluate({
+ expression: `(async function() {
+ await Promise.resolve();
+ try {
+ throw 42;
+ } catch (e) {}
+ })()`
+ });
+ await logPausedReason();
+ await Protocol.Debugger.resume();
+
+ InspectorTest.completeTest();
+})();
+
+async function logPausedReason() {
+ const {params: {reason}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.log(reason + '\n');
+}
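
A client that needs to treat promise rejections differently from ordinary throws can branch on the same reason field that logPausedReason prints; a minimal sketch:

async function handlePause() {
  const {params: {reason}} = await Protocol.Debugger.oncePaused();
  if (reason === 'promiseRejection') {
    // Rejected promise, including `throw` inside an async function.
  } else if (reason === 'exception') {
    // Ordinary exception, e.g. one caught inside an async function.
  }
  await Protocol.Debugger.resume();
}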
diff --git a/deps/v8/test/inspector/debugger/resources/break-locations-class-fields.js b/deps/v8/test/inspector/debugger/resources/break-locations-class-fields.js
new file mode 100644
index 0000000000..c576c0ed6e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/resources/break-locations-class-fields.js
@@ -0,0 +1,204 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let x = class {}
+
+x = class {
+ x = 1;
+ y = 2;
+}
+
+x = class {
+ x = foo();
+ y = 2;
+ z = bar();
+}
+
+x = class {
+ x = foo();
+ y = 2;
+ z = bar();
+ constructor() {
+ this.x;
+ }
+}
+
+x = class {
+ x = foo();
+ y = 2;
+ constructor() {
+ this.x;
+ }
+ z = bar();
+}
+
+x = class {
+ x = foo();
+ y = 2;
+ constructor() {
+ this.x;
+ }
+ z = bar();
+}
+
+x = class {
+ x = 1;
+ foo() {}
+ y = 2;
+}
+
+x = class {
+ x = (function() {
+ foo();
+ })();
+ y = (() => {
+ bar();
+ })();
+}
+
+x = class {
+ x = function() {
+ foo();
+ };
+}
+
+x = class {
+ x = async function() {
+ await foo();
+ };
+}
+
+x = class {
+ x = () => {
+ foo();
+ };
+ y = () => bar();
+}
+
+x = class {
+ x = async () => {
+ await foo();
+ };
+ y = async () => await bar();
+}
+
+x = class {
+ [x] = 1;
+ [foo()] = 2;
+}
+
+x = class {
+ [x] = [...this];
+}
+
+x = class {
+ x;
+ [foo()];
+}
+
+x = class {
+ x = function*() {
+ yield 1;
+ };
+}
+
+x = class {
+ static x = 1;
+ static y = 2;
+}
+
+x = class {
+ static x = foo();
+ static y = 2;
+ static z = bar();
+}
+
+x = class {
+ static x = foo();
+ static y = 2;
+ static z = bar();
+ constructor() {
+ this.x;
+ }
+}
+
+x = class {
+ static x = foo();
+ static y = 2;
+ constructor() {
+ this.x;
+ }
+ static z = bar();
+}
+
+x = class {
+ static x = 1;
+ static foo() {}
+ bar() {}
+ static y = 2;
+}
+
+x = class {
+ static x = (function() {
+ foo();
+ })();
+ static y = (() => {
+ bar();
+ })();
+}
+
+x = class {
+ static x = function() {
+ foo();
+ };
+}
+
+x = class {
+ static x = async function() {
+ await foo();
+ };
+}
+
+x = class {
+ static x = () => {
+ foo();
+ };
+ static y = () => bar();
+}
+
+x = class {
+ static x = async () => {
+ await foo();
+ };
+ static y = async () => await bar();
+}
+
+x = class {
+ static [x] = 1;
+ static [foo()] = 2;
+}
+
+x = class {
+ static [x] = [...this];
+}
+
+x = class {
+ static x;
+ static [foo()];
+}
+
+x = class {
+ static x = function*() {
+ yield 1;
+ };
+}
+
+x = class {
+ static x = 1;
+ y = 2;
+ static [z] = 3;
+ [p] = 4;
+ static [foo()] = 5;
+ [bar()] = 6;
+}
diff --git a/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
index 626f9787c3..6fbe355eff 100644
--- a/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
+++ b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
@@ -2,6 +2,12 @@ Tests that scopes do not report variables with empty names
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+ value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
diff --git a/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt b/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
index 702026b2e0..2079518424 100644
--- a/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
+++ b/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
@@ -658,6 +658,24 @@ scriptParsed
}
scriptFailedToParse
{
+ scriptSource : {a:2:<nl>//# sourceURL=http://a.js
+}
+{
+ endColumn : 25
+ endLine : 1
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : 33c5612558c02e5a3bfa0d098c82865f38f98df2
+ isModule : false
+ length : 31
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : http://a.js
+}
+scriptFailedToParse
+{
scriptSource : }//# sourceURL=failed.js<nl>//# sourceMappingURL=failed-map
}
{
diff --git a/deps/v8/test/inspector/debugger/script-on-after-compile.js b/deps/v8/test/inspector/debugger/script-on-after-compile.js
index 544dbaaae2..b37bfddf44 100644
--- a/deps/v8/test/inspector/debugger/script-on-after-compile.js
+++ b/deps/v8/test/inspector/debugger/script-on-after-compile.js
@@ -33,6 +33,8 @@ function addScripts() {
.then(() => addScript("function foo13(){}"))
// script in eval
.then(() => addScript("function foo15(){}; eval(\"function foo14(){}//# sourceURL=eval.js\")//# sourceURL=eval-wrapper.js"))
+ // "//" inside the sourceURL value
+ .then(() => addScript("{a:2:\n//# sourceURL=http://a.js"))
// sourceURL and sourceMappingURL works even for script with syntax error
.then(() => addScript("}//# sourceURL=failed.js\n//# sourceMappingURL=failed-map"))
// empty lines at end
diff --git a/deps/v8/test/inspector/debugger/this-in-arrow-function-expected.txt b/deps/v8/test/inspector/debugger/this-in-arrow-function-expected.txt
index 3532183b66..69bc9dde41 100644
--- a/deps/v8/test/inspector/debugger/this-in-arrow-function-expected.txt
+++ b/deps/v8/test/inspector/debugger/this-in-arrow-function-expected.txt
@@ -98,10 +98,10 @@ This on callFrame:
}
This in evaluateOnCallFrame:
{
- className : Object
- description : Object
+ className : global
+ description : global
objectId : <objectId>
type : object
}
-Values equal: false
+Values equal: true
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
new file mode 100644
index 0000000000..34f9cf1df9
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
@@ -0,0 +1,308 @@
+Tests stepping through wasm scripts with source maps
+Installing code on a global variable and instantiating.
+Got wasm script: wasm-9b4bf87e
+Script sourceMapURL: abc
+Requesting source for wasm-9b4bf87e...
+Source retrieved without error: true
+Setting breakpoint on offset 54 (on the setlocal before the call), url wasm-9b4bf87e
+{
+ columnNumber : 54
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+Paused at wasm-9b4bf87e:0:54
+at wasm_B (0:54):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":4}
+ stack: {"0":3}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:56
+at wasm_B (0:56):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":3}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:38
+at wasm_A (0:38):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {}
+at wasm_B (0:56):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":3}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOver called
+Paused at wasm-9b4bf87e:0:39
+at wasm_A (0:39):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {}
+at wasm_B (0:56):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":3}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOut called
+Paused at wasm-9b4bf87e:0:58
+at wasm_B (0:58):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":3}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOut called
+Paused at wasm-9b4bf87e:0:54
+at wasm_B (0:54):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":3}
+ stack: {"0":2}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOver called
+Paused at wasm-9b4bf87e:0:56
+at wasm_B (0:56):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":2}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOver called
+Paused at wasm-9b4bf87e:0:58
+at wasm_B (0:58):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":2}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.resume called
+Paused at wasm-9b4bf87e:0:54
+at wasm_B (0:54):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":2}
+ stack: {"0":1}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:56
+at wasm_B (0:56):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":1}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:38
+at wasm_A (0:38):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {}
+at wasm_B (0:56):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":1}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOut called
+Paused at wasm-9b4bf87e:0:58
+at wasm_B (0:58):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":1}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:43
+at wasm_B (0:43):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":1}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:45
+at wasm_B (0:45):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":1}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:47
+at wasm_B (0:47):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":1}
+ stack: {"0":1}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:49
+at wasm_B (0:49):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":1}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:51
+at wasm_B (0:51):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":1}
+ stack: {"0":1}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:53
+at wasm_B (0:53):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":1}
+ stack: {"0":1,"1":1}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:54
+at wasm_B (0:54):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":1}
+ stack: {"0":0}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:56
+at wasm_B (0:56):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":0}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:38
+at wasm_A (0:38):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {}
+at wasm_B (0:56):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":0}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:39
+at wasm_A (0:39):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {}
+at wasm_B (0:56):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":0}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:40
+at wasm_A (0:40):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {}
+at wasm_B (0:56):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":0}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
+Paused at wasm-9b4bf87e:0:58
+at wasm_B (0:58):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ locals: {"arg#0":0}
+ stack: {}
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.resume called
+exports.main returned!
+Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
new file mode 100644
index 0000000000..7732e1396e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
@@ -0,0 +1,123 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests stepping through wasm scripts with source maps');
+
+utils.load('test/mjsunit/wasm/wasm-constants.js');
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+var func_a_idx =
+ builder.addFunction('wasm_A', kSig_v_v).addBody([kExprNop, kExprNop]).index;
+
+// wasm_B calls wasm_A <param0> times.
+builder.addFunction('wasm_B', kSig_v_i)
+ .addBody([
+ // clang-format off
+ kExprLoop, kWasmStmt, // while
+ kExprGetLocal, 0, // -
+ kExprIf, kWasmStmt, // if <param0> != 0
+ kExprGetLocal, 0, // -
+ kExprI32Const, 1, // -
+ kExprI32Sub, // -
+ kExprSetLocal, 0, // decrease <param0>
+ kExprCallFunction, func_a_idx, // -
+ kExprBr, 1, // continue
+ kExprEnd, // -
+ kExprEnd, // break
+ // clang-format on
+ ])
+ .exportAs('main');
+
+builder.addCustomSection('sourceMappingURL', [3, 97, 98, 99]);
+
+var module_bytes = builder.toArray();
+
+function instantiate(bytes) {
+ var buffer = new ArrayBuffer(bytes.length);
+ var view = new Uint8Array(buffer);
+ for (var i = 0; i < bytes.length; ++i) {
+ view[i] = bytes[i] | 0;
+ }
+
+ var module = new WebAssembly.Module(buffer);
+ // Set global variable.
+ instance = new WebAssembly.Instance(module);
+}
+
+(async function test() {
+ for (const action of ['stepInto', 'stepOver', 'stepOut', 'resume'])
+ InspectorTest.logProtocolCommandCalls('Debugger.' + action);
+
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Installing code on a global variable and instantiating.');
+ Protocol.Runtime.evaluate({
+ expression: `var instance;(${instantiate.toString()})(${JSON.stringify(module_bytes)})`});
+ const [, {params: wasmScript}] = await Protocol.Debugger.onceScriptParsed(2);
+
+ InspectorTest.log('Got wasm script: ' + wasmScript.url);
+ InspectorTest.log('Script sourceMapURL: ' + wasmScript.sourceMapURL);
+ InspectorTest.log('Requesting source for ' + wasmScript.url + '...');
+ const msg =
+ await Protocol.Debugger.getScriptSource({scriptId: wasmScript.scriptId});
+ InspectorTest.log(`Source retrieved without error: ${!msg.error}`);
+ InspectorTest.log(
+ `Setting breakpoint on offset 54 (on the setlocal before the call), url ${wasmScript.url}`);
+ const {result: {actualLocation}} = await Protocol.Debugger.setBreakpoint({
+ location:{scriptId: wasmScript.scriptId, lineNumber: 0, columnNumber: 54}});
+ InspectorTest.logMessage(actualLocation);
+ Protocol.Runtime.evaluate({expression: 'instance.exports.main(4)'});
+ await waitForPauseAndStep('stepInto'); // == stepOver, to call instruction
+ await waitForPauseAndStep('stepInto'); // into call to wasm_A
+ await waitForPauseAndStep('stepOver'); // over first nop
+ await waitForPauseAndStep('stepOut'); // out of wasm_A
+ await waitForPauseAndStep('stepOut'); // out of wasm_B, stop on breakpoint again
+ await waitForPauseAndStep('stepOver'); // to call
+ await waitForPauseAndStep('stepOver'); // over call
+ await waitForPauseAndStep('resume'); // to next breakpoint (third iteration)
+ await waitForPauseAndStep('stepInto'); // to call
+ await waitForPauseAndStep('stepInto'); // into wasm_A
+ await waitForPauseAndStep('stepOut'); // out to wasm_B
+ // now step 9 times, until we are in wasm_A again.
+ for (let i = 0; i < 9; ++i) await waitForPauseAndStep('stepInto');
+ // 3 more times, back to wasm_B.
+ for (let i = 0; i < 3; ++i) await waitForPauseAndStep('stepInto');
+ // then just resume.
+ await waitForPauseAndStep('resume');
+ InspectorTest.log('exports.main returned!');
+ InspectorTest.log('Finished!');
+ InspectorTest.completeTest();
+})();
+
+async function waitForPauseAndStep(stepAction) {
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ const topFrame = callFrames[0];
+ InspectorTest.log(
+ `Paused at ${topFrame.url}:${topFrame.location.lineNumber}:${topFrame.location.columnNumber}`);
+ for (var frame of callFrames) {
+ const functionName = frame.functionName || '(anonymous)';
+ const lineNumber = frame.location.lineNumber;
+ const columnNumber = frame.location.columnNumber;
+ InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
+ for (var scope of frame.scopeChain) {
+ InspectorTest.logObject(' - scope (' + scope.type + '):');
+ if (scope.type === 'global') {
+ InspectorTest.logObject(' -- skipped');
+ } else {
+ const {result: {result: {value}}} =
+ await Protocol.Runtime.callFunctionOn({
+ objectId: scope.object.objectId,
+ functionDeclaration: 'function() { return this; }',
+ returnByValue: true
+ });
+ if (value.locals)
+ InspectorTest.log(` locals: ${JSON.stringify(value.locals)}`);
+ InspectorTest.log(` stack: ${JSON.stringify(value.stack)}`);
+ }
+ }
+ }
+ Protocol.Debugger[stepAction]();
+}
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index dbb4493e66..d69ee6f4e2 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -86,7 +86,7 @@ class FrontendChannelImpl : public v8_inspector::V8Inspector::Channel {
: task_runner_(task_runner),
context_group_id_(context_group_id),
function_(isolate, function) {}
- virtual ~FrontendChannelImpl() = default;
+ ~FrontendChannelImpl() override = default;
void set_session_id(int session_id) { session_id_ = session_id; }
@@ -109,7 +109,7 @@ class FrontendChannelImpl : public v8_inspector::V8Inspector::Channel {
SendMessageTask(FrontendChannelImpl* channel,
const std::vector<uint16_t>& message)
: channel_(channel), message_(message) {}
- virtual ~SendMessageTask() {}
+ ~SendMessageTask() override = default;
bool is_priority_task() final { return false; }
private:
@@ -142,7 +142,7 @@ void RunSyncTask(TaskRunner* task_runner, T callback) {
public:
SyncTask(v8::base::Semaphore* ready_semaphore, T callback)
: ready_semaphore_(ready_semaphore), callback_(callback) {}
- virtual ~SyncTask() = default;
+ ~SyncTask() override = default;
bool is_priority_task() final { return true; }
private:
@@ -182,7 +182,7 @@ void RunAsyncTask(TaskRunner* task_runner,
class AsyncTask : public TaskRunner::Task {
public:
explicit AsyncTask(TaskRunner::Task* inner) : inner_(inner) {}
- virtual ~AsyncTask() = default;
+ ~AsyncTask() override = default;
bool is_priority_task() override { return inner_->is_priority_task(); }
void Run(IsolateData* data) override {
data->AsyncTaskStarted(inner_.get());
@@ -216,8 +216,7 @@ class ExecuteStringTask : public TaskRunner::Task {
ExecuteStringTask(const std::string& expression, int context_group_id)
: expression_utf8_(expression), context_group_id_(context_group_id) {}
- virtual ~ExecuteStringTask() {
- }
+ ~ExecuteStringTask() override = default;
bool is_priority_task() override { return false; }
void Run(IsolateData* data) override {
v8::MicrotasksScope microtasks_scope(data->isolate(),
@@ -595,7 +594,7 @@ class SetTimeoutTask : public TaskRunner::Task {
SetTimeoutTask(int context_group_id, v8::Isolate* isolate,
v8::Local<v8::Function> function)
: function_(isolate, function), context_group_id_(context_group_id) {}
- virtual ~SetTimeoutTask() {}
+ ~SetTimeoutTask() override = default;
bool is_priority_task() final { return false; }
private:
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index 25ffec6180..56a92c0dfc 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -3,6 +3,21 @@
# found in the LICENSE file.
[
+##############################################################################
+[ALWAYS, {
+ # https://crbug.com/v8/7767
+ 'debugger/wasm-imports': [SKIP],
+ # https://crbug.com/v8/7932
+ 'runtime/command-line-api-without-side-effects': [SKIP],
+}], # ALWAYS
+
+##############################################################################
+['system == android', {
+ # https://crbug.com/v8/8160
+ 'debugger/stepping-with-exposed-injected-script': [FAIL],
+ # https://crbug.com/v8/8197
+ 'debugger/get-possible-breakpoints-class-fields': [SKIP],
+}], # 'system == android'
##############################################################################
['variant != default', {
@@ -24,10 +39,4 @@
# Stack manipulations in LiveEdit is not implemented for this arch.
'debugger/set-script-source-stack-padding': [SKIP],
}], # 'arch == s390 or arch == s390x'
-
-##############################################################################
-[ALWAYS, {
- # https://crbug.com/v8/7767
- 'debugger/wasm-imports': [SKIP],
-}], # ALWAYS
]
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 57b9af57c2..15690370cc 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -44,7 +44,7 @@ class Inspectable : public v8_inspector::V8InspectorSession::Inspectable {
public:
Inspectable(v8::Isolate* isolate, v8::Local<v8::Value> object)
: object_(isolate, object) {}
- ~Inspectable() override {}
+ ~Inspectable() override = default;
v8::Local<v8::Value> get(v8::Local<v8::Context> context) override {
return object_.Get(context->GetIsolate());
}
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index a941280672..d395808b91 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -45,6 +45,8 @@ InspectorTest.logMessage = function(originalMessage) {
var objects = [ message ];
while (objects.length) {
var object = objects.shift();
+ if (object && object.name === '[[StableObjectId]]')
+ object.value = '<StableObjectId>';
for (var key in object) {
if (nonStableFields.has(key))
object[key] = `<${key}>`;
@@ -335,7 +337,8 @@ InspectorTest.Session = class {
var eventName = match[2];
eventName = eventName.charAt(0).toLowerCase() + eventName.slice(1);
if (match[1])
- return () => this._waitForEventPromise(`${agentName}.${eventName}`);
+ return numOfEvents => this._waitForEventPromise(
+ `${agentName}.${eventName}`, numOfEvents || 1);
return listener => this._eventHandlers.set(`${agentName}.${eventName}`, listener);
}
})});
@@ -369,11 +372,16 @@ InspectorTest.Session = class {
}
};
- _waitForEventPromise(eventName) {
+ _waitForEventPromise(eventName, numOfEvents) {
+ let events = [];
return new Promise(fulfill => {
this._eventHandlers.set(eventName, result => {
- delete this._eventHandlers.delete(eventName);
- fulfill(result);
+ --numOfEvents;
+ events.push(result);
+ if (numOfEvents === 0) {
+        this._eventHandlers.delete(eventName);
+ fulfill(events.length > 1 ? events : events[0]);
+ }
});
});
}
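
With this change, the generated once-event helpers take an optional event count: called with no argument they resolve with the single next event, and called with a count n they resolve with an array of n events. A minimal sketch of the intended use (method names mirror the tests below):

  // resolves with the next console API event
  const single = await Protocol.Runtime.onceConsoleAPICalled();
  // resolves with an array of the next three events
  const batch = await Protocol.Runtime.onceConsoleAPICalled(3);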
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async.js b/deps/v8/test/inspector/runtime/call-function-on-async.js
index c5f1152979..ab146e1c4d 100644
--- a/deps/v8/test/inspector/runtime/call-function-on-async.js
+++ b/deps/v8/test/inspector/runtime/call-function-on-async.js
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(luoe): remove flag when it is on by default.
-// Flags: --harmony-bigint
let {session, contextGroup, Protocol} = InspectorTest.start('Tests that Runtime.callFunctionOn works with awaitPromise flag.');
let callFunctionOn = Protocol.Runtime.callFunctionOn.bind(Protocol.Runtime);
diff --git a/deps/v8/test/inspector/runtime/compile-script-expected.txt b/deps/v8/test/inspector/runtime/compile-script-expected.txt
index 23e6a64dc5..7058da9c75 100644
--- a/deps/v8/test/inspector/runtime/compile-script-expected.txt
+++ b/deps/v8/test/inspector/runtime/compile-script-expected.txt
@@ -6,7 +6,7 @@ compilation result:
id : <messageId>
result : {
exceptionDetails : {
- columnNumber : 2
+ columnNumber : 3
exception : {
className : SyntaxError
description : SyntaxError: Unexpected end of input
diff --git a/deps/v8/test/inspector/runtime/console-context-expected.txt b/deps/v8/test/inspector/runtime/console-context-expected.txt
index f2d414bb49..658238aaa2 100644
--- a/deps/v8/test/inspector/runtime/console-context-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-context-expected.txt
@@ -28,9 +28,10 @@ console.context() methods:
[15] : assert
[16] : profile
[17] : profileEnd
- [18] : time
- [19] : timeEnd
- [20] : timeStamp
+ [18] : timeLog
+ [19] : time
+ [20] : timeEnd
+ [21] : timeStamp
]
Running test: testDefaultConsoleContext
diff --git a/deps/v8/test/inspector/runtime/console-time-log-expected.txt b/deps/v8/test/inspector/runtime/console-time-log-expected.txt
new file mode 100644
index 0000000000..4cf9e408a3
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-time-log-expected.txt
@@ -0,0 +1,37 @@
+Test for console.timeLog
+[
+ [0] : {
+ type : string
+ value : 42: 1ms
+ }
+ [1] : {
+ type : string
+ value : a
+ }
+]
+[
+ [0] : {
+ type : string
+ value : 42: 2ms
+ }
+ [1] : {
+ type : string
+ value : a
+ }
+ [2] : {
+ type : string
+ value : b
+ }
+]
+[
+ [0] : {
+ type : string
+ value : 42: 3ms
+ }
+]
+[
+ [0] : {
+ type : string
+ value : Timer '42' does not exist
+ }
+]
diff --git a/deps/v8/test/inspector/runtime/console-time-log.js b/deps/v8/test/inspector/runtime/console-time-log.js
new file mode 100644
index 0000000000..83683ec26a
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-time-log.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Test for console.timeLog');
+
+(async function test() {
+ Protocol.Runtime.enable();
+ utils.setCurrentTimeMSForTest(0.0);
+ await Protocol.Runtime.evaluate({expression: `console.time('42')`});
+ utils.setCurrentTimeMSForTest(1.0);
+ Protocol.Runtime.evaluate({expression: `console.timeLog('42', 'a')`});
+ logArgs(await Protocol.Runtime.onceConsoleAPICalled());
+ utils.setCurrentTimeMSForTest(2.0);
+ Protocol.Runtime.evaluate({expression: `console.timeLog('42', 'a', 'b')`});
+ logArgs(await Protocol.Runtime.onceConsoleAPICalled());
+ utils.setCurrentTimeMSForTest(3.0);
+ Protocol.Runtime.evaluate({expression: `console.timeEnd('42')`});
+ logArgs(await Protocol.Runtime.onceConsoleAPICalled());
+ utils.setCurrentTimeMSForTest(4.0);
+ Protocol.Runtime.evaluate({expression: `console.timeLog('42', 'text')`});
+ logArgs(await Protocol.Runtime.onceConsoleAPICalled());
+ InspectorTest.completeTest();
+})()
+
+function logArgs(message) {
+ InspectorTest.logMessage(message.params.args);
+}
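
The expected output above pins down the console.timeLog contract this test exercises; roughly, in plain code (elapsed times are illustrative):

  console.time('42');          // start timer '42'
  console.timeLog('42', 'a');  // logs '42: 1ms' plus the extra argument 'a'
  console.timeEnd('42');       // logs the final time and removes the timer
  console.timeLog('42');       // warns: Timer '42' does not exist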
diff --git a/deps/v8/test/inspector/runtime/error-preview-expected.txt b/deps/v8/test/inspector/runtime/error-preview-expected.txt
new file mode 100644
index 0000000000..a3ebab2ff4
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/error-preview-expected.txt
@@ -0,0 +1,8 @@
+Checks preview for Error object
+{
+ className : TypeError
+ description : TypeError: []. at is not a function at <anonymous>:1:15
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
diff --git a/deps/v8/test/inspector/runtime/error-preview.js b/deps/v8/test/inspector/runtime/error-preview.js
new file mode 100644
index 0000000000..bb08edb687
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/error-preview.js
@@ -0,0 +1,15 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks preview for Error object');
+
+(async function test() {
+ const { result: { result } } = await Protocol.Runtime.evaluate({
+ expression: `[]['\\n at']()`,
+ generatePreview: true
+ });
+ InspectorTest.logMessage(result);
+ InspectorTest.completeTest();
+})()
diff --git a/deps/v8/test/inspector/runtime/es6-module-expected.txt b/deps/v8/test/inspector/runtime/es6-module-expected.txt
index 25ba52e034..051ef6ceae 100644
--- a/deps/v8/test/inspector/runtime/es6-module-expected.txt
+++ b/deps/v8/test/inspector/runtime/es6-module-expected.txt
@@ -128,6 +128,12 @@ console.log(239)
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
diff --git a/deps/v8/test/inspector/runtime/evaluate-unserializable.js b/deps/v8/test/inspector/runtime/evaluate-unserializable.js
index 462af53f09..8876db5add 100644
--- a/deps/v8/test/inspector/runtime/evaluate-unserializable.js
+++ b/deps/v8/test/inspector/runtime/evaluate-unserializable.js
@@ -1,8 +1,6 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(luoe): remove flag when it is on by default.
-// Flags: --harmony-bigint
let {session, contextGroup, Protocol} =
InspectorTest.start("Tests Runtime.evaluate with unserializable results.");
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js b/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
index 081a93c64a..6d6787ab56 100644
--- a/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
+++ b/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(luoe): remove flag when it is on by default.
-// Flags: --harmony-bigint
let {session, contextGroup, Protocol} = InspectorTest.start("Tests that Runtime.evaluate will generate correct previews.");
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
index 8b48e65c3b..5707ffc5af 100644
--- a/deps/v8/test/inspector/runtime/get-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -5,6 +5,7 @@ Running test: testObject5
foo own string cat
Internal properties
[[PrimitiveValue]] number 5
+ [[StableObjectId]]: <stableObjectId>
Running test: testNotOwn
__defineGetter__ inherited function undefined
@@ -23,6 +24,8 @@ Running test: testNotOwn
toLocaleString inherited function undefined
toString inherited function undefined
valueOf inherited function undefined
+Internal properties
+ [[StableObjectId]]: <stableObjectId>
Running test: testAccessorsOnly
b own no value, getter, setter
@@ -34,6 +37,8 @@ Running test: testArray
2 own string blue
__proto__ own object undefined
length own number 3
+Internal properties
+ [[StableObjectId]]: <stableObjectId>
Running test: testBound
__proto__ own function undefined
@@ -42,14 +47,19 @@ Running test: testBound
Internal properties
[[BoundArgs]] object undefined
[[BoundThis]] object undefined
+ [[StableObjectId]]: <stableObjectId>
[[TargetFunction]] function undefined
Running test: testObjectThrowsLength
__proto__ own object undefined
length own no value, getter
+Internal properties
+ [[StableObjectId]]: <stableObjectId>
Running test: testTypedArrayWithoutLength
__proto__ own object undefined
+Internal properties
+ [[StableObjectId]]: <stableObjectId>
Running test: testArrayBuffer
[[Int8Array]]
@@ -62,6 +72,8 @@ Running test: testArrayBuffer
6 own number 1
7 own number 1
__proto__ own object undefined
+Internal properties
+ [[StableObjectId]]: <stableObjectId>
[[Uint8Array]]
0 own number 1
1 own number 1
@@ -72,18 +84,26 @@ Running test: testArrayBuffer
6 own number 1
7 own number 1
__proto__ own object undefined
+Internal properties
+ [[StableObjectId]]: <stableObjectId>
[[Int16Array]]
0 own number 257
1 own number 257
2 own number 257
3 own number 257
__proto__ own object undefined
+Internal properties
+ [[StableObjectId]]: <stableObjectId>
[[Int32Array]]
0 own number 16843009
1 own number 16843009
__proto__ own object undefined
+Internal properties
+ [[StableObjectId]]: <stableObjectId>
Running test: testArrayBufferWithBrokenUintCtor
[[Int8Array]] own object undefined
[[Uint8Array]] own object undefined
__proto__ own object undefined
+Internal properties
+ [[StableObjectId]]: <stableObjectId>
diff --git a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
index a0437f4af6..efde782ae3 100644
--- a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
@@ -54,6 +54,10 @@ Testing regular Proxy
value : false
}
}
+ [3] : {
+ name : [[StableObjectId]]
+        value : <StableObjectId>
+ }
]
result : [
]
@@ -114,6 +118,10 @@ Testing revocable Proxy
value : false
}
}
+ [3] : {
+ name : [[StableObjectId]]
+        value : <StableObjectId>
+ }
]
result : [
]
@@ -166,6 +174,10 @@ Testing revocable Proxy
value : true
}
}
+ [3] : {
+ name : [[StableObjectId]]
+        value : <StableObjectId>
+ }
]
result : [
]
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
index d2b2c754a3..0386fdea6d 100644
--- a/deps/v8/test/inspector/runtime/get-properties.js
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -94,7 +94,10 @@ async function logGetPropertiesResult(objectId, flags = { ownProperties: true })
for (var i = 0; i < internalPropertyArray.length; i++) {
var p = internalPropertyArray[i];
var v = p.value;
- InspectorTest.log(" " + p.name + " " + v.type + " " + v.value);
+ if (p.name !== '[[StableObjectId]]')
+ InspectorTest.log(" " + p.name + " " + v.type + " " + v.value);
+ else
+ InspectorTest.log(" [[StableObjectId]]: <stableObjectId>");
}
}
diff --git a/deps/v8/test/inspector/runtime/internal-properties-entries-expected.txt b/deps/v8/test/inspector/runtime/internal-properties-entries-expected.txt
index d395067efe..1d09e8dc1e 100644
--- a/deps/v8/test/inspector/runtime/internal-properties-entries-expected.txt
+++ b/deps/v8/test/inspector/runtime/internal-properties-entries-expected.txt
@@ -15,6 +15,12 @@ expression: new Map([[1,2],[3,4]])
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -65,6 +71,12 @@ expression: new Map()
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : false
@@ -97,6 +109,12 @@ expression: new Map([[1,2],[3,4]]).entries()
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -151,6 +169,12 @@ expression: it = new Map([[1,2],[3,4]]).entries(); it.next(); it
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -190,6 +214,12 @@ expression: it = new Map([[1,2],[3,4]]).keys(); it.next(); it
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -229,6 +259,12 @@ expression: it = new Map([[1,2],[3,4]]).values(); it.next(); it
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -265,6 +301,12 @@ expression: it = new Map([[1,2],[3,4]]).entries(); it.next(); it.next(); it
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : false
@@ -295,6 +337,12 @@ expression: new Set([1,2])
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -345,6 +393,12 @@ expression: new Set()
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : false
@@ -375,6 +429,12 @@ expression: new Set([1,2]).values()
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -428,6 +488,12 @@ expression: it = new Set([1,2]).values(); it.next(); it
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -467,6 +533,12 @@ expression: it = new Set([1,2]).keys(); it.next(); it
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -506,6 +578,12 @@ expression: it = new Set([1,2]).entries(); it.next(); it
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -542,6 +620,12 @@ expression: it = new Set([1,2]).values(); it.next(); it.next(); it
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : false
@@ -566,6 +650,12 @@ expression: new WeakMap()
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : false
@@ -594,6 +684,12 @@ expression: new WeakMap([[{ a: 2 }, 42]])
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -632,6 +728,12 @@ expression: new WeakSet()
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : false
@@ -659,6 +761,12 @@ expression: new WeakSet([{a:2}])
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
diff --git a/deps/v8/test/inspector/runtime/internal-properties-expected.txt b/deps/v8/test/inspector/runtime/internal-properties-expected.txt
index a2e38ab013..c114696eb8 100644
--- a/deps/v8/test/inspector/runtime/internal-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/internal-properties-expected.txt
@@ -7,6 +7,10 @@ expression: (function* foo() { yield 1 })
result : {
internalProperties : [
[0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ [1] : {
name : [[FunctionLocation]]
value : {
description : Object
@@ -19,14 +23,14 @@ expression: (function* foo() { yield 1 })
}
}
}
- [1] : {
+ [2] : {
name : [[IsGenerator]]
value : {
type : boolean
value : true
}
}
- [2] : {
+ [3] : {
name : [[Scopes]]
value : {
className : Array
@@ -47,6 +51,10 @@ expression: (function foo() {})
result : {
internalProperties : [
[0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ [1] : {
name : [[FunctionLocation]]
value : {
description : Object
@@ -59,7 +67,7 @@ expression: (function foo() {})
}
}
}
- [1] : {
+ [2] : {
name : [[Scopes]]
value : {
className : Array
@@ -87,6 +95,10 @@ expression: new Number(239)
value : 239
}
}
+ [1] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
]
}
}
@@ -102,6 +114,10 @@ expression: new Boolean(false)
value : false
}
}
+ [1] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
]
}
}
@@ -117,6 +133,10 @@ expression: new String('abc')
value : abc
}
}
+ [1] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
]
}
}
@@ -133,6 +153,10 @@ expression: Object(Symbol(42))
type : symbol
}
}
+ [1] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
]
}
}
@@ -149,6 +173,10 @@ expression: Object(BigInt(2))
unserializableValue : 2n
}
}
+ [1] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
]
}
}
@@ -174,6 +202,10 @@ expression: Promise.resolve(42)
value : 42
}
}
+ [2] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
]
}
}
@@ -195,6 +227,10 @@ expression: new Promise(() => undefined)
type : undefined
}
}
+ [2] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
]
}
}
@@ -231,6 +267,10 @@ expression: gen1
}
}
[3] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ [4] : {
name : [[GeneratorLocation]]
value : {
description : Object
@@ -243,7 +283,7 @@ expression: gen1
}
}
}
- [4] : {
+ [5] : {
name : [[Scopes]]
value : {
className : Array
@@ -287,6 +327,10 @@ expression: gen1.next();gen1
}
}
[3] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ [4] : {
name : [[GeneratorLocation]]
value : {
description : Object
@@ -299,7 +343,7 @@ expression: gen1.next();gen1
}
}
}
- [4] : {
+ [5] : {
name : [[Scopes]]
value : {
className : Array
@@ -343,6 +387,10 @@ expression: gen1.next();gen1
}
}
[3] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ [4] : {
name : [[GeneratorLocation]]
value : {
description : Object
@@ -391,6 +439,10 @@ expression: gen2
}
}
[3] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ [4] : {
name : [[GeneratorLocation]]
value : {
description : Object
@@ -403,7 +455,7 @@ expression: gen2
}
}
}
- [4] : {
+ [5] : {
name : [[Scopes]]
value : {
className : Array
@@ -447,6 +499,10 @@ expression: gen2.next();gen2
}
}
[3] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ [4] : {
name : [[GeneratorLocation]]
value : {
description : Object
@@ -459,7 +515,7 @@ expression: gen2.next();gen2
}
}
}
- [4] : {
+ [5] : {
name : [[Scopes]]
value : {
className : Array
@@ -503,6 +559,10 @@ expression: gen2.next();gen2
}
}
[3] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ [4] : {
name : [[GeneratorLocation]]
value : {
description : Object
@@ -548,6 +608,10 @@ expression: (new Map([[1,2]])).entries()
}
}
[3] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ [4] : {
name : [[Entries]]
value : {
className : Array
@@ -588,6 +652,10 @@ expression: (new Set([[1,2]])).entries()
}
}
[3] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ [4] : {
name : [[Entries]]
value : {
className : Array
diff --git a/deps/v8/test/inspector/runtime/internal-properties.js b/deps/v8/test/inspector/runtime/internal-properties.js
index 470dfb5049..b4b0bc47fb 100644
--- a/deps/v8/test/inspector/runtime/internal-properties.js
+++ b/deps/v8/test/inspector/runtime/internal-properties.js
@@ -1,8 +1,6 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(luoe): remove flag when it is on by default.
-// Flags: --harmony-bigint
let {session, contextGroup, Protocol} = InspectorTest.start('Checks internal properties in Runtime.getProperties output');
diff --git a/deps/v8/test/inspector/runtime/stable-object-id-expected.txt b/deps/v8/test/inspector/runtime/stable-object-id-expected.txt
new file mode 100644
index 0000000000..d4e3fab7ee
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/stable-object-id-expected.txt
@@ -0,0 +1,15 @@
+Checks that protocol returns the same RemoteObjectId for the same object
+
+Running test: testGlobal
+Compare global evaluated twice: true
+
+Running test: testObject
+Compare object evaluated twice: true
+
+Running test: testObjectInArray
+Compare first and second element: true
+
+Running test: testObjectOnPause
+Compare global and this: true
+Compare global and global on pause: true
+Compare a and a on pause: true
diff --git a/deps/v8/test/inspector/runtime/stable-object-id.js b/deps/v8/test/inspector/runtime/stable-object-id.js
new file mode 100644
index 0000000000..944bae0d3a
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/stable-object-id.js
@@ -0,0 +1,87 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks that protocol returns the same RemoteObjectId for the same object');
+
+InspectorTest.runAsyncTestSuite([
+ async function testGlobal() {
+ const {result:{result:{objectId:firstId}}} =
+ await Protocol.Runtime.evaluate({expression: 'this'});
+ const firstStableId = await stableObjectId(firstId);
+ const {result:{result:{objectId:secondId}}} =
+ await Protocol.Runtime.evaluate({expression: 'this'});
+ const secondStableId = await stableObjectId(secondId);
+ InspectorTest.log(
+ `Compare global evaluated twice: ${firstStableId === secondStableId}`);
+ },
+
+ async function testObject() {
+ const {result:{result:{objectId:firstId}}} =
+ await Protocol.Runtime.evaluate({expression: 'this.a = {}, this.a'});
+ const firstStableId = await stableObjectId(firstId);
+ const {result:{result:{objectId:secondId}}} =
+ await Protocol.Runtime.evaluate({expression: 'this.a'});
+ const secondStableId = await stableObjectId(secondId);
+ InspectorTest.log(
+ `Compare object evaluated twice: ${firstStableId === secondStableId}`);
+ },
+
+ async function testObjectInArray() {
+ await Protocol.Runtime.evaluate({expression: 'this.b = [this.a, this.a]'});
+ const {result:{result:{objectId:firstId}}} =
+ await Protocol.Runtime.evaluate({expression: 'this.b[0]'});
+ const firstStableId = await stableObjectId(firstId);
+ const {result:{result:{objectId:secondId}}} =
+ await Protocol.Runtime.evaluate({expression: 'this.b[1]'});
+ const secondStableId = await stableObjectId(secondId);
+ InspectorTest.log(
+ `Compare first and second element: ${firstStableId === secondStableId}`);
+ },
+
+ async function testObjectOnPause() {
+ const {result:{result:{objectId:globalId}}} =
+ await Protocol.Runtime.evaluate({expression: 'this'});
+ const globalStableId = await stableObjectId(globalId);
+ const {result:{result:{objectId:aId}}} =
+ await Protocol.Runtime.evaluate({expression: 'this.a'});
+ const aStableId = await stableObjectId(aId);
+ await Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({expression: 'debugger'});
+ const {params:{callFrames:[topFrame]}} =
+ await Protocol.Debugger.oncePaused();
+ const topFrameThisStableId = await stableObjectId(topFrame.this.objectId);
+ InspectorTest.log(
+ `Compare global and this: ${globalStableId === topFrameThisStableId}`);
+
+ const {result:{result:{objectId:globalIdOnPause}}} =
+ await Protocol.Debugger.evaluateOnCallFrame({
+ callFrameId: topFrame.callFrameId,
+ expression: 'this'
+ });
+ const globalStableIdOnPause = await stableObjectId(globalIdOnPause);
+ InspectorTest.log(
+ `Compare global and global on pause: ${
+ globalStableId === globalStableIdOnPause}`);
+
+ const {result:{result: props}} = await Protocol.Runtime.getProperties({
+ objectId: topFrame.scopeChain[0].object.objectId
+ });
+ const {value:{objectId: aIdOnPause}} = props.find(prop => prop.name === 'a');
+ const aStableIdOnPause = await stableObjectId(aIdOnPause);
+ InspectorTest.log(`Compare a and a on pause: ${
+ aStableId === aStableIdOnPause}`);
+ }
+]);
+
+async function stableObjectId(objectId) {
+ const {result:{
+ internalProperties: props
+ }} = await Protocol.Runtime.getProperties({
+ objectId,
+ ownProperties: true,
+ generatePreview: false
+ });
+ return props.find(prop => prop.name === '[[StableObjectId]]').value.value;
+}
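
In short, the suite verifies that [[StableObjectId]] gives tests an object identity that survives re-evaluation, pauses, and scope lookups. A condensed sketch using the stableObjectId helper defined above:

  const {result: {result: {objectId: first}}} =
      await Protocol.Runtime.evaluate({expression: 'this'});
  const {result: {result: {objectId: second}}} =
      await Protocol.Runtime.evaluate({expression: 'this'});
  // the RemoteObjectIds may differ, but the stable ids must not
  const a = await stableObjectId(first);
  const b = await stableObjectId(second);
  a === b;  // true - both refer to the same underlying object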
diff --git a/deps/v8/test/inspector/sessions/runtime-remote-object-expected.txt b/deps/v8/test/inspector/sessions/runtime-remote-object-expected.txt
index a8d0ec0c20..7c6e69e05d 100644
--- a/deps/v8/test/inspector/sessions/runtime-remote-object-expected.txt
+++ b/deps/v8/test/inspector/sessions/runtime-remote-object-expected.txt
@@ -5,6 +5,12 @@ Retrieving properties in 2
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -38,6 +44,12 @@ Retrieving properties in 1
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
@@ -72,6 +84,12 @@ Retrieving properties in 1
{
id : <messageId>
result : {
+ internalProperties : [
+ [0] : {
+ name : [[StableObjectId]]
+          value : <StableObjectId>
+ }
+ ]
result : [
[0] : {
configurable : true
diff --git a/deps/v8/test/inspector/task-runner.h b/deps/v8/test/inspector/task-runner.h
index eea9f19e7c..8df1f394a5 100644
--- a/deps/v8/test/inspector/task-runner.h
+++ b/deps/v8/test/inspector/task-runner.h
@@ -20,7 +20,7 @@ class TaskRunner : public v8::base::Thread {
public:
class Task {
public:
- virtual ~Task() {}
+ virtual ~Task() = default;
virtual bool is_priority_task() = 0;
virtual void Run(IsolateData* data) = 0;
};
@@ -28,7 +28,7 @@ class TaskRunner : public v8::base::Thread {
TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
bool catch_exceptions, v8::base::Semaphore* ready_semaphore,
v8::StartupData* startup_data, bool with_inspector);
- virtual ~TaskRunner();
+ ~TaskRunner() override;
IsolateData* data() const { return data_.get(); }
// Thread implementation.
diff --git a/deps/v8/test/inspector/testcfg.py b/deps/v8/test/inspector/testcfg.py
index 66db34b562..50b7795dfc 100644
--- a/deps/v8/test/inspector/testcfg.py
+++ b/deps/v8/test/inspector/testcfg.py
@@ -58,6 +58,13 @@ class TestCase(testcase.TestCase):
def get_shell(self):
return 'inspector-test'
+ def _get_resources(self):
+ return [
+ os.path.join('src', 'inspector', 'injected-script-source.js'),
+ os.path.join(
+ 'test', 'inspector', 'debugger', 'resources', 'break-locations.js'),
+ ]
+
@property
def output_proc(self):
return outproc.ExpectedOutProc(
diff --git a/deps/v8/test/intl/break-iterator/options.js b/deps/v8/test/intl/break-iterator/options.js
new file mode 100644
index 0000000000..e2feae22a7
--- /dev/null
+++ b/deps/v8/test/intl/break-iterator/options.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(() => new Intl.v8BreakIterator('en', null));
+assertDoesNotThrow(() => new Intl.v8BreakIterator('en', undefined));
+
+for (let key of [false, true, "foo", Symbol, 1]) {
+ assertDoesNotThrow(() => new Intl.v8BreakIterator('en', key));
+}
+
+assertDoesNotThrow(() => new Intl.v8BreakIterator('en', {}));
+assertDoesNotThrow(() => new Intl.v8BreakIterator('en', new Proxy({}, {})));
diff --git a/deps/v8/test/intl/break-iterator/subclass.js b/deps/v8/test/intl/break-iterator/subclass.js
new file mode 100644
index 0000000000..b5ffe61a48
--- /dev/null
+++ b/deps/v8/test/intl/break-iterator/subclass.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-list-format
+
+var locales = ["tlh", "id", "en"];
+var input = "foo and bar";
+var refBreakIterator = new Intl.v8BreakIterator(locales);
+refBreakIterator.adoptText(input);
+
+class MyBreakIterator extends Intl.v8BreakIterator {
+ constructor(locales, options) {
+ super(locales, options);
+ }
+}
+
+var myBreakIterator = new MyBreakIterator(locales);
+myBreakIterator.adoptText(input);
+
+let expectedPos = refBreakIterator.first();
+let actualPos = myBreakIterator.first();
+assertEquals(expectedPos, actualPos);
+
+while (expectedPos != -1) {
+ expectedPos = refBreakIterator.next();
+ actualPos = myBreakIterator.next();
+ assertEquals(expectedPos, actualPos);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-splice-large-index.js b/deps/v8/test/intl/break-iterator/supported-locales-is-method.js
index 1f4eb9ce59..c5d00f54fa 100644
--- a/deps/v8/test/mjsunit/regress/regress-splice-large-index.js
+++ b/deps/v8/test/intl/break-iterator/supported-locales-is-method.js
@@ -1,4 +1,4 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2018 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,17 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-var a = [];
-a[0xfffffffe] = 10;
-assertThrows("a.unshift(1);", RangeError);
-assertEquals(0xffffffff, a.length);
-assertEquals(10, a[0xffffffff]);
-assertEquals(0xffffffff, a.length);
-assertEquals(undefined, a[0xfffffffe]);
+// Test that supportedLocalesOf is not a constructor.
-a = [1,2,3];
-a[0xfffffffe] = 10;
-assertThrows("a.splice(1,1,7,7,7,7,7);", RangeError);
-assertEquals([1,7,7,7,7,7,3], a.slice(0, 7));
-assertEquals(0xffffffff, a.length);
-assertEquals(10, a[0xfffffffe + 5 - 1]);
+var iterator = new Intl.v8BreakIterator();
+
+assertThrows(() => new Intl.v8BreakIterator.supportedLocalesOf(), TypeError);
diff --git a/deps/v8/test/intl/collator/de-sort.js b/deps/v8/test/intl/collator/de-sort.js
index 278b9492d3..005620c3be 100644
--- a/deps/v8/test/intl/collator/de-sort.js
+++ b/deps/v8/test/intl/collator/de-sort.js
@@ -42,3 +42,17 @@ assertEquals('flüße', result[5]);
assertEquals('FUSSE', result[6]);
assertEquals('Fuße', result[7]);
assertEquals('März', result[8]);
+
+result = ["AE", "Ä"].sort(new Intl.Collator("de", {usage: "sort"}).compare)
+assertEquals("Ä", result[0]);
+assertEquals("AE", result[1]);
+result = ["AE", "Ä"].sort(new Intl.Collator("de", {usage: "search"}).compare)
+assertEquals("AE", result[0]);
+assertEquals("Ä", result[1]);
+
+
+var collator = new Intl.Collator("de", {usage: "search"});
+collator.resolvedOptions() // This triggers the code that removes the u-co-search keyword
+result = ["AE", "Ä"].sort(collator.compare)
+assertEquals("AE", result[0]);
+assertEquals("Ä", result[1]);
diff --git a/deps/v8/test/intl/collator/options.js b/deps/v8/test/intl/collator/options.js
new file mode 100644
index 0000000000..f03ff2cafc
--- /dev/null
+++ b/deps/v8/test/intl/collator/options.js
@@ -0,0 +1,121 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// No locale
+var collatorWithOptions = new Intl.Collator(undefined);
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag(%GetDefaultICULocale(), locale);
+assertEquals('sort', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator(undefined, {usage: 'sort'});
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag(%GetDefaultICULocale(), locale);
+assertEquals('sort', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator(undefined, {usage: 'search'});
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertEquals('search', usage);
+assertEquals('default', collation);
+assertLanguageTag(%GetDefaultICULocale(), locale);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator(locale);
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag(%GetDefaultICULocale(), locale);
+assertEquals('sort', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+// With Locale
+collatorWithOptions = new Intl.Collator('en-US');
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('sort', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator('en-US', {usage: 'sort'});
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('sort', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator('en-US', {usage: 'search'});
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertEquals('search', usage);
+assertEquals('default', collation);
+assertLanguageTag('en-US', locale);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+// With invalid collation value = 'search'
+collatorWithOptions = new Intl.Collator('en-US-u-co-search');
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('sort', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator('en-US-u-co-search', {usage: 'sort'});
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('sort', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator('en-US-u-co-search', {usage: 'search'});
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('search', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+// With invalid collation value = 'standard'
+collatorWithOptions = new Intl.Collator('en-US-u-co-standard');
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('sort', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator('en-US-u-co-standard', {usage: 'sort'});
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('sort', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator('en-US-u-co-standard', {usage: 'search'});
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('search', usage);
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+// With valid collation value = 'emoji'
+collatorWithOptions = new Intl.Collator('en-US-u-co-emoji');
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('sort', usage);
+assertEquals('emoji', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator('en-US-u-co-emoji', {usage: 'sort'});
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('sort', usage);
+assertEquals('emoji', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
+
+collatorWithOptions = new Intl.Collator('en-US-u-co-emoji', {usage: 'search'});
+var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
+assertLanguageTag('en-US', locale);
+assertEquals('search', usage);
+// usage = search overrides 'emoji' as the collation value.
+assertEquals('default', collation);
+assertEquals(locale.indexOf('-co-search'), -1);
diff --git a/deps/v8/test/intl/date-format/constructor-order.js b/deps/v8/test/intl/date-format/constructor-order.js
new file mode 100644
index 0000000000..54f3796629
--- /dev/null
+++ b/deps/v8/test/intl/date-format/constructor-order.js
@@ -0,0 +1,100 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+let weekday = new Array();
+let year = new Array();
+let month = new Array();
+let day = new Array();
+let hour = new Array();
+let minute = new Array();
+let second = new Array();
+let localeMatcher = new Array();
+let hour12 = new Array();
+let hourCycle = new Array();
+let timeZone = new Array();
+let era = new Array();
+let timeZoneName = new Array();
+let formatMatcher = new Array();
+
+new Intl.DateTimeFormat(['en-US'], {
+ get weekday() {
+ weekday.push(++getCount);
+ },
+ get year() {
+ year.push(++getCount);
+ },
+ get month() {
+ month.push(++getCount);
+ },
+ get day() {
+ day.push(++getCount);
+ },
+ get hour() {
+ hour.push(++getCount);
+ },
+ get minute() {
+ minute.push(++getCount);
+ },
+ get second() {
+ second.push(++getCount);
+ },
+ get localeMatcher() {
+ localeMatcher.push(++getCount);
+ },
+ get hour12() {
+ hour12.push(++getCount);
+ },
+ get hourCycle() {
+ hourCycle.push(++getCount);
+ },
+ get timeZone() {
+ timeZone.push(++getCount);
+ },
+ get era() {
+ era.push(++getCount);
+ },
+ get timeZoneName() {
+ timeZoneName.push(++getCount);
+ },
+ get formatMatcher() {
+ formatMatcher.push(++getCount);
+ }
+});
+
+assertEquals(2, weekday.length);
+assertEquals(1, weekday[0]);
+assertEquals(1, year.length);
+assertEquals(2, year[0]);
+assertEquals(1, month.length);
+assertEquals(3, month[0]);
+assertEquals(1, day.length);
+assertEquals(4, day[0]);
+assertEquals(2, hour.length);
+assertEquals(5, hour[0]);
+assertEquals(2, minute.length);
+assertEquals(6, minute[0]);
+assertEquals(2, second.length);
+assertEquals(7, second[0]);
+assertEquals(1, localeMatcher.length);
+assertEquals(8, localeMatcher[0]);
+assertEquals(1, hour12.length);
+assertEquals(9, hour12[0]);
+assertEquals(1, hourCycle.length);
+assertEquals(10, hourCycle[0]);
+assertEquals(1, timeZone.length);
+assertEquals(11, timeZone[0]);
+assertEquals(12, weekday[1]);
+assertEquals(1, era.length);
+assertEquals(13, era[0]);
+assertEquals(14, hour[1]);
+assertEquals(15, minute[1]);
+assertEquals(16, second[1]);
+assertEquals(1, timeZoneName.length);
+assertEquals(17, timeZoneName[0]);
+assertEquals(1, formatMatcher.length);
+assertEquals(18, formatMatcher[0]);
diff --git a/deps/v8/test/intl/date-format/date-format-to-parts.js b/deps/v8/test/intl/date-format/date-format-to-parts.js
index fd04dc5bd0..9b2e41bc96 100644
--- a/deps/v8/test/intl/date-format/date-format-to-parts.js
+++ b/deps/v8/test/intl/date-format/date-format-to-parts.js
@@ -5,14 +5,16 @@
var d = new Date(2016, 11, 15, 14, 10, 34);
var df = Intl.DateTimeFormat("ja",
{hour: 'numeric', minute: 'numeric', second: 'numeric', year: 'numeric',
- month: 'numeric', day: 'numeric', timeZoneName: 'short', era: 'short'});
+ month: 'numeric', day: 'numeric', timeZoneName: 'short', era: 'short',
+ hour12: true});
var formattedParts = df.formatToParts(d);
var formattedReconstructedFromParts = formattedParts.map((part) => part.value)
.reduce((accumulated, part) => accumulated + part);
assertEquals(df.format(d), formattedReconstructedFromParts);
-// 西暦2016年11月15日 14:10:34 GMT-7
+// 西暦2016年11月15日 午後02:10:34 GMT-7
assertEquals(["era", "year", "literal", "month", "literal", "day", "literal",
- "hour", "literal", "minute", "literal", "second", "literal",
- "timeZoneName"], formattedParts.map((part) => part.type));
+ "dayPeriod", "hour", "literal", "minute", "literal", "second",
+ "literal", "timeZoneName"
+ ], formattedParts.map((part) => part.type));
diff --git a/deps/v8/test/intl/date-format/format-is-bound.js b/deps/v8/test/intl/date-format/format-is-bound.js
index b744b65b91..fdec1eab01 100644
--- a/deps/v8/test/intl/date-format/format-is-bound.js
+++ b/deps/v8/test/intl/date-format/format-is-bound.js
@@ -37,3 +37,10 @@ dateArray.forEach(dtf.format);
// Formatting a date should work in a direct call.
dtf.format();
+
+// format should be bound properly even if created from a non-instance
+var legacy = Intl.DateTimeFormat.call(
+ Object.create(Intl.DateTimeFormat));
+var boundFormat = legacy.format;
+assertEquals(dtf.format(12345), legacy.format(12345));
+assertEquals(dtf.format(54321), boundFormat(54321));
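
The guarantee being asserted, as a minimal standalone sketch: the format getter returns a function already bound to its formatter, so it can be detached and called without a receiver.

  const dtf = new Intl.DateTimeFormat('en');
  const format = dtf.format;  // bound to dtf by the getter
  [Date.now()].map(format);   // safe even though no receiver is supplied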
diff --git a/deps/v8/test/intl/date-format/resolved-options-unwrap.js b/deps/v8/test/intl/date-format/resolved-options-unwrap.js
new file mode 100644
index 0000000000..2aa4064802
--- /dev/null
+++ b/deps/v8/test/intl/date-format/resolved-options-unwrap.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that Intl.DateTimeFormat.prototype.resolvedOptions properly handles
+// 3. Let dtf be ? UnwrapDateTimeFormat(dtf).
+var x = Object.create(Intl.DateTimeFormat.prototype);
+x = Intl.DateTimeFormat.call(x, 'en');
+
+var resolvedOptions = Intl.DateTimeFormat.prototype.resolvedOptions.call(x);
+assertEquals(resolvedOptions.locale, 'en')
diff --git a/deps/v8/test/intl/date-format/timezone-conversion.js b/deps/v8/test/intl/date-format/timezone-conversion.js
new file mode 100644
index 0000000000..1638346dee
--- /dev/null
+++ b/deps/v8/test/intl/date-format/timezone-conversion.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Tests time zone support with conversion.
+
+df = Intl.DateTimeFormat(undefined, {timeZone: 'America/Los_Angeles'});
+assertEquals('America/Los_Angeles', df.resolvedOptions().timeZone);
+
+df = Intl.DateTimeFormat(undefined, {timeZone: {toString() { return 'America/Los_Angeles'}}});
+assertEquals('America/Los_Angeles', df.resolvedOptions().timeZone);
+
+assertThrows(() => Intl.DateTimeFormat(
+ undefined, {timeZone: {toString() { throw new Error("should throw"); }}}));
+
+assertThrows(() => Intl.DateTimeFormat(
+ undefined, {get timeZone() { throw new Error("should throw"); }}));
diff --git a/deps/v8/test/intl/general/getCanonicalLocales.js b/deps/v8/test/intl/general/getCanonicalLocales.js
index 0df6846ce6..65c7fc6e3a 100644
--- a/deps/v8/test/intl/general/getCanonicalLocales.js
+++ b/deps/v8/test/intl/general/getCanonicalLocales.js
@@ -7,7 +7,10 @@ assertDoesNotThrow(() => Intl.getCanonicalLocales("foobar-foobar"));
// Ignore duplicate subtags in different namespaces; eg, 'a' vs 'u'.
assertDoesNotThrow(() => Intl.getCanonicalLocales("en-a-ca-Chinese-u-ca-Chinese"));
+// Ignore duplicate subtags in the U-extension as well. Only the first one counts.
+// See RFC 6067 for details.
+assertDoesNotThrow(() => Intl.getCanonicalLocales("en-u-ca-gregory-ca-chinese"));
+assertEquals("en-u-ca-gregory", Intl.getCanonicalLocales("en-u-ca-gregory-ca-chinese")[0]);
// Check duplicate subtags (after the first tag) are detected.
assertThrows(() => Intl.getCanonicalLocales("en-foobar-foobar"), RangeError);
-assertThrows(() => Intl.getCanonicalLocales("en-u-ca-gregory-ca-chinese"), RangeError);
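
A condensed sketch of the behavior change (results taken from the assertions above):

  // within the -u- extension the first 'ca' key now wins (RFC 6067)
  Intl.getCanonicalLocales('en-u-ca-gregory-ca-chinese');  // ['en-u-ca-gregory']
  // duplicate variant subtags outside the extension still throw
  Intl.getCanonicalLocales('en-foobar-foobar');            // RangeError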
diff --git a/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js b/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js
index b4d529652f..808e50d208 100644
--- a/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js
+++ b/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js
@@ -23,5 +23,5 @@
].forEach(([inputLocale, expectedLocale]) => {
const canonicalLocales = Intl.getCanonicalLocales(inputLocale);
assertEquals(canonicalLocales.length, 1);
- assertEquals(canonicalLocales[0], expectedLocale);
+ assertEquals(expectedLocale, canonicalLocales[0]);
})
diff --git a/deps/v8/test/intl/general/language_tags_with_preferred_values.js b/deps/v8/test/intl/general/language_tags_with_preferred_values.js
index 073a6c9aff..4f2fbbfb2e 100644
--- a/deps/v8/test/intl/general/language_tags_with_preferred_values.js
+++ b/deps/v8/test/intl/general/language_tags_with_preferred_values.js
@@ -29,6 +29,6 @@
["aam-u-ca-gregory", "aas-u-ca-gregory"],
].forEach(([inputLocale, expectedLocale]) => {
const canonicalLocales = Intl.getCanonicalLocales(inputLocale);
- assertEquals(canonicalLocales.length, 1);
- assertEquals(canonicalLocales[0], expectedLocale);
+ assertEquals(1, canonicalLocales.length);
+ assertEquals(expectedLocale, canonicalLocales[0]);
})
diff --git a/deps/v8/test/intl/general/supported-locales-of.js b/deps/v8/test/intl/general/supported-locales-of.js
index e16bb6702f..556e525828 100644
--- a/deps/v8/test/intl/general/supported-locales-of.js
+++ b/deps/v8/test/intl/general/supported-locales-of.js
@@ -27,55 +27,67 @@
// Tests supportedLocalesOf method.
-var undef = Intl.DateTimeFormat.supportedLocalesOf();
-assertEquals([], undef);
+var services = [
+ Intl.DateTimeFormat,
+ Intl.Collator,
+ Intl.NumberFormat,
+ Intl.PluralRules
+];
-var empty = Intl.DateTimeFormat.supportedLocalesOf([]);
-assertEquals([], empty);
+for (const service of services) {
+ let undef = service.supportedLocalesOf();
+ assertEquals([], undef);
-var strLocale = Intl.DateTimeFormat.supportedLocalesOf('sr');
-assertEquals('sr', strLocale[0]);
+ let empty = service.supportedLocalesOf([]);
+ assertEquals([], empty);
-var multiLocale =
- Intl.DateTimeFormat.supportedLocalesOf(['sr-Thai-RS', 'de', 'zh-CN']);
-assertEquals('sr-Thai-RS', multiLocale[0]);
-assertEquals('de', multiLocale[1]);
-assertEquals('zh-CN', multiLocale[2]);
+ let strLocale = service.supportedLocalesOf("sr");
+ assertEquals("sr", strLocale[0]);
-collatorUndef = Intl.Collator.supportedLocalesOf();
-assertEquals([], collatorUndef);
+ var locales = ["sr-Thai-RS", "de", "zh-CN"];
+ let multiLocale = service.supportedLocalesOf(locales);
+ assertEquals("sr-Thai-RS", multiLocale[0]);
+ assertEquals("de", multiLocale[1]);
+ assertEquals("zh-CN", multiLocale[2]);
-collatorEmpty = Intl.Collator.supportedLocalesOf([]);
-assertEquals([], collatorEmpty);
+ let numLocale = service.supportedLocalesOf(1);
+ assertEquals([], numLocale);
+ assertThrows(function() {
+    numLocale = service.supportedLocalesOf([1]);
+ }, TypeError);
-collatorStrLocale = Intl.Collator.supportedLocalesOf('sr');
-assertEquals('sr', collatorStrLocale[0]);
+ extensionLocale = service.supportedLocalesOf("id-u-co-pinyin");
+ assertEquals("id-u-co-pinyin", extensionLocale[0]);
-collatorMultiLocale =
- Intl.Collator.supportedLocalesOf(['sr-Thai-RS', 'de', 'zh-CN']);
-assertEquals('sr-Thai-RS', collatorMultiLocale[0]);
-assertEquals('de', collatorMultiLocale[1]);
-assertEquals('zh-CN', collatorMultiLocale[2]);
+ bestFitLocale = service.supportedLocalesOf("de", {
+ localeMatcher: "best fit"
+ });
+ assertEquals("de", bestFitLocale[0]);
-numLocale = Intl.Collator.supportedLocalesOf(1);
-assertEquals([], numLocale);
+ // Need a better test for "lookup" once it differs from "best fit".
+ lookupLocale = service.supportedLocalesOf("zh-CN", {
+ localeMatcher: "lookup"
+ });
+ assertEquals("zh-CN", lookupLocale[0]);
-assertThrows(function() {
- numLocale = Intl.Collator.supportedLocalesOf([1]);
-}, TypeError);
+ assertThrows(function() {
+ service.supportedLocalesOf("id-u-co-pinyin", { localeMatcher: "xyz" });
+ }, RangeError);
-extensionLocale = Intl.Collator.supportedLocalesOf('id-u-co-pinyin');
-assertEquals('id-u-co-pinyin', extensionLocale[0]);
+ privateuseLocale = service.supportedLocalesOf("en-US-x-twain");
+ assertEquals("en-US-x-twain", privateuseLocale[0]);
-bestFitLocale =
- Intl.Collator.supportedLocalesOf('de', {localeMatcher: 'best fit'});
-assertEquals('de', bestFitLocale[0]);
+ privateuseLocale2 = service.supportedLocalesOf("x-twain");
+ assertEquals(undefined, privateuseLocale2[0]);
-// Need a better test for "lookup" once it differs from "best fit".
-lookupLocale =
- Intl.Collator.supportedLocalesOf('zh-CN', {localeMatcher: 'lookup'});
-assertEquals('zh-CN', lookupLocale[0]);
+ grandfatheredLocale = service.supportedLocalesOf("art-lojban");
+ assertEquals(undefined, grandfatheredLocale[0]);
-assertThrows(function() {
- Intl.Collator.supportedLocalesOf('id-u-co-pinyin', {localeMatcher: 'xyz'});
-}, RangeError);
+ grandfatheredLocale2 = service.supportedLocalesOf("i-pwn");
+ assertEquals(undefined, grandfatheredLocale2[0]);
+
+ unicodeInPrivateuseLocale = service.supportedLocalesOf(
+ "en-US-x-u-co-phonebk"
+ );
+ assertEquals("en-US-x-u-co-phonebk", unicodeInPrivateuseLocale[0]);
+}
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index b0e4340ece..42807597a0 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -29,6 +29,13 @@
[ALWAYS, {
# TODO(jochen): The following test is flaky.
'overrides/caching': [PASS, FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=6891
+ 'segmenter/segment': [FAIL],
+ 'segmenter/segment-iterator': [FAIL],
+ 'segmenter/segment-iterator-following': [FAIL],
+ 'segmenter/segment-iterator-next': [FAIL],
+ 'segmenter/segment-iterator-preceding': [FAIL],
}], # ALWAYS
['variant == no_wasm_traps', {
@@ -41,4 +48,12 @@
'relative-time-format/default-locale-pt-BR': [SKIP],
}], # system == windows'
+['system == android', {
+ # Android's ICU data file does not have the Chinese/Japanese dictionary
+ # required for the test to pass.
+ 'break-iterator/zh-break': [FAIL],
+ # Unable to change locale on Android:
+ 'relative-time-format/default-locale-fr-CA': [FAIL],
+ 'relative-time-format/default-locale-pt-BR': [FAIL],
+}], # 'system == android'
]
diff --git a/deps/v8/test/intl/list-format/supported-locale.js b/deps/v8/test/intl/list-format/supported-locale.js
new file mode 100644
index 0000000000..1eac25d618
--- /dev/null
+++ b/deps/v8/test/intl/list-format/supported-locale.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-list-format
+assertEquals(typeof Intl.ListFormat.supportedLocalesOf, "function",
+ "Intl.ListFormat.supportedLocalesOf should be a function");
+
+var undef = Intl.ListFormat.supportedLocalesOf();
+assertEquals([], undef);
+
+var empty = Intl.ListFormat.supportedLocalesOf([]);
+assertEquals([], empty);
+
+var strLocale = Intl.ListFormat.supportedLocalesOf('sr');
+assertEquals('sr', strLocale[0]);
+
+var multiLocale = ['sr-Thai-RS', 'de', 'zh-CN'];
+assertEquals(multiLocale, Intl.ListFormat.supportedLocalesOf(multiLocale));
diff --git a/deps/v8/test/intl/locale/locale-properties.js b/deps/v8/test/intl/locale/locale-properties.js
index 9432cb8a77..9800e8d6cf 100644
--- a/deps/v8/test/intl/locale/locale-properties.js
+++ b/deps/v8/test/intl/locale/locale-properties.js
@@ -24,7 +24,7 @@ assertEquals('buddhist', locale.calendar);
assertEquals('phonebk', locale.collation);
assertEquals('h23', locale.hourCycle);
assertEquals('upper', locale.caseFirst);
-assertEquals('true', locale.numeric);
+assertEquals(true, locale.numeric);
assertEquals('roman', locale.numberingSystem);
// Not defined, expected to undefined.
assertEquals(undefined, locale.currency);
diff --git a/deps/v8/test/intl/number-format/format-is-bound.js b/deps/v8/test/intl/number-format/format-is-bound.js
index edb6a4b817..defb8982e2 100644
--- a/deps/v8/test/intl/number-format/format-is-bound.js
+++ b/deps/v8/test/intl/number-format/format-is-bound.js
@@ -42,3 +42,9 @@ nf.format(12345);
// Reading the format doesn't add any additional property keys
assertEquals(beforeCount, Object.getOwnPropertyNames(nf).length);
+
+// format should be bound properly even if created from a non-instance
+var legacy = Intl.NumberFormat.call(Object.create(Intl.NumberFormat));
+var boundFormat = legacy.format;
+assertEquals(nf.format(12345), legacy.format(12345));
+assertEquals(nf.format(54321), boundFormat(54321));
diff --git a/deps/v8/test/intl/number-format/resolved-options-unwrap.js b/deps/v8/test/intl/number-format/resolved-options-unwrap.js
new file mode 100644
index 0000000000..70b40bbea4
--- /dev/null
+++ b/deps/v8/test/intl/number-format/resolved-options-unwrap.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let nf = Object.create(Intl.NumberFormat.prototype);
+nf = Intl.NumberFormat.call(nf);
+const actual = Intl.NumberFormat.prototype.resolvedOptions.call(nf);
+
+const expected = new Intl.NumberFormat().resolvedOptions();
+Object.keys(expected).forEach(key => assertEquals(expected[key], actual[key]));
+assertEquals(Object.keys(expected).length, Object.keys(actual).length);
diff --git a/deps/v8/test/intl/number-format/wont-crash-by-1-or-false.js b/deps/v8/test/intl/number-format/wont-crash-by-1-or-false.js
new file mode 100644
index 0000000000..518fe52bde
--- /dev/null
+++ b/deps/v8/test/intl/number-format/wont-crash-by-1-or-false.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Make sure that passing 1 or false to a patched constructor won't cause a crash.
+
+Object.defineProperty(Intl.NumberFormat, Symbol.hasInstance, { value: _ => true });
+assertThrows(() =>
+ Intl.NumberFormat.call(1), TypeError);
+
+assertThrows(() =>
+ Intl.NumberFormat.call(false), TypeError);
diff --git a/deps/v8/test/intl/plural-rules/check-to-number.js b/deps/v8/test/intl/plural-rules/check-to-number.js
new file mode 100644
index 0000000000..7d5396e634
--- /dev/null
+++ b/deps/v8/test/intl/plural-rules/check-to-number.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const pr = new Intl.PluralRules();
+const inputs = [undefined, null, true, false, 1, '', 'test', {}, { a: 1 }];
+
+inputs.forEach(input => {
+ const number = Number(input);
+ const expected = pr.select(number);
+ const actual = pr.select(input);
+ assertEquals(actual, expected);
+});
+
+let count = 0;
+const dummyObject = {};
+dummyObject[Symbol.toPrimitive] = () => ++count;
+assertEquals(pr.select(dummyObject), pr.select(count));
+assertEquals(count, 1);
+
+assertEquals(pr.select(0), pr.select(-0))
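
The test relies on select() coercing its argument the way Number() does; a minimal sketch of that contract:

  const pr = new Intl.PluralRules('en');
  pr.select(1);    // 'one'
  pr.select('1');  // also 'one' - the string is coerced to the number 1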
diff --git a/deps/v8/test/intl/regress-7982.js b/deps/v8/test/intl/regress-7982.js
new file mode 100644
index 0000000000..bd251c5939
--- /dev/null
+++ b/deps/v8/test/intl/regress-7982.js
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-locale
+
+// Make sure that maximize and minimize of locales work reasonably.
+
+assertEquals("zh-Hans-CN-u-ca-chinese", new Intl.Locale("zh-u-ca-Chinese").maximize().toString());
+assertEquals("zh-u-ca-chinese", new Intl.Locale("zh-u-ca-Chinese").minimize().toString());
+assertEquals("th-Thai-TH-u-nu-thai", new Intl.Locale("th-Thai-TH-u-nu-Thai").maximize().toString());
+assertEquals("th-u-nu-thai", new Intl.Locale("th-Thai-TH-u-nu-Thai").minimize().toString());
+assertEquals("th-Thai-TH-u-nu-thai", new Intl.Locale("th-u-nu-Thai").maximize().toString());
+assertEquals("th-u-nu-thai", new Intl.Locale("th-u-nu-Thai").minimize().toString());
+assertEquals("zh-Hans-CN-u-ca-chinese", new Intl.Locale("zh-CN-u-ca-chinese").maximize().toString());
+assertEquals("zh-u-ca-chinese", new Intl.Locale("zh-CN-u-ca-chinese").minimize().toString());
+assertEquals("zh-Hant-TW-u-ca-chinese", new Intl.Locale("zh-TW-u-ca-chinese").maximize().toString());
+assertEquals("zh-TW-u-ca-chinese", new Intl.Locale("zh-TW-u-ca-chinese").minimize().toString());
+assertEquals("zh-Hant-HK-u-ca-chinese", new Intl.Locale("zh-HK-u-ca-chinese").maximize().toString());
+assertEquals("zh-HK-u-ca-chinese", new Intl.Locale("zh-HK-u-ca-chinese").minimize().toString());
+assertEquals("zh-Hant-TW-u-ca-chinese", new Intl.Locale("zh-Hant-u-ca-chinese").maximize().toString());
+assertEquals("zh-Hant-u-ca-chinese", new Intl.Locale("zh-Hant-u-ca-chinese").minimize().toString());
+assertEquals("zh-Hans-CN-u-ca-chinese", new Intl.Locale("zh-Hans-u-ca-chinese").maximize().toString());
+assertEquals("zh-u-ca-chinese", new Intl.Locale("zh-Hans-u-ca-chinese").minimize().toString());
+assertEquals("zh-Hant-CN-u-ca-chinese", new Intl.Locale("zh-Hant-CN-u-ca-chinese").maximize().toString());
+assertEquals("zh-Hant-CN-u-ca-chinese", new Intl.Locale("zh-Hant-CN-u-ca-chinese").minimize().toString());
+assertEquals("zh-Hans-CN-u-ca-chinese", new Intl.Locale("zh-Hans-CN-u-ca-chinese").maximize().toString());
+assertEquals("zh-u-ca-chinese", new Intl.Locale("zh-Hans-CN-u-ca-chinese").minimize().toString());
+assertEquals("zh-Hant-TW-u-ca-chinese", new Intl.Locale("zh-Hant-TW-u-ca-chinese").maximize().toString());
+assertEquals("zh-TW-u-ca-chinese", new Intl.Locale("zh-Hant-TW-u-ca-chinese").minimize().toString());
+assertEquals("zh-Hans-TW-u-ca-chinese", new Intl.Locale("zh-Hans-TW-u-ca-chinese").maximize().toString());
+assertEquals("zh-Hans-TW-u-ca-chinese", new Intl.Locale("zh-Hans-TW-u-ca-chinese").minimize().toString());
+assertEquals("zh-Hant-HK-u-ca-chinese", new Intl.Locale("zh-Hant-HK-u-ca-chinese").maximize().toString());
+assertEquals("zh-HK-u-ca-chinese", new Intl.Locale("zh-Hant-HK-u-ca-chinese").minimize().toString());
+assertEquals("zh-Hans-HK-u-ca-chinese", new Intl.Locale("zh-Hans-HK-u-ca-chinese").maximize().toString());
+assertEquals("zh-Hans-HK-u-ca-chinese", new Intl.Locale("zh-Hans-HK-u-ca-chinese").minimize().toString());
diff --git a/deps/v8/test/intl/regress-888299.js b/deps/v8/test/intl/regress-888299.js
new file mode 100644
index 0000000000..abe9d1da34
--- /dev/null
+++ b/deps/v8/test/intl/regress-888299.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var i = 0;
+new Intl.DateTimeFormat(
+ undefined, { get hour() { if (i == 0) { i = 1; return 'numeric'} } });
diff --git a/deps/v8/test/intl/relative-time-format/format-en.js b/deps/v8/test/intl/relative-time-format/format-en.js
index cd58d65355..2af755dcbf 100644
--- a/deps/v8/test/intl/relative-time-format/format-en.js
+++ b/deps/v8/test/intl/relative-time-format/format-en.js
@@ -67,9 +67,6 @@ assertEquals('next month', longAuto.format(1, 'month'));
assertEquals('in 2 months', longAuto.format(2, 'month'));
assertEquals('in 345 months', longAuto.format(345, 'month'));
-// "quarter" is not working in ICU now
-// Tracked by ICU bug in http://bugs.icu-project.org/trac/ticket/12171
-/*
assertEquals('3 quarters ago', longAuto.format(-3, 'quarter'));
assertEquals('2 quarters ago', longAuto.format(-2, 'quarter'));
assertEquals('last quarter', longAuto.format(-1, 'quarter'));
@@ -78,7 +75,6 @@ assertEquals('this quarter', longAuto.format(-0, 'quarter'));
assertEquals('next quarter', longAuto.format(1, 'quarter'));
assertEquals('in 2 quarters', longAuto.format(2, 'quarter'));
assertEquals('in 345 quarters', longAuto.format(345, 'quarter'));
-*/
assertEquals('3 years ago', longAuto.format(-3, 'year'));
assertEquals('2 years ago', longAuto.format(-2, 'year'));
@@ -146,8 +142,6 @@ assertEquals('next mo.', shortAuto.format(1, 'month'));
assertEquals('in 2 mo.', shortAuto.format(2, 'month'));
assertEquals('in 345 mo.', shortAuto.format(345, 'month'));
-// "quarter" is not working in ICU now
-/*
assertEquals('3 qtrs. ago', shortAuto.format(-3, 'quarter'));
assertEquals('2 qtrs. ago', shortAuto.format(-2, 'quarter'));
assertEquals('last qtr.', shortAuto.format(-1, 'quarter'));
@@ -156,7 +150,6 @@ assertEquals('this qtr.', shortAuto.format(-0, 'quarter'));
assertEquals('next qtr.', shortAuto.format(1, 'quarter'));
assertEquals('in 2 qtrs.', shortAuto.format(2, 'quarter'));
assertEquals('in 345 qtrs.', shortAuto.format(345, 'quarter'));
-*/
assertEquals('3 yr. ago', shortAuto.format(-3, 'year'));
assertEquals('2 yr. ago', shortAuto.format(-2, 'year'));
@@ -225,8 +218,6 @@ assertEquals('next mo.', narrowAuto.format(1, 'month'));
assertEquals('in 2 mo.', narrowAuto.format(2, 'month'));
assertEquals('in 345 mo.', narrowAuto.format(345, 'month'));
-// "quarter" is not working in ICU now
-/*
assertEquals('3 qtrs. ago', narrowAuto.format(-3, 'quarter'));
assertEquals('2 qtrs. ago', narrowAuto.format(-2, 'quarter'));
assertEquals('last qtr.', narrowAuto.format(-1, 'quarter'));
@@ -235,7 +226,6 @@ assertEquals('this qtr.', narrowAuto.format(-0, 'quarter'));
assertEquals('next qtr.', narrowAuto.format(1, 'quarter'));
assertEquals('in 2 qtrs.', narrowAuto.format(2, 'quarter'));
assertEquals('in 345 qtrs.', narrowAuto.format(345, 'quarter'));
-*/
assertEquals('3 yr. ago', narrowAuto.format(-3, 'year'));
assertEquals('2 yr. ago', narrowAuto.format(-2, 'year'));
@@ -303,8 +293,6 @@ assertEquals('in 1 month', longAlways.format(1, 'month'));
assertEquals('in 2 months', longAlways.format(2, 'month'));
assertEquals('in 345 months', longAlways.format(345, 'month'));
-// "quarter" is not working in ICU now
-/*
assertEquals('3 quarters ago', longAlways.format(-3, 'quarter'));
assertEquals('2 quarters ago', longAlways.format(-2, 'quarter'));
assertEquals('1 quarter ago', longAlways.format(-1, 'quarter'));
@@ -313,7 +301,6 @@ assertEquals('0 quarters ago', longAlways.format(-0, 'quarter'));
assertEquals('in 1 quarter', longAlways.format(1, 'quarter'));
assertEquals('in 2 quarters', longAlways.format(2, 'quarter'));
assertEquals('in 345 quarters', longAlways.format(345, 'quarter'));
-*/
assertEquals('3 years ago', longAlways.format(-3, 'year'));
assertEquals('2 years ago', longAlways.format(-2, 'year'));
@@ -381,17 +368,14 @@ assertEquals('in 1 mo.', shortAlways.format(1, 'month'));
assertEquals('in 2 mo.', shortAlways.format(2, 'month'));
assertEquals('in 345 mo.', shortAlways.format(345, 'month'));
-// "quarter" is not working in ICU now
-/*
assertEquals('3 qtrs. ago', shortAlways.format(-3, 'quarter'));
assertEquals('2 qtrs. ago', shortAlways.format(-2, 'quarter'));
assertEquals('1 qtr. ago', shortAlways.format(-1, 'quarter'));
assertEquals('in 0 qtrs.', shortAlways.format(0, 'quarter'));
-assertEquals('0 qtr. ago', shortAlways.format(-0, 'quarter'));
+assertEquals('0 qtrs. ago', shortAlways.format(-0, 'quarter'));
assertEquals('in 1 qtr.', shortAlways.format(1, 'quarter'));
assertEquals('in 2 qtrs.', shortAlways.format(2, 'quarter'));
assertEquals('in 345 qtrs.', shortAlways.format(345, 'quarter'));
-*/
assertEquals('3 yr. ago', shortAlways.format(-3, 'year'));
assertEquals('2 yr. ago', shortAlways.format(-2, 'year'));
@@ -460,17 +444,14 @@ assertEquals('in 1 mo.', narrowAlways.format(1, 'month'));
assertEquals('in 2 mo.', narrowAlways.format(2, 'month'));
assertEquals('in 345 mo.', narrowAlways.format(345, 'month'));
-// "quarter" is not working in ICU now
-/*
assertEquals('3 qtrs. ago', narrowAlways.format(-3, 'quarter'));
assertEquals('2 qtrs. ago', narrowAlways.format(-2, 'quarter'));
assertEquals('1 qtr. ago', narrowAlways.format(-1, 'quarter'));
assertEquals('in 0 qtrs.', narrowAlways.format(0, 'quarter'));
-assertEquals('0 qtr. ago', narrowAlways.format(-0, 'quarter'));
+assertEquals('0 qtrs. ago', narrowAlways.format(-0, 'quarter'));
assertEquals('in 1 qtr.', narrowAlways.format(1, 'quarter'));
assertEquals('in 2 qtrs.', narrowAlways.format(2, 'quarter'));
assertEquals('in 345 qtrs.', narrowAlways.format(345, 'quarter'));
-*/
assertEquals('3 yr. ago', narrowAlways.format(-3, 'year'));
assertEquals('2 yr. ago', narrowAlways.format(-2, 'year'));
diff --git a/deps/v8/test/intl/relative-time-format/format-to-parts-plural.js b/deps/v8/test/intl/relative-time-format/format-to-parts-plural.js
new file mode 100644
index 0000000000..7e5e1b79a6
--- /dev/null
+++ b/deps/v8/test/intl/relative-time-format/format-to-parts-plural.js
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-relative-time-format
+
+// Check plural forms with formatToParts.
+// http://tc39.github.io/proposal-intl-relative-time/
+
+let rtf = new Intl.RelativeTimeFormat();
+
+// Test 1.4.4 Intl.RelativeTimeFormat.prototype.formatToParts( value, unit )
+function verifyElement(part, expectedUnit) {
+ assertEquals(true, part.type == 'literal' || part.type == 'integer');
+ assertEquals('string', typeof part.value);
+ if (part.type == 'integer') {
+ assertEquals('string', typeof part.unit);
+ assertEquals(expectedUnit, part.unit);
+ }
+}
+
+['year', 'quarter', 'month', 'week', 'day', 'hour', 'minute', 'second'].forEach(
+ function(unit) {
+ rtf.formatToParts(100, unit + 's').forEach(
+ function(part) {
+ verifyElement(part, unit);
+ });
+ });
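+
+// For reference, an illustrative (locale-dependent, deliberately not
+// asserted) shape for rtf.formatToParts(100, 'days') in English:
+//   [{ type: 'literal', value: 'in ' },
+//    { type: 'integer', value: '100', unit: 'day' },
+//    { type: 'literal', value: ' days' }]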
diff --git a/deps/v8/test/intl/relative-time-format/supported-locale.js b/deps/v8/test/intl/relative-time-format/supported-locale.js
new file mode 100644
index 0000000000..b24cfb27af
--- /dev/null
+++ b/deps/v8/test/intl/relative-time-format/supported-locale.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-relative-time-format
+assertEquals(typeof Intl.RelativeTimeFormat.supportedLocalesOf, "function",
+ "Intl.RelativeTimeFormat.supportedLocalesOf should be a function");
+
+var undef = Intl.RelativeTimeFormat.supportedLocalesOf();
+assertEquals([], undef);
+
+var empty = Intl.RelativeTimeFormat.supportedLocalesOf([]);
+assertEquals([], empty);
+
+var strLocale = Intl.RelativeTimeFormat.supportedLocalesOf('sr');
+assertEquals('sr', strLocale[0]);
+
+var multiLocale = ['sr-Thai-RS', 'de', 'zh-CN'];
+assertEquals(multiLocale, Intl.RelativeTimeFormat.supportedLocalesOf(multiLocale));
diff --git a/deps/v8/test/intl/segmenter/constructor.js b/deps/v8/test/intl/segmenter/constructor.js
new file mode 100644
index 0000000000..655bb100df
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/constructor.js
@@ -0,0 +1,216 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+// Segmenter constructor can't be called as function.
+assertThrows(() => Intl.Segmenter(["sr"]), TypeError);
+
+// Invalid locale string.
+assertThrows(() => new Intl.Segmenter(["abcdefghi"]), RangeError);
+
+assertDoesNotThrow(() => new Intl.Segmenter(["sr"], {}));
+
+assertDoesNotThrow(() => new Intl.Segmenter([], {}));
+
+assertDoesNotThrow(() => new Intl.Segmenter(["fr", "ar"], {}));
+
+assertDoesNotThrow(() => new Intl.Segmenter({ 0: "ja", 1: "fr" }, {}));
+
+assertDoesNotThrow(() => new Intl.Segmenter({ 1: "ja", 2: "fr" }, {}));
+
+assertDoesNotThrow(() => new Intl.Segmenter(["sr"]));
+
+assertDoesNotThrow(() => new Intl.Segmenter());
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "strict",
+ granularity: "grapheme"
+ })
+);
+
+assertDoesNotThrow(
+ () => new Intl.Segmenter(["sr"], { granularity: "sentence" })
+);
+
+assertDoesNotThrow(() => new Intl.Segmenter(["sr"], { granularity: "word" }));
+
+assertDoesNotThrow(
+ () => new Intl.Segmenter(["sr"], { granularity: "grapheme" })
+);
+
+assertDoesNotThrow(() => new Intl.Segmenter(["sr"], { granularity: "line" }));
+
+assertThrows(
+ () => new Intl.Segmenter(["sr"], { granularity: "standard" }),
+ RangeError
+);
+
+assertDoesNotThrow(
+ () => new Intl.Segmenter(["sr"], { lineBreakStyle: "normal" })
+);
+
+assertDoesNotThrow(
+ () => new Intl.Segmenter(["sr"], { lineBreakStyle: "strict" })
+);
+
+assertDoesNotThrow(
+ () => new Intl.Segmenter(["sr"], { lineBreakStyle: "loose" })
+);
+
+assertThrows(
+ () => new Intl.Segmenter(["sr"], { lineBreakStyle: "giant" }),
+ RangeError
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "sentence",
+ lineBreakStyle: "normal"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "sentence",
+ lineBreakStyle: "strict"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "sentence",
+ lineBreakStyle: "loose"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "word",
+ lineBreakStyle: "normal"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "word",
+ lineBreakStyle: "strict"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "word",
+ lineBreakStyle: "loose"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "grapheme",
+ lineBreakStyle: "normal"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "grapheme",
+ lineBreakStyle: "strict"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "grapheme",
+ lineBreakStyle: "loose"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "line",
+ lineBreakStyle: "loose"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "line",
+ lineBreakStyle: "normal"
+ })
+);
+
+assertDoesNotThrow(
+ () =>
+ new Intl.Segmenter(["sr"], {
+ granularity: "line",
+ lineBreakStyle: "strict"
+ })
+);
+
+// Propagate exceptions thrown by option getters.
+assertThrows(
+ () =>
+ new Intl.Segmenter(undefined, {
+ get localeMatcher() {
+ throw new TypeError("");
+ }
+ }),
+ TypeError
+);
+assertThrows(
+ () =>
+ new Intl.Segmenter(undefined, {
+ get lineBreakStyle() {
+ throw new TypeError("");
+ }
+ }),
+ TypeError
+);
+assertThrows(
+ () =>
+ new Intl.Segmenter(undefined, {
+ get granularity() {
+ throw new TypeError("");
+ }
+ }),
+ TypeError
+);
+
+// Each option getter should be read exactly once during construction.
+// Check all getters to prevent regressions.
+// Preserve the order of getter initialization.
+let getCount = 0;
+let localeMatcher = -1;
+let lineBreakStyle = -1;
+let granularity = -1;
+
+new Intl.Segmenter(["en-US"], {
+ get localeMatcher() {
+ localeMatcher = ++getCount;
+ },
+ get lineBreakStyle() {
+ lineBreakStyle = ++getCount;
+ },
+ get granularity() {
+ granularity = ++getCount;
+ }
+});
+
+assertEquals(1, localeMatcher);
+assertEquals(2, lineBreakStyle);
+assertEquals(3, granularity);
diff --git a/deps/v8/test/intl/segmenter/resolved-options.js b/deps/v8/test/intl/segmenter/resolved-options.js
new file mode 100644
index 0000000000..2e2a910ddb
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/resolved-options.js
@@ -0,0 +1,299 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+let segmenter = new Intl.Segmenter([], { granularity: "line" });
+// The default lineBreakStyle is 'normal'
+assertEquals("normal", segmenter.resolvedOptions().lineBreakStyle);
+
+segmenter = new Intl.Segmenter();
+assertEquals(undefined, segmenter.resolvedOptions().lineBreakStyle);
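+// lineBreakStyle is only meaningful for the 'line' granularity; for any
+// other granularity it resolves to undefined, as the checks below show.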
+
+// The default granularity is 'grapheme'
+assertEquals("grapheme", segmenter.resolvedOptions().granularity);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], { lineBreakStyle: "strict" }).resolvedOptions()
+ .lineBreakStyle
+);
+
+assertEquals(
+ "grapheme",
+ new Intl.Segmenter(["sr"], { lineBreakStyle: "strict" }).resolvedOptions()
+ .granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], { lineBreakStyle: "normal" }).resolvedOptions()
+ .lineBreakStyle
+);
+
+assertEquals(
+ "grapheme",
+ new Intl.Segmenter(["sr"], { lineBreakStyle: "normal" }).resolvedOptions()
+ .granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], { lineBreakStyle: "loose" }).resolvedOptions()
+ .lineBreakStyle
+);
+
+assertEquals(
+ "grapheme",
+ new Intl.Segmenter(["sr"], { lineBreakStyle: "loose" }).resolvedOptions()
+ .granularity
+);
+
+assertEquals(
+ "word",
+ new Intl.Segmenter(["sr"], { granularity: "word" }).resolvedOptions()
+ .granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], { granularity: "word" }).resolvedOptions()
+ .lineBreakStyle
+);
+
+assertEquals(
+ "grapheme",
+ new Intl.Segmenter(["sr"], { granularity: "grapheme" }).resolvedOptions()
+ .granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], { granularity: "grapheme" }).resolvedOptions()
+ .lineBreakStyle
+);
+
+assertEquals(
+ "sentence",
+ new Intl.Segmenter(["sr"], { granularity: "sentence" }).resolvedOptions()
+ .granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], { granularity: "sentence" }).resolvedOptions()
+ .lineBreakStyle
+);
+
+assertEquals(
+ "line",
+ new Intl.Segmenter(["sr"], { granularity: "line" }).resolvedOptions()
+ .granularity
+);
+
+assertEquals(
+ "normal",
+ new Intl.Segmenter(["sr"], { granularity: "line" }).resolvedOptions()
+ .lineBreakStyle
+);
+
+assertEquals(
+ "grapheme",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "loose",
+ granularity: "grapheme"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "loose",
+ granularity: "grapheme"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "grapheme",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "strict",
+ granularity: "grapheme"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "strict",
+ granularity: "grapheme"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "grapheme",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "normal",
+ granularity: "grapheme"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "normal",
+ granularity: "grapheme"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "word",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "loose",
+ granularity: "word"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "loose",
+ granularity: "word"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "word",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "strict",
+ granularity: "word"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "strict",
+ granularity: "word"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "word",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "normal",
+ granularity: "word"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "normal",
+ granularity: "word"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "sentence",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "loose",
+ granularity: "sentence"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "loose",
+ granularity: "sentence"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "sentence",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "strict",
+ granularity: "sentence"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ undefined,
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "strict",
+ granularity: "sentence"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "sentence",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "normal",
+ granularity: "sentence"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ "normal",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "normal",
+ granularity: "line"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "line",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "loose",
+ granularity: "line"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ "loose",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "loose",
+ granularity: "line"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "line",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "strict",
+ granularity: "line"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ "strict",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "strict",
+ granularity: "line"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals(
+ "line",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "normal",
+ granularity: "line"
+ }).resolvedOptions().granularity
+);
+
+assertEquals(
+ "normal",
+ new Intl.Segmenter(["sr"], {
+ lineBreakStyle: "normal",
+ granularity: "line"
+ }).resolvedOptions().lineBreakStyle
+);
+
+assertEquals("ar", new Intl.Segmenter(["ar"]).resolvedOptions().locale);
+
+assertEquals("ar", new Intl.Segmenter(["ar", "en"]).resolvedOptions().locale);
+
+assertEquals("fr", new Intl.Segmenter(["fr", "en"]).resolvedOptions().locale);
+
+assertEquals("ar", new Intl.Segmenter(["xyz", "ar"]).resolvedOptions().locale);
diff --git a/deps/v8/test/intl/segmenter/segment-iterator-following.js b/deps/v8/test/intl/segmenter/segment-iterator-following.js
new file mode 100644
index 0000000000..a28d6c31d1
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-iterator-following.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const segmenter = new Intl.Segmenter();
+const text = "Hello World, Test 123! Foo Bar. How are you?";
+const iter = segmenter.segment(text);
+
+assertEquals("function", typeof iter.following);
+
+// 1.5.3.2 %SegmentIteratorPrototype%.following( [ from ] )
+// 3.b If from >= the length of iterator.[[SegmentIteratorString]], throw a RangeError exception.
+assertDoesNotThrow(() => iter.following(text.length - 1));
+assertThrows(() => iter.following(text.length), RangeError);
+assertThrows(() => iter.following(text.length + 1), RangeError);
diff --git a/deps/v8/test/intl/segmenter/segment-iterator-next.js b/deps/v8/test/intl/segmenter/segment-iterator-next.js
new file mode 100644
index 0000000000..9aa40494ca
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-iterator-next.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const segmenter = new Intl.Segmenter();
+const text = "Hello World, Test 123! Foo Bar. How are you?";
+const iter = segmenter.segment(text);
+
+assertEquals("function", typeof iter.next);
diff --git a/deps/v8/test/intl/segmenter/segment-iterator-preceding.js b/deps/v8/test/intl/segmenter/segment-iterator-preceding.js
new file mode 100644
index 0000000000..07d73376f2
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-iterator-preceding.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const segmenter = new Intl.Segmenter();
+const text = "Hello World, Test 123! Foo Bar. How are you?";
+const iter = segmenter.segment(text);
+
+assertEquals("function", typeof iter.preceding);
+
+// 1.5.3.3 %SegmentIteratorPrototype%.preceding( [ from ] )
+// 3.b If ... from = 0, throw a RangeError exception.
+assertThrows(() => iter.preceding(0), RangeError);
+
+// 1.5.3.3 %SegmentIteratorPrototype%.preceding( [ from ] )
+// 3.b If from > the length of iterator.[[SegmentIteratorString]] ... , throw a RangeError exception.
+assertDoesNotThrow(() => iter.preceding(text.length - 1));
+assertDoesNotThrow(() => iter.preceding(text.length));
+assertThrows(() => iter.preceding(text.length + 1), RangeError);
diff --git a/deps/v8/test/intl/segmenter/segment-iterator.js b/deps/v8/test/intl/segmenter/segment-iterator.js
new file mode 100644
index 0000000000..0d0c31b405
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-iterator.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const segmenter = new Intl.Segmenter();
+const text = "Hello World, Test 123! Foo Bar. How are you?";
+const iter = segmenter.segment(text);
+
+assertEquals("number", typeof iter.position);
+assertEquals(0, iter.position);
+assertEquals("strig", typeof iter.breakType);
diff --git a/deps/v8/test/intl/segmenter/segment.js b/deps/v8/test/intl/segmenter/segment.js
new file mode 100644
index 0000000000..ca17c5040d
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+assertEquals("function", typeof Intl.Segmenter.prototype.segment);
diff --git a/deps/v8/test/intl/segmenter/supported-locale.js b/deps/v8/test/intl/segmenter/supported-locale.js
new file mode 100644
index 0000000000..24825a2759
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/supported-locale.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+assertEquals(
+ typeof Intl.Segmenter.supportedLocalesOf,
+ "function",
+ "Intl.Segmenter.supportedLocalesOf should be a function"
+);
+
+var undef = Intl.Segmenter.supportedLocalesOf();
+assertEquals([], undef);
+
+var empty = Intl.Segmenter.supportedLocalesOf([]);
+assertEquals([], empty);
+
+var strLocale = Intl.Segmenter.supportedLocalesOf("sr");
+assertEquals("sr", strLocale[0]);
+
+var multiLocale = ["sr-Thai-RS", "de", "zh-CN"];
+assertEquals(multiLocale, Intl.Segmenter.supportedLocalesOf(multiLocale));
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 3d4eed4849..6c5660ea9d 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -56,7 +56,7 @@ class TestSuite(testsuite.TestSuite):
return TestCase
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeHoley/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeHoley/run.js
index b56efe9836..2a4aff2ee7 100644
--- a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeHoley/run.js
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeHoley/run.js
@@ -5,7 +5,7 @@
// Comparing different copy schemes against spread initial literals.
// Benchmarks for large holey arrays.
-const largeHoleyArray = new Array(100000);
+const largeHoleyArray = new Array(1e5);
for (var i = 0; i < 100; i++) {
largeHoleyArray[i] = i;
@@ -140,9 +140,9 @@ function PrintError(name, error) {
success = false;
}
-// Run the benchmark (20 x 100) iterations instead of 1 second.
+// Run the benchmark for (5 x 100) iterations instead of for 1 second.
function CreateBenchmark(name, f) {
- new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 20, f) ]);
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 5, f) ]);
}
CreateBenchmark('Spread', SpreadLargeHoley);
@@ -152,8 +152,9 @@ CreateBenchmark('Slice', SliceLargeHoley);
CreateBenchmark('Slice0', Slice0LargeHoley);
CreateBenchmark('ConcatReceive', ConcatReceiveLargeHoley);
CreateBenchmark('ConcatArg', ConcatArgLargeHoley);
-CreateBenchmark('ForOfPush', ForOfPushLargeHoley);
-CreateBenchmark('MapId', MapIdLargeHoley);
+// The following benchmarks are so slow that they will time out.
+// CreateBenchmark('ForOfPush', ForOfPushLargeHoley);
+// CreateBenchmark('MapId', MapIdLargeHoley);
BenchmarkSuite.config.doWarmup = true;
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLarge/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargePacked/run.js
index a018735fc9..38643c6903 100644
--- a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLarge/run.js
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargePacked/run.js
@@ -5,7 +5,7 @@
// Comparing different copy schemes against spread initial literals.
// Benchmarks for large packed arrays.
-const largeHoleyArray = new Array(100000);
+const largeHoleyArray = new Array(1e5);
const largeArray = Array.from(largeHoleyArray.keys());
// ----------------------------------------------------------------------------
@@ -125,7 +125,7 @@ load('../base.js');
var success = true;
function PrintResult(name, result) {
- print(name + '-ArrayLiteralInitialSpreadLarge(Score): ' + result);
+ print(name + '-ArrayLiteralInitialSpreadLargePacked(Score): ' + result);
}
function PrintError(name, error) {
@@ -133,9 +133,9 @@ function PrintError(name, error) {
success = false;
}
-// Run the benchmark (20 x 100) iterations instead of 1 second.
+// Run the benchmark for (5 x 100) iterations instead of for 1 second.
function CreateBenchmark(name, f) {
- new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 20, f) ]);
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 5, f) ]);
}
CreateBenchmark('Spread', SpreadLarge);
@@ -145,8 +145,9 @@ CreateBenchmark('Slice', SliceLarge);
CreateBenchmark('Slice0', Slice0Large);
CreateBenchmark('ConcatReceive', ConcatReceiveLarge);
CreateBenchmark('ConcatArg', ConcatArgLarge);
-CreateBenchmark('ForOfPush', ForOfPushLarge);
-CreateBenchmark('MapId', MapIdLarge);
+// The following benchmarks are so slow that they will time out.
+// CreateBenchmark('ForOfPush', ForOfPushLarge);
+// CreateBenchmark('MapId', MapIdLarge);
BenchmarkSuite.config.doWarmup = true;
BenchmarkSuite.config.doDeterministic = true;
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmall/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallPacked/run.js
index 5c8b8d1ac4..9d29fa40b2 100644
--- a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmall/run.js
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallPacked/run.js
@@ -126,7 +126,7 @@ load('../base.js');
var success = true;
function PrintResult(name, result) {
- print(name + '-ArrayLiteralInitialSpreadSmall(Score): ' + result);
+ print(name + '-ArrayLiteralInitialSpreadSmallPacked(Score): ' + result);
}
function PrintError(name, error) {
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index 3793e2c9a8..c2aacb452f 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -60,48 +60,71 @@
"results_regexp": "^Generators\\-Generators\\(Score\\): (.+)$"
},
{
- "name": "ArrayLiteralInitialSpread",
- "path": ["ArrayLiteralInitialSpread"],
+ "name": "ArrayLiteralInitialSpreadSmallPacked",
+ "path": ["ArrayLiteralInitialSpreadSmallPacked"],
"main": "run.js",
"resources": [],
- "results_regexp": "^%s\\-ArrayLiteralInitialSpread\\(Score\\): (.+)$",
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadSmallPacked\\(Score\\): (.+)$",
"tests": [
- {"name": "Spread-Small"},
- {"name": "ForLength-Small"},
- {"name": "ForLengthEmpty-Small"},
- {"name": "Slice-Small"},
- {"name": "Slice0-Small"},
- {"name": "ConcatReceive-Small"},
- {"name": "ConcatArg-Small"},
- {"name": "ForOfPush-Small"},
- {"name": "MapId-Small"},
- {"name": "Spread-Large"},
- {"name": "ForLength-Large"},
- {"name": "ForLengthEmpty-Large"},
- {"name": "Slice-Large"},
- {"name": "Slice0-Large"},
- {"name": "ConcatReceive-Large"},
- {"name": "ConcatArg-Large"},
- {"name": "ForOfPush-Large"},
- {"name": "MapId-Large"},
- {"name": "Spread-SmallHoley"},
- {"name": "ForLength-SmallHoley"},
- {"name": "ForLengthEmpty-SmallHoley"},
- {"name": "Slice-SmallHoley"},
- {"name": "Slice0-SmallHoley"},
- {"name": "ConcatReceive-SmallHoley"},
- {"name": "ConcatArg-SmallHoley"},
- {"name": "ForOfPush-SmallHoley"},
- {"name": "MapId-SmallHoley"},
- {"name": "Spread-LargeHoley"},
- {"name": "ForLength-LargeHoley"},
- {"name": "ForLengthEmpty-LargeHoley"},
- {"name": "Slice-LargeHoley"},
- {"name": "Slice0-LargeHoley"},
- {"name": "ConcatReceive-LargeHoley"},
- {"name": "ConcatArg-LargeHoley"},
- {"name": "ForOfPush-LargeHoley"},
- {"name": "MapId-LargeHoley"}
+ {"name": "Spread"},
+ {"name": "ForLength"},
+ {"name": "ForLengthEmpty"},
+ {"name": "Slice"},
+ {"name": "Slice0"},
+ {"name": "ConcatReceive"},
+ {"name": "ConcatArg"},
+ {"name": "ForOfPush"},
+ {"name": "MapId"}
+ ]
+ },
+ {
+ "name": "ArrayLiteralInitialSpreadLargePacked",
+ "path": ["ArrayLiteralInitialSpreadLargePacked"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadLargePacked\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Spread"},
+ {"name": "ForLength"},
+ {"name": "ForLengthEmpty"},
+ {"name": "Slice"},
+ {"name": "Slice0"},
+ {"name": "ConcatReceive"},
+ {"name": "ConcatArg"}
+ ]
+ },
+ {
+ "name": "ArrayLiteralInitialSpreadSmallHoley",
+ "path": ["ArrayLiteralInitialSpreadSmallHoley"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadSmallHoley\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Spread"},
+ {"name": "ForLength"},
+ {"name": "ForLengthEmpty"},
+ {"name": "Slice"},
+ {"name": "Slice0"},
+ {"name": "ConcatReceive"},
+ {"name": "ConcatArg"},
+ {"name": "ForOfPush"},
+ {"name": "MapId"}
+ ]
+ },
+ {
+ "name": "ArrayLiteralInitialSpreadLargeHoley",
+ "path": ["ArrayLiteralInitialSpreadLargeHoley"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadLargeHoley\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Spread"},
+ {"name": "ForLength"},
+ {"name": "ForLengthEmpty"},
+ {"name": "Slice"},
+ {"name": "Slice0"},
+ {"name": "ConcatReceive"},
+ {"name": "ConcatArg"}
]
},
{
diff --git a/deps/v8/test/js-perf-test/Parsing/arrowfunctions.js b/deps/v8/test/js-perf-test/Parsing/arrowfunctions.js
index bee4ef8b30..40955ea450 100644
--- a/deps/v8/test/js-perf-test/Parsing/arrowfunctions.js
+++ b/deps/v8/test/js-perf-test/Parsing/arrowfunctions.js
@@ -27,29 +27,29 @@ new BenchmarkSuite("FakeArrowFunction", [1000], [
]);
function ArrowFunctionShortSetup() {
- code = "let a;\n" + "a = (a,b) => { return a+b; }\n".repeat(100)
+ code = "let a;\n" + "a = (a,b) => { return a+b; }\n".repeat(50)
}
function ArrowFunctionLongSetup() {
- code = "let a;\n" + "a = (a,b,c,d,e,f,g,h,i,j) => { return a+b; }\n".repeat(100)
+ code = "let a;\n" + "a = (a,b,c,d,e,f,g,h,i,j) => { return a+b; }\n".repeat(50)
}
function CommaSepExpressionListShortSetup() {
- code = "let a;\n" + "a = (a,1)\n".repeat(100)
+ code = "let a;\n" + "a = (a,1)\n".repeat(50)
}
function CommaSepExpressionListLongSetup() {
- code = "let a; let b; let c;\n" + "a = (a,2,3,4,5,b,c,1,7,1)\n".repeat(100)
+ code = "let a; let b; let c;\n" + "a = (a,2,3,4,5,b,c,1,7,1)\n".repeat(50)
}
function CommaSepExpressionListLateSetup() {
code = "let a; let b; let c; let d; let e; let f; let g; let h; let i;\n"
- + "a = (a,b,c,d,e,f,g,h,i,1)\n".repeat(100)
+ + "a = (a,b,c,d,e,f,g,h,i,1)\n".repeat(50)
}
function FakeArrowFunctionSetup() {
code = "let a; let b; let c; let d; let e; let f; let g; let h; let i; let j;\n"
- + "a = (a,b,c,d,e,f,g,h,i,j)\n".repeat(100)
+ + "a = (a,b,c,d,e,f,g,h,i,j)\n".repeat(50)
}
function Run() {
diff --git a/deps/v8/test/js-perf-test/SpreadCallsGeneral/run.js b/deps/v8/test/js-perf-test/SpreadCallsGeneral/run.js
index fc4bf35a77..9f8db96447 100644
--- a/deps/v8/test/js-perf-test/SpreadCallsGeneral/run.js
+++ b/deps/v8/test/js-perf-test/SpreadCallsGeneral/run.js
@@ -64,6 +64,6 @@ CreateBenchmark('ApplySpreadLiteral', ApplySpreadLiteral);
CreateBenchmark('SpreadCall', SpreadCall);
CreateBenchmark('SpreadCallSpreadLiteral', SpreadCallSpreadLiteral);
-BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doWarmup = true;
BenchmarkSuite.config.doDeterministic = undefined;
BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/TurboFan/typedLowering.js b/deps/v8/test/js-perf-test/TurboFan/typedLowering.js
index d2ce15cc6e..663951f99c 100644
--- a/deps/v8/test/js-perf-test/TurboFan/typedLowering.js
+++ b/deps/v8/test/js-perf-test/TurboFan/typedLowering.js
@@ -7,7 +7,9 @@ function NumberToString() {
var num = 10240;
var obj = {};
- for ( var i = 0; i < num; i++ )
+ for ( var i = 0; i < num; i++ ) {
ret = obj["test" + num];
+ }
}
+
createSuite('NumberToString', 1000, NumberToString);
diff --git a/deps/v8/test/message/README.md b/deps/v8/test/message/README.md
index 270d583fcd..ba36b14bfe 100644
--- a/deps/v8/test/message/README.md
+++ b/deps/v8/test/message/README.md
@@ -1,7 +1,7 @@
# JavaScript tests with expected output
-Tests in test/message pass if the output matches the expected output. Message
-tests are particularly useful when checking for exact error messages.
+Tests in `test/message` pass if the output matches the expected output.
+Message tests are particularly useful when checking for exact error messages.
Tests and their expected output must have the same filename, with the `.js` and
`.out` extension.
@@ -15,11 +15,13 @@ foo.out
handle output from multiple runs, e.g., `--stress-opt`. Without an exception,
the output will be generated several times and the comparison will fail.
-You can use a regex in the expected output. Instead of the exact
-path. use
- ```
+You can use a regex in the expected output instead of the exact
+path:
+
+```
*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a'
```
+
Empty lines are ignored in the comparison, but whitespace is not.
Exact details of the test runner are in [testcfg.py](testcfg.py).
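+
+As a sketch (hypothetical file names, for illustration only), a failing
+test might consist of a `foo.js` such as:
+
+```
+throw new Error("boom");
+```
+
+paired with a `foo.out` whose first line could be:
+
+```
+*%(basename)s:1: Error: boom
+```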
diff --git a/deps/v8/test/message/fail/class-fields-computed.js b/deps/v8/test/message/fail/class-fields-computed.js
new file mode 100644
index 0000000000..d9b41906ab
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-computed.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-public-fields
+
+class X {
+ [foo()] = 1;
+}
diff --git a/deps/v8/test/message/fail/class-fields-computed.out b/deps/v8/test/message/fail/class-fields-computed.out
new file mode 100644
index 0000000000..214b273af5
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-computed.out
@@ -0,0 +1,5 @@
+*%(basename)s:8: ReferenceError: foo is not defined
+ [foo()] = 1;
+ ^
+ReferenceError: foo is not defined
+ at *%(basename)s:8:4 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-static-throw.js b/deps/v8/test/message/fail/class-fields-static-throw.js
new file mode 100644
index 0000000000..e7c9fec1ba
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-static-throw.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-public-fields --harmony-static-fields
+//
+// TODO(gsathya): Remove 'Function' from stack trace.
+
+class X {
+ static x = foo();
+}
diff --git a/deps/v8/test/message/fail/class-fields-static-throw.out b/deps/v8/test/message/fail/class-fields-static-throw.out
new file mode 100644
index 0000000000..a16b050bbd
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-static-throw.out
@@ -0,0 +1,6 @@
+*%(basename)s:10: ReferenceError: foo is not defined
+ static x = foo();
+ ^
+ReferenceError: foo is not defined
+ at Function.<static_fields_initializer> (*%(basename)s:10:14)
+ at *%(basename)s:1:1 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-throw.js b/deps/v8/test/message/fail/class-fields-throw.js
new file mode 100644
index 0000000000..235a964ae8
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-throw.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-public-fields
+
+class X {
+ x = foo();
+}
+
+new X;
diff --git a/deps/v8/test/message/fail/class-fields-throw.out b/deps/v8/test/message/fail/class-fields-throw.out
new file mode 100644
index 0000000000..f1036fde86
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-throw.out
@@ -0,0 +1,7 @@
+*%(basename)s:8: ReferenceError: foo is not defined
+ x = foo();
+ ^
+ReferenceError: foo is not defined
+ at X.<instance_fields_initializer> (*%(basename)s:8:7)
+ at new X (*%(basename)s:7:1)
+ at *%(basename)s:11:1 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/map-arg-non-iterable.out b/deps/v8/test/message/fail/map-arg-non-iterable.out
index 78aa8ef033..988a8f8b7e 100644
--- a/deps/v8/test/message/fail/map-arg-non-iterable.out
+++ b/deps/v8/test/message/fail/map-arg-non-iterable.out
@@ -1,6 +1,6 @@
-*%(basename)s:5: TypeError: 1 is not iterable
+*%(basename)s:5: TypeError: number 1 is not iterable (cannot read property Symbol(Symbol.iterator))
new Map(1);
^
-TypeError: 1 is not iterable
+TypeError: number 1 is not iterable (cannot read property Symbol(Symbol.iterator))
at new Map (<anonymous>)
at *%(basename)s:5:1
diff --git a/deps/v8/test/message/fail/undefined-keyed-property.out b/deps/v8/test/message/fail/undefined-keyed-property.out
index 84673252eb..94600196ca 100644
--- a/deps/v8/test/message/fail/undefined-keyed-property.out
+++ b/deps/v8/test/message/fail/undefined-keyed-property.out
@@ -2,8 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:6: TypeError: Cannot read property 'Symbol(Symbol.iterator)' of undefined
+*%(basename)s:6: TypeError: undefined is not iterable (cannot read property Symbol(Symbol.iterator))
x[Symbol.iterator];
^
-TypeError: Cannot read property 'Symbol(Symbol.iterator)' of undefined
+TypeError: undefined is not iterable (cannot read property Symbol(Symbol.iterator))
at *%(basename)s:6:2
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 6aba054251..d106f51e27 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -43,4 +43,10 @@
'wasm-trace-memory-liftoff': [SKIP],
}], # arch != x64 and arch != ia32
+['variant == code_serializer', {
+ # TODO(yangguo): Code serializer output is incompatible with all message
+ # tests.
+ '*': [SKIP],
+}], # variant == code_serializer
+
]
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index b0f821f62f..e27a3ed2a2 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -59,7 +59,7 @@ class TestSuite(testsuite.TestSuite):
return TestCase
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
@@ -96,6 +96,11 @@ class TestCase(testcase.TestCase):
def _get_source_path(self):
return os.path.join(self.suite.root, self.path + self._get_suffix())
+ def skip_predictable(self):
+ # Message tests expected to fail don't print allocation output for
+ # predictable testing.
+ return super(TestCase, self).skip_predictable() or self._expected_fail()
+
@property
def output_proc(self):
return message.OutProc(self.expected_outcomes,
diff --git a/deps/v8/test/message/wasm-trace-memory-interpreted.js b/deps/v8/test/message/wasm-trace-memory-interpreted.js
index 75f42e82e6..fdac585b39 100644
--- a/deps/v8/test/message/wasm-trace-memory-interpreted.js
+++ b/deps/v8/test/message/wasm-trace-memory-interpreted.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt --expose-wasm --wasm-trace-memory --wasm-interpret-all
+// Flags: --no-stress-opt --expose-wasm --trace-wasm-memory --wasm-interpret-all
load("test/message/wasm-trace-memory.js");
diff --git a/deps/v8/test/message/wasm-trace-memory-liftoff.js b/deps/v8/test/message/wasm-trace-memory-liftoff.js
index b56d1d7978..a23eca4a0f 100644
--- a/deps/v8/test/message/wasm-trace-memory-liftoff.js
+++ b/deps/v8/test/message/wasm-trace-memory-liftoff.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt --wasm-trace-memory --liftoff --no-future
+// Flags: --no-stress-opt --trace-wasm-memory --liftoff --no-future
// Flags: --no-wasm-tier-up
load("test/message/wasm-trace-memory.js");
diff --git a/deps/v8/test/message/wasm-trace-memory.js b/deps/v8/test/message/wasm-trace-memory.js
index adb1e2b7a7..53c46073ec 100644
--- a/deps/v8/test/message/wasm-trace-memory.js
+++ b/deps/v8/test/message/wasm-trace-memory.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-stress-opt --wasm-trace-memory --no-liftoff --no-future
+// Flags: --no-stress-opt --trace-wasm-memory --no-liftoff --no-future
// Flags: --no-wasm-tier-up
load("test/mjsunit/wasm/wasm-constants.js");
diff --git a/deps/v8/test/mjsunit/array-functions-prototype-misc.js b/deps/v8/test/mjsunit/array-functions-prototype-misc.js
index 7ff5d4f2f5..dd95e2d266 100644
--- a/deps/v8/test/mjsunit/array-functions-prototype-misc.js
+++ b/deps/v8/test/mjsunit/array-functions-prototype-misc.js
@@ -31,13 +31,8 @@
* should work on other objects too, so we test that too.
*/
-var LARGE = 400000;
-var VERYLARGE = 4000000000;
-
-// Nicer for firefox 1.5. Unless you uncomment the following two lines,
-// smjs will appear to hang on this file.
-//var LARGE = 40000;
-//var VERYLARGE = 40000;
+var LARGE = 40000;
+var VERYLARGE = 40000;
var fourhundredth = LARGE/400;
@@ -45,7 +40,7 @@ function PseudoArray() {
};
for (var use_real_arrays = 0; use_real_arrays <= 1; use_real_arrays++) {
- var poses = [0, 140, 20000, VERYLARGE];
+ var poses = [0, 140, 20000];
var the_prototype;
var new_function;
var push_function;
@@ -252,19 +247,22 @@ for (var use_real_arrays = 0; use_real_arrays <= 1; use_real_arrays++) {
assertEquals("bar", a[2]);
// Shift.
- var baz = shift_function(a);
- assertEquals("baz", baz);
- assertEquals("boo", a[0]);
- assertEquals(pos + 3, a.length);
- assertEquals("foo", a[pos + 2]);
-
- // Slice.
- var bar = slice_function(a, 1, 0); // don't throw an exception please.
- bar = slice_function(a, 1, 2);
- assertEquals("bar", bar[0]);
- assertEquals(1, bar.length);
- assertEquals("bar", a[1]);
-
+ // Skip VERYLARGE arrays, as we removed sparse support for shift.
+ // Slice is also skipped, since it relies on the "shift" test to be run.
+ if (pos < VERYLARGE) {
+ var baz = shift_function(a);
+ assertEquals("baz", baz);
+ assertEquals("boo", a[0]);
+ assertEquals(pos + 3, a.length);
+ assertEquals("foo", a[pos + 2]);
+
+ // Slice.
+ var bar = slice_function(a, 1, 0); // don't throw an exception please.
+ bar = slice_function(a, 1, 2);
+ assertEquals("bar", bar[0]);
+ assertEquals(1, bar.length);
+ assertEquals("bar", a[1]);
+ }
}
}
diff --git a/deps/v8/test/mjsunit/array-splice.js b/deps/v8/test/mjsunit/array-splice.js
index 75ff2d174b..460172edd7 100644
--- a/deps/v8/test/mjsunit/array-splice.js
+++ b/deps/v8/test/mjsunit/array-splice.js
@@ -445,3 +445,21 @@
"array.hasOwnProperty(Math.pow(2, 32) - 2)");
}
})();
+
+// Verify that fast implementations aren't confused by empty DOUBLE element arrays
+(function() {
+
+ function foo(dontAddAnything) {
+ let a = [];
+ if (dontAddAnything === undefined) {
+ a[1] = 0.5;
+ }
+ return a.splice(0, 0, 3.5);
+ }
+
+  // Let allocation site tracking learn to create double arrays in foo().
+  foo();
+  foo();
+  // Force splice to copy the input array.
+ foo(true);
+})();
diff --git a/deps/v8/test/mjsunit/array-unshift.js b/deps/v8/test/mjsunit/array-unshift.js
index 50aab4f52a..cbc8d4091d 100644
--- a/deps/v8/test/mjsunit/array-unshift.js
+++ b/deps/v8/test/mjsunit/array-unshift.js
@@ -190,15 +190,12 @@
(function() {
for (var i = 0; i < 7; i++) {
try {
- new Array(Math.pow(2, 32) - 3).unshift(1, 2, 3, 4, 5);
- throw 'Should have thrown RangeError';
+ let obj = { length: 2 ** 53 - 3};
+ Array.prototype.unshift.call(obj, 1, 2, 3, 4, 5);
+ throw 'Should have thrown TypeError';
} catch (e) {
- assertTrue(e instanceof RangeError);
+ assertTrue(e instanceof TypeError);
}
-
- // Check smi boundary
- var bigNum = (1 << 30) - 3;
- assertEquals(bigNum + 7, new Array(bigNum).unshift(1, 2, 3, 4, 5, 6, 7));
}
})();
diff --git a/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-1.js b/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-1.js
new file mode 100644
index 0000000000..42482595cc
--- /dev/null
+++ b/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-1.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --async-stack-traces
+
+// Check that Error.prepareStackTrace doesn't expose strict
+// mode closures, even in the presence of async frames.
+Error.prepareStackTrace = (e, frames) => {
+ assertEquals(two, frames[0].getFunction());
+ assertEquals(two.name, frames[0].getFunctionName());
+ assertEquals(undefined, frames[1].getFunction());
+ assertEquals(one.name, frames[1].getFunctionName());
+ return frames;
+};
+
+async function one(x) {
+ "use strict";
+ return await two(x);
+}
+
+async function two(x) {
+ try {
+ x = await x;
+ throw new Error();
+ } catch (e) {
+ return e.stack;
+ }
+}
+
+one(1).catch(e => setTimeout(_ => {throw e}, 0));
diff --git a/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-2.js b/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-2.js
new file mode 100644
index 0000000000..8126a83dc9
--- /dev/null
+++ b/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-2.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --async-stack-traces
+
+// Check that Error.prepareStackTrace doesn't expose strict
+// mode closures, even in the presence of async frames.
+Error.prepareStackTrace = (e, frames) => {
+ assertEquals(undefined, frames[0].getFunction());
+ assertEquals(two.name, frames[0].getFunctionName());
+ assertEquals(undefined, frames[1].getFunction());
+ assertEquals(one.name, frames[1].getFunctionName());
+ return frames;
+};
+
+async function one(x) {
+ return await two(x);
+}
+
+async function two(x) {
+ "use strict";
+ try {
+ x = await x;
+ throw new Error();
+ } catch (e) {
+ return e.stack;
+ }
+}
+
+one(1).catch(e => setTimeout(_ => {throw e}, 0));
diff --git a/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-3.js b/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-3.js
new file mode 100644
index 0000000000..429b0f64c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-3.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --async-stack-traces
+
+// Check that Error.prepareStackTrace properly marks async frames.
+Error.prepareStackTrace = (e, frames) => {
+ assertEquals(two, frames[0].getFunction());
+ assertEquals(two.name, frames[0].getFunctionName());
+ assertFalse(frames[0].isAsync());
+ assertEquals(two, frames[1].getFunction());
+ assertEquals(one.name, frames[1].getFunctionName());
+ assertTrue(frames[1].isAsync());
+ return frames;
+};
+
+async function one(x) {
+ return await two(x);
+}
+
+async function two(x) {
+ try {
+ x = await x;
+ throw new Error();
+ } catch (e) {
+ return e.stack;
+ }
+}
+
+one(1).catch(e => setTimeout(_ => {throw e}, 0));
diff --git a/deps/v8/test/mjsunit/async-stack-traces.js b/deps/v8/test/mjsunit/async-stack-traces.js
new file mode 100644
index 0000000000..05cf8a095f
--- /dev/null
+++ b/deps/v8/test/mjsunit/async-stack-traces.js
@@ -0,0 +1,270 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --async-stack-traces
+
+// Basic test with an explicit throw.
+(function() {
+ async function one(x) {
+ await two(x);
+ }
+
+ async function two(x) {
+ await x;
+ throw new Error();
+ }
+
+ async function test(f) {
+ try {
+ await f(1);
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Error);
+ assertMatches(/Error.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async () => {
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ })());
+})();
+
+// Basic test with an implicit throw (via ToNumber on Symbol).
+(function() {
+ async function one(x) {
+ return await two(x);
+ }
+
+ async function two(x) {
+ await x;
+ return +x; // This will raise a TypeError.
+ }
+
+ async function test(f) {
+ try {
+ await f(Symbol());
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, TypeError);
+ assertMatches(/TypeError.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async() => {
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ })());
+})();
+
+// Basic test with throw in inlined function.
+(function() {
+ function throwError() {
+ throw new Error();
+ }
+
+ async function one(x) {
+ return await two(x);
+ }
+
+ async function two(x) {
+ await x;
+ return throwError();
+ }
+
+ async function test(f) {
+ try {
+ await f(1);
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Error);
+ assertMatches(/Error.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async() => {
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ })());
+})();
+
+// Basic test with async function inlined into sync function.
+(function() {
+ function callOne(x) {
+ return one(x);
+ }
+
+ function callTwo(x) {
+ return two(x);
+ }
+
+ async function one(x) {
+ return await callTwo(x);
+ }
+
+ async function two(x) {
+ await x;
+ throw new Error();
+ }
+
+ async function test(f) {
+ try {
+ await f(1);
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Error);
+ assertMatches(/Error.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async() => {
+ await test(callOne);
+ await test(callOne);
+ %OptimizeFunctionOnNextCall(callTwo);
+ await test(callOne);
+ %OptimizeFunctionOnNextCall(callOne);
+ await test(callOne);
+ })());
+})();
+
+// Basic test with async functions and promises chained via
+// Promise.prototype.then(), which should still work following
+// the generic chain upwards.
+(function() {
+ async function one(x) {
+ return await two(x).then(x => x);
+ }
+
+ async function two(x) {
+ await x.then(x => x);
+ throw new Error();
+ }
+
+ async function test(f) {
+ try {
+ await f(Promise.resolve(1));
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Error);
+ assertMatches(/Error.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async() => {
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ })());
+})();
+
+// Basic test for async generators called from async
+// functions with an explicit throw.
+(function() {
+ async function one(x) {
+ for await (const y of two(x)) {}
+ }
+
+ async function* two(x) {
+ await x;
+ throw new Error();
+ }
+
+ async function test(f) {
+ try {
+ await f(1);
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Error);
+ assertMatches(/Error.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async () => {
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ })());
+})();
+
+// Basic test for async functions called from async
+// generators with an explicit throw.
+(function() {
+ async function* one(x) {
+ await two(x);
+ }
+
+ async function two(x) {
+ await x;
+ throw new Error();
+ }
+
+ async function test(f) {
+ try {
+ for await (const x of f(1)) {}
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Error);
+ assertMatches(/Error.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async () => {
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ })());
+})();
+
+// Basic test for async functions called from async
+// generators with an explicit throw (with yield).
+(function() {
+ async function* one(x) {
+ yield two(x);
+ }
+
+ async function two(x) {
+ await x;
+ throw new Error();
+ }
+
+ async function test(f) {
+ try {
+ for await (const x of f(1)) {}
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Error);
+ assertMatches(/Error.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async () => {
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ })());
+})();
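For readers skimming the regexes: with --async-stack-traces the awaiting callers appear in Error#stack as "at async <name>" lines, which is what the /Error.+at two.+at async one.+at async test/ms patterns above pin down. A minimal standalone sketch (requires the flag; the exact stack shape is illustrative):

// Requires d8/node started with --async-stack-traces (illustrative sketch).
async function inner() {
  await Promise.resolve();
  throw new Error("boom");
}
async function outer() {
  return await inner();
}
outer().catch(e => {
  // Expected shape:
  //   Error: boom
  //       at inner (<anonymous>)
  //       at async outer (<anonymous>)
  console.log(/at async outer/.test(e.stack));
});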
diff --git a/deps/v8/test/mjsunit/code-coverage-block.js b/deps/v8/test/mjsunit/code-coverage-block.js
index 8cbb2969f7..61ed87fc13 100644
--- a/deps/v8/test/mjsunit/code-coverage-block.js
+++ b/deps/v8/test/mjsunit/code-coverage-block.js
@@ -471,7 +471,7 @@ TestCoverage(
{"start":472,"end":503,"count":0},
{"start":626,"end":653,"count":0},
{"start":768,"end":803,"count":0},
- {"start":867,"end":869,"count":0}]
+ {"start":867,"end":868,"count":0}]
);
TestCoverage(
@@ -850,46 +850,4 @@ Util.escape("foo.bar"); // 0400
{"start":268,"end":350,"count":0}]
);
-TestCoverage(
-"https://crbug.com/v8/8237",
-`
-!function() { // 0000
- if (true) // 0050
- while (false) return; else nop(); // 0100
-}(); // 0150
-!function() { // 0200
- if (true) l0: { break l0; } else // 0250
- if (nop()) { } // 0300
-}(); // 0350
-!function() { // 0400
- if (true) { if (false) { return; } // 0450
- } else if (nop()) { } }(); // 0500
-!function(){ // 0550
- if(true)while(false)return;else nop() // 0600
-}(); // 0650
-!function(){ // 0700
- if(true) l0:{break l0}else if (nop()){} // 0750
-}(); // 0800
-!function(){ // 0850
- if(true){if(false){return}}else // 0900
- if(nop()){} // 0950
-}(); // 1000
-`,
-[{"start":0,"end":1049,"count":1},
- {"start":1,"end":151,"count":1},
- {"start":118,"end":137,"count":0},
- {"start":201,"end":351,"count":1},
- {"start":277,"end":318,"count":0},
- {"start":401,"end":525,"count":1},
- {"start":475,"end":486,"count":0},
- {"start":503,"end":523,"count":0},
- {"start":551,"end":651,"count":1},
- {"start":622,"end":639,"count":0},
- {"start":701,"end":801,"count":1},
- {"start":773,"end":791,"count":0},
- {"start":851,"end":1001,"count":1},
- {"start":920,"end":928,"count":0},
- {"start":929,"end":965,"count":0}]
-);
-
%DebugToggleBlockCoverage(false);
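Context for the expected-range edits above: the TestCoverage helper (from test/mjsunit/code-coverage-utils.js) runs the padded source string and compares the collected block-coverage ranges against the expected list, where start and end are character offsets into that source and count is the execution count. A rough sketch of the comparison step, under that assumed semantics:

// Rough sketch of the check TestCoverage performs (assumed semantics):
// every expected {start, end, count} range must match the collected one.
function matches(expected, actual) {
  return expected.length === actual.length &&
         expected.every((e, i) => e.start === actual[i].start &&
                                  e.end === actual[i].end &&
                                  e.count === actual[i].count);
}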
diff --git a/deps/v8/test/mjsunit/code-coverage-class-fields.js b/deps/v8/test/mjsunit/code-coverage-class-fields.js
new file mode 100644
index 0000000000..a91c25824f
--- /dev/null
+++ b/deps/v8/test/mjsunit/code-coverage-class-fields.js
@@ -0,0 +1,199 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-always-opt --harmony-public-fields --harmony-static-fields
+// Files: test/mjsunit/code-coverage-utils.js
+
+%DebugToggleBlockCoverage(true);
+
+TestCoverage(
+"class with no fields",
+`class X { // 000
+}; // 050
+`,
+ [
+ { start: 0, end: 98, count: 1 },
+ { start: 0, end: 0, count: 0 },
+ ]
+);
+
+TestCoverage(
+"class that's not created",
+`class X { // 000
+ x = function() { } // 050
+}; // 100
+`,
+ [
+ { start: 0, end: 148, count: 1 },
+ { start: 0, end: 0, count: 0 },
+ { start: 51, end: 69, count: 0 },
+ ]
+);
+
+TestCoverage(
+"class with field thats not called",
+`class X { // 000
+ x = function() { } // 050
+}; // 100
+let x = new X(); // 150
+`,
+ [
+ { start: 0, end: 198, count: 1 },
+ { start: 0, end: 0, count: 1 },
+ { start: 51, end: 69, count: 1 },
+ { start: 55, end: 69, count: 0 }
+ ]
+);
+
+TestCoverage(
+"class field",
+`class X { // 000
+ x = function() { } // 050
+}; // 100
+let x = new X(); // 150
+x.x(); // 200
+`,
+ [
+ { start: 0, end: 248, count: 1 },
+ { start: 0, end: 0, count: 1 },
+ { start: 51, end: 69, count: 1 },
+ { start: 55, end: 69, count: 1 }
+ ]
+);
+
+TestCoverage(
+"non contiguous class field",
+`class X { // 000
+ x = function() { } // 050
+ foo() { } // 100
+ y = function() {} // 150
+}; // 200
+let x = new X(); // 250
+x.x(); // 300
+x.y(); // 350
+`,
+ [
+ { start: 0, end: 398, count: 1 },
+ { start: 0, end: 0, count: 1 },
+ { start: 51, end: 168, count: 1 },
+ { start: 55, end: 69, count: 1 },
+ { start: 101, end: 110, count: 0 },
+ { start: 155, end: 168, count: 1 },
+ ]
+);
+
+TestCoverage(
+"non contiguous class field thats called",
+`class X { // 000
+ x = function() { } // 050
+ foo() { } // 100
+ y = function() {} // 150
+}; // 200
+let x = new X(); // 250
+x.x(); // 300
+x.y(); // 350
+x.foo(); // 400
+`,
+ [
+ { start: 0, end: 448, count: 1 },
+ { start: 0, end: 0, count: 1 },
+ { start: 51, end: 168, count: 1 },
+ { start: 55, end: 69, count: 1 },
+ { start: 101, end: 110, count: 1 },
+ { start: 155, end: 168, count: 1 },
+ ]
+);
+
+TestCoverage(
+"class with initializer iife",
+`class X { // 000
+ x = (function() { })() // 050
+}; // 100
+let x = new X(); // 150
+`,
+ [
+ { start: 0, end: 198, count: 1 },
+ { start: 0, end: 0, count: 1 },
+ { start: 51, end: 73, count: 1 },
+ { start: 56, end: 70, count: 1 }
+ ]
+);
+
+TestCoverage(
+"class with computed field",
+`
+function f() {}; // 000
+class X { // 050
+ [f()] = (function() { })() // 100
+}; // 150
+let x = new X(); // 200
+`,
+ [
+ { start: 0, end: 249, count: 1 },
+ { start: 0, end: 15, count: 1 },
+ { start: 50, end: 50, count: 1 },
+ { start: 102, end: 128, count: 1 },
+ { start: 111, end: 125, count: 1 }
+ ]
+);
+
+TestCoverage(
+"static class field that's not called",
+`class X { // 000
+ static x = function() { } // 050
+}; // 100
+`,
+ [
+ { start: 0, end: 148, count: 1 },
+ { start: 0, end: 0, count: 0 },
+ { start: 51, end: 76, count: 1 },
+ { start: 62, end: 76, count: 0 }
+ ]
+);
+
+TestCoverage(
+"static class field",
+`class X { // 000
+ static x = function() { } // 050
+}; // 100
+X.x(); // 150
+`,
+ [
+ { start: 0, end: 198, count: 1 },
+ { start: 0, end: 0, count: 0 },
+ { start: 51, end: 76, count: 1 },
+ { start: 62, end: 76, count: 1 }
+ ]
+);
+
+TestCoverage(
+"static class field with iife",
+`class X { // 000
+ static x = (function() { })() // 050
+}; // 100
+`,
+ [
+ { start: 0, end: 148, count: 1 },
+ { start: 0, end: 0, count: 0 },
+ { start: 51, end: 80, count: 1 },
+ { start: 63, end: 77, count: 1 }
+ ]
+);
+
+TestCoverage(
+"computed static class field",
+`
+function f() {} // 000
+class X { // 050
+ static [f()] = (function() { })() // 100
+}; // 150
+`,
+ [
+ { start: 0, end: 199, count: 1 },
+ { start: 0, end: 15, count: 1 },
+ { start: 50, end: 50, count: 0 },
+ { start: 102, end: 135, count: 1 },
+ { start: 118, end: 132, count: 1 }
+ ]
+);
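The // 000-style markers pad every source line to 50 characters, which is what makes the expected offsets above easy to verify by hand: line n (zero-based) starts at offset 50*n. A quick check against the field-initializer ranges:

// Each padded line is 50 chars, so offsets are 50 * line + column.
const offsetOf = (line, col) => 50 * line + col;
console.log(offsetOf(1, 1));   // 51 — start of "x = function() { }" above
console.log(offsetOf(1, 19));  // 69 — its (exclusive) end offset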
diff --git a/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js b/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js
new file mode 100644
index 0000000000..c1057e1d1b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js
@@ -0,0 +1,135 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --noalways-opt
+
+// Known symbols abstract equality.
+(function() {
+ const a = Symbol("a");
+ const b = Symbol("b");
+
+ function foo() { return a == b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known symbols abstract inequality.
+(function() {
+ const a = Symbol("a");
+ const b = Symbol("b");
+
+ function foo() { return a != b; }
+
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+// Known symbol on one side abstract equality.
+(function() {
+ const a = Symbol("a");
+ const b = Symbol("b");
+
+ function foo(a) { return a == b; }
+
+ // Warmup
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertOptimized(foo);
+
+ // Make optimized code bail out
+ assertFalse(foo("a"));
+ assertUnoptimized(foo);
+
+ // Make sure TurboFan learns the new feedback
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo("a"));
+ assertOptimized(foo);
+})();
+
+// Known symbol on one side abstract inequality.
+(function() {
+ const a = Symbol("a");
+ const b = Symbol("b");
+
+ function foo(a) { return a != b; }
+
+ // Warmup
+ assertFalse(foo(b));
+ assertTrue(foo(a));
+ assertFalse(foo(b));
+ assertTrue(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(b));
+ assertTrue(foo(a));
+
+ // Make optimized code bail out
+ assertTrue(foo("a"));
+ assertUnoptimized(foo);
+
+ // Make sure TurboFan learns the new feedback
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo("a"));
+ assertOptimized(foo);
+})();
+
+// Feedback-based symbol abstract equality.
+(function() {
+ const a = Symbol("a");
+ const b = Symbol("b");
+
+ function foo(a, b) { return a == b; }
+
+ // Warmup
+ assertTrue(foo(b, b));
+ assertFalse(foo(a, b));
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+
+ // Make optimized code bail out
+ assertFalse(foo("a", b));
+ assertUnoptimized(foo);
+
+ // Make sure TurboFan learns the new feedback
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo("a", b));
+ assertOptimized(foo);
+})();
+
+// Feedback-based symbol abstract inequality.
+(function() {
+ const a = Symbol("a");
+ const b = Symbol("b");
+
+ function foo(a, b) { return a != b; }
+
+ assertFalse(foo(b, b));
+ assertTrue(foo(a, b));
+ assertFalse(foo(a, a));
+ assertTrue(foo(b, a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(a, a));
+ assertTrue(foo(b, a));
+
+ // Make optimized code bail out
+ assertTrue(foo("a", b));
+ assertUnoptimized(foo);
+
+ // Make sure TurboFan learns the new feedback
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo("a", b));
+ assertOptimized(foo);
+})();
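The warmup/bailout pattern above is the standard mjsunit recipe for testing speculative optimizations: gather type feedback, force optimization, feed a value outside the feedback to trigger a deopt, then verify TurboFan reoptimizes with the widened feedback. The generic shape, mirroring the tests above (assumes d8 with --allow-natives-syntax --opt --noalways-opt and the mjsunit assert helpers):

const someSymbol = Symbol("s");
function f(x) { return x == someSymbol; }

f(someSymbol); f(Symbol("t"));       // 1. warm up with symbol feedback
%OptimizeFunctionOnNextCall(f);
f(someSymbol);                       // 2. compiled against that feedback
assertOptimized(f);
f("not a symbol");                   // 3. outside the feedback -> deopt
assertUnoptimized(f);
%OptimizeFunctionOnNextCall(f);
f("not a symbol");                   // 4. reoptimized with widened feedback
assertOptimized(f);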
diff --git a/deps/v8/test/mjsunit/compiler/array-buffer-is-view.js b/deps/v8/test/mjsunit/compiler/array-buffer-is-view.js
new file mode 100644
index 0000000000..b56763b5b2
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-buffer-is-view.js
@@ -0,0 +1,64 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test that ObjectIsArrayBufferView lowering works correctly
+// in EffectControlLinearizer in the case that TurboFan knows
+// the input to be a HeapObject. For this we use a simple trick:
+// an object literal whose field `x` will only ever contain
+// HeapObjects, so the representation tracking is going to
+// pick it up.
+(function() {
+ function foo(x) {
+ return ArrayBuffer.isView({x}.x);
+ }
+
+ assertFalse(foo(Symbol()));
+ assertFalse(foo("some string"));
+ assertFalse(foo(new Object()));
+ assertFalse(foo(new Array()));
+ assertFalse(foo(new ArrayBuffer(1)));
+ assertTrue(foo(new Int32Array(1)));
+ assertTrue(foo(new DataView(new ArrayBuffer(1))));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(Symbol()));
+ assertFalse(foo("some string"));
+ assertFalse(foo(new Object()));
+ assertFalse(foo(new Array()));
+ assertFalse(foo(new ArrayBuffer(1)));
+ assertTrue(foo(new Int32Array(1)));
+ assertTrue(foo(new DataView(new ArrayBuffer(1))));
+ assertOptimized(foo);
+})();
+
+// Test that ObjectIsArrayBufferView lowering works correctly
+// in EffectControlLinearizer in the case that the input is
+// some arbitrary tagged value.
+(function() {
+ function foo(x) {
+ return ArrayBuffer.isView(x);
+ }
+
+ assertFalse(foo(1));
+ assertFalse(foo(1.1));
+ assertFalse(foo(Symbol()));
+ assertFalse(foo("some string"));
+ assertFalse(foo(new Object()));
+ assertFalse(foo(new Array()));
+ assertFalse(foo(new ArrayBuffer(1)));
+ assertTrue(foo(new Int32Array(1)));
+ assertTrue(foo(new DataView(new ArrayBuffer(1))));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(1));
+ assertFalse(foo(1.1));
+ assertFalse(foo(Symbol()));
+ assertFalse(foo("some string"));
+ assertFalse(foo(new Object()));
+ assertFalse(foo(new Array()));
+ assertFalse(foo(new ArrayBuffer(1)));
+ assertTrue(foo(new Int32Array(1)));
+ assertTrue(foo(new DataView(new ArrayBuffer(1))));
+ assertOptimized(foo);
+})();
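The {x}.x wrapper deserves a second look: allocating a literal and immediately reloading the field routes the value through a tagged in-object field, and since every call site stores a non-Smi, representation tracking types the load as HeapObject, which is exactly the precondition the first test wants to hit. Reduced to its essentials:

// Every value stored into .x here is a HeapObject (no Smis), so the
// field load is typed HeapObject by representation tracking.
function heapObjectOnly(x) {
  return ArrayBuffer.isView({x}.x);
}
heapObjectOnly("str");
heapObjectOnly(new Int32Array(1));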
diff --git a/deps/v8/test/mjsunit/compiler/array-is-array.js b/deps/v8/test/mjsunit/compiler/array-is-array.js
new file mode 100644
index 0000000000..37c916ddac
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-is-array.js
@@ -0,0 +1,105 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test JSObjectIsArray in JSTypedLowering for the case that the
+// input value is known to be an Array literal.
+(function() {
+ function foo() {
+ return Array.isArray([]);
+ }
+
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+// Test JSObjectIsArray in JSTypedLowering for the case that the
+// input value is known to be a Proxy for an Array literal.
+(function() {
+ function foo() {
+ return Array.isArray(new Proxy([], {}));
+ }
+
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+// Test JSObjectIsArray in JSTypedLowering for the case that the
+// input value is known to be an Object literal.
+(function() {
+ function foo() {
+ return Array.isArray({});
+ }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Test JSObjectIsArray in JSTypedLowering for the case that the
+// input value is known to be a Proxy for an Object literal.
+(function() {
+ function foo() {
+ return Array.isArray(new Proxy({}, {}));
+ }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Test JSObjectIsArray in JSTypedLowering for the case that
+// TurboFan doesn't know anything about the input value.
+(function() {
+ function foo(x) {
+ return Array.isArray(x);
+ }
+
+ assertFalse(foo({}));
+ assertFalse(foo(new Proxy({}, {})));
+ assertTrue(foo([]));
+ assertTrue(foo(new Proxy([], {})));
+ assertThrows(() => {
+ const {proxy, revoke} = Proxy.revocable([], {});
+ revoke();
+ foo(proxy);
+ }, TypeError);
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo({}));
+ assertFalse(foo(new Proxy({}, {})));
+ assertTrue(foo([]));
+ assertTrue(foo(new Proxy([], {})));
+ assertThrows(() => {
+ const {proxy, revoke} = Proxy.revocable([], {});
+ revoke();
+ foo(proxy);
+ }, TypeError);
+})();
+
+// Test JSObjectIsArray in JSTypedLowering for the case that
+// we pass a revoked proxy and catch the exception locally.
+(function() {
+ function foo(x) {
+ const {proxy, revoke} = Proxy.revocable(x, {});
+ revoke();
+ try {
+ return Array.isArray(proxy);
+ } catch (e) {
+ return e;
+ }
+ }
+
+ assertInstanceof(foo([]), TypeError);
+ assertInstanceof(foo({}), TypeError);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo([]), TypeError);
+ assertInstanceof(foo({}), TypeError);
+})();
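The revoked-proxy cases are the interesting edge here: per the spec, IsArray unwraps proxies, and calling it on a revoked proxy throws a TypeError, which the lowering must preserve both when the exception escapes and when it is caught locally. In isolation:

// Array.isArray unwraps proxies; on a revoked proxy it throws.
const { proxy, revoke } = Proxy.revocable([], {});
console.log(Array.isArray(proxy));      // true — forwards to the array target
revoke();
try {
  Array.isArray(proxy);
} catch (e) {
  console.log(e instanceof TypeError);  // true
}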
diff --git a/deps/v8/test/mjsunit/compiler/context-sensitivity.js b/deps/v8/test/mjsunit/compiler/context-sensitivity.js
new file mode 100644
index 0000000000..1f0f1f274a
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/context-sensitivity.js
@@ -0,0 +1,550 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const object1 = {[Symbol.toPrimitive]() { return 1; }};
+const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
+
+// Test that JSAdd is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y + x);
+ }
+
+ assertEquals(1, foo(0));
+ assertEquals(2, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(0));
+ assertEquals(2, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSSubtract is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y - x);
+ }
+
+ assertEquals(1, foo(0));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(0));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSMultiply is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y * x);
+ }
+
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSDivide is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y / x);
+ }
+
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSModulus is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y % x);
+ }
+
+ assertEquals(0, foo(1));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSExponentiate is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y ** x);
+ }
+
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSBitwiseOr is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y | x);
+ }
+
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSBitwiseAnd is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y & x);
+ }
+
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSBitwiseXor is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y ^ x);
+ }
+
+ assertEquals(0, foo(1));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSShiftLeft is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y << x);
+ }
+
+ assertEquals(2, foo(1));
+ assertEquals(2, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(1));
+ assertEquals(2, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSShiftRight is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y >> x);
+ }
+
+ assertEquals(0, foo(1));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSShiftRightLogical is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y >>> x);
+ }
+
+ assertEquals(0, foo(1));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSEqual is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y == x);
+ }
+
+ assertFalse(foo(0));
+ assertTrue(foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(0));
+ assertTrue(foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSLessThan is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y < x);
+ }
+
+ assertFalse(foo(0));
+ assertFalse(foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(0));
+ assertFalse(foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSGreaterThan is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => x > y);
+ }
+
+ assertFalse(foo(0));
+ assertFalse(foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(0));
+ assertFalse(foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSLessThanOrEqual is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => y <= x);
+ }
+
+ assertFalse(foo(0));
+ assertTrue(foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(0));
+ assertTrue(foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSGreaterThanOrEqual is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn(1);
+ }
+
+ function foo(x) {
+ return bar(y => x >= y);
+ }
+
+ assertFalse(foo(0));
+ assertTrue(foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(0));
+ assertTrue(foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSInstanceOf is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn({});
+ }
+
+ function foo(c) {
+ return bar(o => o instanceof c);
+ }
+
+ assertTrue(foo(Object));
+ assertFalse(foo(Array));
+ assertThrows(() => foo({[Symbol.hasInstance]() { throw new Error(); }}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(Object));
+ assertFalse(foo(Array));
+ assertThrows(() => foo({[Symbol.hasInstance]() { throw new Error(); }}));
+})();
+
+// Test that JSBitwiseNot is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn();
+ }
+
+ function foo(x) {
+ return bar(() => ~x);
+ }
+
+ assertEquals(0, foo(-1));
+ assertEquals(~1, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(-1));
+ assertEquals(~1, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSNegate is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn();
+ }
+
+ function foo(x) {
+ return bar(() => -x);
+ }
+
+ assertEquals(1, foo(-1));
+ assertEquals(-1, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(-1));
+ assertEquals(-1, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSIncrement is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn();
+ }
+
+ function foo(x) {
+ return bar(() => ++x);
+ }
+
+ assertEquals(1, foo(0));
+ assertEquals(2, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(0));
+ assertEquals(2, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSDecrement is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn();
+ }
+
+ function foo(x) {
+ return bar(() => --x);
+ }
+
+ assertEquals(1, foo(2));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(2));
+ assertEquals(0, foo(object1));
+ assertThrows(() => foo(thrower));
+})();
+
+// Test that JSCreateArguments[UnmappedArguments] is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn();
+ }
+
+ function foo() {
+ "use strict";
+ return bar(() => arguments)[0];
+ }
+
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 2));
+ assertEquals(undefined, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 2));
+ assertEquals(undefined, foo());
+})();
+
+// Test that JSCreateArguments[RestParameters] is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn();
+ }
+
+ function foo(...args) {
+ return bar(() => args)[0];
+ }
+
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 2));
+ assertEquals(undefined, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 2));
+ assertEquals(undefined, foo());
+})();
+
+// Test that JSLoadGlobal/JSStoreGlobal are not context-sensitive.
+(function(global) {
+ var actualValue = 'Some value';
+
+ Object.defineProperty(global, 'globalValue', {
+ configurable: true,
+ enumerable: true,
+ get: function() {
+ return actualValue;
+ },
+ set: function(v) {
+ actualValue = v;
+ }
+ });
+
+ function bar(fn) {
+ return fn();
+ }
+
+ function foo(v) {
+ return bar(() => {
+ const o = globalValue;
+ globalValue = v;
+ return o;
+ });
+ }
+
+ assertEquals('Some value', foo('Another value'));
+ assertEquals('Another value', actualValue);
+ assertEquals('Another value', foo('Some value'));
+ assertEquals('Some value', actualValue);
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals('Some value', foo('Another value'));
+ assertEquals('Another value', actualValue);
+ assertEquals('Another value', foo('Some value'));
+ assertEquals('Some value', actualValue);
+})(this);
+
+// Test that for..in is not context-sensitive.
+(function() {
+ function bar(fn) {
+ return fn();
+ }
+
+ function foo(o) {
+ return bar(() => {
+ var s = "";
+ for (var k in o) { s += k; }
+ return s;
+ });
+ }
+
+ assertEquals('abc', foo({a: 1, b: 2, c: 3}));
+ assertEquals('ab', foo(Object.create({a: 1, b: 2})));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals('abc', foo({a: 1, b: 2, c: 3}));
+ assertEquals("ab", foo(Object.create({a:1, b:2})));
+})();
+
+// Test that most generator operations are not context-sensitive.
+(function() {
+ function bar(fn) {
+ let s = undefined;
+ for (const x of fn()) {
+ if (s === undefined) s = x;
+ else s += x;
+ }
+ return s;
+ }
+
+ function foo(x, y, z) {
+ return bar(function*() {
+ yield x;
+ yield y;
+ yield z;
+ });
+ }
+
+ assertEquals(6, foo(1, 2, 3));
+ assertEquals("abc", foo("a", "b", "c"));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(6, foo(1, 2, 3));
+ assertEquals("abc", foo("a", "b", "c"));
+})();
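All of the tests above share one skeleton: the operator under test lives in a closure that TurboFan inlines into foo, and the three inputs probe the fast path, the ToPrimitive slow path (object1), and the throwing path (thrower), before and after optimization. Condensed, with the operator as the only variable:

// Condensed skeleton of the pattern; `y + x` stands in for the
// operator under test in each block above.
const toPrim = {[Symbol.toPrimitive]() { return 1; }};
const boom   = {[Symbol.toPrimitive]() { throw new Error(); }};

function bar(fn) { return fn(1); }
function foo(x)  { return bar(y => y + x); }

foo(0);                            // fast path
foo(toPrim);                       // ToPrimitive slow path
try { foo(boom); } catch (e) {}    // throwing path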
diff --git a/deps/v8/test/mjsunit/compiler/dataview-constant.js b/deps/v8/test/mjsunit/compiler/dataview-constant.js
new file mode 100644
index 0000000000..f5f0b5e955
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dataview-constant.js
@@ -0,0 +1,173 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test DataView.prototype.getInt8()/setInt8() for constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setInt8(0, 42);
+ dv.setInt8(1, 24);
+
+ function foo(i) {
+ const x = dv.getInt8(i);
+ dv.setInt8(i, x+1);
+ return x;
+ }
+
+ assertEquals(42, foo(0));
+ assertEquals(24, foo(1));
+ assertEquals(43, foo(0));
+ assertEquals(25, foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(0));
+ assertEquals(26, foo(1));
+})();
+
+// Test DataView.prototype.getUint8()/setUint8() for constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setUint8(0, 42);
+ dv.setUint8(1, 24);
+
+ function foo(i) {
+ const x = dv.getUint8(i);
+ dv.setUint8(i, x+1);
+ return x;
+ }
+
+ assertEquals(42, foo(0));
+ assertEquals(24, foo(1));
+ assertEquals(43, foo(0));
+ assertEquals(25, foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(0));
+ assertEquals(26, foo(1));
+})();
+
+// Test DataView.prototype.getInt16()/setInt16() for constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setInt16(0, 42, true);
+ dv.setInt16(2, 24, true);
+
+ function foo(i) {
+ const x = dv.getInt16(i, true);
+ dv.setInt16(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(0));
+ assertEquals(24, foo(2));
+ assertEquals(43, foo(0));
+ assertEquals(25, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(0));
+ assertEquals(26, foo(2));
+})();
+
+// Test DataView.prototype.getUint16()/setUint16() for constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setUint16(0, 42, true);
+ dv.setUint16(2, 24, true);
+
+ function foo(i) {
+ const x = dv.getUint16(i, true);
+ dv.setUint16(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(0));
+ assertEquals(24, foo(2));
+ assertEquals(43, foo(0));
+ assertEquals(25, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(0));
+ assertEquals(26, foo(2));
+})();
+
+// Test DataView.prototype.getInt32()/setInt32() for constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setInt32(0, 42, true);
+ dv.setInt32(4, 24, true);
+
+ function foo(i) {
+ const x = dv.getInt32(i, true);
+ dv.setInt32(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(0));
+ assertEquals(24, foo(4));
+ assertEquals(43, foo(0));
+ assertEquals(25, foo(4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(0));
+ assertEquals(26, foo(4));
+})();
+
+// Test DataView.prototype.getUint32()/setUint32() for constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setUint32(0, 42, true);
+ dv.setUint32(4, 24, true);
+
+ function foo(i) {
+ const x = dv.getUint32(i, true);
+ dv.setUint32(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(0));
+ assertEquals(24, foo(4));
+ assertEquals(43, foo(0));
+ assertEquals(25, foo(4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(0));
+ assertEquals(26, foo(4));
+})();
+
+// Test DataView.prototype.getFloat32()/setFloat32() for constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setFloat32(0, 42, true);
+ dv.setFloat32(4, 24, true);
+
+ function foo(i) {
+ const x = dv.getFloat32(i, true);
+ dv.setFloat32(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(0));
+ assertEquals(24, foo(4));
+ assertEquals(43, foo(0));
+ assertEquals(25, foo(4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(0));
+ assertEquals(26, foo(4));
+})();
+
+// Test DataView.prototype.getFloat64()/setFloat64() for constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setFloat64(0, 42, true);
+ dv.setFloat64(8, 24, true);
+
+ function foo(i) {
+ const x = dv.getFloat64(i, true);
+ dv.setFloat64(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(0));
+ assertEquals(24, foo(8));
+ assertEquals(43, foo(0));
+ assertEquals(25, foo(8));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(0));
+ assertEquals(26, foo(8));
+})();
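One detail easy to miss above: every multi-byte accessor passes true as the final littleEndian argument. DataView defaults to big-endian when the flag is omitted, so the flag keeps reads and writes byte-order-consistent regardless of platform. Illustration:

// DataView is big-endian by default; pass true for little-endian.
const dv = new DataView(new ArrayBuffer(4));
dv.setUint16(0, 0x1234, true);
console.log(dv.getUint16(0, true).toString(16));  // "1234"
console.log(dv.getUint16(0).toString(16));        // "3412" (big-endian read)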
diff --git a/deps/v8/test/mjsunit/compiler/dataview-neutered.js b/deps/v8/test/mjsunit/compiler/dataview-neutered.js
new file mode 100644
index 0000000000..54b35f73c8
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dataview-neutered.js
@@ -0,0 +1,376 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --noalways-opt
+
+// Invalidate the neutering protector.
+%ArrayBufferNeuter(new ArrayBuffer(1));
+
+// Check DataView.prototype.getInt8() optimization.
+(function() {
+ const ab = new ArrayBuffer(1);
+ const dv = new DataView(ab);
+
+ function foo(dv) {
+ return dv.getInt8(0);
+ }
+
+ assertEquals(0, foo(dv));
+ assertEquals(0, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(dv));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.getUint8() optimization.
+(function() {
+ const ab = new ArrayBuffer(1);
+ const dv = new DataView(ab);
+
+ function foo(dv) {
+ return dv.getUint8(0);
+ }
+
+ assertEquals(0, foo(dv));
+ assertEquals(0, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(dv));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.getInt16() optimization.
+(function() {
+ const ab = new ArrayBuffer(2);
+ const dv = new DataView(ab);
+
+ function foo(dv) {
+ return dv.getInt16(0, true);
+ }
+
+ assertEquals(0, foo(dv));
+ assertEquals(0, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(dv));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.getUint16() optimization.
+(function() {
+ const ab = new ArrayBuffer(2);
+ const dv = new DataView(ab);
+
+ function foo(dv) {
+ return dv.getUint16(0, true);
+ }
+
+ assertEquals(0, foo(dv));
+ assertEquals(0, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(dv));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.getInt32() optimization.
+(function() {
+ const ab = new ArrayBuffer(4);
+ const dv = new DataView(ab);
+
+ function foo(dv) {
+ return dv.getInt32(0, true);
+ }
+
+ assertEquals(0, foo(dv));
+ assertEquals(0, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(dv));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.getUint32() optimization.
+(function() {
+ const ab = new ArrayBuffer(4);
+ const dv = new DataView(ab);
+
+ function foo(dv) {
+ return dv.getUint32(0, true);
+ }
+
+ assertEquals(0, foo(dv));
+ assertEquals(0, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(dv));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.getFloat32() optimization.
+(function() {
+ const ab = new ArrayBuffer(4);
+ const dv = new DataView(ab);
+
+ function foo(dv) {
+ return dv.getFloat32(0, true);
+ }
+
+ assertEquals(0, foo(dv));
+ assertEquals(0, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(dv));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.getFloat64() optimization.
+(function() {
+ const ab = new ArrayBuffer(8);
+ const dv = new DataView(ab);
+
+ function foo(dv) {
+ return dv.getFloat64(0, true);
+ }
+
+ assertEquals(0, foo(dv));
+ assertEquals(0, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(dv));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.setInt8() optimization.
+(function() {
+ const ab = new ArrayBuffer(1);
+ const dv = new DataView(ab);
+
+ function foo(dv, x) {
+ return dv.setInt8(0, x);
+ }
+
+ assertEquals(undefined, foo(dv, 1));
+ assertEquals(1, dv.getInt8(0));
+ assertEquals(undefined, foo(dv, 2));
+ assertEquals(2, dv.getInt8(0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(dv, 3));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv, 4), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv, 5), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.setUint8() optimization.
+(function() {
+ const ab = new ArrayBuffer(1);
+ const dv = new DataView(ab);
+
+ function foo(dv, x) {
+ return dv.setUint8(0, x);
+ }
+
+ assertEquals(undefined, foo(dv, 1));
+ assertEquals(1, dv.getUint8(0));
+ assertEquals(undefined, foo(dv, 2));
+ assertEquals(2, dv.getUint8(0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(dv, 3));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv, 4), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv, 5), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.setInt16() optimization.
+(function() {
+ const ab = new ArrayBuffer(2);
+ const dv = new DataView(ab);
+
+ function foo(dv, x) {
+ return dv.setInt16(0, x, true);
+ }
+
+ assertEquals(undefined, foo(dv, 1));
+ assertEquals(1, dv.getInt16(0, true));
+ assertEquals(undefined, foo(dv, 2));
+ assertEquals(2, dv.getInt16(0, true));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(dv, 3));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv, 4), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv, 5), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.setUint16() optimization.
+(function() {
+ const ab = new ArrayBuffer(2);
+ const dv = new DataView(ab);
+
+ function foo(dv, x) {
+ return dv.setUint16(0, x, true);
+ }
+
+ assertEquals(undefined, foo(dv, 1));
+ assertEquals(1, dv.getUint16(0, true));
+ assertEquals(undefined, foo(dv, 2));
+ assertEquals(2, dv.getUint16(0, true));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(dv, 3));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv, 4), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv, 5), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.setInt32() optimization.
+(function() {
+ const ab = new ArrayBuffer(4);
+ const dv = new DataView(ab);
+
+ function foo(dv, x) {
+ return dv.setInt32(0, x, true);
+ }
+
+ assertEquals(undefined, foo(dv, 1));
+ assertEquals(1, dv.getInt32(0, true));
+ assertEquals(undefined, foo(dv, 2));
+ assertEquals(2, dv.getInt32(0, true));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(dv, 3));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv, 4), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv, 5), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.setUint32() optimization.
+(function() {
+ const ab = new ArrayBuffer(4);
+ const dv = new DataView(ab);
+
+ function foo(dv, x) {
+ return dv.setUint32(0, x, true);
+ }
+
+ assertEquals(undefined, foo(dv, 1));
+ assertEquals(1, dv.getUint32(0, true));
+ assertEquals(undefined, foo(dv, 2));
+ assertEquals(2, dv.getUint32(0, true));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(dv, 3));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv, 4), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv, 5), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.setFloat32() optimization.
+(function() {
+ const ab = new ArrayBuffer(4);
+ const dv = new DataView(ab);
+
+ function foo(dv, x) {
+ return dv.setFloat32(0, x, true);
+ }
+
+ assertEquals(undefined, foo(dv, 1));
+ assertEquals(1, dv.getFloat32(0, true));
+ assertEquals(undefined, foo(dv, 2));
+ assertEquals(2, dv.getFloat32(0, true));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(dv, 3));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv, 4), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv, 5), TypeError);
+ assertOptimized(foo);
+})();
+
+// Check DataView.prototype.setFloat64() optimization.
+(function() {
+ const ab = new ArrayBuffer(8);
+ const dv = new DataView(ab);
+
+ function foo(dv, x) {
+ return dv.setFloat64(0, x, true);
+ }
+
+ assertEquals(undefined, foo(dv, 1));
+ assertEquals(1, dv.getFloat64(0, true));
+ assertEquals(undefined, foo(dv, 2));
+ assertEquals(2, dv.getFloat64(0, true));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(dv, 3));
+ assertOptimized(foo);
+ %ArrayBufferNeuter(ab);
+ assertThrows(() => foo(dv, 4), TypeError);
+ assertUnoptimized(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(() => foo(dv, 5), TypeError);
+ assertOptimized(foo);
+})();
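The %ArrayBufferNeuter call at the top of this file does double duty: it detaches a throwaway buffer purely to invalidate the global neutering protector, forcing TurboFan to emit explicit detach checks in the functions below instead of relying on the protector; the later per-test calls then detach the buffer under test so those checks actually fire. The post-detach behavior in miniature (test-only intrinsic, requires --allow-natives-syntax):

// Test-only intrinsic: detach the backing store, then every access throws.
const ab = new ArrayBuffer(4);
const dv = new DataView(ab);
dv.setInt32(0, 1, true);
%ArrayBufferNeuter(ab);
try {
  dv.getInt32(0, true);
} catch (e) {
  console.log(e instanceof TypeError);  // true
}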
diff --git a/deps/v8/test/mjsunit/compiler/dataview-nonconstant.js b/deps/v8/test/mjsunit/compiler/dataview-nonconstant.js
new file mode 100644
index 0000000000..0420660c83
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dataview-nonconstant.js
@@ -0,0 +1,173 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test DataView.prototype.getInt8()/setInt8() for non-constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setInt8(0, 42);
+ dv.setInt8(1, 24);
+
+ function foo(dv, i) {
+ const x = dv.getInt8(i);
+ dv.setInt8(i, x+1);
+ return x;
+ }
+
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 1));
+ assertEquals(43, foo(dv, 0));
+ assertEquals(25, foo(dv, 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(dv, 0));
+ assertEquals(26, foo(dv, 1));
+})();
+
+// Test DataView.prototype.getUint8()/setUint8() for non-constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setUint8(0, 42);
+ dv.setUint8(1, 24);
+
+ function foo(dv, i) {
+ const x = dv.getUint8(i);
+ dv.setUint8(i, x+1);
+ return x;
+ }
+
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 1));
+ assertEquals(43, foo(dv, 0));
+ assertEquals(25, foo(dv, 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(dv, 0));
+ assertEquals(26, foo(dv, 1));
+})();
+
+// Test DataView.prototype.getInt16()/setInt16() for non-constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setInt16(0, 42, true);
+ dv.setInt16(2, 24, true);
+
+ function foo(dv, i) {
+ const x = dv.getInt16(i, true);
+ dv.setInt16(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 2));
+ assertEquals(43, foo(dv, 0));
+ assertEquals(25, foo(dv, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(dv, 0));
+ assertEquals(26, foo(dv, 2));
+})();
+
+// Test DataView.prototype.getUint16()/setUint16() for non-constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setUint16(0, 42, true);
+ dv.setUint16(2, 24, true);
+
+ function foo(dv, i) {
+ const x = dv.getUint16(i, true);
+ dv.setUint16(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 2));
+ assertEquals(43, foo(dv, 0));
+ assertEquals(25, foo(dv, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(dv, 0));
+ assertEquals(26, foo(dv, 2));
+})();
+
+// Test DataView.prototype.getInt32()/setInt32() for non-constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setInt32(0, 42, true);
+ dv.setInt32(4, 24, true);
+
+ function foo(dv, i) {
+ const x = dv.getInt32(i, true);
+ dv.setInt32(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 4));
+ assertEquals(43, foo(dv, 0));
+ assertEquals(25, foo(dv, 4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(dv, 0));
+ assertEquals(26, foo(dv, 4));
+})();
+
+// Test DataView.prototype.getUint32()/setUint32() for non-constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setUint32(0, 42, true);
+ dv.setUint32(4, 24, true);
+
+ function foo(dv, i) {
+ const x = dv.getUint32(i, true);
+ dv.setUint32(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 4));
+ assertEquals(43, foo(dv, 0));
+ assertEquals(25, foo(dv, 4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(dv, 0));
+ assertEquals(26, foo(dv, 4));
+})();
+
+// Test DataView.prototype.getFloat32()/setFloat32() for non-constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setFloat32(0, 42, true);
+ dv.setFloat32(4, 24, true);
+
+ function foo(dv, i) {
+ const x = dv.getFloat32(i, true);
+ dv.setFloat32(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 4));
+ assertEquals(43, foo(dv, 0));
+ assertEquals(25, foo(dv, 4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(dv, 0));
+ assertEquals(26, foo(dv, 4));
+})();
+
+// Test DataView.prototype.getFloat64()/setFloat64() for non-constant DataViews.
+(function() {
+ const dv = new DataView(new ArrayBuffer(1024));
+ dv.setFloat64(0, 42, true);
+ dv.setFloat64(8, 24, true);
+
+ function foo(dv, i) {
+ const x = dv.getFloat64(i, true);
+ dv.setFloat64(i, x+1, true);
+ return x;
+ }
+
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 8));
+ assertEquals(43, foo(dv, 0));
+ assertEquals(25, foo(dv, 8));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(44, foo(dv, 0));
+ assertEquals(26, foo(dv, 8));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js b/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js
index f520167e19..65e736c706 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js
@@ -185,3 +185,56 @@
%OptimizeFunctionOnNextCall(f);
f(); f();
})();
+
+// Test variable index access to strict arguments
+// with up to 2 elements.
+(function testArgumentsVariableIndexStrict() {
+ function g() {
+ "use strict";
+ var s = 0;
+ for (var i = 0; i < arguments.length; ++i) s += arguments[i];
+ return s;
+ }
+
+ function f(x, y) {
+ // (a) arguments[i] is dead code since arguments.length is 0.
+ const a = g();
+ // (b) arguments[i] always yields the first element.
+ const b = g(x);
+ // (c) arguments[i] can yield either x or y.
+ const c = g(x, y);
+ return a + b + c;
+ }
+
+ assertEquals(4, f(1, 2));
+ assertEquals(5, f(2, 1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(4, f(1, 2));
+ assertEquals(5, f(2, 1));
+})();
+
+// Test variable index access to sloppy arguments
+// with up to 2 elements.
+(function testArgumentsVariableIndexSloppy() {
+ function g() {
+ var s = 0;
+ for (var i = 0; i < arguments.length; ++i) s += arguments[i];
+ return s;
+ }
+
+ function f(x, y) {
+ // (a) arguments[i] is dead code since arguments.length is 0.
+ const a = g();
+ // (b) arguments[i] always yields the first element.
+ const b = g(x);
+ // (c) arguments[i] can yield either x or y.
+ const c = g(x, y);
+ return a + b + c;
+ }
+
+ assertEquals(4, f(1, 2));
+ assertEquals(5, f(2, 1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(4, f(1, 2));
+ assertEquals(5, f(2, 1));
+})();
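The (a)/(b)/(c) shapes above are picked so that escape analysis sees the arguments object at three cardinalities: with no arguments the indexed load is dead code, with one it collapses to the sole parameter, and with two it must become a bounded select over x and y, all without materializing the object. Conceptually, after scalar replacement g(x, y) reads like this (a sketch, not actual compiler output):

// Conceptual result of scalar replacement for g(x, y): the arguments
// object is gone and arguments[i] is a bounded select.
function gScalarized(x, y) {
  let s = 0;
  for (let i = 0; i < 2; ++i) s += (i === 0 ? x : y);
  return s;
}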
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-array.js b/deps/v8/test/mjsunit/compiler/escape-analysis-array.js
new file mode 100644
index 0000000000..2c44fa8c9b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-array.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test variable index access to array with 1 element.
+(function testOneElementArrayVariableIndex() {
+ function f(i) {
+ const a = new Array("first");
+ return a[i];
+ }
+
+ assertEquals("first", f(0));
+ assertEquals("first", f(0));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("first", f(0));
+})();
+
+// Test variable index access to array with 2 elements.
+(function testTwoElementArrayVariableIndex() {
+ function f(i) {
+ const a = new Array("first", "second");
+ return a[i];
+ }
+
+ assertEquals("first", f(0));
+ assertEquals("second", f(1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("first", f(0));
+ assertEquals("second", f(1));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-rest-parameters.js b/deps/v8/test/mjsunit/compiler/escape-analysis-rest-parameters.js
new file mode 100644
index 0000000000..2ac1253a18
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-rest-parameters.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test variable index access to rest parameters
+// with up to 2 elements.
+(function testRestParametersVariableIndex() {
+ function g(...args) {
+ let s = 0;
+ for (let i = 0; i < args.length; ++i) s += args[i];
+ return s;
+ }
+
+ function f(x, y) {
+ // (a) args[i] is dead code since args.length is 0.
+ const a = g();
+ // (b) args[i] always yields the first element.
+ const b = g(x);
+ // (c) args[i] can yield either x or y.
+ const c = g(x, y);
+ return a + b + c;
+ }
+
+ assertEquals(4, f(1, 2));
+ assertEquals(5, f(2, 1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(4, f(1, 2));
+ assertEquals(5, f(2, 1));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/instanceof2.js b/deps/v8/test/mjsunit/compiler/instanceof2.js
new file mode 100644
index 0000000000..ca006e3046
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/instanceof2.js
@@ -0,0 +1,233 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+// Without instance creation:
+
+(function() {
+ function Goo() {};
+ const goo = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertFalse(IsGoo(goo));
+})();
+
+(function() {
+ function Goo() {};
+ const goo = {};
+ Goo.prototype = Object.prototype;
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertTrue(IsGoo(goo));
+ assertTrue(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertTrue(IsGoo(goo));
+})();
+
+(function() {
+ function Goo() {};
+ const goo = {};
+  Goo.prototype = 42;
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertThrows(_ => IsGoo(goo), TypeError);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertThrows(_ => IsGoo(goo), TypeError);
+})();
+
+(function() {
+ function Goo() {};
+ const goo = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertFalse(IsGoo(goo));
+ Goo.prototype = Object.prototype;
+ assertTrue(IsGoo(goo));
+})();
+
+(function() {
+ function Goo() {};
+ const goo = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertFalse(IsGoo(goo));
+ Goo.prototype = 42;
+ assertThrows(_ => IsGoo(goo), TypeError);
+})();
+
+
+// With instance creation:
+
+(function() {
+ function Goo() {};
+ const goo = new Goo();
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertTrue(IsGoo(goo));
+ assertTrue(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertTrue(IsGoo(goo));
+})();
+
+(function() {
+ function Goo() {};
+ const goo = new Goo();
+ Goo.prototype = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertFalse(IsGoo(goo));
+})();
+
+(function() {
+ function Goo() {};
+ const goo = new Goo();
+ Goo.prototype = 42;
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertThrows(_ => IsGoo(goo), TypeError);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertThrows(_ => IsGoo(goo), TypeError);
+})();
+
+(function() {
+ function Goo() {};
+ const goo = new Goo();
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertTrue(IsGoo(goo));
+ assertTrue(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertTrue(IsGoo(goo));
+ Goo.prototype = {};
+ assertFalse(IsGoo(goo));
+})();
+
+(function() {
+ function Goo() {};
+ const goo = new Goo();
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertTrue(IsGoo(goo));
+ assertTrue(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertTrue(IsGoo(goo));
+  Goo.prototype = 42;
+ assertThrows(_ => IsGoo(goo), TypeError);
+})();
+
+(function() {
+ function Goo() {};
+ Goo.prototype = 42;
+ const goo = new Goo();
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertThrows(_ => IsGoo(goo), TypeError);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ Goo.prototype = {};
+ assertFalse(IsGoo(goo));
+})();
+
+(function() {
+ function Goo() {};
+ Goo.prototype = 42;
+ const goo = new Goo();
+ Goo.prototype = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertFalse(IsGoo(goo));
+ Goo.prototype = Object.prototype;
+ assertTrue(IsGoo(goo));
+})();
+
+(function() {
+ function Goo() {};
+ Goo.prototype = {};
+ const goo = new Goo();
+ Goo.prototype = 42;
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertThrows(_ => IsGoo(goo), TypeError);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ Goo.prototype = Object.prototype;
+ assertTrue(IsGoo(goo));
+})();
+
+(function() {
+ function Goo() {};
+ Goo.prototype = {};
+ const goo = new Goo();
+ Goo.prototype = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ Goo.prototype = Object.prototype;
+ assertTrue(IsGoo(goo));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/instanceof3.js b/deps/v8/test/mjsunit/compiler/instanceof3.js
new file mode 100644
index 0000000000..e390c42092
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/instanceof3.js
@@ -0,0 +1,233 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+// Without instance creation:
+
+(function() {
+ function* Goo() {};
+ const goo = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertFalse(IsGoo(goo));
+})();
+
+(function() {
+ function* Goo() {};
+ const goo = {};
+ Goo.prototype = Object.prototype;
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertTrue(IsGoo(goo));
+ assertTrue(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertTrue(IsGoo(goo));
+})();
+
+(function() {
+ function* Goo() {};
+ const goo = {};
+ Goo.prototype = 42;
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertThrows(_ => IsGoo(goo), TypeError);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertThrows(_ => IsGoo(goo), TypeError);
+})();
+
+(function() {
+ function* Goo() {};
+ const goo = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertFalse(IsGoo(goo));
+ Goo.prototype = Object.prototype;
+ assertTrue(IsGoo(goo));
+})();
+
+(function() {
+ function* Goo() {};
+ const goo = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertFalse(IsGoo(goo));
+ Goo.prototype = 42;
+ assertThrows(_ => IsGoo(goo), TypeError);
+})();
+
+
+// With instance creation:
+
+(function() {
+ function* Goo() {};
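+ // Calling a generator function does not construct an instance; it returns
+ // a generator object whose prototype chain starts at Goo.prototype, which
+ // is why the instanceof checks below succeed.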
+ const goo = Goo();
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertTrue(IsGoo(goo));
+ assertTrue(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertTrue(IsGoo(goo));
+})();
+
+(function() {
+ function* Goo() {};
+ const goo = Goo();
+ Goo.prototype = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertFalse(IsGoo(goo));
+})();
+
+(function() {
+ function* Goo() {};
+ const goo = Goo();
+ Goo.prototype = 42;
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertThrows(_ => IsGoo(goo), TypeError);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertThrows(_ => IsGoo(goo), TypeError);
+})();
+
+(function() {
+ function* Goo() {};
+ const goo = Goo();
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertTrue(IsGoo(goo));
+ assertTrue(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertTrue(IsGoo(goo));
+ Goo.prototype = {};
+ assertFalse(IsGoo(goo));
+})();
+
+(function() {
+ function* Goo() {};
+ const goo = Goo();
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertTrue(IsGoo(goo));
+ assertTrue(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertTrue(IsGoo(goo));
+ Goo.prototype = 42;
+ assertThrows(_ => IsGoo(goo), TypeError);
+})();
+
+(function() {
+ function* Goo() {};
+ Goo.prototype = 42;
+ const goo = Goo();
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertThrows(_ => IsGoo(goo), TypeError);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ Goo.prototype = {};
+ assertFalse(IsGoo(goo));
+})();
+
+(function() {
+ function* Goo() {};
+ Goo.prototype = 42;
+ const goo = Goo();
+ Goo.prototype = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertFalse(IsGoo(goo));
+ Goo.prototype = Object.prototype;
+ assertTrue(IsGoo(goo));
+})();
+
+(function() {
+ function* Goo() {};
+ Goo.prototype = {};
+ const goo = Goo();
+ Goo.prototype = 42;
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertThrows(_ => IsGoo(goo), TypeError);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ %OptimizeFunctionOnNextCall(IsGoo);
+ assertThrows(_ => IsGoo(goo), TypeError);
+ Goo.prototype = Object.prototype;
+ assertTrue(IsGoo(goo));
+})();
+
+(function() {
+ function* Goo() {};
+ Goo.prototype = {};
+ const goo = Goo();
+ Goo.prototype = {};
+
+ function IsGoo(x) {
+ return x instanceof Goo;
+ }
+
+ assertFalse(IsGoo(goo));
+ assertFalse(IsGoo(goo));
+ %OptimizeFunctionOnNextCall(IsGoo);
+ Goo.prototype = Object.prototype;
+ assertTrue(IsGoo(goo));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/int64.js b/deps/v8/test/mjsunit/compiler/int64.js
new file mode 100644
index 0000000000..0a88a95895
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/int64.js
@@ -0,0 +1,91 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test NumberAdd with PositiveSafeInteger -> PositiveSafeInteger (as Tagged).
+(function() {
+ function foo(x) {
+ const i = x ? 0xFFFFFFFF : 0;
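+ // 0xFFFFFFFF + 1 is 0x100000000, which no longer fits into 32 bits.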
+ return i + 1;
+ }
+
+ assertEquals(0x000000001, foo(false));
+ assertEquals(0x000000001, foo(false));
+ assertEquals(0x100000000, foo(true));
+ assertEquals(0x100000000, foo(true));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0x000000001, foo(false));
+ assertEquals(0x100000000, foo(true));
+})();
+
+// Test NumberAdd with SafeInteger -> SafeInteger (as Tagged).
+(function() {
+ function foo(x) {
+ const i = x ? 0xFFFFFFFF : -1;
+ return i + 1;
+ }
+
+ assertEquals(0x000000000, foo(false));
+ assertEquals(0x000000000, foo(false));
+ assertEquals(0x100000000, foo(true));
+ assertEquals(0x100000000, foo(true));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0x000000000, foo(false));
+ assertEquals(0x100000000, foo(true));
+})();
+
+// NumberAdd: Smi x Unsigned32 -> SafeInteger (as Float64).
+(function() {
+ const a = new Float64Array(1);
+
+ function foo(o) {
+ a[0] = o.x + 0xFFFFFFFF;
+ return a[0];
+ }
+
+ assertEquals(0x0FFFFFFFF, foo({x:0}));
+ assertEquals(0x100000000, foo({x:1}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0x100000000, foo({x:1}));
+})();
+
+// NumberAdd: Smi x Unsigned32 -> SafeInteger (as TaggedSigned).
+(function() {
+ function foo(o) {
+ return {x: Math.floor((o.x + 11123456789) + -11123456788)}.x;
+ }
+
+ assertEquals(1, foo({x:0}));
+ assertEquals(2, foo({x:1}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo({x:1}));
+})();
+
+// NumberSubtract: Unsigned32 x Unsigned32 -> SafeInteger (as Word32).
+(function() {
+ function foo(a, i) {
+ i = ((i >>> 0)) - 0xFFFFFFFF;
+ return a[i];
+ }
+
+ assertEquals(1, foo([1], 0xFFFFFFFF));
+ assertEquals(2, foo([2], 0xFFFFFFFF));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo([3], 0xFFFFFFFF));
+})();
+
+// Test that the Deoptimizer can handle Word64 properly.
+(function() {
+ function foo(b) {
+ const i = ((b >>> 0)) - 0xFFFFFFFF;
+ %DeoptimizeFunction(foo);
+ return i;
+ }
+
+ assertEquals(0, foo(0xFFFFFFFF));
+ assertEquals(0, foo(0xFFFFFFFF));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(0xFFFFFFFF));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/math-imul.js b/deps/v8/test/mjsunit/compiler/math-imul.js
new file mode 100644
index 0000000000..1de18a6a2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/math-imul.js
@@ -0,0 +1,76 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test Math.imul() with no inputs.
+(function() {
+ function foo() { return Math.imul(); }
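+ // Both operands default to undefined, which ToUint32 maps to 0.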
+
+ assertEquals(0, foo());
+ assertEquals(0, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo());
+})();
+
+// Test Math.imul() with only one input.
+(function() {
+ function foo(x) { return Math.imul(x); }
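+ // The omitted second operand is undefined; ToUint32 turns it into 0, so
+ // the product is always 0.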
+
+ assertEquals(0, foo(1));
+ assertEquals(0, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(3));
+})();
+
+// Test Math.imul() with wrong types.
+(function() {
+ function foo(x, y) { return Math.imul(x, y); }
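+ // null and undefined are both mapped to 0 by the ToUint32 coercion.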
+
+ assertEquals(0, foo(null, 1));
+ assertEquals(0, foo(2, undefined));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(null, 1));
+ assertEquals(0, foo(2, undefined));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(null, 1));
+ assertEquals(0, foo(2, undefined));
+ assertOptimized(foo);
+})();
+
+// Test Math.imul() with signed integers (statically known).
+(function() {
+ function foo(x, y) { return Math.imul(x|0, y|0); }
+
+ assertEquals(1, foo(1, 1));
+ assertEquals(2, foo(2, 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1, 1));
+ assertEquals(2, foo(2, 1));
+ assertOptimized(foo);
+})();
+
+// Test Math.imul() with unsigned integers (statically known).
+(function() {
+ function foo(x, y) { return Math.imul(x>>>0, y>>>0); }
+
+ assertEquals(1, foo(1, 1));
+ assertEquals(2, foo(2, 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1, 1));
+ assertEquals(2, foo(2, 1));
+ assertOptimized(foo);
+})();
+
+// Test Math.imul() with floating-point numbers.
+(function() {
+ function foo(x, y) { return Math.imul(x, y); }
+
+ assertEquals(1, foo(1.1, 1.1));
+ assertEquals(2, foo(2.1, 1.1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1.1, 1.1));
+ assertEquals(2, foo(2.1, 1.1));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/math-max.js b/deps/v8/test/mjsunit/compiler/math-max.js
new file mode 100644
index 0000000000..350bdfba88
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/math-max.js
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test the case where TurboFan can statically rule out -0 from the
+// Math.max type.
+(function() {
+ function foo(x) {
+ // Arrange x such that TurboFan infers type [-inf, inf] \/ MinusZero.
+ x = +x;
+ x = Math.round(x);
+ return Object.is(-0, Math.max(1, x));
+ }
+
+ assertFalse(foo(-0));
+ assertFalse(foo(-1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(-0));
+ assertFalse(foo(-1));
+})();
+
+// Test the case where -0 is ruled out because it's strictly less than +0.
+(function() {
+ function foo(x) {
+ // Arrange x such that TurboFan infers type [-inf, inf] \/ MinusZero.
+ x = +x;
+ x = Math.round(x);
+ return Object.is(-0, Math.max(0, x));
+ }
+
+ assertFalse(foo(-0));
+ assertFalse(foo(-1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(-0));
+ assertFalse(foo(-1));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/math-min.js b/deps/v8/test/mjsunit/compiler/math-min.js
new file mode 100644
index 0000000000..882103984d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/math-min.js
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test the case where TurboFan can statically rule out -0 from the
+// Math.min type.
+(function() {
+ function foo(x) {
+ // Arrange x such that TurboFan infers type [-inf, inf] \/ MinusZero.
+ x = +x;
+ x = Math.round(x);
+ return Object.is(-0, Math.min(-1, x));
+ }
+
+ assertFalse(foo(-0));
+ assertFalse(foo(-1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(-0));
+ assertFalse(foo(-1));
+})();
+
+// Test the case where +0 is ruled out because it's strictly greater than -0.
+(function() {
+ function foo(x) {
+ // Arrange x such that TurboFan infers type [-inf, inf] \/ MinusZero.
+ x = +x;
+ x = Math.round(x);
+ return Object.is(+0, Math.min(-0, x));
+ }
+
+ assertFalse(foo(-0));
+ assertFalse(foo(-1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(-0));
+ assertFalse(foo(-1));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-abs.js b/deps/v8/test/mjsunit/compiler/number-abs.js
new file mode 100644
index 0000000000..9eb8ab5bb5
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-abs.js
@@ -0,0 +1,76 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test that NumberAbs correctly deals with PositiveInteger \/ MinusZero
+// and turns the -0 into a 0.
+(function() {
+ function foo(x) {
+ x = Math.floor(x);
+ x = Math.max(x, -0);
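+ // 1 / 0 is Infinity while 1 / -0 is -Infinity, so the division below
+ // reveals whether Math.abs really returned +0.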
+ return 1 / Math.abs(x);
+ }
+
+ assertEquals(Infinity, foo(-0));
+ assertEquals(Infinity, foo(-0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(Infinity, foo(-0));
+})();
+
+// Test that NumberAbs properly passes the kIdentifyZeros truncation
+// for Signed32 \/ MinusZero inputs.
+(function() {
+ function foo(x) {
+ return Math.abs(x * -2);
+ }
+
+ assertEquals(2, foo(-1));
+ assertEquals(4, foo(-2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(-1));
+ assertEquals(4, foo(-2));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
+
+// Test that NumberAbs properly passes the kIdentifyZeros truncation
+// for Unsigned32 \/ MinusZero inputs.
+(function() {
+ function foo(x) {
+ x = x | 0;
+ return Math.abs(Math.max(x * -2, 0));
+ }
+
+ assertEquals(2, foo(-1));
+ assertEquals(4, foo(-2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(-1));
+ assertEquals(4, foo(-2));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
+
+// Test that NumberAbs properly passes the kIdentifyZeros truncation
+// for OrderedNumber inputs.
+(function() {
+ function foo(x) {
+ x = x | 0;
+ return Math.abs(Math.min(x * -2, 2 ** 32));
+ }
+
+ assertEquals(2, foo(-1));
+ assertEquals(4, foo(-2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(-1));
+ assertEquals(4, foo(-2));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-add.js b/deps/v8/test/mjsunit/compiler/number-add.js
new file mode 100644
index 0000000000..61e6495c52
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-add.js
@@ -0,0 +1,62 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// This tests that NumberAdd passes on the right truncations
+// even if it figures out during SimplifiedLowering that it
+// can indeed do a Word32 operation (based on the feedback
+// baked in for its inputs by other operators).
+(function() {
+ // We need a + with Number feedback to get to a NumberAdd
+ // during the typed lowering pass of TurboFan's frontend.
+ function foo(x, y) { return x + y; }
+ foo(0.1, 0.2);
+ foo(0.1, 0.2);
+
+ // Now we need to fool TurboFan into thinking that it has to
+ // perform the `foo(x,-1)` on Float64 values until the
+ // very last moment (after the RETYPE phase of the
+ // SimplifiedLowering), where it realizes that the inputs
+ // and outputs of the NumberAdd allow it to perform the
+ // operation on Word32.
+ function bar(x) {
+ x = Math.trunc(foo(x - 1, 1));
+ return foo(x, -1);
+ }
+
+ assertEquals(0, bar(1));
+ assertEquals(1, bar(2));
+ %OptimizeFunctionOnNextCall(bar);
+ assertEquals(2, bar(3));
+})();
+
+// This tests that SpeculativeNumberAdd can still lower to
+// Int32Add in SimplifiedLowering, which requires some magic
+// to make sure that SpeculativeNumberAdd survives to that
+// point; in particular, the JSTypedLowering needs to be unable
+// to tell that the inputs to SpeculativeNumberAdd are
+// non-String primitives.
+(function() {
+ // We need a function that has a + with feedback Number or
+ // NumberOrOddball, but for whose inputs the JSTypedLowering
+ // cannot reduce it to NumberAdd (with SpeculativeToNumber
+ // conversions). We achieve this by using an object literal
+ // indirection here.
+ function baz(x) {
+ return {x}.x + x;
+ }
+ baz(null);
+ baz(undefined);
+
+ // Now we just need to truncate the result.
+ function foo(x) {
+ return baz(1) | 0;
+ }
+
+ assertEquals(2, foo());
+ assertEquals(2, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-ceil.js b/deps/v8/test/mjsunit/compiler/number-ceil.js
new file mode 100644
index 0000000000..ce87cd0fc0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-ceil.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test that NumberCeil propagates kIdentifyZeros truncations.
+(function() {
+ function foo(x) {
+ return Math.abs(Math.ceil(x * -2));
+ }
+
+ assertEquals(2, foo(1));
+ assertEquals(4, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(1));
+ assertEquals(4, foo(2));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js b/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js
new file mode 100644
index 0000000000..33abf6b913
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js
@@ -0,0 +1,152 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test that SpeculativeNumberEqual[SignedSmall] properly passes the
+// kIdentifyZeros truncation.
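+// (A kIdentifyZeros truncation means the value's uses do not distinguish
+// -0 from 0, so the multiplication below does not have to deoptimize when
+// it produces -0.)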
+(function() {
+ function foo(x, y) {
+ if (x * y === 0) return 0;
+ return 1;
+ }
+
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ assertOptimized(foo);
+ // Even if x*y produces -0 now, it should stay optimized.
+ assertEquals(0, foo(-3, 0));
+ assertEquals(0, foo(0, -3));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberEqual[Number] properly passes the
+// kIdentifyZeros truncation.
+(function() {
+ // Produce a SpeculativeNumberEqual with Number feedback.
+ function bar(x, y) { return x === y; }
+ bar(0.1, 0.5);
+ bar(-0, 100);
+
+ function foo(x, y) {
+ if (bar(x * y, 0)) return 0;
+ return 1;
+ }
+
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ assertOptimized(foo);
+ // Even if x*y produces -0 now, it should stay optimized.
+ assertEquals(0, foo(-3, 0));
+ assertEquals(0, foo(0, -3));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberLessThan[SignedSmall] properly passes the
+// kIdentifyZeros truncation.
+(function() {
+ function foo(x, y) {
+ if (x * y < 0) return 0;
+ return 1;
+ }
+
+ assertEquals(0, foo(1, -1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1, -1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ assertOptimized(foo);
+ // Even if x*y produces -0 now, it should stay optimized.
+ assertEquals(1, foo(-3, 0));
+ assertEquals(1, foo(0, -3));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberLessThan[Number] properly passes the
+// kIdentifyZeros truncation.
+(function() {
+ // Produce a SpeculativeNumberLessThan with Number feedback.
+ function bar(x, y) { return x < y; }
+ bar(0.1, 0.5);
+ bar(-0, 100);
+
+ function foo(x, y) {
+ if (bar(x * y, 0)) return 0;
+ return 1;
+ }
+
+ assertEquals(0, foo(1, -1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1, -1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ assertOptimized(foo);
+ // Even if x*y produces -0 now, it should stay optimized.
+ assertEquals(1, foo(-3, 0));
+ assertEquals(1, foo(0, -3));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberLessThanOrEqual[SignedSmall] properly passes the
+// kIdentifyZeros truncation.
+(function() {
+ function foo(x, y) {
+ if (x * y <= 0) return 0;
+ return 1;
+ }
+
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ assertOptimized(foo);
+ // Even if x*y produces -0 now, it should stay optimized.
+ assertEquals(0, foo(-3, 0));
+ assertEquals(0, foo(0, -3));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberLessThanOrEqual[Number] properly passes the
+// kIdentifyZeros truncation.
+(function() {
+ // Produce a SpeculativeNumberLessThanOrEqual with Number feedback.
+ function bar(x, y) { return x <= y; }
+ bar(0.1, 0.5);
+ bar(-0, 100);
+
+ function foo(x, y) {
+ if (bar(x * y, 0)) return 0;
+ return 1;
+ }
+
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(0, 1));
+ assertEquals(1, foo(1, 1));
+ assertEquals(1, foo(1, 2));
+ assertOptimized(foo);
+ // Even if x*y produces -0 now, it should stay optimized.
+ assertEquals(0, foo(-3, 0));
+ assertEquals(0, foo(0, -3));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-divide.js b/deps/v8/test/mjsunit/compiler/number-divide.js
new file mode 100644
index 0000000000..c4cc8fa881
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-divide.js
@@ -0,0 +1,207 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --noalways-opt
+
+// Test that NumberDivide with Number feedback works even if SimplifiedLowering
+// only figures out in the end that the inputs to this operation
+// are actually Unsigned32.
+(function() {
+ // We need a separately polluted / with NumberOrOddball feedback.
+ function bar(x) { return x / 2; }
+ bar(undefined); // The / feedback is now NumberOrOddball.
+
+ // Now just use the gadget above in a way where only after RETYPE
+ // in SimplifiedLowering do we find out that `x` is actually in
+ // Unsigned32 range (based on the SignedSmall feedback taken for
+ // the + operator).
+ function foo(x) {
+ x = (x >>> 0) + 1;
+ return bar(x) | 0;
+ }
+
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(2, foo(3));
+ assertEquals(2, foo(4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(2, foo(3));
+ assertEquals(2, foo(4));
+ assertOptimized(foo);
+})();
+
+// Test that NumberDivide with Number feedback works even if SimplifiedLowering
+// only figures out in the end that the inputs to this operation
+// are actually Signed32.
+(function() {
+ // We need a separately polluted / with NumberOrOddball feedback.
+ function bar(x) { return x / 2; }
+ bar(undefined); // The / feedback is now NumberOrOddball.
+
+ // Now just use the gadget above in a way where only after RETYPE
+ // in SimplifiedLowering do we find out that `x` is actually in
+ // Signed32 range (based on the SignedSmall feedback taken for
+ // the + operator).
+ function foo(x) {
+ x = (x | 0) + 1;
+ return bar(x) | 0;
+ }
+
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(2, foo(3));
+ assertEquals(2, foo(4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(2, foo(3));
+ assertEquals(2, foo(4));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberDivide turns into CheckedInt32Div, and
+// that the "known power of two divisor" optimization works correctly.
+(function() {
+ function foo(x) { return (x | 0) / 2; }
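+ // Note: with SignedSmall feedback this division presumably becomes a
+ // CheckedInt32Div, where the constant power-of-two divisor can be lowered
+ // to a shift guarded by a check that the division leaves no remainder.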
+
+ // Warmup with proper int32 divisions.
+ assertEquals(1, foo(2));
+ assertEquals(2, foo(4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo(6));
+ assertOptimized(foo);
+
+ // Make optimized code fail.
+ assertEquals(0.5, foo(1));
+ assertUnoptimized(foo);
+
+ // Try again with the new feedback, and now it should stay optimized.
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(4, foo(8));
+ assertOptimized(foo);
+ assertEquals(0.5, foo(1));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberDivide turns into CheckedInt32Div, and
+// that the optimized code properly bails out on "division by zero".
+(function() {
+ function foo(x, y) { return x / y; }
+
+ // Warmup with proper int32 divisions.
+ assertEquals(2, foo(4, 2));
+ assertEquals(2, foo(8, 4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(2, 2));
+ assertOptimized(foo);
+
+ // Make optimized code fail.
+ assertEquals(Infinity, foo(1, 0));
+ assertUnoptimized(foo);
+
+ // Try again with the new feedback, and now it should stay optimized.
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(2, 1));
+ assertOptimized(foo);
+ assertEquals(Infinity, foo(1, 0));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberDivide turns into CheckedInt32Div, and
+// that the optimized code properly bails out on minus zero.
+(function() {
+ function foo(x, y) { return x / y; }
+
+ // Warmup with proper int32 divisions.
+ assertEquals(2, foo(4, 2));
+ assertEquals(2, foo(8, 4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(2, 2));
+ assertOptimized(foo);
+
+ // Make optimized code fail.
+ assertEquals(-0, foo(0, -1));
+ assertUnoptimized(foo);
+
+ // Try again with the new feedback, and now it should stay optimized.
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(2, 1));
+ assertOptimized(foo);
+ assertEquals(-0, foo(0, -1));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberDivide turns into CheckedInt32Div, and
+// that the optimized code properly bails out if result is -kMinInt.
+(function() {
+ function foo(x, y) { return x / y; }
+
+ // Warmup with proper int32 divisions.
+ assertEquals(2, foo(4, 2));
+ assertEquals(2, foo(8, 4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(2, 2));
+ assertOptimized(foo);
+
+ // Make optimized code fail.
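+ // -2147483648 / -1 is 2147483648 (-kMinInt), which is not representable
+ // as an int32, so the optimized code has to bail out.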
+ assertEquals(2147483648, foo(-2147483648, -1));
+ assertUnoptimized(foo);
+
+ // Try again with the new feedback, and now it should stay optimized.
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(2, 1));
+ assertOptimized(foo);
+ assertEquals(2147483648, foo(-2147483648, -1));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberDivide turns into CheckedUint32Div, and
+// that the "known power of two divisor" optimization works correctly.
+(function() {
+ function foo(s) { return s.length / 2; }
+
+ // Warmup with proper uint32 divisions.
+ assertEquals(1, foo("ab".repeat(1)));
+ assertEquals(2, foo("ab".repeat(2)));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo("ab".repeat(3)));
+ assertOptimized(foo);
+
+ // Make optimized code fail.
+ assertEquals(0.5, foo("a"));
+ assertUnoptimized(foo);
+
+ // Try again with the new feedback, and now it should stay optimized.
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(4, foo("ab".repeat(4)));
+ assertOptimized(foo);
+ assertEquals(0.5, foo("a"));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberDivide turns into CheckedUint32Div, and
+// that the optimized code properly bails out on "division by zero".
+(function() {
+ function foo(x, y) { return (x >>> 0) / (y >>> 0); }
+
+ // Warmup with proper uint32 divisions.
+ assertEquals(2, foo(4, 2));
+ assertEquals(2, foo(8, 4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(2, 2));
+ assertOptimized(foo);
+
+ // Make optimized code fail.
+ assertEquals(Infinity, foo(1, 0));
+ assertUnoptimized(foo);
+
+ // Try again with the new feedback, and now it should stay optimized.
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(2, 1));
+ assertOptimized(foo);
+ assertEquals(Infinity, foo(1, 0));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-floor.js b/deps/v8/test/mjsunit/compiler/number-floor.js
new file mode 100644
index 0000000000..180b89e559
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-floor.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test that NumberFloor propagates kIdentifyZeros truncations.
+(function() {
+ function foo(x) {
+ return Math.abs(Math.floor(x * -2));
+ }
+
+ assertEquals(2, foo(1));
+ assertEquals(4, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(1));
+ assertEquals(4, foo(2));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-issafeinteger.js b/deps/v8/test/mjsunit/compiler/number-issafeinteger.js
index 192fb6c124..b705e95ed5 100644
--- a/deps/v8/test/mjsunit/compiler/number-issafeinteger.js
+++ b/deps/v8/test/mjsunit/compiler/number-issafeinteger.js
@@ -40,11 +40,19 @@ function test(f) {
assertFalse(f(2 * near_lower - 7));
}
-function f(x) {
- return Number.isSafeInteger(+x);
-}
+// Check that the NumberIsSafeInteger simplified operator in
+// TurboFan does the right thing.
+function NumberIsSafeInteger(x) { return Number.isSafeInteger(+x); }
+test(NumberIsSafeInteger);
+test(NumberIsSafeInteger);
+%OptimizeFunctionOnNextCall(NumberIsSafeInteger);
+test(NumberIsSafeInteger);
-test(f);
-test(f);
-%OptimizeFunctionOnNextCall(f);
-test(f);
+// Check that the ObjectIsSafeInteger simplified operator in
+// TurboFan does the right thing as well (i.e. when TurboFan
+// is not able to tell statically that the inputs are numbers).
+function ObjectIsSafeInteger(x) { return Number.isSafeInteger(x); }
+test(ObjectIsSafeInteger);
+test(ObjectIsSafeInteger);
+%OptimizeFunctionOnNextCall(ObjectIsSafeInteger);
+test(ObjectIsSafeInteger);
diff --git a/deps/v8/test/mjsunit/compiler/number-max.js b/deps/v8/test/mjsunit/compiler/number-max.js
new file mode 100644
index 0000000000..7e5a4a4ad1
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-max.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test that NumberMax properly passes the kIdentifyZeros truncation.
+(function() {
+ function foo(x) {
+ if (Math.max(x * -2, 1) == 1) return 0;
+ return 1;
+ }
+
+ assertEquals(0, foo(2));
+ assertEquals(1, foo(-1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(2));
+ assertEquals(1, foo(-1));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-min.js b/deps/v8/test/mjsunit/compiler/number-min.js
new file mode 100644
index 0000000000..72bff78686
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-min.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test that NumberMin properly passes the kIdentifyZeros truncation.
+(function() {
+ function foo(x) {
+ if (Math.min(x * -2, -1) == -2) return 0;
+ return 1;
+ }
+
+ assertEquals(0, foo(1));
+ assertEquals(1, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1));
+ assertEquals(1, foo(2));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(1, foo(0));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-modulus.js b/deps/v8/test/mjsunit/compiler/number-modulus.js
new file mode 100644
index 0000000000..5f695d1ee5
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-modulus.js
@@ -0,0 +1,256 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --noalways-opt
+
+// Test that NumberModulus with Number feedback works even if SimplifiedLowering
+// only figures out in the end that the inputs to this operation
+// are actually Unsigned32.
+(function() {
+ // We need a separately polluted % with NumberOrOddball feedback.
+ function bar(x) { return x % 2; }
+ bar(undefined); // The % feedback is now NumberOrOddball.
+
+ // Now just use the gadget above in a way where only after RETYPE
+ // in SimplifiedLowering do we find out that `x` is actually in
+ // Unsigned32 range (based on the SignedSmall feedback taken for
+ // the + operator).
+ function foo(x) {
+ x = (x >>> 0) + 1;
+ return bar(x) | 0;
+ }
+
+ assertEquals(0, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(0, foo(3));
+ assertEquals(1, foo(4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(0, foo(3));
+ assertEquals(1, foo(4));
+ assertOptimized(foo);
+})();
+
+// Test that NumberModulus with Number feedback works even if SimplifiedLowering
+// only figures out in the end that the inputs to this operation
+// are actually Signed32.
+(function() {
+ // We need a separately polluted % with NumberOrOddball feedback.
+ function bar(x) { return x % 2; }
+ bar(undefined); // The % feedback is now NumberOrOddball.
+
+ // Now just use the gadget above in a way where only after RETYPE
+ // in SimplifiedLowering do we find out that `x` is actually in
+ // Signed32 range (based on the SignedSmall feedback taken for
+ // the + operator).
+ function foo(x) {
+ x = (x | 0) + 1;
+ return bar(x) | 0;
+ }
+
+ assertEquals(0, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(0, foo(3));
+ assertEquals(1, foo(4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(0, foo(3));
+ assertEquals(1, foo(4));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberModulus with Number feedback works even if
+// SimplifiedLowering only figures out in the end that the inputs to
+// this operation are actually Unsigned32.
+(function() {
+ // We need to use an object literal here to make sure that the
+ // SpeculativeNumberModulus is not turned into a NumberModulus
+ // early during JSTypedLowering.
+ function bar(x) { return {x}.x % 2; }
+ bar(undefined); // The % feedback is now NumberOrOddball.
+
+ // Now just use the gadget above in a way where only after RETYPE
+ // in SimplifiedLowering do we find out that `x` is actually in
+ // Unsigned32 range (based on the SignedSmall feedback taken for
+ // the + operator).
+ function foo(x) {
+ x = (x >>> 0) + 1;
+ return bar(x) | 0;
+ }
+
+ assertEquals(0, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(0, foo(3));
+ assertEquals(1, foo(4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(0, foo(3));
+ assertEquals(1, foo(4));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberModulus with Number feedback works even if
+// SimplifiedLowering only figures out in the end that the inputs to
+// this operation are actually Signed32.
+(function() {
+ // We need to use an object literal here to make sure that the
+ // SpeculativeNumberModulus is not turned into a NumberModulus
+ // early during JSTypedLowering.
+ function bar(x) { return {x}.x % 2; }
+ bar(undefined); // The % feedback is now NumberOrOddball.
+
+ // Now just use the gadget above in a way where only after RETYPE
+ // in SimplifiedLowering do we find out that `x` is actually in
+ // Signed32 range (based on the SignedSmall feedback taken for
+ // the + operator).
+ function foo(x) {
+ x = (x | 0) + 1;
+ return bar(x) | 0;
+ }
+
+ assertEquals(0, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(0, foo(3));
+ assertEquals(1, foo(4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1));
+ assertEquals(1, foo(2));
+ assertEquals(0, foo(3));
+ assertEquals(1, foo(4));
+ assertOptimized(foo);
+})();
+
+// Test that NumberModulus works in the case where TurboFan
+// can infer that the output is Signed32 \/ MinusZero, and
+// there's a truncation on the result that identifies zeros
+// (via the SpeculativeNumberEqual).
+(function() {
+ // We need a separately polluted % with NumberOrOddball feedback.
+ function bar(x) { return x % 2; }
+ bar(undefined); // The % feedback is now NumberOrOddball.
+
+ // Now we just use the gadget above on an `x` that is known
+ // to be in Signed32 range and compare it to 0, which passes
+ // a truncation that identifies zeros.
+ function foo(x) {
+ if (bar(x | 0) == 0) return 0;
+ return 1;
+ }
+
+ assertEquals(0, foo(2));
+ assertEquals(1, foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(2));
+ assertEquals(1, foo(1));
+ assertOptimized(foo);
+
+ // Now `foo` should stay optimized even if `x % 2` would
+ // produce -0, aka when we pass a negative value for `x`.
+ assertEquals(0, foo(-2));
+ assertEquals(1, foo(-1));
+ assertOptimized(foo);
+})();
+
+// Test that CheckedInt32Mod handles the slow-path (when
+// the left hand side is negative) correctly.
+(function() {
+ // We need a SpeculativeNumberModulus with SignedSmall feedback.
+ function foo(x, y) {
+ return x % y;
+ }
+
+ assertEquals(0, foo(2, 1));
+ assertEquals(0, foo(2, 2));
+ assertEquals(-1, foo(-3, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(2, 1));
+ assertEquals(0, foo(2, 2));
+ assertEquals(-1, foo(-3, 2));
+ assertOptimized(foo);
+
+ // Now `foo` should deoptimize if the result is -0.
+ assertEquals(-0, foo(-2, 2));
+ assertUnoptimized(foo);
+})();
+
+// Test that NumberModulus passes kIdentifyZeros to the
+// left hand side input when the result doesn't care about
+// 0 vs -0, even when the inputs are outside Signed32.
+(function() {
+ function foo(x) {
+ return (x * -2) % (2 ** 32) === 0;
+ }
+
+ assertFalse(foo(2));
+ assertFalse(foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(2));
+ assertFalse(foo(1));
+
+ // Now `foo` should stay optimized even if `x * -2` would
+ // produce -0, aka when we pass a zero value for `x`.
+ assertTrue(foo(0));
+ assertOptimized(foo);
+})();
+
+// Test that NumberModulus passes kIdentifyZeros to the
+// right hand side input, even when the inputs are outside
+// the Signed32 range.
+(function() {
+ function foo(x) {
+ return (2 ** 32) % (x * -2);
+ }
+
+ assertEquals(0, foo(1));
+ assertEquals(0, foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1));
+
+ // Now `foo` should stay optimized even if `x * -2` would
+ // produce -0, aka when we pass a zero value for `x`.
+ assertEquals(NaN, foo(0));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberModulus passes kIdentifyZeros
+// to the right hand side input, even when feedback is consumed.
+(function() {
+ function foo(x, y) {
+ return (x % (y * -2)) | 0;
+ }
+
+ assertEquals(0, foo(2, 1));
+ assertEquals(-1, foo(-3, 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(2, 1));
+ assertEquals(-1, foo(-3, 1));
+ assertOptimized(foo);
+
+ // Now `foo` should stay optimized even if `y * -2` would
+ // produce -0, aka when we pass a zero value for `y`.
+ assertEquals(0, foo(2, 0));
+ assertOptimized(foo);
+})();
+
+// Test that SpeculativeNumberModulus passes kIdentifyZeros
+// to the left hand side input, even when feedback is consumed.
+(function() {
+ function foo(x, y) {
+ return ((x * -2) % y) | 0;
+ }
+
+ assertEquals(-2, foo(1, 3));
+ assertEquals(-2, foo(1, 3));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-2, foo(1, 3));
+ assertOptimized(foo);
+
+ // Now `foo` should stay optimized even if `x * -2` would
+ // produce -0, aka when we pass a zero value for `x`.
+ assertEquals(0, foo(0, 2));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-round.js b/deps/v8/test/mjsunit/compiler/number-round.js
new file mode 100644
index 0000000000..9aec7f7a12
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-round.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test that NumberRound propagates kIdentifyZeros truncations.
+(function() {
+ function foo(x) {
+ return Math.abs(Math.round(x * -2));
+ }
+
+ assertEquals(2, foo(1));
+ assertEquals(4, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(1));
+ assertEquals(4, foo(2));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-subtract.js b/deps/v8/test/mjsunit/compiler/number-subtract.js
new file mode 100644
index 0000000000..cb3e1c7e70
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-subtract.js
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// This tests that SpeculativeNumberSubtract can still lower to
+// Int32Sub in SimplifiedLowering, which requires some magic
+// to make sure that SpeculativeNumberSubtract survives to that
+// point; in particular, the JSTypedLowering needs to be unable
+// to tell that the inputs to SpeculativeNumberSubtract are not
+// Number, Undefined, Null or Boolean.
+(function() {
+ // We need a function that has a - with feedback Number or
+ // NumberOrOddball, but for whose inputs the JSTypedLowering
+ // cannot reduce it to NumberSubtract (with SpeculativeToNumber
+ // conversions). We achieve this by using an object literal
+ // indirection here.
+ function baz(x) {
+ return {x}.x - x;
+ }
+ baz(null);
+ baz(undefined);
+
+ // Now we just need to truncate the result.
+ function foo(x) {
+ return baz(42) | 0;
+ }
+
+ assertEquals(0, foo());
+ assertEquals(0, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-toboolean.js b/deps/v8/test/mjsunit/compiler/number-toboolean.js
new file mode 100644
index 0000000000..02b30b3ed6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-toboolean.js
@@ -0,0 +1,45 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test that NumberToBoolean properly passes the kIdentifyZeros truncation
+// for Signed32 \/ MinusZero inputs.
+(function() {
+ function foo(x) {
+ if (x * -2) return 1;
+ return 0;
+ }
+
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(1));
+ assertEquals(1, foo(2));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
+
+
+// Test that NumberToBoolean properly passes the kIdentifyZeros truncation
+// for Unsigned32 \/ MinusZero inputs.
+(function() {
+ function foo(x) {
+ x = x | 0;
+ if (Math.max(x * -2, 0)) return 1;
+ return 0;
+ }
+
+ assertEquals(1, foo(-1));
+ assertEquals(1, foo(-2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(-1));
+ assertEquals(1, foo(-2));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-trunc.js b/deps/v8/test/mjsunit/compiler/number-trunc.js
new file mode 100644
index 0000000000..aa7d02c20f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-trunc.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test that NumberTrunc propagates kIdentifyZeros truncations.
+(function() {
+ function foo(x) {
+ return Math.abs(Math.trunc(x * -2));
+ }
+
+ assertEquals(2, foo(1));
+ assertEquals(4, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo(1));
+ assertEquals(4, foo(2));
+ assertOptimized(foo);
+ // Now `foo` should stay optimized even if `x * -2` would produce `-0`.
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/redundancy-elimination.js b/deps/v8/test/mjsunit/compiler/redundancy-elimination.js
new file mode 100644
index 0000000000..1e5185adb7
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/redundancy-elimination.js
@@ -0,0 +1,194 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test the RedundancyElimination::ReduceSpeculativeNumberOperation()
+// TurboFan optimization for the case of SpeculativeNumberAdd with
+// Number feedback.
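+// The load a[i] already checks that i is a valid array index, which should
+// allow TurboFan to drop the redundant number check on the ++i inside bar.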
+(function() {
+ function bar(i) {
+ return ++i;
+ }
+ bar(0.1);
+
+ function foo(a, i) {
+ const x = a[i];
+ const y = a[bar(i)];
+ return x + y;
+ }
+
+ assertEquals(3, foo([1, 2], 0));
+ assertEquals(3, foo([1, 2], 0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo([1, 2], 0));
+})();
+
+// Test the RedundancyElimination::ReduceSpeculativeNumberOperation()
+// TurboFan optimization for the case of SpeculativeNumberAdd with
+// NumberOrOddball feedback.
+(function() {
+ function bar(i) {
+ return ++i;
+ }
+ assertEquals(NaN, bar(undefined));
+
+ function foo(a, i) {
+ const x = a[i];
+ const y = a[bar(i)];
+ return x + y;
+ }
+
+ assertEquals(3, foo([1, 2], 0));
+ assertEquals(3, foo([1, 2], 0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo([1, 2], 0));
+})();
+
+// Test the RedundancyElimination::ReduceSpeculativeNumberOperation()
+// TurboFan optimization for the case of SpeculativeNumberSubtract with
+// Number feedback.
+(function() {
+ function bar(i) {
+ return --i;
+ }
+ assertEquals(-0.9, bar(0.1));
+
+ function foo(a, i) {
+ const x = a[i];
+ const y = a[bar(i)];
+ return x + y;
+ }
+
+ assertEquals(3, foo([1, 2], 1));
+ assertEquals(3, foo([1, 2], 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo([1, 2], 1));
+})();
+
+// Test the RedundancyElimination::ReduceSpeculativeNumberOperation()
+// TurboFan optimization for the case of SpeculativeNumberSubtract with
+// NumberOrOddball feedback.
+(function() {
+ function bar(i) {
+ return --i;
+ }
+ assertEquals(NaN, bar(undefined));
+
+ function foo(a, i) {
+ const x = a[i];
+ const y = a[bar(i)];
+ return x + y;
+ }
+
+ assertEquals(3, foo([1, 2], 1));
+ assertEquals(3, foo([1, 2], 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo([1, 2], 1));
+})();
+
+// Test the RedundancyElimination::ReduceSpeculativeNumberOperation()
+// TurboFan optimization for the case of SpeculativeToNumber.
+(function() {
+ function foo(a, i) {
+ const x = a[i];
+ const y = i++;
+ return x + y;
+ }
+
+ assertEquals(1, foo([1, 2], 0));
+ assertEquals(1, foo([1, 2], 0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo([1, 2], 0));
+})();
+
+// Test the RedundancyElimination::ReduceSpeculativeNumberOperation()
+// TurboFan optimization for the case of SpeculativeSafeIntegerAdd.
+(function() {
+ function foo(a, i) {
+ const x = a[i];
+ const y = a[++i];
+ return x + y;
+ }
+
+ assertEquals(3, foo([1, 2], 0));
+ assertEquals(3, foo([1, 2], 0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo([1, 2], 0));
+})();
+
+// Test the RedundancyElimination::ReduceSpeculativeNumberOperation()
+// TurboFan optimization for the case of SpeculativeSafeIntegerSubtract.
+(function() {
+ function foo(a, i) {
+ const x = a[i];
+ const y = a[--i];
+ return x + y;
+ }
+
+ assertEquals(3, foo([1, 2], 1));
+ assertEquals(3, foo([1, 2], 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo([1, 2], 1));
+})();
+
+// Test the RedundancyElimination::ReduceSpeculativeNumberComparison()
+// TurboFan optimization for the case of SpeculativeNumberEqual.
+(function() {
+ function foo(a, i) {
+ const x = a[i];
+ if (i === 0) return x;
+ return i;
+ }
+
+ assertEquals(1, foo([1, 2], 0));
+ assertEquals(1, foo([1, 2], 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo([1, 2], 0));
+ assertEquals(1, foo([1, 2], 1));
+ // Even passing -0 should not deoptimize and
+ // of course still pass the equality test above.
+ assertEquals(9, foo([9, 2], -0));
+ assertOptimized(foo);
+})();
+
+// Test the RedundancyElimination::ReduceSpeculativeNumberComparison()
+// TurboFan optimization for the case of SpeculativeNumberLessThan.
+(function() {
+ function foo(a, i) {
+ const x = a[i];
+ if (i < 1) return x;
+ return i;
+ }
+
+ assertEquals(1, foo([1, 2], 0));
+ assertEquals(1, foo([1, 2], 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo([1, 2], 0));
+ assertEquals(1, foo([1, 2], 1));
+ // Even passing -0 should not deoptimize and
+ // of course still pass the comparison test above.
+ assertEquals(9, foo([9, 2], -0));
+ assertOptimized(foo);
+})();
+
+// Test the RedundancyElimination::ReduceSpeculativeNumberComparison()
+// TurboFan optimization for the case of SpeculativeNumberLessThanOrEqual.
+(function() {
+ function foo(a, i) {
+ const x = a[i];
+ if (i <= 0) return x;
+ return i;
+ }
+
+ assertEquals(1, foo([1, 2], 0));
+ assertEquals(1, foo([1, 2], 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo([1, 2], 0));
+ assertEquals(1, foo([1, 2], 1));
+ // Even passing -0 should not deoptimize and
+ // of course still pass the comparison test above.
+ assertEquals(9, foo([9, 2], -0));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-7121.js b/deps/v8/test/mjsunit/compiler/regress-7121.js
index 98c1a1ac19..bdf3133bb8 100644
--- a/deps/v8/test/mjsunit/compiler/regress-7121.js
+++ b/deps/v8/test/mjsunit/compiler/regress-7121.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint
+// Flags: --allow-natives-syntax
function foo() { %_ToLength(42n) }
assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-884052.js b/deps/v8/test/mjsunit/compiler/regress-884052.js
new file mode 100644
index 0000000000..babfcc3cea
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-884052.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ var a = new Array(2);
+ for (var i = 1; i > -1; i = i - 2) {
+ if (i < a.length) a = new Array(i);
+ }
+}
+
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-890057.js b/deps/v8/test/mjsunit/compiler/regress-890057.js
new file mode 100644
index 0000000000..655c4431e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-890057.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {}
+function g() {
+ f.prototype = undefined;
+ f();
+ new f();
+}
+
+// Do not use %OptimizeFunctionOnNextCall here; this particular bug needs
+// truly concurrent compilation in order to trigger.
+for (let i = 0; i < 10000; i++) g();
diff --git a/deps/v8/test/mjsunit/compiler/regress-890620.js b/deps/v8/test/mjsunit/compiler/regress-890620.js
new file mode 100644
index 0000000000..f5fc7f4f65
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-890620.js
@@ -0,0 +1,25 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = 42;
+
+function g(n) {
+ while (n > 0) {
+ a = new Array(n);
+ n--;
+ }
+}
+
+g(1);
+
+function f() {
+ g();
+}
+
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+assertEquals(1, a.length);
diff --git a/deps/v8/test/mjsunit/compiler/regress-895799.js b/deps/v8/test/mjsunit/compiler/regress-895799.js
new file mode 100644
index 0000000000..4305b7427b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-895799.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+class C extends Object {
+ constructor() {
+ try { super(); } catch (e) { };
+ return 1;
+ }
+}
+
+class A extends C {
+ constructor() {
+ super();
+ throw new Error();
+ return { get: () => this };
+ }
+}
+
+var D = new Proxy(A, { get() { %DeoptimizeFunction(A); } });
+
+try { Reflect.construct(A, [], D); } catch(e) {}
+%OptimizeFunctionOnNextCall(A);
+try { Reflect.construct(A, [], D); } catch(e) {}
diff --git a/deps/v8/test/mjsunit/compiler/strict-equal-symbol.js b/deps/v8/test/mjsunit/compiler/strict-equal-symbol.js
new file mode 100644
index 0000000000..aee1ecfa60
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/strict-equal-symbol.js
@@ -0,0 +1,50 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Known symbols strict equality.
+(function() {
+ const a = Symbol("a");
+ const b = Symbol("b");
+
+ function foo() { return a === b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known symbol on one side strict equality.
+(function() {
+ const a = Symbol("a");
+ const b = Symbol("b");
+
+ function foo(a) { return a === b; }
+
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+})();
+
+// Feedback based symbol strict equality.
+(function() {
+ const a = Symbol("a");
+ const b = Symbol("b");
+
+ function foo(a, b) { return a === b; }
+
+ assertTrue(foo(b, b));
+ assertFalse(foo(a, b));
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/string-add-try-catch.js b/deps/v8/test/mjsunit/compiler/string-add-try-catch.js
index d7a3d2583c..5ae5b00d18 100644
--- a/deps/v8/test/mjsunit/compiler/string-add-try-catch.js
+++ b/deps/v8/test/mjsunit/compiler/string-add-try-catch.js
@@ -4,6 +4,9 @@
// Flags: --allow-natives-syntax
+// Test that string concatenation overflow (going over the string max length)
+// is handled gracefully, i.e. that an error is thrown.
+
var a = "a".repeat(%StringMaxLength());
(function() {
@@ -37,3 +40,57 @@ var a = "a".repeat(%StringMaxLength());
foo("a");
assertInstanceof(foo(a), RangeError);
})();
+
+(function() {
+ function foo(a, b) {
+ try {
+ return "0123456789012".concat(a);
+ } catch (e) {
+ return e;
+ }
+ }
+
+ foo("a");
+ foo("a");
+ %OptimizeFunctionOnNextCall(foo);
+ foo("a");
+ assertInstanceof(foo(a), RangeError);
+})();
+
+var obj = {
+ toString: function() {
+ throw new Error('toString has thrown');
+ }
+};
+
+(function() {
+ function foo(a, b) {
+ try {
+ return "0123456789012" + obj;
+ } catch (e) {
+ return e;
+ }
+ }
+
+ foo("a");
+ foo("a");
+ %OptimizeFunctionOnNextCall(foo);
+ foo("a");
+ assertInstanceof(foo(a), Error);
+})();
+
+(function() {
+ function foo(a, b) {
+ try {
+ return a + 123;
+ } catch (e) {
+ return e;
+ }
+ }
+
+ foo("a");
+ foo("a");
+ %OptimizeFunctionOnNextCall(foo);
+ foo("a");
+ assertInstanceof(foo(a), RangeError);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/string-from-code-point.js b/deps/v8/test/mjsunit/compiler/string-from-code-point.js
new file mode 100644
index 0000000000..165ea0c234
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/string-from-code-point.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --noalways-opt
+
+// Test that String.fromCodePoint() properly handles zero (including -0).
+(function() {
+ function foo(x) {
+ return String.fromCodePoint(x);
+ }
+
+ assertEquals("\u0000", foo(0));
+ assertEquals("\u0000", foo(-0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals("\u0000", foo(0));
+ assertEquals("\u0000", foo(-0));
+ assertOptimized(foo);
+
+ // Now passing anything outside the valid code point
+ // range should invalidate the optimized code.
+ assertThrows(_ => foo(-1));
+ assertUnoptimized(foo);
+
+ // And TurboFan should not inline the builtin anymore
+ // from now on (i.e., no deoptimization loop).
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals("\u0000", foo(0));
+ assertEquals("\u0000", foo(-0));
+ assertThrows(_ => foo(-1));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/typed-array-constructor.js b/deps/v8/test/mjsunit/compiler/typed-array-constructor.js
index a785eadf37..07d6a7ca4e 100644
--- a/deps/v8/test/mjsunit/compiler/typed-array-constructor.js
+++ b/deps/v8/test/mjsunit/compiler/typed-array-constructor.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint
+// Flags: --allow-natives-syntax
const limit = %MaxSmi() + 1;
diff --git a/deps/v8/test/mjsunit/d8/d8-worker-script.js b/deps/v8/test/mjsunit/d8/d8-worker-script.js
new file mode 100644
index 0000000000..7c5d595b2b
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/d8-worker-script.js
@@ -0,0 +1,39 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Verify that the Worker constructor by default treats its first argument
+// as the filename of a script to load and run.
+
+// Resources: test/mjsunit/d8/d8-worker-script.txt
+
+if (this.Worker) {
+ var w = new Worker('test/mjsunit/d8/d8-worker-script.txt');
+ assertEquals("Starting worker", w.getMessage());
+ w.postMessage("");
+ assertEquals("DONE", w.getMessage());
+ w.terminate();
+}
diff --git a/deps/v8/test/mjsunit/d8/d8-worker-script.txt b/deps/v8/test/mjsunit/d8/d8-worker-script.txt
new file mode 100644
index 0000000000..9254cea4f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/d8-worker-script.txt
@@ -0,0 +1,8 @@
+// Worker script used by d8-worker-script.js.
+// This file is named `.txt` to prevent it from being treated as a test itself.
+
+onmessage = function(m) {
+ postMessage('DONE');
+}
+
+postMessage('Starting worker');
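+
+// (Note: the driver in d8-worker-script.js expects exactly this handshake:
+// "Starting worker" first, then "DONE" in reply to its empty message.)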
diff --git a/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js b/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
index 0a15413ea3..f166ca2eb1 100644
--- a/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
@@ -45,7 +45,7 @@ if (this.Worker) {
Atomics.store(ta, 0, 100);
};`;
- var w = new Worker(workerScript);
+ var w = new Worker(workerScript, {type: 'string'});
var sab = new SharedArrayBuffer(16);
var ta = new Uint32Array(sab);
@@ -84,7 +84,7 @@ if (this.Worker) {
var id;
var workers = [];
for (id = 0; id < 4; ++id) {
- workers[id] = new Worker(workerScript);
+ workers[id] = new Worker(workerScript, {type: 'string'});
workers[id].postMessage({sab: sab, id: id});
}
diff --git a/deps/v8/test/mjsunit/d8/d8-worker-spawn-worker.js b/deps/v8/test/mjsunit/d8/d8-worker-spawn-worker.js
index a114d8587e..621ec253bc 100644
--- a/deps/v8/test/mjsunit/d8/d8-worker-spawn-worker.js
+++ b/deps/v8/test/mjsunit/d8/d8-worker-spawn-worker.js
@@ -27,14 +27,14 @@
if (this.Worker) {
var workerScript =
- `var w = new Worker('postMessage(42)');
+ `var w = new Worker('postMessage(42)', {type: 'string'});
onmessage = function(parentMsg) {
w.postMessage(parentMsg);
var childMsg = w.getMessage();
postMessage(childMsg);
};`;
- var w = new Worker(workerScript);
+ var w = new Worker(workerScript, {type: 'string'});
w.postMessage(9);
assertEquals(42, w.getMessage());
}
diff --git a/deps/v8/test/mjsunit/d8/d8-worker.js b/deps/v8/test/mjsunit/d8/d8-worker.js
index a73d7b1706..afc03f5c8b 100644
--- a/deps/v8/test/mjsunit/d8/d8-worker.js
+++ b/deps/v8/test/mjsunit/d8/d8-worker.js
@@ -97,7 +97,21 @@ if (this.Worker) {
return ab;
}
- var w = new Worker(workerScript);
+ assertThrows(function() {
+ // The second argument must be an 'options' object.
+ new Worker(workerScript, 123);
+ });
+
+ assertThrows(function() {
+ new Worker('test/mjsunit/d8/d8-worker.js', {type: 'invalid'});
+ });
+
+ assertThrows(function() {
+ // The worker type defaults to 'classic', which tries to load the script from a file.
+ new Worker(workerScript);
+ });
+
+ var w = new Worker(workerScript, {type: 'string'});
assertEquals("Starting worker", w.getMessage());
@@ -140,6 +154,12 @@ if (this.Worker) {
w.postMessage(ab2, [ab2]);
assertEquals(0, ab2.byteLength); // ArrayBuffer should be neutered.
+ // Attempting to transfer the same ArrayBuffer twice should throw.
+ assertThrows(function() {
+ var ab3 = createArrayBuffer(4);
+ w.postMessage(ab3, [ab3, ab3]);
+ });
+
assertEquals("undefined", typeof foo);
// Read a message from the worker.
@@ -150,7 +170,7 @@ if (this.Worker) {
// Make sure that the main thread doesn't block forever in getMessage() if
// the worker dies without posting a message.
- var w2 = new Worker('');
+ var w2 = new Worker('', {type: 'string'});
var msg = w2.getMessage();
assertEquals(undefined, msg);
}
diff --git a/deps/v8/test/mjsunit/es6/array-spread-holey.js b/deps/v8/test/mjsunit/es6/array-spread-holey.js
new file mode 100644
index 0000000000..7d95e51b29
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/array-spread-holey.js
@@ -0,0 +1,52 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test spreading of holey arrays. Holes should be replaced with undefined.
+
+var a = [, 2];
+
+assertEquals([, 2], [...a]);
+assertTrue([...a].hasOwnProperty(0));
+assertTrue([2, ...a].hasOwnProperty(1));
+
+
+class MyArray1 extends Array {
+ constructor(a) {
+ super(...a);
+ }
+}
+var myarr1 = new MyArray1(a);
+assertEquals(undefined, myarr1[0]);
+assertTrue(myarr1.hasOwnProperty(0));
+
+
+class MyArray2 extends Array {
+ constructor(a) {
+ super(2, ...a);
+ }
+}
+var myarr2 = new MyArray2(a);
+assertEquals(undefined, myarr2[1]);
+assertTrue(myarr2.hasOwnProperty(1));
+
+function foo0() { return arguments.hasOwnProperty(0); }
+assertTrue(foo0(...a));
+
+function foo1() { return arguments.hasOwnProperty(1); }
+assertTrue(foo1(2, ...a));
+
+// This test pollutes the Array prototype. No more tests should be run in the
+// same instance after this.
+a.__proto__[0] = 1;
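+// Reading the hole at index 0 now walks the prototype chain and finds the
+// element installed on Array.prototype, hence 1 instead of undefined below.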
+var arr2 = [...a];
+assertEquals([1,2], arr2);
+assertTrue(arr2.hasOwnProperty(0));
+
+myarr1 = new MyArray1(a);
+assertEquals(1, myarr1[0]);
+assertTrue(myarr1.hasOwnProperty(0));
+
+var myarr2 = new MyArray2(a);
+assertEquals(1, myarr2[1]);
+assertTrue(myarr2.hasOwnProperty(1));
diff --git a/deps/v8/test/mjsunit/es6/proxy-function-tostring.js b/deps/v8/test/mjsunit/es6/proxy-function-tostring.js
index d859822df0..e151bf65b1 100644
--- a/deps/v8/test/mjsunit/es6/proxy-function-tostring.js
+++ b/deps/v8/test/mjsunit/es6/proxy-function-tostring.js
@@ -1,7 +1,6 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --noharmony-function-tostring
-assertThrows(() => new Proxy(function() {}, {}).toString(), TypeError);
+assertEquals(new Proxy(function() {}, {}).toString(),
+ 'function () { [native code] }');
diff --git a/deps/v8/test/mjsunit/es6/string-iterator2.js b/deps/v8/test/mjsunit/es6/string-iterator2.js
new file mode 100644
index 0000000000..6bfd51a815
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/string-iterator2.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+// Tests for spreading primitive strings.
+
+assertEquals([...''], []);
+
+var str = 'ott';
+assertEquals(['o', 't', 't'], [...str]);
+assertTrue(%StringIteratorProtector());
+
+str[Symbol.iterator] = {};
+// Symbol.iterator can't be set on primitive strings, so it shouldn't invalidate
+// the protector.
+assertTrue(%StringIteratorProtector());
+
+// This changes the String Iterator prototype. No more tests should be run after
+// this in the same instance.
+var iterator = str[Symbol.iterator]();
+iterator.__proto__.next = () => ({value : undefined, done : true});
+
+assertFalse(%StringIteratorProtector());
+assertEquals([], [...str]);
diff --git a/deps/v8/test/mjsunit/es6/string-iterator3.js b/deps/v8/test/mjsunit/es6/string-iterator3.js
new file mode 100644
index 0000000000..1b0e0273e5
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/string-iterator3.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+// Tests for primitive strings.
+
+var str = 'ott';
+assertTrue(%StringIteratorProtector());
+assertEquals(['o', 't', 't'], [...str]);
+
+// This changes the String prototype. No more tests should be run after this in
+// the same instance.
+str.__proto__[Symbol.iterator] =
+ function() {
+ return {next : () => ({value : undefined, done : true})};
+ };
+assertFalse(%StringIteratorProtector());
+assertEquals([], [...str]);
diff --git a/deps/v8/test/mjsunit/es6/string-iterator4.js b/deps/v8/test/mjsunit/es6/string-iterator4.js
new file mode 100644
index 0000000000..48c6521d3b
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/string-iterator4.js
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+// Tests for wrapped strings.
+
+var str = new String('ott');
+assertTrue(%StringIteratorProtector());
+assertEquals(['o', 't', 't'], [...str]);
+
+function iterator_fn() {
+ return {next : () => ({value : undefined, done : true})};
+};
+
+str[Symbol.iterator] = iterator_fn;
+// This shouldn't invalidate the protector, because the protector does not
+// cover String wrapper objects.
+assertTrue(%StringIteratorProtector());
+assertEquals([], [...str]);
+
+
+var str2 = new String('ott');
+assertEquals(['o', 't', 't'], [...str2]);
+// This changes the String prototype. No more tests should be run after this in
+// the same instance.
+str2.__proto__[Symbol.iterator] = iterator_fn;
+assertFalse(%StringIteratorProtector());
+assertEquals([], [...str2]);
diff --git a/deps/v8/test/mjsunit/es6/string-iterator5.js b/deps/v8/test/mjsunit/es6/string-iterator5.js
new file mode 100644
index 0000000000..ec9754a4bd
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/string-iterator5.js
@@ -0,0 +1,15 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Tests for primitive strings.
+
+var iterator = 'ott'[Symbol.iterator]();
+
+// These modifications shouldn't invalidate the String iterator protector.
+iterator.__proto__.fonts = {};
+assertTrue(%StringIteratorProtector());
+iterator.__proto__[0] = 0;
+assertTrue(%StringIteratorProtector());
diff --git a/deps/v8/test/mjsunit/es6/string-iterator6.js b/deps/v8/test/mjsunit/es6/string-iterator6.js
new file mode 100644
index 0000000000..d1cd1f31eb
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/string-iterator6.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+assertTrue(%StringIteratorProtector());
+
+delete 'ott'.__proto__[Symbol.iterator];
+
+assertFalse(%StringIteratorProtector());
diff --git a/deps/v8/test/mjsunit/es6/string-iterator7.js b/deps/v8/test/mjsunit/es6/string-iterator7.js
new file mode 100644
index 0000000000..387c6e81fc
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/string-iterator7.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertTrue(%StringIteratorProtector());
+
+const p = ""[Symbol.iterator]().__proto__;
+let x = Object.create(p);
+x.next = 42;
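+// Shadowing 'next' on an object that merely inherits from the iterator
+// prototype leaves the prototype itself untouched, so the protector stays
+// intact.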
+
+assertTrue(%StringIteratorProtector());
diff --git a/deps/v8/test/mjsunit/es6/string-iterator8.js b/deps/v8/test/mjsunit/es6/string-iterator8.js
new file mode 100644
index 0000000000..dbd4b7c46a
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/string-iterator8.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertTrue(%StringIteratorProtector());
+
+var proto = String.prototype;
+
+String.prototype = {};
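+
+// String.prototype is a non-writable property of the String constructor, so
+// this sloppy-mode assignment is silently ignored, as verified below.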
+
+assertEquals(proto, String.prototype);
+assertTrue(%StringIteratorProtector());
diff --git a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
index 3b57f8f644..0a55fccf5c 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
@@ -212,7 +212,7 @@ tests.push(function TestFromTypedArraySpeciesNeutersBuffer(constr) {
});
tests.push(function TestLengthIsMaxSmi(constr) {
- var myObject = { 0: 5, 1: 6, length: %_MaxSmi() + 1 };
+ var myObject = { 0: 5, 1: 6, length: %MaxSmi() + 1 };
assertThrows(function() {
new constr(myObject);
@@ -258,7 +258,7 @@ tests.push(function TestOffsetIsUsed(constr) {
});
tests.push(function TestLengthIsNonSmiNegativeNumber(constr) {
- var ta = new constr({length: -%_MaxSmi() - 2});
+ var ta = new constr({length: -%MaxSmi() - 2});
assertEquals(0, ta.length);
});
diff --git a/deps/v8/test/mjsunit/es6/typedarray-construct-offset-not-smi.js b/deps/v8/test/mjsunit/es6/typedarray-construct-offset-not-smi.js
index 0a267bc64b..e6cbcc4201 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-construct-offset-not-smi.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-construct-offset-not-smi.js
@@ -5,7 +5,7 @@
// Flags: --allow-natives-syntax --mock-arraybuffer-allocator
(function TestBufferByteLengthNonSmi() {
- var non_smi_byte_length = %_MaxSmi() + 1;
+ var non_smi_byte_length = %MaxSmi() + 1;
var buffer = new ArrayBuffer(non_smi_byte_length);
@@ -20,7 +20,7 @@
})();
(function TestByteOffsetNonSmi() {
- var non_smi_byte_length = %_MaxSmi() + 11;
+ var non_smi_byte_length = %MaxSmi() + 11;
var buffer = new ArrayBuffer(non_smi_byte_length);
diff --git a/deps/v8/test/mjsunit/es6/typedarray-set-bytelength-not-smi.js b/deps/v8/test/mjsunit/es6/typedarray-set-bytelength-not-smi.js
index 1f842878dc..e4a8c2b626 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-set-bytelength-not-smi.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-set-bytelength-not-smi.js
@@ -5,13 +5,13 @@
// Flags: --allow-natives-syntax --mock-arraybuffer-allocator
(function TestBufferByteLengthNonSmi() {
- const source_buffer_length = %_MaxSmi() + 1;
+ const source_buffer_length = %MaxSmi() + 1;
const source_buffer = new ArrayBuffer(source_buffer_length);
const source = new Uint16Array(source_buffer);
assertEquals(source_buffer_length, source_buffer.byteLength);
assertEquals(source_buffer_length / 2, source.length);
- const target_buffer_length = %_MaxSmi() - 1;
+ const target_buffer_length = %MaxSmi() - 1;
const target_buffer = new ArrayBuffer(target_buffer_length);
const target = new Uint16Array(target_buffer);
assertEquals(target_buffer_length, target_buffer.byteLength);
diff --git a/deps/v8/test/mjsunit/es9/object-spread-ic-dontenum-transition.js b/deps/v8/test/mjsunit/es9/object-spread-ic-dontenum-transition.js
new file mode 100644
index 0000000000..c403566a38
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/object-spread-ic-dontenum-transition.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testMegamorphicWithDontEnumTransition() {
+ function spread(o) { return { ...o }; }
+
+ // Set up transition tree
+ let obj = { ...{}, a: 0, b: 1, c: 2, };
+ Object.defineProperty(obj, "boom", { enumerable: false, configurable: true,
+ writable: true });
+
+ // make CloneObjectIC MEGAMORPHIC
+ spread(new Proxy({}, {}));
+
+ // Ensure we don't crash, and create the correct object
+ let result = spread({ a: 0, b: 1, c: 2, boom: 3 });
+
+ assertEquals({ a: 0, b: 1, c: 2, boom: 3 }, result);
+ assertEquals({
+ enumerable: true,
+ writable: true,
+ configurable: true,
+ value: 3,
+ }, Object.getOwnPropertyDescriptor(result, "boom"));
+})();
diff --git a/deps/v8/test/mjsunit/es9/object-spread-ic-multiple-transitions.js b/deps/v8/test/mjsunit/es9/object-spread-ic-multiple-transitions.js
new file mode 100644
index 0000000000..f31a6d90d6
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/object-spread-ic-multiple-transitions.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testMegamorphicWithNonSimpleTransitionHandler() {
+ function spread(o) { return { ...o }; }
+
+ // Set up transition tree
+ let obj = { ...{}, a: 0, b: 1, boom: 2};
+
+ // make CloneObjectIC MEGAMORPHIC
+ spread(new Proxy({}, {}));
+
+ // Ensure we don't crash, and create the correct object
+ assertEquals({ a: 0, b: 1, c: 2 }, spread({ a: 0, b: 1, c: 2 }));
+})();
diff --git a/deps/v8/test/mjsunit/external-backing-store-gc.js b/deps/v8/test/mjsunit/external-backing-store-gc.js
new file mode 100644
index 0000000000..005ec15e59
--- /dev/null
+++ b/deps/v8/test/mjsunit/external-backing-store-gc.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --mock-arraybuffer-allocator --mock-arraybuffer-allocator-limit=1300000000
+
+// --mock-arraybuffer-allocator-limit should be above the hard limit for
+// external memory. Below that limit, collection is only opportunistic and may
+// be delayed, e.g., by tasks getting stalled and the event loop not being
+// invoked.
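+//
+// The loop below allocates 1536 * 1 MiB = 1,610,612,736 bytes in total, well
+// above the 1,300,000,000-byte limit, so unreachable buffers must be
+// collected for the allocations to keep succeeding.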
+
+for (var i = 0; i < 1536; i++) {
+ let garbage = new ArrayBuffer(1024*1024);
+}
diff --git a/deps/v8/test/mjsunit/for-in-special-cases.js b/deps/v8/test/mjsunit/for-in-special-cases.js
index b592ad259e..27129e1aac 100644
--- a/deps/v8/test/mjsunit/for-in-special-cases.js
+++ b/deps/v8/test/mjsunit/for-in-special-cases.js
@@ -64,21 +64,15 @@ assertEquals(10, j);
function Accumulate(x) {
- var accumulator = "";
+ var accumulator = [];
for (var i in x) {
- accumulator += i;
+ accumulator.push(i);
}
return accumulator;
}
for (var i = 0; i < 3; ++i) {
- var elements = Accumulate("abcd");
- // We do not assume that for-in enumerates elements in order.
- assertTrue(-1 != elements.indexOf("0"));
- assertTrue(-1 != elements.indexOf("1"));
- assertTrue(-1 != elements.indexOf("2"));
- assertTrue(-1 != elements.indexOf("3"));
- assertEquals(4, elements.length);
+ assertEquals(Accumulate("abcd"), ['0', '1', '2', '3']);
}
function for_in_string_prototype() {
@@ -99,23 +93,51 @@ function for_in_string_prototype() {
// If for-in returns elements in a different order on multiple calls, this
// assert will fail. If that happens, consider if that behavior is OK.
assertEquals(elements, elements1, "For-in elements not the same both times.");
- // We do not assume that for-in enumerates elements in order.
- assertTrue(-1 != elements.indexOf("0"));
- assertTrue(-1 != elements.indexOf("1"));
- assertTrue(-1 != elements.indexOf("2"));
- assertTrue(-1 != elements.indexOf("7"));
- assertTrue(-1 != elements.indexOf("foo"));
- assertTrue(-1 != elements.indexOf("bar"));
- assertTrue(-1 != elements.indexOf("gub"));
- assertEquals(13, elements.length);
-
- elements = Accumulate(x);
- assertTrue(-1 != elements.indexOf("0"));
- assertTrue(-1 != elements.indexOf("1"));
- assertTrue(-1 != elements.indexOf("2"));
- assertTrue(-1 != elements.indexOf("foo"));
- assertEquals(6, elements.length);
+ assertEquals(["7","bar","gub","0","1","2","foo"], elements)
+
+ assertEquals(['0', '1', '2', 'foo'], Accumulate(x))
}
for_in_string_prototype();
for_in_string_prototype();
+
+
+(function for_in_dictionary_prototype_1() {
+ let prototype1 = {prop: 0, prop1: 1};
+ let derived1 = Object.create(null, {
+ prop: {enumerable: false, configurable: true, value: 0},
+ });
+ Object.setPrototypeOf(derived1, prototype1);
+
+ let prototype2 = {prop: 0, prop1: 1};
+ let derived2 = Object.create(prototype2, {
+ prop: {enumerable: false, configurable: true, value: 0},
+ });
+
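+ // In both setups the own, non-enumerable 'prop' shadows the enumerable
+ // 'prop' on the prototype, so for-in must report only 'prop1'.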
+ for (let i = 0; i < 3; i++) {
+ assertEquals(['prop1'], Accumulate(derived1));
+ assertEquals(['prop1'], Accumulate(derived2));
+ }
+})();
+
+(function for_in_dictionary_prototype_2() {
+ let prototype1 = {prop: 0, prop1: 1};
+ let derived1 = Object.create(null, {
+ prop: {enumerable: false, configurable: true, value: 1},
+ prop2: {enumerable: true, configurable: true, value: 2},
+ prop3: {enumerable: false, configurable: true, value: 3},
+ });
+ Object.setPrototypeOf(derived1, prototype1);
+
+ let prototype2 = {prop: 0, prop1: 1};
+ let derived2 = Object.create(prototype2, {
+ prop: {enumerable: false, configurable: true, value: 0},
+ prop2: {enumerable: true, configurable: true, value: 2},
+ prop3: {enumerable: false, configurable: true, value: 3},
+ });
+
+ for (let i = 0; i < 3; i++) {
+ assertEquals(['prop2', 'prop1'], Accumulate(derived1));
+ assertEquals(['prop2', 'prop1'], Accumulate(derived2));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/harmony/async-await-optimization.js b/deps/v8/test/mjsunit/harmony/async-await-optimization.js
new file mode 100644
index 0000000000..b24e541916
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-await-optimization.js
@@ -0,0 +1,124 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-await-optimization
+
+// test basic interleaving
+(function () {
+ const actual = [];
+ const expected = [ 'await', 1, 'await', 2 ];
+ const iterations = 2;
+
+ async function pushAwait() {
+ actual.push('await');
+ }
+
+ async function callAsync() {
+ for (let i = 0; i < iterations; i++) {
+ await pushAwait();
+ }
+ return 0;
+ }
+
+ function checkAssertions() {
+ assertArrayEquals(expected, actual,
+ 'Async/await and promises should be interleaved.');
+ }
+
+ assertPromiseResult((async() => {
+ callAsync();
+
+ return new Promise(function (resolve) {
+ actual.push(1);
+ resolve();
+ }).then(function () {
+ actual.push(2);
+ }).then(checkAssertions);
+ })());
+})();
+
+// test async generators
+(function () {
+ const actual = [];
+ const expected = [ 'await', 1, 'await', 2 ];
+ const iterations = 2;
+
+ async function pushAwait() {
+ actual.push('await');
+ }
+
+ async function* callAsync() {
+ for (let i = 0; i < iterations; i++) {
+ await pushAwait();
+ }
+ return 0;
+ }
+
+ function checkAssertions() {
+ assertArrayEquals(expected, actual,
+ 'Async/await and promises should be interleaved when using async generators.');
+ }
+
+ assertPromiseResult((async() => {
+ callAsync().next();
+
+ return new Promise(function (resolve) {
+ actual.push(1);
+ resolve();
+ }).then(function () {
+ actual.push(2);
+ }).then(checkAssertions);
+ })());
+})();
+
+// test yielding from async generators
+(function () {
+ const actual = [];
+ const expected = [
+ 'Promise: 6',
+ 'Promise: 5',
+ 'Await: 3',
+ 'Promise: 4',
+ 'Promise: 3',
+ 'Await: 2',
+ 'Promise: 2',
+ 'Promise: 1',
+ 'Await: 1',
+ 'Promise: 0'
+ ];
+ const iterations = 3;
+
+ async function* naturalNumbers(start) {
+ let current = start;
+ while (current > 0) {
+ yield Promise.resolve(current--);
+ }
+ }
+
+ async function trigger() {
+ for await (const num of naturalNumbers(iterations)) {
+ actual.push('Await: ' + num);
+ }
+ }
+
+ async function checkAssertions() {
+ assertArrayEquals(expected, actual,
+ 'Async/await and promises should be interleaved when yielding.');
+ }
+
+ async function countdown(counter) {
+ actual.push('Promise: ' + counter);
+ if (counter > 0) {
+ return Promise.resolve(counter - 1).then(countdown);
+ } else {
+ await checkAssertions();
+ }
+ }
+
+ assertPromiseResult((async() => {
+ trigger();
+
+ return countdown(iterations * 2);
+ })());
+})();
diff --git a/deps/v8/test/mjsunit/harmony/atomics-notify.js b/deps/v8/test/mjsunit/harmony/atomics-notify.js
new file mode 100644
index 0000000000..cf18321786
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/atomics-notify.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --harmony-sharedarraybuffer
+
+// This test needs to be removed if we ever remove Atomics.wake.
+assertNotSame(Atomics.wake, Atomics.notify);
diff --git a/deps/v8/test/mjsunit/harmony/atomics-value-check.js b/deps/v8/test/mjsunit/harmony/atomics-value-check.js
index b953863daf..053bc6dfc5 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-value-check.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-value-check.js
@@ -12,7 +12,7 @@ var workerScript =
`onmessage=function(msg) {
postMessage(0);
};`;
-var worker = new Worker(workerScript);
+var worker = new Worker(workerScript, {type: 'string'});
var value_obj = {
valueOf: function() {worker.postMessage({sab:sab}, [sta.buffer]);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/add.js b/deps/v8/test/mjsunit/harmony/bigint/add.js
index 5e986b3726..791db6a3b9 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/add.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/add.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: -0xc4043e2c4cc49e4d6870103ce7c2ff2d512bf4b1b67553ba410db514ee0af8888ad6cfn,
b: 0x2aae86de73ff479133a657a40d26e8dcf192019c7421836615ec34978bad93n,
diff --git a/deps/v8/test/mjsunit/harmony/bigint/and.js b/deps/v8/test/mjsunit/harmony/bigint/and.js
index 7a68f8b3dc..a90ec22f51 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/and.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/and.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: 0x9252b94f220ded0c18706998886397699c5a25527575dn,
b: -0x286817ba2e8fd8n,
diff --git a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
index 51b5073d24..154a0929e5 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-bigint
-
// BigInt.asIntN
{
assertEquals(2, BigInt.asIntN.length);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/basics.js b/deps/v8/test/mjsunit/harmony/bigint/basics.js
index b6318d5324..0368c69b52 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/basics.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/basics.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint
+// Flags: --allow-natives-syntax
'use strict'
diff --git a/deps/v8/test/mjsunit/harmony/bigint/comparisons.js b/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
index 73eb24d687..abc7a8082a 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint
+// Flags: --allow-natives-syntax
'use strict'
@@ -297,18 +297,6 @@ const six = BigInt(6);
assertTrue(Reflect.defineProperty(obj, 'foo', {value: zero}));
assertTrue(Reflect.defineProperty(obj, 'foo', {value: another_zero}));
assertFalse(Reflect.defineProperty(obj, 'foo', {value: one}));
-}{
- assertTrue(%SameValue(zero, zero));
- assertTrue(%SameValue(zero, another_zero));
-
- assertFalse(%SameValue(zero, +0));
- assertFalse(%SameValue(zero, -0));
-
- assertFalse(%SameValue(+0, zero));
- assertFalse(%SameValue(-0, zero));
-
- assertTrue(%SameValue(one, one));
- assertTrue(%SameValue(one, another_one));
}
// SameValueZero
@@ -351,18 +339,6 @@ const six = BigInt(6);
assertTrue(new Map([[one, 42]]).has(one));
assertTrue(new Map([[one, 42]]).has(another_one));
-}{
- assertTrue(%SameValueZero(zero, zero));
- assertTrue(%SameValueZero(zero, another_zero));
-
- assertFalse(%SameValueZero(zero, +0));
- assertFalse(%SameValueZero(zero, -0));
-
- assertFalse(%SameValueZero(+0, zero));
- assertFalse(%SameValueZero(-0, zero));
-
- assertTrue(%SameValueZero(one, one));
- assertTrue(%SameValueZero(one, another_one));
}
// Abstract comparison
diff --git a/deps/v8/test/mjsunit/harmony/bigint/dataview.js b/deps/v8/test/mjsunit/harmony/bigint/dataview.js
index 5ead649909..bad56d2b69 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/dataview.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/dataview.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-bigint
-
var buffer = new ArrayBuffer(64);
var dataview = new DataView(buffer, 8, 24);
var bytes = new Uint8Array(buffer);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/dec.js b/deps/v8/test/mjsunit/harmony/bigint/dec.js
index ddb0431cba..36ca2193de 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/dec.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/dec.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: 0x26ffcdbd233a53e7ca4612f2b02e1f2c1d885c3177e7n,
r: 0x26ffcdbd233a53e7ca4612f2b02e1f2c1d885c3177e6n
diff --git a/deps/v8/test/mjsunit/harmony/bigint/div.js b/deps/v8/test/mjsunit/harmony/bigint/div.js
index 1eeea1184f..8b167140de 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/div.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/div.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: -0x1e0f357314bac34227333c0c2086430dae88cb538f161174888591n,
b: 0x390n,
diff --git a/deps/v8/test/mjsunit/harmony/bigint/exp.js b/deps/v8/test/mjsunit/harmony/bigint/exp.js
index 54d5849373..7fbc2dc402 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/exp.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/exp.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint
+// Flags: --allow-natives-syntax
assertEquals(1n, (-1n) ** 0n);
assertEquals(-1n, (-1n) ** 1n);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/inc.js b/deps/v8/test/mjsunit/harmony/bigint/inc.js
index 4ead89e1bf..7842600393 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/inc.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/inc.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: 0xb3df90n,
r: 0xb3df91n
diff --git a/deps/v8/test/mjsunit/harmony/bigint/json.js b/deps/v8/test/mjsunit/harmony/bigint/json.js
index eb0eefc4bb..cf392234c5 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/json.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/json.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint
+// Flags: --allow-natives-syntax
'use strict'
diff --git a/deps/v8/test/mjsunit/harmony/bigint/mod.js b/deps/v8/test/mjsunit/harmony/bigint/mod.js
index c8cc7fa4fd..01f64ad4ca 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/mod.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/mod.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: 0xaed3c714bb42a73d708bcf1dc9a9deebadc913ef42bac6a6178a60n,
b: -0xf3d6bd1c059b79n,
diff --git a/deps/v8/test/mjsunit/harmony/bigint/mul.js b/deps/v8/test/mjsunit/harmony/bigint/mul.js
index c6a9ae6148..77c3a1c9bb 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/mul.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/mul.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: 0x2bf1f236c2df29f7c99be052dfe1b69ae158d777fea487af889f6259f472c0n,
b: -0xae0090dfn,
diff --git a/deps/v8/test/mjsunit/harmony/bigint/neg.js b/deps/v8/test/mjsunit/harmony/bigint/neg.js
index 2fedf297a5..15b2fb4ee0 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/neg.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/neg.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: 0xcn,
r: -0xcn
diff --git a/deps/v8/test/mjsunit/harmony/bigint/not.js b/deps/v8/test/mjsunit/harmony/bigint/not.js
index 6b4b2eb713..27b6a78ba6 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/not.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/not.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: 0x9f0305cd75e4n,
r: -0x9f0305cd75e5n
diff --git a/deps/v8/test/mjsunit/harmony/bigint/or.js b/deps/v8/test/mjsunit/harmony/bigint/or.js
index c378e141cd..3203258c21 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/or.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/or.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: 0x77a87n,
b: 0xde08e7433fb9584911b8cb4bc7eed802299b4489fc635974d063847da4e8b461df5dn,
diff --git a/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js b/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js
index 4dedf4d27c..3bf0148c95 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint
+// Flags: --allow-natives-syntax
function f(x, b) {
if (b) return Math.trunc(+(x))
diff --git a/deps/v8/test/mjsunit/harmony/bigint/regressions.js b/deps/v8/test/mjsunit/harmony/bigint/regressions.js
index c1df45a1b1..8e13622eab 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/regressions.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/regressions.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-bigint
-
var a = 5n;
var b = a / -1n;
assertEquals(5n, a);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/sar.js b/deps/v8/test/mjsunit/harmony/bigint/sar.js
index f66115dcb6..66d2f2d268 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/sar.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/sar.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: 0x211a34fn,
b: 0xa6n,
diff --git a/deps/v8/test/mjsunit/harmony/bigint/shl.js b/deps/v8/test/mjsunit/harmony/bigint/shl.js
index bedd785b54..0e7b402bc1 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/shl.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/shl.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: -0xe813d76adc0a177778c0c232c595e8572b783210f4a7009d7c1787n,
b: 0x9en,
diff --git a/deps/v8/test/mjsunit/harmony/bigint/sub.js b/deps/v8/test/mjsunit/harmony/bigint/sub.js
index a1ff9b4bb3..21613f768a 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/sub.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/sub.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: 0xc4fd438551d58edn,
b: 0x91b42ee55a50d974an,
diff --git a/deps/v8/test/mjsunit/harmony/bigint/tonumber.js b/deps/v8/test/mjsunit/harmony/bigint/tonumber.js
index d59e8429b8..a6f7d13b7e 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/tonumber.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/tonumber.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-bigint
-
function Check(bigint, number_string) {
var number = Number(bigint);
if (number_string.substring(0, 2) === "0x") {
diff --git a/deps/v8/test/mjsunit/harmony/bigint/turbo.js b/deps/v8/test/mjsunit/harmony/bigint/turbo.js
index 4ce4880f3d..d0f00050c8 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/turbo.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/turbo.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-bigint
+// Flags: --allow-natives-syntax
'use strict'
diff --git a/deps/v8/test/mjsunit/harmony/bigint/typedarray.js b/deps/v8/test/mjsunit/harmony/bigint/typedarray.js
index 29713b8a20..e530441dd4 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/typedarray.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/typedarray.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-bigint --allow-natives-syntax
+// Flags: --allow-natives-syntax
var intarray = new BigInt64Array(8);
var uintarray = new BigUint64Array(8);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/xor.js b/deps/v8/test/mjsunit/harmony/bigint/xor.js
index a934825bd9..cf32b65603 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/xor.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/xor.js
@@ -4,8 +4,6 @@
// Generated by tools/bigint-tester.py.
-// Flags: --harmony-bigint
-
var data = [{
a: -0x46505bec40d461c595b5e4be178b7d00n,
b: -0x9170e5437d4e3ec7c0971e2c6d3bbbd2929ff108ea4ee64f7a91aa367fn,
diff --git a/deps/v8/test/mjsunit/harmony/bigintarray-keyedstore-tobigint.js b/deps/v8/test/mjsunit/harmony/bigintarray-keyedstore-tobigint.js
index 29b44472c9..18ba0ff171 100644
--- a/deps/v8/test/mjsunit/harmony/bigintarray-keyedstore-tobigint.js
+++ b/deps/v8/test/mjsunit/harmony/bigintarray-keyedstore-tobigint.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-bigint
-
let TypedArrayConstructors = [
BigUint64Array,
BigInt64Array,
diff --git a/deps/v8/test/mjsunit/harmony/function-tostring.js b/deps/v8/test/mjsunit/harmony/function-tostring.js
index 4a7e93cd3b..2af14f16cf 100644
--- a/deps/v8/test/mjsunit/harmony/function-tostring.js
+++ b/deps/v8/test/mjsunit/harmony/function-tostring.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-function-tostring
-
var prefix = "/*before*/";
var suffix = "/*after*/";
diff --git a/deps/v8/test/mjsunit/harmony/futex.js b/deps/v8/test/mjsunit/harmony/futex.js
index 394b4ddaf0..188832cf3c 100644
--- a/deps/v8/test/mjsunit/harmony/futex.js
+++ b/deps/v8/test/mjsunit/harmony/futex.js
@@ -133,7 +133,7 @@ if (this.Worker) {
postMessage(result);
};`;
- var worker = new Worker(workerScript);
+ var worker = new Worker(workerScript, {type: 'string'});
worker.postMessage({sab: sab, offset: offset});
// Spin until the worker is waiting on the futex.
@@ -143,7 +143,7 @@ if (this.Worker) {
assertEquals("ok", worker.getMessage());
worker.terminate();
- var worker2 = new Worker(workerScript);
+ var worker2 = new Worker(workerScript, {type: 'string'});
var offset = 8;
var i32a2 = new Int32Array(sab, offset);
worker2.postMessage({sab: sab, offset: offset});
@@ -156,7 +156,7 @@ if (this.Worker) {
// Futex should work when index and buffer views are different, but
// the real address is the same.
- var worker3 = new Worker(workerScript);
+ var worker3 = new Worker(workerScript, {type: 'string'});
i32a2 = new Int32Array(sab, 4);
worker3.postMessage({sab: sab, offset: 8});
@@ -205,7 +205,7 @@ if (this.Worker) {
var id;
var workers = [];
for (id = 0; id < 4; id++) {
- workers[id] = new Worker(workerScript);
+ workers[id] = new Worker(workerScript, {type: 'string'});
workers[id].postMessage({sab: sab, id: id});
}
diff --git a/deps/v8/test/mjsunit/harmony/global.js b/deps/v8/test/mjsunit/harmony/global.js
index 733b95312b..3d43864c47 100644
--- a/deps/v8/test/mjsunit/harmony/global.js
+++ b/deps/v8/test/mjsunit/harmony/global.js
@@ -13,7 +13,7 @@ assertEquals(globalThis.globalThis.globalThis.globalThis, this);
{
const realm = Realm.create();
assertEquals(Realm.global(realm), Realm.eval(realm, 'globalThis'));
- assertTrue(Realm.global(realm) !== globalThis);
+ assertNotEquals(Realm.global(realm), globalThis);
}
{
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-13.js b/deps/v8/test/mjsunit/harmony/modules-import-13.js
index 40016246f0..1cec1cce61 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-13.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-13.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --harmony-dynamic-import
+// Resources: test/mjsunit/harmony/modules-skip-1.js
ran = false;
async function test1() {
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-flag-sequence-generated.js b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-flag-sequence-generated.js
new file mode 100644
index 0000000000..e033c722c7
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-flag-sequence-generated.js
@@ -0,0 +1,266 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-sequence
+
+const re = /\p{Emoji_Flag_Sequence}/u;
+
+assertTrue(re.test('\u{1F1E6}\u{1F1E8}'));
+assertTrue(re.test('\u{1F1FF}\u{1F1FC}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1EB}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1EE}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1F1}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1F6}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1FA}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1FC}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1FD}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1FF}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1E7}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1E9}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1EB}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1ED}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1EE}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1EF}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1F1}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1F6}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1FB}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1FC}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1FE}'));
+assertTrue(re.test('\u{1F1E7}\u{1F1FF}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1E8}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1E9}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1EB}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1ED}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1EE}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1F0}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1F1}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1F5}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1FA}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1FB}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1FC}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1FD}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1FE}'));
+assertTrue(re.test('\u{1F1E8}\u{1F1FF}'));
+assertTrue(re.test('\u{1F1E9}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1E9}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1E9}\u{1F1EF}'));
+assertTrue(re.test('\u{1F1E9}\u{1F1F0}'));
+assertTrue(re.test('\u{1F1E9}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1E9}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1E9}\u{1F1FF}'));
+assertTrue(re.test('\u{1F1EA}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1EA}\u{1F1E8}'));
+assertTrue(re.test('\u{1F1EA}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1EA}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1EA}\u{1F1ED}'));
+assertTrue(re.test('\u{1F1EA}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1EA}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1EA}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1EA}\u{1F1FA}'));
+assertTrue(re.test('\u{1F1EB}\u{1F1EE}'));
+assertTrue(re.test('\u{1F1EB}\u{1F1EF}'));
+assertTrue(re.test('\u{1F1EB}\u{1F1F0}'));
+assertTrue(re.test('\u{1F1EB}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1EB}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1EB}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1E7}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1E9}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1EB}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1ED}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1EE}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1F1}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1F5}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1F6}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1FA}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1FC}'));
+assertTrue(re.test('\u{1F1EC}\u{1F1FE}'));
+assertTrue(re.test('\u{1F1ED}\u{1F1F0}'));
+assertTrue(re.test('\u{1F1ED}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1ED}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1ED}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1ED}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1ED}\u{1F1FA}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1E8}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1E9}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1F1}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1F6}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1EE}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1EF}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1EF}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1EF}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1EF}\u{1F1F5}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1ED}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1EE}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1F5}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1FC}'));
+assertTrue(re.test('\u{1F1E6}\u{1F1E9}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1FF}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1E7}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1E8}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1EE}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1F0}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1FA}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1FB}'));
+assertTrue(re.test('\u{1F1F1}\u{1F1FE}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1E8}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1E9}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1EB}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1ED}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1F0}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1F1}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1F5}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1F6}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1FA}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1FB}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1FC}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1FD}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1FE}'));
+assertTrue(re.test('\u{1F1F2}\u{1F1FF}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1E8}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1EB}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1EE}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1F1}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1F5}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1FA}'));
+assertTrue(re.test('\u{1F1F3}\u{1F1FF}'));
+assertTrue(re.test('\u{1F1F4}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1EB}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1ED}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1F0}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1F1}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1FC}'));
+assertTrue(re.test('\u{1F1F5}\u{1F1FE}'));
+assertTrue(re.test('\u{1F1F6}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1F7}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1F7}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1F7}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1F7}\u{1F1FA}'));
+assertTrue(re.test('\u{1F1F7}\u{1F1FC}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1E7}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1E8}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1E9}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1ED}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1EE}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1EF}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1F0}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1F1}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1FB}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1FD}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1FE}'));
+assertTrue(re.test('\u{1F1F8}\u{1F1FF}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1E8}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1E9}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1EB}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1ED}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1EF}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1F0}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1F1}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1F4}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1F7}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1FB}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1FC}'));
+assertTrue(re.test('\u{1F1F9}\u{1F1FF}'));
+assertTrue(re.test('\u{1F1FA}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1FA}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1FA}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1FA}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1FA}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1FA}\u{1F1FE}'));
+assertTrue(re.test('\u{1F1FA}\u{1F1FF}'));
+assertTrue(re.test('\u{1F1FB}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1FB}\u{1F1E8}'));
+assertTrue(re.test('\u{1F1FB}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1FB}\u{1F1EC}'));
+assertTrue(re.test('\u{1F1FB}\u{1F1EE}'));
+assertTrue(re.test('\u{1F1FB}\u{1F1F3}'));
+assertTrue(re.test('\u{1F1FB}\u{1F1FA}'));
+assertTrue(re.test('\u{1F1FC}\u{1F1EB}'));
+assertTrue(re.test('\u{1F1FC}\u{1F1F8}'));
+assertTrue(re.test('\u{1F1FD}\u{1F1F0}'));
+assertTrue(re.test('\u{1F1FE}\u{1F1EA}'));
+assertTrue(re.test('\u{1F1FE}\u{1F1F9}'));
+assertTrue(re.test('\u{1F1FF}\u{1F1E6}'));
+assertTrue(re.test('\u{1F1FF}\u{1F1F2}'));
+assertTrue(re.test('\u{1F1F0}\u{1F1FE}'));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-keycap-sequence-generated.js b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-keycap-sequence-generated.js
new file mode 100644
index 0000000000..8366a395ec
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-keycap-sequence-generated.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-sequence
+
+const re = /\p{Emoji_Keycap_Sequence}/u;
+
+assertTrue(re.test('#\uFE0F\u20E3'));
+assertTrue(re.test('*\uFE0F\u20E3'));
+assertTrue(re.test('0\uFE0F\u20E3'));
+assertTrue(re.test('1\uFE0F\u20E3'));
+assertTrue(re.test('2\uFE0F\u20E3'));
+assertTrue(re.test('3\uFE0F\u20E3'));
+assertTrue(re.test('4\uFE0F\u20E3'));
+assertTrue(re.test('5\uFE0F\u20E3'));
+assertTrue(re.test('6\uFE0F\u20E3'));
+assertTrue(re.test('7\uFE0F\u20E3'));
+assertTrue(re.test('8\uFE0F\u20E3'));
+assertTrue(re.test('9\uFE0F\u20E3'));
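For context (not part of the patch): a keycap sequence is one of 0-9, #, * followed by U+FE0F (VARIATION SELECTOR-16) and U+20E3 (COMBINING ENCLOSING KEYCAP), three code units in total. A minimal sketch, assuming --harmony-regexp-sequence:

const keycap = /\p{Emoji_Keycap_Sequence}/u;
keycap.test('4\uFE0F\u20E3');             // true: as asserted above
keycap.test('4\u20E3');                   // false: the U+FE0F is required by the sequence
'4\uFE0F\u20E3'.match(keycap)[0].length;  // 3: the whole three-code-unit sequence matches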
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-modifier-sequence-generated.js b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-modifier-sequence-generated.js
new file mode 100644
index 0000000000..0e11d6c462
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-modifier-sequence-generated.js
@@ -0,0 +1,538 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-sequence
+
+const re = /\p{Emoji_Modifier_Sequence}/u;
+
+assertTrue(re.test('\u261D\u{1F3FB}'));
+assertTrue(re.test('\u261D\u{1F3FC}'));
+assertTrue(re.test('\u261D\u{1F3FD}'));
+assertTrue(re.test('\u261D\u{1F3FE}'));
+assertTrue(re.test('\u261D\u{1F3FF}'));
+assertTrue(re.test('\u26F9\u{1F3FB}'));
+assertTrue(re.test('\u26F9\u{1F3FC}'));
+assertTrue(re.test('\u26F9\u{1F3FD}'));
+assertTrue(re.test('\u26F9\u{1F3FE}'));
+assertTrue(re.test('\u26F9\u{1F3FF}'));
+assertTrue(re.test('\u270A\u{1F3FB}'));
+assertTrue(re.test('\u270A\u{1F3FC}'));
+assertTrue(re.test('\u270A\u{1F3FD}'));
+assertTrue(re.test('\u270A\u{1F3FE}'));
+assertTrue(re.test('\u270A\u{1F3FF}'));
+assertTrue(re.test('\u270B\u{1F3FB}'));
+assertTrue(re.test('\u270B\u{1F3FC}'));
+assertTrue(re.test('\u270B\u{1F3FD}'));
+assertTrue(re.test('\u270B\u{1F3FE}'));
+assertTrue(re.test('\u270B\u{1F3FF}'));
+assertTrue(re.test('\u270C\u{1F3FB}'));
+assertTrue(re.test('\u270C\u{1F3FC}'));
+assertTrue(re.test('\u270C\u{1F3FD}'));
+assertTrue(re.test('\u270C\u{1F3FE}'));
+assertTrue(re.test('\u270C\u{1F3FF}'));
+assertTrue(re.test('\u270D\u{1F3FB}'));
+assertTrue(re.test('\u270D\u{1F3FC}'));
+assertTrue(re.test('\u270D\u{1F3FD}'));
+assertTrue(re.test('\u270D\u{1F3FE}'));
+assertTrue(re.test('\u270D\u{1F3FF}'));
+assertTrue(re.test('\u{1F385}\u{1F3FB}'));
+assertTrue(re.test('\u{1F385}\u{1F3FC}'));
+assertTrue(re.test('\u{1F385}\u{1F3FD}'));
+assertTrue(re.test('\u{1F385}\u{1F3FE}'));
+assertTrue(re.test('\u{1F385}\u{1F3FF}'));
+assertTrue(re.test('\u{1F3C2}\u{1F3FB}'));
+assertTrue(re.test('\u{1F3C2}\u{1F3FC}'));
+assertTrue(re.test('\u{1F3C2}\u{1F3FD}'));
+assertTrue(re.test('\u{1F3C2}\u{1F3FE}'));
+assertTrue(re.test('\u{1F3C2}\u{1F3FF}'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FB}'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FC}'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FD}'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FE}'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FF}'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FB}'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FC}'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FD}'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FE}'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FF}'));
+assertTrue(re.test('\u{1F3C7}\u{1F3FB}'));
+assertTrue(re.test('\u{1F3C7}\u{1F3FC}'));
+assertTrue(re.test('\u{1F3C7}\u{1F3FD}'));
+assertTrue(re.test('\u{1F3C7}\u{1F3FE}'));
+assertTrue(re.test('\u{1F3C7}\u{1F3FF}'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FB}'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FC}'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FD}'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FE}'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FF}'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FB}'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FC}'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FD}'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FE}'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FF}'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FB}'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FC}'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FD}'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FE}'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FF}'));
+assertTrue(re.test('\u{1F442}\u{1F3FB}'));
+assertTrue(re.test('\u{1F442}\u{1F3FC}'));
+assertTrue(re.test('\u{1F442}\u{1F3FD}'));
+assertTrue(re.test('\u{1F442}\u{1F3FE}'));
+assertTrue(re.test('\u{1F442}\u{1F3FF}'));
+assertTrue(re.test('\u{1F443}\u{1F3FB}'));
+assertTrue(re.test('\u{1F443}\u{1F3FC}'));
+assertTrue(re.test('\u{1F443}\u{1F3FD}'));
+assertTrue(re.test('\u{1F443}\u{1F3FE}'));
+assertTrue(re.test('\u{1F443}\u{1F3FF}'));
+assertTrue(re.test('\u{1F446}\u{1F3FB}'));
+assertTrue(re.test('\u{1F446}\u{1F3FC}'));
+assertTrue(re.test('\u{1F446}\u{1F3FD}'));
+assertTrue(re.test('\u{1F446}\u{1F3FE}'));
+assertTrue(re.test('\u{1F446}\u{1F3FF}'));
+assertTrue(re.test('\u{1F447}\u{1F3FB}'));
+assertTrue(re.test('\u{1F447}\u{1F3FC}'));
+assertTrue(re.test('\u{1F447}\u{1F3FD}'));
+assertTrue(re.test('\u{1F447}\u{1F3FE}'));
+assertTrue(re.test('\u{1F447}\u{1F3FF}'));
+assertTrue(re.test('\u{1F448}\u{1F3FB}'));
+assertTrue(re.test('\u{1F448}\u{1F3FC}'));
+assertTrue(re.test('\u{1F448}\u{1F3FD}'));
+assertTrue(re.test('\u{1F448}\u{1F3FE}'));
+assertTrue(re.test('\u{1F448}\u{1F3FF}'));
+assertTrue(re.test('\u{1F449}\u{1F3FB}'));
+assertTrue(re.test('\u{1F449}\u{1F3FC}'));
+assertTrue(re.test('\u{1F449}\u{1F3FD}'));
+assertTrue(re.test('\u{1F449}\u{1F3FE}'));
+assertTrue(re.test('\u{1F449}\u{1F3FF}'));
+assertTrue(re.test('\u{1F44A}\u{1F3FB}'));
+assertTrue(re.test('\u{1F44A}\u{1F3FC}'));
+assertTrue(re.test('\u{1F44A}\u{1F3FD}'));
+assertTrue(re.test('\u{1F44A}\u{1F3FE}'));
+assertTrue(re.test('\u{1F44A}\u{1F3FF}'));
+assertTrue(re.test('\u{1F44B}\u{1F3FB}'));
+assertTrue(re.test('\u{1F44B}\u{1F3FC}'));
+assertTrue(re.test('\u{1F44B}\u{1F3FD}'));
+assertTrue(re.test('\u{1F44B}\u{1F3FE}'));
+assertTrue(re.test('\u{1F44B}\u{1F3FF}'));
+assertTrue(re.test('\u{1F44C}\u{1F3FB}'));
+assertTrue(re.test('\u{1F44C}\u{1F3FC}'));
+assertTrue(re.test('\u{1F44C}\u{1F3FD}'));
+assertTrue(re.test('\u{1F44C}\u{1F3FE}'));
+assertTrue(re.test('\u{1F44C}\u{1F3FF}'));
+assertTrue(re.test('\u{1F44D}\u{1F3FB}'));
+assertTrue(re.test('\u{1F44D}\u{1F3FC}'));
+assertTrue(re.test('\u{1F44D}\u{1F3FD}'));
+assertTrue(re.test('\u{1F44D}\u{1F3FE}'));
+assertTrue(re.test('\u{1F44D}\u{1F3FF}'));
+assertTrue(re.test('\u{1F44E}\u{1F3FB}'));
+assertTrue(re.test('\u{1F44E}\u{1F3FC}'));
+assertTrue(re.test('\u{1F44E}\u{1F3FD}'));
+assertTrue(re.test('\u{1F44E}\u{1F3FE}'));
+assertTrue(re.test('\u{1F44E}\u{1F3FF}'));
+assertTrue(re.test('\u{1F44F}\u{1F3FB}'));
+assertTrue(re.test('\u{1F44F}\u{1F3FC}'));
+assertTrue(re.test('\u{1F44F}\u{1F3FD}'));
+assertTrue(re.test('\u{1F44F}\u{1F3FE}'));
+assertTrue(re.test('\u{1F44F}\u{1F3FF}'));
+assertTrue(re.test('\u{1F450}\u{1F3FB}'));
+assertTrue(re.test('\u{1F450}\u{1F3FC}'));
+assertTrue(re.test('\u{1F450}\u{1F3FD}'));
+assertTrue(re.test('\u{1F450}\u{1F3FE}'));
+assertTrue(re.test('\u{1F450}\u{1F3FF}'));
+assertTrue(re.test('\u{1F466}\u{1F3FB}'));
+assertTrue(re.test('\u{1F466}\u{1F3FC}'));
+assertTrue(re.test('\u{1F466}\u{1F3FD}'));
+assertTrue(re.test('\u{1F466}\u{1F3FE}'));
+assertTrue(re.test('\u{1F466}\u{1F3FF}'));
+assertTrue(re.test('\u{1F467}\u{1F3FB}'));
+assertTrue(re.test('\u{1F467}\u{1F3FC}'));
+assertTrue(re.test('\u{1F467}\u{1F3FD}'));
+assertTrue(re.test('\u{1F467}\u{1F3FE}'));
+assertTrue(re.test('\u{1F467}\u{1F3FF}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}'));
+assertTrue(re.test('\u{1F46E}\u{1F3FB}'));
+assertTrue(re.test('\u{1F46E}\u{1F3FC}'));
+assertTrue(re.test('\u{1F46E}\u{1F3FD}'));
+assertTrue(re.test('\u{1F46E}\u{1F3FE}'));
+assertTrue(re.test('\u{1F46E}\u{1F3FF}'));
+assertTrue(re.test('\u{1F470}\u{1F3FB}'));
+assertTrue(re.test('\u{1F470}\u{1F3FC}'));
+assertTrue(re.test('\u{1F470}\u{1F3FD}'));
+assertTrue(re.test('\u{1F470}\u{1F3FE}'));
+assertTrue(re.test('\u{1F470}\u{1F3FF}'));
+assertTrue(re.test('\u{1F471}\u{1F3FB}'));
+assertTrue(re.test('\u{1F471}\u{1F3FC}'));
+assertTrue(re.test('\u{1F471}\u{1F3FD}'));
+assertTrue(re.test('\u{1F471}\u{1F3FE}'));
+assertTrue(re.test('\u{1F471}\u{1F3FF}'));
+assertTrue(re.test('\u{1F472}\u{1F3FB}'));
+assertTrue(re.test('\u{1F472}\u{1F3FC}'));
+assertTrue(re.test('\u{1F472}\u{1F3FD}'));
+assertTrue(re.test('\u{1F472}\u{1F3FE}'));
+assertTrue(re.test('\u{1F472}\u{1F3FF}'));
+assertTrue(re.test('\u{1F473}\u{1F3FB}'));
+assertTrue(re.test('\u{1F473}\u{1F3FC}'));
+assertTrue(re.test('\u{1F473}\u{1F3FD}'));
+assertTrue(re.test('\u{1F473}\u{1F3FE}'));
+assertTrue(re.test('\u{1F473}\u{1F3FF}'));
+assertTrue(re.test('\u{1F474}\u{1F3FB}'));
+assertTrue(re.test('\u{1F474}\u{1F3FC}'));
+assertTrue(re.test('\u{1F474}\u{1F3FD}'));
+assertTrue(re.test('\u{1F474}\u{1F3FE}'));
+assertTrue(re.test('\u{1F474}\u{1F3FF}'));
+assertTrue(re.test('\u{1F475}\u{1F3FB}'));
+assertTrue(re.test('\u{1F475}\u{1F3FC}'));
+assertTrue(re.test('\u{1F475}\u{1F3FD}'));
+assertTrue(re.test('\u{1F475}\u{1F3FE}'));
+assertTrue(re.test('\u{1F475}\u{1F3FF}'));
+assertTrue(re.test('\u{1F476}\u{1F3FB}'));
+assertTrue(re.test('\u{1F476}\u{1F3FC}'));
+assertTrue(re.test('\u{1F476}\u{1F3FD}'));
+assertTrue(re.test('\u{1F476}\u{1F3FE}'));
+assertTrue(re.test('\u{1F476}\u{1F3FF}'));
+assertTrue(re.test('\u{1F477}\u{1F3FB}'));
+assertTrue(re.test('\u{1F477}\u{1F3FC}'));
+assertTrue(re.test('\u{1F477}\u{1F3FD}'));
+assertTrue(re.test('\u{1F477}\u{1F3FE}'));
+assertTrue(re.test('\u{1F477}\u{1F3FF}'));
+assertTrue(re.test('\u{1F478}\u{1F3FB}'));
+assertTrue(re.test('\u{1F478}\u{1F3FC}'));
+assertTrue(re.test('\u{1F478}\u{1F3FD}'));
+assertTrue(re.test('\u{1F478}\u{1F3FE}'));
+assertTrue(re.test('\u{1F478}\u{1F3FF}'));
+assertTrue(re.test('\u{1F47C}\u{1F3FB}'));
+assertTrue(re.test('\u{1F47C}\u{1F3FC}'));
+assertTrue(re.test('\u{1F47C}\u{1F3FD}'));
+assertTrue(re.test('\u{1F47C}\u{1F3FE}'));
+assertTrue(re.test('\u{1F47C}\u{1F3FF}'));
+assertTrue(re.test('\u{1F481}\u{1F3FB}'));
+assertTrue(re.test('\u{1F481}\u{1F3FC}'));
+assertTrue(re.test('\u{1F481}\u{1F3FD}'));
+assertTrue(re.test('\u{1F481}\u{1F3FE}'));
+assertTrue(re.test('\u{1F481}\u{1F3FF}'));
+assertTrue(re.test('\u{1F482}\u{1F3FB}'));
+assertTrue(re.test('\u{1F482}\u{1F3FC}'));
+assertTrue(re.test('\u{1F482}\u{1F3FD}'));
+assertTrue(re.test('\u{1F482}\u{1F3FE}'));
+assertTrue(re.test('\u{1F482}\u{1F3FF}'));
+assertTrue(re.test('\u{1F483}\u{1F3FB}'));
+assertTrue(re.test('\u{1F483}\u{1F3FC}'));
+assertTrue(re.test('\u{1F483}\u{1F3FD}'));
+assertTrue(re.test('\u{1F483}\u{1F3FE}'));
+assertTrue(re.test('\u{1F483}\u{1F3FF}'));
+assertTrue(re.test('\u{1F485}\u{1F3FB}'));
+assertTrue(re.test('\u{1F485}\u{1F3FC}'));
+assertTrue(re.test('\u{1F485}\u{1F3FD}'));
+assertTrue(re.test('\u{1F485}\u{1F3FE}'));
+assertTrue(re.test('\u{1F485}\u{1F3FF}'));
+assertTrue(re.test('\u{1F486}\u{1F3FB}'));
+assertTrue(re.test('\u{1F486}\u{1F3FC}'));
+assertTrue(re.test('\u{1F486}\u{1F3FD}'));
+assertTrue(re.test('\u{1F486}\u{1F3FE}'));
+assertTrue(re.test('\u{1F486}\u{1F3FF}'));
+assertTrue(re.test('\u{1F487}\u{1F3FB}'));
+assertTrue(re.test('\u{1F487}\u{1F3FC}'));
+assertTrue(re.test('\u{1F487}\u{1F3FD}'));
+assertTrue(re.test('\u{1F487}\u{1F3FE}'));
+assertTrue(re.test('\u{1F487}\u{1F3FF}'));
+assertTrue(re.test('\u{1F4AA}\u{1F3FB}'));
+assertTrue(re.test('\u{1F4AA}\u{1F3FC}'));
+assertTrue(re.test('\u{1F4AA}\u{1F3FD}'));
+assertTrue(re.test('\u{1F4AA}\u{1F3FE}'));
+assertTrue(re.test('\u{1F4AA}\u{1F3FF}'));
+assertTrue(re.test('\u{1F574}\u{1F3FB}'));
+assertTrue(re.test('\u{1F574}\u{1F3FC}'));
+assertTrue(re.test('\u{1F574}\u{1F3FD}'));
+assertTrue(re.test('\u{1F574}\u{1F3FE}'));
+assertTrue(re.test('\u{1F574}\u{1F3FF}'));
+assertTrue(re.test('\u{1F575}\u{1F3FB}'));
+assertTrue(re.test('\u{1F575}\u{1F3FC}'));
+assertTrue(re.test('\u{1F575}\u{1F3FD}'));
+assertTrue(re.test('\u{1F575}\u{1F3FE}'));
+assertTrue(re.test('\u{1F575}\u{1F3FF}'));
+assertTrue(re.test('\u{1F57A}\u{1F3FB}'));
+assertTrue(re.test('\u{1F57A}\u{1F3FC}'));
+assertTrue(re.test('\u{1F57A}\u{1F3FD}'));
+assertTrue(re.test('\u{1F57A}\u{1F3FE}'));
+assertTrue(re.test('\u{1F57A}\u{1F3FF}'));
+assertTrue(re.test('\u{1F590}\u{1F3FB}'));
+assertTrue(re.test('\u{1F590}\u{1F3FC}'));
+assertTrue(re.test('\u{1F590}\u{1F3FD}'));
+assertTrue(re.test('\u{1F590}\u{1F3FE}'));
+assertTrue(re.test('\u{1F590}\u{1F3FF}'));
+assertTrue(re.test('\u{1F595}\u{1F3FB}'));
+assertTrue(re.test('\u{1F595}\u{1F3FC}'));
+assertTrue(re.test('\u{1F595}\u{1F3FD}'));
+assertTrue(re.test('\u{1F595}\u{1F3FE}'));
+assertTrue(re.test('\u{1F595}\u{1F3FF}'));
+assertTrue(re.test('\u{1F596}\u{1F3FB}'));
+assertTrue(re.test('\u{1F596}\u{1F3FC}'));
+assertTrue(re.test('\u{1F596}\u{1F3FD}'));
+assertTrue(re.test('\u{1F596}\u{1F3FE}'));
+assertTrue(re.test('\u{1F596}\u{1F3FF}'));
+assertTrue(re.test('\u{1F645}\u{1F3FB}'));
+assertTrue(re.test('\u{1F645}\u{1F3FC}'));
+assertTrue(re.test('\u{1F645}\u{1F3FD}'));
+assertTrue(re.test('\u{1F645}\u{1F3FE}'));
+assertTrue(re.test('\u{1F645}\u{1F3FF}'));
+assertTrue(re.test('\u{1F646}\u{1F3FB}'));
+assertTrue(re.test('\u{1F646}\u{1F3FC}'));
+assertTrue(re.test('\u{1F646}\u{1F3FD}'));
+assertTrue(re.test('\u{1F646}\u{1F3FE}'));
+assertTrue(re.test('\u{1F646}\u{1F3FF}'));
+assertTrue(re.test('\u{1F647}\u{1F3FB}'));
+assertTrue(re.test('\u{1F647}\u{1F3FC}'));
+assertTrue(re.test('\u{1F647}\u{1F3FD}'));
+assertTrue(re.test('\u{1F647}\u{1F3FE}'));
+assertTrue(re.test('\u{1F647}\u{1F3FF}'));
+assertTrue(re.test('\u{1F64B}\u{1F3FB}'));
+assertTrue(re.test('\u{1F64B}\u{1F3FC}'));
+assertTrue(re.test('\u{1F64B}\u{1F3FD}'));
+assertTrue(re.test('\u{1F64B}\u{1F3FE}'));
+assertTrue(re.test('\u{1F64B}\u{1F3FF}'));
+assertTrue(re.test('\u{1F64C}\u{1F3FB}'));
+assertTrue(re.test('\u{1F64C}\u{1F3FC}'));
+assertTrue(re.test('\u{1F64C}\u{1F3FD}'));
+assertTrue(re.test('\u{1F64C}\u{1F3FE}'));
+assertTrue(re.test('\u{1F64C}\u{1F3FF}'));
+assertTrue(re.test('\u{1F64D}\u{1F3FB}'));
+assertTrue(re.test('\u{1F64D}\u{1F3FC}'));
+assertTrue(re.test('\u{1F64D}\u{1F3FD}'));
+assertTrue(re.test('\u{1F64D}\u{1F3FE}'));
+assertTrue(re.test('\u{1F64D}\u{1F3FF}'));
+assertTrue(re.test('\u{1F64E}\u{1F3FB}'));
+assertTrue(re.test('\u{1F64E}\u{1F3FC}'));
+assertTrue(re.test('\u{1F64E}\u{1F3FD}'));
+assertTrue(re.test('\u{1F64E}\u{1F3FE}'));
+assertTrue(re.test('\u{1F64E}\u{1F3FF}'));
+assertTrue(re.test('\u{1F64F}\u{1F3FB}'));
+assertTrue(re.test('\u{1F64F}\u{1F3FC}'));
+assertTrue(re.test('\u{1F64F}\u{1F3FD}'));
+assertTrue(re.test('\u{1F64F}\u{1F3FE}'));
+assertTrue(re.test('\u{1F64F}\u{1F3FF}'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FB}'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FC}'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FD}'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FE}'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FF}'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FB}'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FC}'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FD}'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FE}'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FF}'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FB}'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FC}'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FD}'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FE}'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FF}'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FB}'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FC}'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FD}'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FE}'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FF}'));
+assertTrue(re.test('\u{1F6C0}\u{1F3FB}'));
+assertTrue(re.test('\u{1F6C0}\u{1F3FC}'));
+assertTrue(re.test('\u{1F6C0}\u{1F3FD}'));
+assertTrue(re.test('\u{1F6C0}\u{1F3FE}'));
+assertTrue(re.test('\u{1F6C0}\u{1F3FF}'));
+assertTrue(re.test('\u{1F6CC}\u{1F3FB}'));
+assertTrue(re.test('\u{1F6CC}\u{1F3FC}'));
+assertTrue(re.test('\u{1F6CC}\u{1F3FD}'));
+assertTrue(re.test('\u{1F6CC}\u{1F3FE}'));
+assertTrue(re.test('\u{1F6CC}\u{1F3FF}'));
+assertTrue(re.test('\u{1F918}\u{1F3FB}'));
+assertTrue(re.test('\u{1F918}\u{1F3FC}'));
+assertTrue(re.test('\u{1F918}\u{1F3FD}'));
+assertTrue(re.test('\u{1F918}\u{1F3FE}'));
+assertTrue(re.test('\u{1F918}\u{1F3FF}'));
+assertTrue(re.test('\u{1F919}\u{1F3FB}'));
+assertTrue(re.test('\u{1F919}\u{1F3FC}'));
+assertTrue(re.test('\u{1F919}\u{1F3FD}'));
+assertTrue(re.test('\u{1F919}\u{1F3FE}'));
+assertTrue(re.test('\u{1F919}\u{1F3FF}'));
+assertTrue(re.test('\u{1F91A}\u{1F3FB}'));
+assertTrue(re.test('\u{1F91A}\u{1F3FC}'));
+assertTrue(re.test('\u{1F91A}\u{1F3FD}'));
+assertTrue(re.test('\u{1F91A}\u{1F3FE}'));
+assertTrue(re.test('\u{1F91A}\u{1F3FF}'));
+assertTrue(re.test('\u{1F91B}\u{1F3FB}'));
+assertTrue(re.test('\u{1F91B}\u{1F3FC}'));
+assertTrue(re.test('\u{1F91B}\u{1F3FD}'));
+assertTrue(re.test('\u{1F91B}\u{1F3FE}'));
+assertTrue(re.test('\u{1F91B}\u{1F3FF}'));
+assertTrue(re.test('\u{1F91C}\u{1F3FB}'));
+assertTrue(re.test('\u{1F91C}\u{1F3FC}'));
+assertTrue(re.test('\u{1F91C}\u{1F3FD}'));
+assertTrue(re.test('\u{1F91C}\u{1F3FE}'));
+assertTrue(re.test('\u{1F91C}\u{1F3FF}'));
+assertTrue(re.test('\u{1F91E}\u{1F3FB}'));
+assertTrue(re.test('\u{1F91E}\u{1F3FC}'));
+assertTrue(re.test('\u{1F91E}\u{1F3FD}'));
+assertTrue(re.test('\u{1F91E}\u{1F3FE}'));
+assertTrue(re.test('\u{1F91E}\u{1F3FF}'));
+assertTrue(re.test('\u{1F91F}\u{1F3FB}'));
+assertTrue(re.test('\u{1F91F}\u{1F3FC}'));
+assertTrue(re.test('\u{1F91F}\u{1F3FD}'));
+assertTrue(re.test('\u{1F91F}\u{1F3FE}'));
+assertTrue(re.test('\u{1F91F}\u{1F3FF}'));
+assertTrue(re.test('\u{1F926}\u{1F3FB}'));
+assertTrue(re.test('\u{1F926}\u{1F3FC}'));
+assertTrue(re.test('\u{1F926}\u{1F3FD}'));
+assertTrue(re.test('\u{1F926}\u{1F3FE}'));
+assertTrue(re.test('\u{1F926}\u{1F3FF}'));
+assertTrue(re.test('\u{1F930}\u{1F3FB}'));
+assertTrue(re.test('\u{1F930}\u{1F3FC}'));
+assertTrue(re.test('\u{1F930}\u{1F3FD}'));
+assertTrue(re.test('\u{1F930}\u{1F3FE}'));
+assertTrue(re.test('\u{1F930}\u{1F3FF}'));
+assertTrue(re.test('\u{1F931}\u{1F3FB}'));
+assertTrue(re.test('\u{1F931}\u{1F3FC}'));
+assertTrue(re.test('\u{1F931}\u{1F3FD}'));
+assertTrue(re.test('\u{1F931}\u{1F3FE}'));
+assertTrue(re.test('\u{1F931}\u{1F3FF}'));
+assertTrue(re.test('\u{1F932}\u{1F3FB}'));
+assertTrue(re.test('\u{1F932}\u{1F3FC}'));
+assertTrue(re.test('\u{1F932}\u{1F3FD}'));
+assertTrue(re.test('\u{1F932}\u{1F3FE}'));
+assertTrue(re.test('\u{1F932}\u{1F3FF}'));
+assertTrue(re.test('\u{1F933}\u{1F3FB}'));
+assertTrue(re.test('\u{1F933}\u{1F3FC}'));
+assertTrue(re.test('\u{1F933}\u{1F3FD}'));
+assertTrue(re.test('\u{1F933}\u{1F3FE}'));
+assertTrue(re.test('\u{1F933}\u{1F3FF}'));
+assertTrue(re.test('\u{1F934}\u{1F3FB}'));
+assertTrue(re.test('\u{1F934}\u{1F3FC}'));
+assertTrue(re.test('\u{1F934}\u{1F3FD}'));
+assertTrue(re.test('\u{1F934}\u{1F3FE}'));
+assertTrue(re.test('\u{1F934}\u{1F3FF}'));
+assertTrue(re.test('\u{1F935}\u{1F3FB}'));
+assertTrue(re.test('\u{1F935}\u{1F3FC}'));
+assertTrue(re.test('\u{1F935}\u{1F3FD}'));
+assertTrue(re.test('\u{1F935}\u{1F3FE}'));
+assertTrue(re.test('\u{1F935}\u{1F3FF}'));
+assertTrue(re.test('\u{1F936}\u{1F3FB}'));
+assertTrue(re.test('\u{1F936}\u{1F3FC}'));
+assertTrue(re.test('\u{1F936}\u{1F3FD}'));
+assertTrue(re.test('\u{1F936}\u{1F3FE}'));
+assertTrue(re.test('\u{1F936}\u{1F3FF}'));
+assertTrue(re.test('\u{1F937}\u{1F3FB}'));
+assertTrue(re.test('\u{1F937}\u{1F3FC}'));
+assertTrue(re.test('\u{1F937}\u{1F3FD}'));
+assertTrue(re.test('\u{1F937}\u{1F3FE}'));
+assertTrue(re.test('\u{1F937}\u{1F3FF}'));
+assertTrue(re.test('\u{1F938}\u{1F3FB}'));
+assertTrue(re.test('\u{1F938}\u{1F3FC}'));
+assertTrue(re.test('\u{1F938}\u{1F3FD}'));
+assertTrue(re.test('\u{1F938}\u{1F3FE}'));
+assertTrue(re.test('\u{1F938}\u{1F3FF}'));
+assertTrue(re.test('\u{1F939}\u{1F3FB}'));
+assertTrue(re.test('\u{1F939}\u{1F3FC}'));
+assertTrue(re.test('\u{1F939}\u{1F3FD}'));
+assertTrue(re.test('\u{1F939}\u{1F3FE}'));
+assertTrue(re.test('\u{1F939}\u{1F3FF}'));
+assertTrue(re.test('\u{1F93D}\u{1F3FB}'));
+assertTrue(re.test('\u{1F93D}\u{1F3FC}'));
+assertTrue(re.test('\u{1F93D}\u{1F3FD}'));
+assertTrue(re.test('\u{1F93D}\u{1F3FE}'));
+assertTrue(re.test('\u{1F93D}\u{1F3FF}'));
+assertTrue(re.test('\u{1F93E}\u{1F3FB}'));
+assertTrue(re.test('\u{1F93E}\u{1F3FC}'));
+assertTrue(re.test('\u{1F93E}\u{1F3FD}'));
+assertTrue(re.test('\u{1F93E}\u{1F3FE}'));
+assertTrue(re.test('\u{1F93E}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9B5}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9B5}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9B5}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9B5}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9B5}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9B6}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9B6}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9B6}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9B6}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9B6}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9D1}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9D1}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9D1}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9D1}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9D1}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9D2}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9D2}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9D2}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9D2}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9D2}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9D3}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9D3}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9D3}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9D3}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9D3}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9D4}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9D4}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9D4}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9D4}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9D4}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9D5}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9D5}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9D5}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9D5}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9D5}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FF}'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FB}'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FC}'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FD}'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FE}'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FF}'));
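For context (not part of the patch): an Emoji_Modifier_Sequence is a modifier base followed by one of the five skin-tone modifiers U+1F3FB..U+1F3FF, which is why every base above appears exactly five times. A minimal sketch, assuming --harmony-regexp-sequence:

const mod = /\p{Emoji_Modifier_Sequence}/u;
mod.test('\u270B\u{1F3FD}');  // true: raised hand + medium skin tone, as asserted above
mod.test('\u270B');           // false: the base alone is not a modifier sequence
mod.test('\u{1F3FD}');        // false: neither is a bare skin-tone modifier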
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-tag-sequence-generated.js b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-tag-sequence-generated.js
new file mode 100644
index 0000000000..129e1c7777
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-tag-sequence-generated.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-sequence
+
+const re = /\p{Emoji_Tag_Sequence}/u;
+
+assertTrue(re.test('\u{1F3F4}\u{E0067}\u{E0062}\u{E0065}\u{E006E}\u{E0067}\u{E007F}'));
+assertTrue(re.test('\u{1F3F4}\u{E0067}\u{E0062}\u{E0073}\u{E0063}\u{E0074}\u{E007F}'));
+assertTrue(re.test('\u{1F3F4}\u{E0067}\u{E0062}\u{E0077}\u{E006C}\u{E0073}\u{E007F}'));
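For context (not part of the patch): the three RGI tag sequences (England, Scotland, Wales) each consist of WAVING BLACK FLAG U+1F3F4, a run of tag characters spelling the region code, and CANCEL TAG U+E007F. A minimal sketch, assuming --harmony-regexp-sequence:

const tag = /\p{Emoji_Tag_Sequence}/u;
tag.test('\u{1F3F4}\u{E0067}\u{E0062}\u{E0073}\u{E0063}\u{E0074}\u{E007F}');  // true: gbsct (Scotland)
tag.test('\u{1F3F4}');  // false: the black flag alone is not a tag sequence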
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-zwj-sequence-generated.js b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-zwj-sequence-generated.js
new file mode 100644
index 0000000000..619bf46175
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-zwj-sequence-generated.js
@@ -0,0 +1,782 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-sequence
+
+const re = /\p{Emoji_ZWJ_Sequence}/u;
+
+assertTrue(re.test('\u{1F468}\u200D\u2764\uFE0F\u200D\u{1F468}'));
+assertTrue(re.test('\u{1F468}\u200D\u2764\uFE0F\u200D\u{1F48B}\u200D\u{1F468}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F466}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F467}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F467}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F467}\u200D\u{1F467}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F468}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F468}\u200D\u{1F466}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F468}\u200D\u{1F467}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F468}\u200D\u{1F467}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F468}\u200D\u{1F467}\u200D\u{1F467}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F469}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F469}\u200D\u{1F466}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F469}\u200D\u{1F467}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F469}\u200D\u{1F467}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F469}\u200D\u{1F467}\u200D\u{1F467}'));
+assertTrue(re.test('\u{1F469}\u200D\u2764\uFE0F\u200D\u{1F468}'));
+assertTrue(re.test('\u{1F469}\u200D\u2764\uFE0F\u200D\u{1F469}'));
+assertTrue(re.test('\u{1F469}\u200D\u2764\uFE0F\u200D\u{1F48B}\u200D\u{1F468}'));
+assertTrue(re.test('\u{1F469}\u200D\u2764\uFE0F\u200D\u{1F48B}\u200D\u{1F469}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F466}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F467}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F467}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F467}\u200D\u{1F467}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F469}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F469}\u200D\u{1F466}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F469}\u200D\u{1F467}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F469}\u200D\u{1F467}\u200D\u{1F466}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F469}\u200D\u{1F467}\u200D\u{1F467}'));
+assertTrue(re.test('\u{1F468}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F468}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F468}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F468}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F469}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F469}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F469}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F469}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u2695\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u2696\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u2708\uFE0F'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F33E}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F373}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F393}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F3A4}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F3A8}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F3EB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F3ED}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F4BB}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F4BC}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F527}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F52C}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F680}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F692}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9B0}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9B1}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9B2}'));
+assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9B3}'));
+assertTrue(re.test('\u{1F46E}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F46E}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F471}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F471}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F471}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F471}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F471}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F471}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F471}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F471}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F471}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F471}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F471}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F471}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F473}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F473}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F473}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F473}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F473}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F473}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F473}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F473}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F473}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F473}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F473}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F473}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F477}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F477}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F477}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F477}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F477}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F477}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F477}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F477}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F477}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F477}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F477}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F477}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F482}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F482}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F482}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F482}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F482}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F482}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F482}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F482}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F482}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F482}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F482}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F482}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F575}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F575}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F575}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F575}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F575}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F575}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F575}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F575}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F575}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F575}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F575}\uFE0F\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F575}\uFE0F\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D9}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DA}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DB}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DD}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9DF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9DF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u26F9\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u26F9\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u26F9\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u26F9\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u26F9\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u26F9\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u26F9\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u26F9\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u26F9\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u26F9\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u26F9\uFE0F\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u26F9\uFE0F\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C3}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3C4}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CA}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\uFE0F\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CB}\uFE0F\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\uFE0F\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F3CC}\uFE0F\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F46F}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F46F}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F486}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F486}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F486}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F486}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F486}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F486}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F486}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F486}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F486}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F486}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F486}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F486}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F487}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F487}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F487}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F487}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F487}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F487}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F487}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F487}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F487}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F487}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F487}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F487}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6A3}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B4}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B5}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F6B6}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F938}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F938}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F938}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F938}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F938}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F938}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F938}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F938}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F938}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F938}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F938}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F938}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F939}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F939}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F939}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F939}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F939}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F939}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F939}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F939}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F939}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F939}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F939}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F939}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93C}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93C}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93D}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F93E}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D6}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D7}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9D8}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F481}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F481}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F481}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F481}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F481}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F481}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F481}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F481}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F481}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F481}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F481}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F481}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F645}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F645}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F645}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F645}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F645}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F645}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F645}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F645}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F645}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F645}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F645}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F645}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F646}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F646}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F646}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F646}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F646}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F646}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F646}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F646}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F646}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F646}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F646}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F646}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F647}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F647}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F647}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F647}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F647}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F647}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F647}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F647}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F647}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F647}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F647}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F647}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64B}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64D}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F64E}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F926}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F926}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F926}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F926}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F926}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F926}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F926}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F926}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F926}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F926}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F926}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F926}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F937}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F937}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F937}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F937}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F937}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F937}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F937}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F937}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F937}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F937}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F937}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F937}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B8}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FB}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FB}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FC}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FC}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FD}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FD}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FE}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FE}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FF}\u200D\u2640\uFE0F'));
+assertTrue(re.test('\u{1F9B9}\u{1F3FF}\u200D\u2642\uFE0F'));
+assertTrue(re.test('\u{1F3F3}\uFE0F\u200D\u{1F308}'));
+assertTrue(re.test('\u{1F3F4}\u200D\u2620\uFE0F'));
+assertTrue(re.test('\u{1F9DC}\u{1F3FE}\u200D\u2640\uFE0F'));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-invalid.js b/deps/v8/test/mjsunit/harmony/regexp-property-invalid.js
index 83c7b2e209..7fef5dfd0c 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-property-invalid.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-invalid.js
@@ -34,3 +34,5 @@ assertThrows("/\\p{In}/u");
assertThrows("/\\pI/u");
assertThrows("/\\p{I}/u");
assertThrows("/\\p{CJK}/u");
+
+assertThrows("/\\p{}/u");
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-sequence.js b/deps/v8/test/mjsunit/harmony/regexp-property-sequence.js
new file mode 100644
index 0000000000..4d43298016
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-sequence.js
@@ -0,0 +1,88 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-sequence
+
+// Normal usage.
+assertDoesNotThrow("/\\p{Emoji_Flag_Sequence}/u");
+assertTrue(/\p{Emoji_Flag_Sequence}/u.test("\u{1F1E9}\u{1F1EA}"));
+
+assertDoesNotThrow("/\\p{Emoji_Keycap_Sequence}/u");
+assertTrue(/\p{Emoji_Keycap_Sequence}/u.test("\u0023\uFE0F\u20E3"));
+
+assertDoesNotThrow("/\\p{Emoji_Keycap_Sequence}/u");
+assertFalse(/\p{Emoji_Keycap_Sequence}/u.test("\u0022\uFE0F\u20E3"));
+
+assertDoesNotThrow("/\\p{Emoji_Modifier_Sequence}/u");
+assertTrue(/\p{Emoji_Modifier_Sequence}/u.test("\u26F9\u{1F3FF}"));
+
+assertDoesNotThrow("/\\p{Emoji_ZWJ_Sequence}/u");
+assertTrue(/\p{Emoji_ZWJ_Sequence}/u.test("\u{1F468}\u{200D}\u{1F467}"));
+
+// Without the unicode flag.
+assertDoesNotThrow("/\\p{Emoji_Flag_Sequence}/");
+assertFalse(/\p{Emoji_Flag_Sequence}/.test("\u{1F1E9}\u{1F1EA}"));
+assertTrue(/\p{Emoji_Flag_Sequence}/.test("\\p{Emoji_Flag_Sequence}"));
+
+// Negated and/or inside a character class.
+assertThrows("/\\P{Emoji_Flag_Sequence}/u");
+assertThrows("/\\P{Emoji_Keycap_Sequence}/u");
+assertThrows("/\\P{Emoji_Modifier_Sequence}/u");
+assertThrows("/\\P{Emoji_Tag_Sequence}/u");
+assertThrows("/\\P{Emoji_ZWJ_Sequence}/u");
+
+assertThrows("/[\\p{Emoji_Flag_Sequence}]/u");
+assertThrows("/[\\p{Emoji_Keycap_Sequence}]/u");
+assertThrows("/[\\p{Emoji_Modifier_Sequence}]/u");
+assertThrows("/[\\p{Emoji_Tag_Sequence}]/u");
+assertThrows("/[\\p{Emoji_ZWJ_Sequence}]/u");
+
+assertThrows("/[\\P{Emoji_Flag_Sequence}]/u");
+assertThrows("/[\\P{Emoji_Keycap_Sequence}]/u");
+assertThrows("/[\\P{Emoji_Modifier_Sequence}]/u");
+assertThrows("/[\\P{Emoji_Tag_Sequence}]/u");
+assertThrows("/[\\P{Emoji_ZWJ_Sequence}]/u");
+
+assertThrows("/[\\w\\p{Emoji_Flag_Sequence}]/u");
+assertThrows("/[\\w\\p{Emoji_Keycap_Sequence}]/u");
+assertThrows("/[\\w\\p{Emoji_Modifier_Sequence}]/u");
+assertThrows("/[\\w\\p{Emoji_Tag_Sequence}]/u");
+assertThrows("/[\\w\\p{Emoji_ZWJ_Sequence}]/u");
+
+assertThrows("/[\\w\\P{Emoji_Flag_Sequence}]/u");
+assertThrows("/[\\w\\P{Emoji_Keycap_Sequence}]/u");
+assertThrows("/[\\w\\P{Emoji_Modifier_Sequence}]/u");
+assertThrows("/[\\w\\P{Emoji_Tag_Sequence}]/u");
+assertThrows("/[\\w\\P{Emoji_ZWJ_Sequence}]/u");
+
+// Two regional indicators, but not a valid country code.
+assertFalse(/\p{Emoji_Flag_Sequence}/u.test("\u{1F1E6}\u{1F1E6}"));
+
+// A ZWJ sequence, i.e. two elements joined by a ZWJ, that is not in the list.
+assertFalse(/\p{Emoji_ZWJ_Sequence}/u.test("\u{1F467}\u{200D}\u{1F468}"));
+
+// More complex regexps.
+assertEquals(
+ ["country flag: \u{1F1E6}\u{1F1F9}"],
+ /Country Flag: \p{Emoji_Flag_Sequence}/iu.exec(
+ "this is an example of a country flag: \u{1F1E6}\u{1F1F9} is Austria"));
+assertEquals(
+ ["country flag: \u{1F1E6}\u{1F1F9}", "\u{1F1E6}\u{1F1F9}"],
+ /Country Flag: (\p{Emoji_Flag_Sequence})/iu.exec(
+ "this is an example of a country flag: \u{1F1E6}\u{1F1F9} is Austria"));
+assertEquals(
+ ["country flag: \u{1F1E6}\u{1F1F9}"],
+ /Country Flag: ..(?<=\p{Emoji_Flag_Sequence})/iu.exec(
+ "this is an example of a country flag: \u{1F1E6}\u{1F1F9} is Austria"));
+assertEquals(
+ ["flag: \u{1F1E6}\u{1F1F9}", "\u{1F1E6}\u{1F1F9}"],
+ /Flag: ..(?<=(\p{Emoji_Flag_Sequence})|\p{Emoji_Keycap_Sequence})/iu.exec(
+ "this is an example of a country flag: \u{1F1E6}\u{1F1F9} is Austria"));
+
+// Partial sequences.
+assertFalse(/\p{Emoji_Flag_Sequence}/u.test("\u{1F1E6}_"));
+assertFalse(/\p{Emoji_Keycap_Sequence}/u.test("2\uFE0F_"));
+assertFalse(/\p{Emoji_Modifier_Sequence}/u.test("\u261D_"));
+assertFalse(/\p{Emoji_Tag_Sequence}/u.test("\u{1F3F4}\u{E0067}\u{E0062}\u{E0065}\u{E006E}\u{E0067}_"));
+assertFalse(/\p{Emoji_ZWJ_Sequence}/u.test("\u{1F468}\u200D\u2764\uFE0F\u200D_"));
diff --git a/deps/v8/test/mjsunit/harmony/to-number.js b/deps/v8/test/mjsunit/harmony/to-number.js
index 6dc4db59a2..a48a7d83f8 100644
--- a/deps/v8/test/mjsunit/harmony/to-number.js
+++ b/deps/v8/test/mjsunit/harmony/to-number.js
@@ -5,47 +5,34 @@
// Flags: --allow-natives-syntax
assertEquals(1, %ToNumber(1));
-assertEquals(1, %_ToNumber(1));
assertEquals(.5, %ToNumber(.5));
-assertEquals(.5, %_ToNumber(.5));
assertEquals(0, %ToNumber(null));
-assertEquals(0, %_ToNumber(null));
assertEquals(1, %ToNumber(true));
-assertEquals(1, %_ToNumber(true));
assertEquals(0, %ToNumber(false));
-assertEquals(0, %_ToNumber(false));
assertEquals(NaN, %ToNumber(undefined));
-assertEquals(NaN, %_ToNumber(undefined));
assertEquals(-1, %ToNumber("-1"));
-assertEquals(-1, %_ToNumber("-1"));
assertEquals(123, %ToNumber("123"));
-assertEquals(123, %_ToNumber("123"));
assertEquals(NaN, %ToNumber("random text"));
-assertEquals(NaN, %_ToNumber("random text"));
assertThrows(function() { %ToNumber(Symbol.toPrimitive) }, TypeError);
-assertThrows(function() { %_ToNumber(Symbol.toPrimitive) }, TypeError);
var a = { toString: function() { return 54321 }};
assertEquals(54321, %ToNumber(a));
-assertEquals(54321, %_ToNumber(a));
var b = { valueOf: function() { return 42 }};
assertEquals(42, %ToNumber(b));
-assertEquals(42, %_ToNumber(b));
var c = {
toString: function() { return "x"},
valueOf: function() { return 123 }
};
assertEquals(123, %ToNumber(c));
-assertEquals(123, %_ToNumber(c));
var d = {
[Symbol.toPrimitive]: function(hint) {
@@ -54,8 +41,6 @@ var d = {
}
};
assertEquals(987654321, %ToNumber(d));
-assertEquals(987654321, %_ToNumber(d));
var e = new Date(0);
assertEquals(0, %ToNumber(e));
-assertEquals(0, %_ToNumber(e));
diff --git a/deps/v8/test/mjsunit/harmony/to-primitive.js b/deps/v8/test/mjsunit/harmony/to-primitive.js
deleted file mode 100644
index 8decb04657..0000000000
--- a/deps/v8/test/mjsunit/harmony/to-primitive.js
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-assertEquals(1, %ToPrimitive(1));
-assertEquals(1, %ToPrimitive_Number(1));
-
-assertEquals(.5, %ToPrimitive(.5));
-assertEquals(.5, %ToPrimitive_Number(.5));
-
-assertEquals(null, %ToPrimitive(null));
-assertEquals(null, %ToPrimitive_Number(null));
-
-assertEquals(true, %ToPrimitive(true));
-assertEquals(true, %ToPrimitive_Number(true));
-
-assertEquals(false, %ToPrimitive(false));
-assertEquals(false, %ToPrimitive_Number(false));
-
-assertEquals(undefined, %ToPrimitive(undefined));
-assertEquals(undefined, %ToPrimitive_Number(undefined));
-
-assertEquals("random text", %ToPrimitive("random text"));
-assertEquals("random text", %ToPrimitive_Number("random text"));
-
-assertEquals(Symbol.toPrimitive, %ToPrimitive(Symbol.toPrimitive));
-assertEquals(Symbol.toPrimitive, %ToPrimitive_Number(Symbol.toPrimitive));
-
-var a = { toString: function() { return "xyz" }};
-assertEquals("xyz", %ToPrimitive(a));
-assertEquals("xyz", %ToPrimitive_Number(a));
-
-var b = { valueOf: function() { return 42 }};
-assertEquals(42, %ToPrimitive(b));
-assertEquals(42, %ToPrimitive_Number(b));
-
-var c = {
- toString: function() { return "x"},
- valueOf: function() { return 123 }
-};
-assertEquals(123, %ToPrimitive(c));
-assertEquals(123, %ToPrimitive_Number(c));
-
-var d = {
- [Symbol.toPrimitive]: function(hint) { return hint }
-};
-assertEquals("default", %ToPrimitive(d));
-assertEquals("number", %ToPrimitive_Number(d));
-
-var e = new Date(0);
-assertEquals(e.toString(), %ToPrimitive(e));
-assertEquals(0, %ToPrimitive_Number(e));
diff --git a/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-checked.js b/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-checked.js
new file mode 100644
index 0000000000..d1179d3855
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-checked.js
@@ -0,0 +1,2575 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-json-stringify
+
+// Test JSON.stringify for cases that hit
+// JsonStringifier::SerializeString_.
+
+// All code points from U+0000 to U+00FF.
+assertEquals('"___\\u0000"', JSON.stringify('___\0'));
+assertEquals('"___\\u0001"', JSON.stringify('___\x01'));
+assertEquals('"___\\u0002"', JSON.stringify('___\x02'));
+assertEquals('"___\\u0003"', JSON.stringify('___\x03'));
+assertEquals('"___\\u0004"', JSON.stringify('___\x04'));
+assertEquals('"___\\u0005"', JSON.stringify('___\x05'));
+assertEquals('"___\\u0006"', JSON.stringify('___\x06'));
+assertEquals('"___\\u0007"', JSON.stringify('___\x07'));
+assertEquals('"___\\b"', JSON.stringify('___\b'));
+assertEquals('"___\\t"', JSON.stringify('___\t'));
+assertEquals('"___\\n"', JSON.stringify('___\n'));
+assertEquals('"___\\u000b"', JSON.stringify('___\x0B'));
+assertEquals('"___\\f"', JSON.stringify('___\f'));
+assertEquals('"___\\r"', JSON.stringify('___\r'));
+assertEquals('"___\\u000e"', JSON.stringify('___\x0E'));
+assertEquals('"___\\u000f"', JSON.stringify('___\x0F'));
+assertEquals('"___\\u0010"', JSON.stringify('___\x10'));
+assertEquals('"___\\u0011"', JSON.stringify('___\x11'));
+assertEquals('"___\\u0012"', JSON.stringify('___\x12'));
+assertEquals('"___\\u0013"', JSON.stringify('___\x13'));
+assertEquals('"___\\u0014"', JSON.stringify('___\x14'));
+assertEquals('"___\\u0015"', JSON.stringify('___\x15'));
+assertEquals('"___\\u0016"', JSON.stringify('___\x16'));
+assertEquals('"___\\u0017"', JSON.stringify('___\x17'));
+assertEquals('"___\\u0018"', JSON.stringify('___\x18'));
+assertEquals('"___\\u0019"', JSON.stringify('___\x19'));
+assertEquals('"___\\u001a"', JSON.stringify('___\x1A'));
+assertEquals('"___\\u001b"', JSON.stringify('___\x1B'));
+assertEquals('"___\\u001c"', JSON.stringify('___\x1C'));
+assertEquals('"___\\u001d"', JSON.stringify('___\x1D'));
+assertEquals('"___\\u001e"', JSON.stringify('___\x1E'));
+assertEquals('"___\\u001f"', JSON.stringify('___\x1F'));
+assertEquals('"___ "', JSON.stringify('___ '));
+assertEquals('"___!"', JSON.stringify('___!'));
+assertEquals('"___\\""', JSON.stringify('___"'));
+assertEquals('"___#"', JSON.stringify('___#'));
+assertEquals('"___$"', JSON.stringify('___$'));
+assertEquals('"___%"', JSON.stringify('___%'));
+assertEquals('"___&"', JSON.stringify('___&'));
+assertEquals('"___\'"', JSON.stringify('___\''));
+assertEquals('"___("', JSON.stringify('___('));
+assertEquals('"___)"', JSON.stringify('___)'));
+assertEquals('"___*"', JSON.stringify('___*'));
+assertEquals('"___+"', JSON.stringify('___+'));
+assertEquals('"___,"', JSON.stringify('___,'));
+assertEquals('"___-"', JSON.stringify('___-'));
+assertEquals('"___."', JSON.stringify('___.'));
+assertEquals('"___/"', JSON.stringify('___/'));
+assertEquals('"___0"', JSON.stringify('___0'));
+assertEquals('"___1"', JSON.stringify('___1'));
+assertEquals('"___2"', JSON.stringify('___2'));
+assertEquals('"___3"', JSON.stringify('___3'));
+assertEquals('"___4"', JSON.stringify('___4'));
+assertEquals('"___5"', JSON.stringify('___5'));
+assertEquals('"___6"', JSON.stringify('___6'));
+assertEquals('"___7"', JSON.stringify('___7'));
+assertEquals('"___8"', JSON.stringify('___8'));
+assertEquals('"___9"', JSON.stringify('___9'));
+assertEquals('"___:"', JSON.stringify('___:'));
+assertEquals('"___;"', JSON.stringify('___;'));
+assertEquals('"___<"', JSON.stringify('___<'));
+assertEquals('"___="', JSON.stringify('___='));
+assertEquals('"___>"', JSON.stringify('___>'));
+assertEquals('"___?"', JSON.stringify('___?'));
+assertEquals('"___@"', JSON.stringify('___@'));
+assertEquals('"___A"', JSON.stringify('___A'));
+assertEquals('"___B"', JSON.stringify('___B'));
+assertEquals('"___C"', JSON.stringify('___C'));
+assertEquals('"___D"', JSON.stringify('___D'));
+assertEquals('"___E"', JSON.stringify('___E'));
+assertEquals('"___F"', JSON.stringify('___F'));
+assertEquals('"___G"', JSON.stringify('___G'));
+assertEquals('"___H"', JSON.stringify('___H'));
+assertEquals('"___I"', JSON.stringify('___I'));
+assertEquals('"___J"', JSON.stringify('___J'));
+assertEquals('"___K"', JSON.stringify('___K'));
+assertEquals('"___L"', JSON.stringify('___L'));
+assertEquals('"___M"', JSON.stringify('___M'));
+assertEquals('"___N"', JSON.stringify('___N'));
+assertEquals('"___O"', JSON.stringify('___O'));
+assertEquals('"___P"', JSON.stringify('___P'));
+assertEquals('"___Q"', JSON.stringify('___Q'));
+assertEquals('"___R"', JSON.stringify('___R'));
+assertEquals('"___S"', JSON.stringify('___S'));
+assertEquals('"___T"', JSON.stringify('___T'));
+assertEquals('"___U"', JSON.stringify('___U'));
+assertEquals('"___V"', JSON.stringify('___V'));
+assertEquals('"___W"', JSON.stringify('___W'));
+assertEquals('"___X"', JSON.stringify('___X'));
+assertEquals('"___Y"', JSON.stringify('___Y'));
+assertEquals('"___Z"', JSON.stringify('___Z'));
+assertEquals('"___["', JSON.stringify('___['));
+assertEquals('"___\\\\"', JSON.stringify('___\\'));
+assertEquals('"___]"', JSON.stringify('___]'));
+assertEquals('"___^"', JSON.stringify('___^'));
+assertEquals('"____"', JSON.stringify('____'));
+assertEquals('"___`"', JSON.stringify('___`'));
+assertEquals('"___a"', JSON.stringify('___a'));
+assertEquals('"___b"', JSON.stringify('___b'));
+assertEquals('"___c"', JSON.stringify('___c'));
+assertEquals('"___d"', JSON.stringify('___d'));
+assertEquals('"___e"', JSON.stringify('___e'));
+assertEquals('"___f"', JSON.stringify('___f'));
+assertEquals('"___g"', JSON.stringify('___g'));
+assertEquals('"___h"', JSON.stringify('___h'));
+assertEquals('"___i"', JSON.stringify('___i'));
+assertEquals('"___j"', JSON.stringify('___j'));
+assertEquals('"___k"', JSON.stringify('___k'));
+assertEquals('"___l"', JSON.stringify('___l'));
+assertEquals('"___m"', JSON.stringify('___m'));
+assertEquals('"___n"', JSON.stringify('___n'));
+assertEquals('"___o"', JSON.stringify('___o'));
+assertEquals('"___p"', JSON.stringify('___p'));
+assertEquals('"___q"', JSON.stringify('___q'));
+assertEquals('"___r"', JSON.stringify('___r'));
+assertEquals('"___s"', JSON.stringify('___s'));
+assertEquals('"___t"', JSON.stringify('___t'));
+assertEquals('"___u"', JSON.stringify('___u'));
+assertEquals('"___v"', JSON.stringify('___v'));
+assertEquals('"___w"', JSON.stringify('___w'));
+assertEquals('"___x"', JSON.stringify('___x'));
+assertEquals('"___y"', JSON.stringify('___y'));
+assertEquals('"___z"', JSON.stringify('___z'));
+assertEquals('"___{"', JSON.stringify('___{'));
+assertEquals('"___|"', JSON.stringify('___|'));
+assertEquals('"___}"', JSON.stringify('___}'));
+assertEquals('"___~"', JSON.stringify('___~'));
+assertEquals('"___\x7F"', JSON.stringify('___\x7F'));
+assertEquals('"___\x80"', JSON.stringify('___\x80'));
+assertEquals('"___\x81"', JSON.stringify('___\x81'));
+assertEquals('"___\x82"', JSON.stringify('___\x82'));
+assertEquals('"___\x83"', JSON.stringify('___\x83'));
+assertEquals('"___\x84"', JSON.stringify('___\x84'));
+assertEquals('"___\x85"', JSON.stringify('___\x85'));
+assertEquals('"___\x86"', JSON.stringify('___\x86'));
+assertEquals('"___\x87"', JSON.stringify('___\x87'));
+assertEquals('"___\x88"', JSON.stringify('___\x88'));
+assertEquals('"___\x89"', JSON.stringify('___\x89'));
+assertEquals('"___\x8A"', JSON.stringify('___\x8A'));
+assertEquals('"___\x8B"', JSON.stringify('___\x8B'));
+assertEquals('"___\x8C"', JSON.stringify('___\x8C'));
+assertEquals('"___\x8D"', JSON.stringify('___\x8D'));
+assertEquals('"___\x8E"', JSON.stringify('___\x8E'));
+assertEquals('"___\x8F"', JSON.stringify('___\x8F'));
+assertEquals('"___\x90"', JSON.stringify('___\x90'));
+assertEquals('"___\x91"', JSON.stringify('___\x91'));
+assertEquals('"___\x92"', JSON.stringify('___\x92'));
+assertEquals('"___\x93"', JSON.stringify('___\x93'));
+assertEquals('"___\x94"', JSON.stringify('___\x94'));
+assertEquals('"___\x95"', JSON.stringify('___\x95'));
+assertEquals('"___\x96"', JSON.stringify('___\x96'));
+assertEquals('"___\x97"', JSON.stringify('___\x97'));
+assertEquals('"___\x98"', JSON.stringify('___\x98'));
+assertEquals('"___\x99"', JSON.stringify('___\x99'));
+assertEquals('"___\x9A"', JSON.stringify('___\x9A'));
+assertEquals('"___\x9B"', JSON.stringify('___\x9B'));
+assertEquals('"___\x9C"', JSON.stringify('___\x9C'));
+assertEquals('"___\x9D"', JSON.stringify('___\x9D'));
+assertEquals('"___\x9E"', JSON.stringify('___\x9E'));
+assertEquals('"___\x9F"', JSON.stringify('___\x9F'));
+assertEquals('"___\xA0"', JSON.stringify('___\xA0'));
+assertEquals('"___\xA1"', JSON.stringify('___\xA1'));
+assertEquals('"___\xA2"', JSON.stringify('___\xA2'));
+assertEquals('"___\xA3"', JSON.stringify('___\xA3'));
+assertEquals('"___\xA4"', JSON.stringify('___\xA4'));
+assertEquals('"___\xA5"', JSON.stringify('___\xA5'));
+assertEquals('"___\xA6"', JSON.stringify('___\xA6'));
+assertEquals('"___\xA7"', JSON.stringify('___\xA7'));
+assertEquals('"___\xA8"', JSON.stringify('___\xA8'));
+assertEquals('"___\xA9"', JSON.stringify('___\xA9'));
+assertEquals('"___\xAA"', JSON.stringify('___\xAA'));
+assertEquals('"___\xAB"', JSON.stringify('___\xAB'));
+assertEquals('"___\xAC"', JSON.stringify('___\xAC'));
+assertEquals('"___\xAD"', JSON.stringify('___\xAD'));
+assertEquals('"___\xAE"', JSON.stringify('___\xAE'));
+assertEquals('"___\xAF"', JSON.stringify('___\xAF'));
+assertEquals('"___\xB0"', JSON.stringify('___\xB0'));
+assertEquals('"___\xB1"', JSON.stringify('___\xB1'));
+assertEquals('"___\xB2"', JSON.stringify('___\xB2'));
+assertEquals('"___\xB3"', JSON.stringify('___\xB3'));
+assertEquals('"___\xB4"', JSON.stringify('___\xB4'));
+assertEquals('"___\xB5"', JSON.stringify('___\xB5'));
+assertEquals('"___\xB6"', JSON.stringify('___\xB6'));
+assertEquals('"___\xB7"', JSON.stringify('___\xB7'));
+assertEquals('"___\xB8"', JSON.stringify('___\xB8'));
+assertEquals('"___\xB9"', JSON.stringify('___\xB9'));
+assertEquals('"___\xBA"', JSON.stringify('___\xBA'));
+assertEquals('"___\xBB"', JSON.stringify('___\xBB'));
+assertEquals('"___\xBC"', JSON.stringify('___\xBC'));
+assertEquals('"___\xBD"', JSON.stringify('___\xBD'));
+assertEquals('"___\xBE"', JSON.stringify('___\xBE'));
+assertEquals('"___\xBF"', JSON.stringify('___\xBF'));
+assertEquals('"___\xC0"', JSON.stringify('___\xC0'));
+assertEquals('"___\xC1"', JSON.stringify('___\xC1'));
+assertEquals('"___\xC2"', JSON.stringify('___\xC2'));
+assertEquals('"___\xC3"', JSON.stringify('___\xC3'));
+assertEquals('"___\xC4"', JSON.stringify('___\xC4'));
+assertEquals('"___\xC5"', JSON.stringify('___\xC5'));
+assertEquals('"___\xC6"', JSON.stringify('___\xC6'));
+assertEquals('"___\xC7"', JSON.stringify('___\xC7'));
+assertEquals('"___\xC8"', JSON.stringify('___\xC8'));
+assertEquals('"___\xC9"', JSON.stringify('___\xC9'));
+assertEquals('"___\xCA"', JSON.stringify('___\xCA'));
+assertEquals('"___\xCB"', JSON.stringify('___\xCB'));
+assertEquals('"___\xCC"', JSON.stringify('___\xCC'));
+assertEquals('"___\xCD"', JSON.stringify('___\xCD'));
+assertEquals('"___\xCE"', JSON.stringify('___\xCE'));
+assertEquals('"___\xCF"', JSON.stringify('___\xCF'));
+assertEquals('"___\xD0"', JSON.stringify('___\xD0'));
+assertEquals('"___\xD1"', JSON.stringify('___\xD1'));
+assertEquals('"___\xD2"', JSON.stringify('___\xD2'));
+assertEquals('"___\xD3"', JSON.stringify('___\xD3'));
+assertEquals('"___\xD4"', JSON.stringify('___\xD4'));
+assertEquals('"___\xD5"', JSON.stringify('___\xD5'));
+assertEquals('"___\xD6"', JSON.stringify('___\xD6'));
+assertEquals('"___\xD7"', JSON.stringify('___\xD7'));
+assertEquals('"___\xD8"', JSON.stringify('___\xD8'));
+assertEquals('"___\xD9"', JSON.stringify('___\xD9'));
+assertEquals('"___\xDA"', JSON.stringify('___\xDA'));
+assertEquals('"___\xDB"', JSON.stringify('___\xDB'));
+assertEquals('"___\xDC"', JSON.stringify('___\xDC'));
+assertEquals('"___\xDD"', JSON.stringify('___\xDD'));
+assertEquals('"___\xDE"', JSON.stringify('___\xDE'));
+assertEquals('"___\xDF"', JSON.stringify('___\xDF'));
+assertEquals('"___\xE0"', JSON.stringify('___\xE0'));
+assertEquals('"___\xE1"', JSON.stringify('___\xE1'));
+assertEquals('"___\xE2"', JSON.stringify('___\xE2'));
+assertEquals('"___\xE3"', JSON.stringify('___\xE3'));
+assertEquals('"___\xE4"', JSON.stringify('___\xE4'));
+assertEquals('"___\xE5"', JSON.stringify('___\xE5'));
+assertEquals('"___\xE6"', JSON.stringify('___\xE6'));
+assertEquals('"___\xE7"', JSON.stringify('___\xE7'));
+assertEquals('"___\xE8"', JSON.stringify('___\xE8'));
+assertEquals('"___\xE9"', JSON.stringify('___\xE9'));
+assertEquals('"___\xEA"', JSON.stringify('___\xEA'));
+assertEquals('"___\xEB"', JSON.stringify('___\xEB'));
+assertEquals('"___\xEC"', JSON.stringify('___\xEC'));
+assertEquals('"___\xED"', JSON.stringify('___\xED'));
+assertEquals('"___\xEE"', JSON.stringify('___\xEE'));
+assertEquals('"___\xEF"', JSON.stringify('___\xEF'));
+assertEquals('"___\xF0"', JSON.stringify('___\xF0'));
+assertEquals('"___\xF1"', JSON.stringify('___\xF1'));
+assertEquals('"___\xF2"', JSON.stringify('___\xF2'));
+assertEquals('"___\xF3"', JSON.stringify('___\xF3'));
+assertEquals('"___\xF4"', JSON.stringify('___\xF4'));
+assertEquals('"___\xF5"', JSON.stringify('___\xF5'));
+assertEquals('"___\xF6"', JSON.stringify('___\xF6'));
+assertEquals('"___\xF7"', JSON.stringify('___\xF7'));
+assertEquals('"___\xF8"', JSON.stringify('___\xF8'));
+assertEquals('"___\xF9"', JSON.stringify('___\xF9'));
+assertEquals('"___\xFA"', JSON.stringify('___\xFA'));
+assertEquals('"___\xFB"', JSON.stringify('___\xFB'));
+assertEquals('"___\xFC"', JSON.stringify('___\xFC'));
+assertEquals('"___\xFD"', JSON.stringify('___\xFD'));
+assertEquals('"___\xFE"', JSON.stringify('___\xFE'));
+assertEquals('"___\xFF"', JSON.stringify('___\xFF'));
+
+// A random selection of code points from U+0100 to U+D7FF.
+assertEquals('"___\u0100"', JSON.stringify('___\u0100'));
+assertEquals('"___\u0120"', JSON.stringify('___\u0120'));
+assertEquals('"___\u07D3"', JSON.stringify('___\u07D3'));
+assertEquals('"___\u0B8B"', JSON.stringify('___\u0B8B'));
+assertEquals('"___\u0C4C"', JSON.stringify('___\u0C4C'));
+assertEquals('"___\u178D"', JSON.stringify('___\u178D'));
+assertEquals('"___\u18B8"', JSON.stringify('___\u18B8'));
+assertEquals('"___\u193E"', JSON.stringify('___\u193E'));
+assertEquals('"___\u198A"', JSON.stringify('___\u198A'));
+assertEquals('"___\u1AF5"', JSON.stringify('___\u1AF5'));
+assertEquals('"___\u1D38"', JSON.stringify('___\u1D38'));
+assertEquals('"___\u1E37"', JSON.stringify('___\u1E37'));
+assertEquals('"___\u1FC2"', JSON.stringify('___\u1FC2'));
+assertEquals('"___\u22C7"', JSON.stringify('___\u22C7'));
+assertEquals('"___\u2619"', JSON.stringify('___\u2619'));
+assertEquals('"___\u272A"', JSON.stringify('___\u272A'));
+assertEquals('"___\u2B7F"', JSON.stringify('___\u2B7F'));
+assertEquals('"___\u2DFF"', JSON.stringify('___\u2DFF'));
+assertEquals('"___\u341B"', JSON.stringify('___\u341B'));
+assertEquals('"___\u3A3C"', JSON.stringify('___\u3A3C'));
+assertEquals('"___\u3E53"', JSON.stringify('___\u3E53'));
+assertEquals('"___\u3EC2"', JSON.stringify('___\u3EC2'));
+assertEquals('"___\u3F76"', JSON.stringify('___\u3F76'));
+assertEquals('"___\u3F85"', JSON.stringify('___\u3F85'));
+assertEquals('"___\u43C7"', JSON.stringify('___\u43C7'));
+assertEquals('"___\u4A19"', JSON.stringify('___\u4A19'));
+assertEquals('"___\u4A1C"', JSON.stringify('___\u4A1C'));
+assertEquals('"___\u4F80"', JSON.stringify('___\u4F80'));
+assertEquals('"___\u5A30"', JSON.stringify('___\u5A30'));
+assertEquals('"___\u5B55"', JSON.stringify('___\u5B55'));
+assertEquals('"___\u5C74"', JSON.stringify('___\u5C74'));
+assertEquals('"___\u6006"', JSON.stringify('___\u6006'));
+assertEquals('"___\u63CC"', JSON.stringify('___\u63CC'));
+assertEquals('"___\u6608"', JSON.stringify('___\u6608'));
+assertEquals('"___\u6ABF"', JSON.stringify('___\u6ABF'));
+assertEquals('"___\u6AE9"', JSON.stringify('___\u6AE9'));
+assertEquals('"___\u6C91"', JSON.stringify('___\u6C91'));
+assertEquals('"___\u714B"', JSON.stringify('___\u714B'));
+assertEquals('"___\u728A"', JSON.stringify('___\u728A'));
+assertEquals('"___\u7485"', JSON.stringify('___\u7485'));
+assertEquals('"___\u77C8"', JSON.stringify('___\u77C8'));
+assertEquals('"___\u7BE9"', JSON.stringify('___\u7BE9'));
+assertEquals('"___\u7CEF"', JSON.stringify('___\u7CEF'));
+assertEquals('"___\u7DD5"', JSON.stringify('___\u7DD5'));
+assertEquals('"___\u8DF1"', JSON.stringify('___\u8DF1'));
+assertEquals('"___\u94A9"', JSON.stringify('___\u94A9'));
+assertEquals('"___\u94F2"', JSON.stringify('___\u94F2'));
+assertEquals('"___\u9A7A"', JSON.stringify('___\u9A7A'));
+assertEquals('"___\u9AA6"', JSON.stringify('___\u9AA6'));
+assertEquals('"___\uA2B0"', JSON.stringify('___\uA2B0'));
+assertEquals('"___\uB711"', JSON.stringify('___\uB711'));
+assertEquals('"___\uBC01"', JSON.stringify('___\uBC01'));
+assertEquals('"___\uBCB6"', JSON.stringify('___\uBCB6'));
+assertEquals('"___\uBD70"', JSON.stringify('___\uBD70'));
+assertEquals('"___\uC3CD"', JSON.stringify('___\uC3CD'));
+assertEquals('"___\uC451"', JSON.stringify('___\uC451'));
+assertEquals('"___\uC677"', JSON.stringify('___\uC677'));
+assertEquals('"___\uC89B"', JSON.stringify('___\uC89B'));
+assertEquals('"___\uCBEF"', JSON.stringify('___\uCBEF'));
+assertEquals('"___\uCEF8"', JSON.stringify('___\uCEF8'));
+assertEquals('"___\uD089"', JSON.stringify('___\uD089'));
+assertEquals('"___\uD24D"', JSON.stringify('___\uD24D'));
+assertEquals('"___\uD3A7"', JSON.stringify('___\uD3A7'));
+assertEquals('"___\uD7FF"', JSON.stringify('___\uD7FF'));
+
+// All lone surrogates, i.e. code points from U+D800 to U+DFFF.
+assertEquals('"___\\ud800"', JSON.stringify('___\uD800'));
+assertEquals('"___\\ud801"', JSON.stringify('___\uD801'));
+assertEquals('"___\\ud802"', JSON.stringify('___\uD802'));
+assertEquals('"___\\ud803"', JSON.stringify('___\uD803'));
+assertEquals('"___\\ud804"', JSON.stringify('___\uD804'));
+assertEquals('"___\\ud805"', JSON.stringify('___\uD805'));
+assertEquals('"___\\ud806"', JSON.stringify('___\uD806'));
+assertEquals('"___\\ud807"', JSON.stringify('___\uD807'));
+assertEquals('"___\\ud808"', JSON.stringify('___\uD808'));
+assertEquals('"___\\ud809"', JSON.stringify('___\uD809'));
+assertEquals('"___\\ud80a"', JSON.stringify('___\uD80A'));
+assertEquals('"___\\ud80b"', JSON.stringify('___\uD80B'));
+assertEquals('"___\\ud80c"', JSON.stringify('___\uD80C'));
+assertEquals('"___\\ud80d"', JSON.stringify('___\uD80D'));
+assertEquals('"___\\ud80e"', JSON.stringify('___\uD80E'));
+assertEquals('"___\\ud80f"', JSON.stringify('___\uD80F'));
+assertEquals('"___\\ud810"', JSON.stringify('___\uD810'));
+assertEquals('"___\\ud811"', JSON.stringify('___\uD811'));
+assertEquals('"___\\ud812"', JSON.stringify('___\uD812'));
+assertEquals('"___\\ud813"', JSON.stringify('___\uD813'));
+assertEquals('"___\\ud814"', JSON.stringify('___\uD814'));
+assertEquals('"___\\ud815"', JSON.stringify('___\uD815'));
+assertEquals('"___\\ud816"', JSON.stringify('___\uD816'));
+assertEquals('"___\\ud817"', JSON.stringify('___\uD817'));
+assertEquals('"___\\ud818"', JSON.stringify('___\uD818'));
+assertEquals('"___\\ud819"', JSON.stringify('___\uD819'));
+assertEquals('"___\\ud81a"', JSON.stringify('___\uD81A'));
+assertEquals('"___\\ud81b"', JSON.stringify('___\uD81B'));
+assertEquals('"___\\ud81c"', JSON.stringify('___\uD81C'));
+assertEquals('"___\\ud81d"', JSON.stringify('___\uD81D'));
+assertEquals('"___\\ud81e"', JSON.stringify('___\uD81E'));
+assertEquals('"___\\ud81f"', JSON.stringify('___\uD81F'));
+assertEquals('"___\\ud820"', JSON.stringify('___\uD820'));
+assertEquals('"___\\ud821"', JSON.stringify('___\uD821'));
+assertEquals('"___\\ud822"', JSON.stringify('___\uD822'));
+assertEquals('"___\\ud823"', JSON.stringify('___\uD823'));
+assertEquals('"___\\ud824"', JSON.stringify('___\uD824'));
+assertEquals('"___\\ud825"', JSON.stringify('___\uD825'));
+assertEquals('"___\\ud826"', JSON.stringify('___\uD826'));
+assertEquals('"___\\ud827"', JSON.stringify('___\uD827'));
+assertEquals('"___\\ud828"', JSON.stringify('___\uD828'));
+assertEquals('"___\\ud829"', JSON.stringify('___\uD829'));
+assertEquals('"___\\ud82a"', JSON.stringify('___\uD82A'));
+assertEquals('"___\\ud82b"', JSON.stringify('___\uD82B'));
+assertEquals('"___\\ud82c"', JSON.stringify('___\uD82C'));
+assertEquals('"___\\ud82d"', JSON.stringify('___\uD82D'));
+assertEquals('"___\\ud82e"', JSON.stringify('___\uD82E'));
+assertEquals('"___\\ud82f"', JSON.stringify('___\uD82F'));
+assertEquals('"___\\ud830"', JSON.stringify('___\uD830'));
+assertEquals('"___\\ud831"', JSON.stringify('___\uD831'));
+assertEquals('"___\\ud832"', JSON.stringify('___\uD832'));
+assertEquals('"___\\ud833"', JSON.stringify('___\uD833'));
+assertEquals('"___\\ud834"', JSON.stringify('___\uD834'));
+assertEquals('"___\\ud835"', JSON.stringify('___\uD835'));
+assertEquals('"___\\ud836"', JSON.stringify('___\uD836'));
+assertEquals('"___\\ud837"', JSON.stringify('___\uD837'));
+assertEquals('"___\\ud838"', JSON.stringify('___\uD838'));
+assertEquals('"___\\ud839"', JSON.stringify('___\uD839'));
+assertEquals('"___\\ud83a"', JSON.stringify('___\uD83A'));
+assertEquals('"___\\ud83b"', JSON.stringify('___\uD83B'));
+assertEquals('"___\\ud83c"', JSON.stringify('___\uD83C'));
+assertEquals('"___\\ud83d"', JSON.stringify('___\uD83D'));
+assertEquals('"___\\ud83e"', JSON.stringify('___\uD83E'));
+assertEquals('"___\\ud83f"', JSON.stringify('___\uD83F'));
+assertEquals('"___\\ud840"', JSON.stringify('___\uD840'));
+assertEquals('"___\\ud841"', JSON.stringify('___\uD841'));
+assertEquals('"___\\ud842"', JSON.stringify('___\uD842'));
+assertEquals('"___\\ud843"', JSON.stringify('___\uD843'));
+assertEquals('"___\\ud844"', JSON.stringify('___\uD844'));
+assertEquals('"___\\ud845"', JSON.stringify('___\uD845'));
+assertEquals('"___\\ud846"', JSON.stringify('___\uD846'));
+assertEquals('"___\\ud847"', JSON.stringify('___\uD847'));
+assertEquals('"___\\ud848"', JSON.stringify('___\uD848'));
+assertEquals('"___\\ud849"', JSON.stringify('___\uD849'));
+assertEquals('"___\\ud84a"', JSON.stringify('___\uD84A'));
+assertEquals('"___\\ud84b"', JSON.stringify('___\uD84B'));
+assertEquals('"___\\ud84c"', JSON.stringify('___\uD84C'));
+assertEquals('"___\\ud84d"', JSON.stringify('___\uD84D'));
+assertEquals('"___\\ud84e"', JSON.stringify('___\uD84E'));
+assertEquals('"___\\ud84f"', JSON.stringify('___\uD84F'));
+assertEquals('"___\\ud850"', JSON.stringify('___\uD850'));
+assertEquals('"___\\ud851"', JSON.stringify('___\uD851'));
+assertEquals('"___\\ud852"', JSON.stringify('___\uD852'));
+assertEquals('"___\\ud853"', JSON.stringify('___\uD853'));
+assertEquals('"___\\ud854"', JSON.stringify('___\uD854'));
+assertEquals('"___\\ud855"', JSON.stringify('___\uD855'));
+assertEquals('"___\\ud856"', JSON.stringify('___\uD856'));
+assertEquals('"___\\ud857"', JSON.stringify('___\uD857'));
+assertEquals('"___\\ud858"', JSON.stringify('___\uD858'));
+assertEquals('"___\\ud859"', JSON.stringify('___\uD859'));
+assertEquals('"___\\ud85a"', JSON.stringify('___\uD85A'));
+assertEquals('"___\\ud85b"', JSON.stringify('___\uD85B'));
+assertEquals('"___\\ud85c"', JSON.stringify('___\uD85C'));
+assertEquals('"___\\ud85d"', JSON.stringify('___\uD85D'));
+assertEquals('"___\\ud85e"', JSON.stringify('___\uD85E'));
+assertEquals('"___\\ud85f"', JSON.stringify('___\uD85F'));
+assertEquals('"___\\ud860"', JSON.stringify('___\uD860'));
+assertEquals('"___\\ud861"', JSON.stringify('___\uD861'));
+assertEquals('"___\\ud862"', JSON.stringify('___\uD862'));
+assertEquals('"___\\ud863"', JSON.stringify('___\uD863'));
+assertEquals('"___\\ud864"', JSON.stringify('___\uD864'));
+assertEquals('"___\\ud865"', JSON.stringify('___\uD865'));
+assertEquals('"___\\ud866"', JSON.stringify('___\uD866'));
+assertEquals('"___\\ud867"', JSON.stringify('___\uD867'));
+assertEquals('"___\\ud868"', JSON.stringify('___\uD868'));
+assertEquals('"___\\ud869"', JSON.stringify('___\uD869'));
+assertEquals('"___\\ud86a"', JSON.stringify('___\uD86A'));
+assertEquals('"___\\ud86b"', JSON.stringify('___\uD86B'));
+assertEquals('"___\\ud86c"', JSON.stringify('___\uD86C'));
+assertEquals('"___\\ud86d"', JSON.stringify('___\uD86D'));
+assertEquals('"___\\ud86e"', JSON.stringify('___\uD86E'));
+assertEquals('"___\\ud86f"', JSON.stringify('___\uD86F'));
+assertEquals('"___\\ud870"', JSON.stringify('___\uD870'));
+assertEquals('"___\\ud871"', JSON.stringify('___\uD871'));
+assertEquals('"___\\ud872"', JSON.stringify('___\uD872'));
+assertEquals('"___\\ud873"', JSON.stringify('___\uD873'));
+assertEquals('"___\\ud874"', JSON.stringify('___\uD874'));
+assertEquals('"___\\ud875"', JSON.stringify('___\uD875'));
+assertEquals('"___\\ud876"', JSON.stringify('___\uD876'));
+assertEquals('"___\\ud877"', JSON.stringify('___\uD877'));
+assertEquals('"___\\ud878"', JSON.stringify('___\uD878'));
+assertEquals('"___\\ud879"', JSON.stringify('___\uD879'));
+assertEquals('"___\\ud87a"', JSON.stringify('___\uD87A'));
+assertEquals('"___\\ud87b"', JSON.stringify('___\uD87B'));
+assertEquals('"___\\ud87c"', JSON.stringify('___\uD87C'));
+assertEquals('"___\\ud87d"', JSON.stringify('___\uD87D'));
+assertEquals('"___\\ud87e"', JSON.stringify('___\uD87E'));
+assertEquals('"___\\ud87f"', JSON.stringify('___\uD87F'));
+assertEquals('"___\\ud880"', JSON.stringify('___\uD880'));
+assertEquals('"___\\ud881"', JSON.stringify('___\uD881'));
+assertEquals('"___\\ud882"', JSON.stringify('___\uD882'));
+assertEquals('"___\\ud883"', JSON.stringify('___\uD883'));
+assertEquals('"___\\ud884"', JSON.stringify('___\uD884'));
+assertEquals('"___\\ud885"', JSON.stringify('___\uD885'));
+assertEquals('"___\\ud886"', JSON.stringify('___\uD886'));
+assertEquals('"___\\ud887"', JSON.stringify('___\uD887'));
+assertEquals('"___\\ud888"', JSON.stringify('___\uD888'));
+assertEquals('"___\\ud889"', JSON.stringify('___\uD889'));
+assertEquals('"___\\ud88a"', JSON.stringify('___\uD88A'));
+assertEquals('"___\\ud88b"', JSON.stringify('___\uD88B'));
+assertEquals('"___\\ud88c"', JSON.stringify('___\uD88C'));
+assertEquals('"___\\ud88d"', JSON.stringify('___\uD88D'));
+assertEquals('"___\\ud88e"', JSON.stringify('___\uD88E'));
+assertEquals('"___\\ud88f"', JSON.stringify('___\uD88F'));
+assertEquals('"___\\ud890"', JSON.stringify('___\uD890'));
+assertEquals('"___\\ud891"', JSON.stringify('___\uD891'));
+assertEquals('"___\\ud892"', JSON.stringify('___\uD892'));
+assertEquals('"___\\ud893"', JSON.stringify('___\uD893'));
+assertEquals('"___\\ud894"', JSON.stringify('___\uD894'));
+assertEquals('"___\\ud895"', JSON.stringify('___\uD895'));
+assertEquals('"___\\ud896"', JSON.stringify('___\uD896'));
+assertEquals('"___\\ud897"', JSON.stringify('___\uD897'));
+assertEquals('"___\\ud898"', JSON.stringify('___\uD898'));
+assertEquals('"___\\ud899"', JSON.stringify('___\uD899'));
+assertEquals('"___\\ud89a"', JSON.stringify('___\uD89A'));
+assertEquals('"___\\ud89b"', JSON.stringify('___\uD89B'));
+assertEquals('"___\\ud89c"', JSON.stringify('___\uD89C'));
+assertEquals('"___\\ud89d"', JSON.stringify('___\uD89D'));
+assertEquals('"___\\ud89e"', JSON.stringify('___\uD89E'));
+assertEquals('"___\\ud89f"', JSON.stringify('___\uD89F'));
+assertEquals('"___\\ud8a0"', JSON.stringify('___\uD8A0'));
+assertEquals('"___\\ud8a1"', JSON.stringify('___\uD8A1'));
+assertEquals('"___\\ud8a2"', JSON.stringify('___\uD8A2'));
+assertEquals('"___\\ud8a3"', JSON.stringify('___\uD8A3'));
+assertEquals('"___\\ud8a4"', JSON.stringify('___\uD8A4'));
+assertEquals('"___\\ud8a5"', JSON.stringify('___\uD8A5'));
+assertEquals('"___\\ud8a6"', JSON.stringify('___\uD8A6'));
+assertEquals('"___\\ud8a7"', JSON.stringify('___\uD8A7'));
+assertEquals('"___\\ud8a8"', JSON.stringify('___\uD8A8'));
+assertEquals('"___\\ud8a9"', JSON.stringify('___\uD8A9'));
+assertEquals('"___\\ud8aa"', JSON.stringify('___\uD8AA'));
+assertEquals('"___\\ud8ab"', JSON.stringify('___\uD8AB'));
+assertEquals('"___\\ud8ac"', JSON.stringify('___\uD8AC'));
+assertEquals('"___\\ud8ad"', JSON.stringify('___\uD8AD'));
+assertEquals('"___\\ud8ae"', JSON.stringify('___\uD8AE'));
+assertEquals('"___\\ud8af"', JSON.stringify('___\uD8AF'));
+assertEquals('"___\\ud8b0"', JSON.stringify('___\uD8B0'));
+assertEquals('"___\\ud8b1"', JSON.stringify('___\uD8B1'));
+assertEquals('"___\\ud8b2"', JSON.stringify('___\uD8B2'));
+assertEquals('"___\\ud8b3"', JSON.stringify('___\uD8B3'));
+assertEquals('"___\\ud8b4"', JSON.stringify('___\uD8B4'));
+assertEquals('"___\\ud8b5"', JSON.stringify('___\uD8B5'));
+assertEquals('"___\\ud8b6"', JSON.stringify('___\uD8B6'));
+assertEquals('"___\\ud8b7"', JSON.stringify('___\uD8B7'));
+assertEquals('"___\\ud8b8"', JSON.stringify('___\uD8B8'));
+assertEquals('"___\\ud8b9"', JSON.stringify('___\uD8B9'));
+assertEquals('"___\\ud8ba"', JSON.stringify('___\uD8BA'));
+assertEquals('"___\\ud8bb"', JSON.stringify('___\uD8BB'));
+assertEquals('"___\\ud8bc"', JSON.stringify('___\uD8BC'));
+assertEquals('"___\\ud8bd"', JSON.stringify('___\uD8BD'));
+assertEquals('"___\\ud8be"', JSON.stringify('___\uD8BE'));
+assertEquals('"___\\ud8bf"', JSON.stringify('___\uD8BF'));
+assertEquals('"___\\ud8c0"', JSON.stringify('___\uD8C0'));
+assertEquals('"___\\ud8c1"', JSON.stringify('___\uD8C1'));
+assertEquals('"___\\ud8c2"', JSON.stringify('___\uD8C2'));
+assertEquals('"___\\ud8c3"', JSON.stringify('___\uD8C3'));
+assertEquals('"___\\ud8c4"', JSON.stringify('___\uD8C4'));
+assertEquals('"___\\ud8c5"', JSON.stringify('___\uD8C5'));
+assertEquals('"___\\ud8c6"', JSON.stringify('___\uD8C6'));
+assertEquals('"___\\ud8c7"', JSON.stringify('___\uD8C7'));
+assertEquals('"___\\ud8c8"', JSON.stringify('___\uD8C8'));
+assertEquals('"___\\ud8c9"', JSON.stringify('___\uD8C9'));
+assertEquals('"___\\ud8ca"', JSON.stringify('___\uD8CA'));
+assertEquals('"___\\ud8cb"', JSON.stringify('___\uD8CB'));
+assertEquals('"___\\ud8cc"', JSON.stringify('___\uD8CC'));
+assertEquals('"___\\ud8cd"', JSON.stringify('___\uD8CD'));
+assertEquals('"___\\ud8ce"', JSON.stringify('___\uD8CE'));
+assertEquals('"___\\ud8cf"', JSON.stringify('___\uD8CF'));
+assertEquals('"___\\ud8d0"', JSON.stringify('___\uD8D0'));
+assertEquals('"___\\ud8d1"', JSON.stringify('___\uD8D1'));
+assertEquals('"___\\ud8d2"', JSON.stringify('___\uD8D2'));
+assertEquals('"___\\ud8d3"', JSON.stringify('___\uD8D3'));
+assertEquals('"___\\ud8d4"', JSON.stringify('___\uD8D4'));
+assertEquals('"___\\ud8d5"', JSON.stringify('___\uD8D5'));
+assertEquals('"___\\ud8d6"', JSON.stringify('___\uD8D6'));
+assertEquals('"___\\ud8d7"', JSON.stringify('___\uD8D7'));
+assertEquals('"___\\ud8d8"', JSON.stringify('___\uD8D8'));
+assertEquals('"___\\ud8d9"', JSON.stringify('___\uD8D9'));
+assertEquals('"___\\ud8da"', JSON.stringify('___\uD8DA'));
+assertEquals('"___\\ud8db"', JSON.stringify('___\uD8DB'));
+assertEquals('"___\\ud8dc"', JSON.stringify('___\uD8DC'));
+assertEquals('"___\\ud8dd"', JSON.stringify('___\uD8DD'));
+assertEquals('"___\\ud8de"', JSON.stringify('___\uD8DE'));
+assertEquals('"___\\ud8df"', JSON.stringify('___\uD8DF'));
+assertEquals('"___\\ud8e0"', JSON.stringify('___\uD8E0'));
+assertEquals('"___\\ud8e1"', JSON.stringify('___\uD8E1'));
+assertEquals('"___\\ud8e2"', JSON.stringify('___\uD8E2'));
+assertEquals('"___\\ud8e3"', JSON.stringify('___\uD8E3'));
+assertEquals('"___\\ud8e4"', JSON.stringify('___\uD8E4'));
+assertEquals('"___\\ud8e5"', JSON.stringify('___\uD8E5'));
+assertEquals('"___\\ud8e6"', JSON.stringify('___\uD8E6'));
+assertEquals('"___\\ud8e7"', JSON.stringify('___\uD8E7'));
+assertEquals('"___\\ud8e8"', JSON.stringify('___\uD8E8'));
+assertEquals('"___\\ud8e9"', JSON.stringify('___\uD8E9'));
+assertEquals('"___\\ud8ea"', JSON.stringify('___\uD8EA'));
+assertEquals('"___\\ud8eb"', JSON.stringify('___\uD8EB'));
+assertEquals('"___\\ud8ec"', JSON.stringify('___\uD8EC'));
+assertEquals('"___\\ud8ed"', JSON.stringify('___\uD8ED'));
+assertEquals('"___\\ud8ee"', JSON.stringify('___\uD8EE'));
+assertEquals('"___\\ud8ef"', JSON.stringify('___\uD8EF'));
+assertEquals('"___\\ud8f0"', JSON.stringify('___\uD8F0'));
+assertEquals('"___\\ud8f1"', JSON.stringify('___\uD8F1'));
+assertEquals('"___\\ud8f2"', JSON.stringify('___\uD8F2'));
+assertEquals('"___\\ud8f3"', JSON.stringify('___\uD8F3'));
+assertEquals('"___\\ud8f4"', JSON.stringify('___\uD8F4'));
+assertEquals('"___\\ud8f5"', JSON.stringify('___\uD8F5'));
+assertEquals('"___\\ud8f6"', JSON.stringify('___\uD8F6'));
+assertEquals('"___\\ud8f7"', JSON.stringify('___\uD8F7'));
+assertEquals('"___\\ud8f8"', JSON.stringify('___\uD8F8'));
+assertEquals('"___\\ud8f9"', JSON.stringify('___\uD8F9'));
+assertEquals('"___\\ud8fa"', JSON.stringify('___\uD8FA'));
+assertEquals('"___\\ud8fb"', JSON.stringify('___\uD8FB'));
+assertEquals('"___\\ud8fc"', JSON.stringify('___\uD8FC'));
+assertEquals('"___\\ud8fd"', JSON.stringify('___\uD8FD'));
+assertEquals('"___\\ud8fe"', JSON.stringify('___\uD8FE'));
+assertEquals('"___\\ud8ff"', JSON.stringify('___\uD8FF'));
+assertEquals('"___\\ud900"', JSON.stringify('___\uD900'));
+assertEquals('"___\\ud901"', JSON.stringify('___\uD901'));
+assertEquals('"___\\ud902"', JSON.stringify('___\uD902'));
+assertEquals('"___\\ud903"', JSON.stringify('___\uD903'));
+assertEquals('"___\\ud904"', JSON.stringify('___\uD904'));
+assertEquals('"___\\ud905"', JSON.stringify('___\uD905'));
+assertEquals('"___\\ud906"', JSON.stringify('___\uD906'));
+assertEquals('"___\\ud907"', JSON.stringify('___\uD907'));
+assertEquals('"___\\ud908"', JSON.stringify('___\uD908'));
+assertEquals('"___\\ud909"', JSON.stringify('___\uD909'));
+assertEquals('"___\\ud90a"', JSON.stringify('___\uD90A'));
+assertEquals('"___\\ud90b"', JSON.stringify('___\uD90B'));
+assertEquals('"___\\ud90c"', JSON.stringify('___\uD90C'));
+assertEquals('"___\\ud90d"', JSON.stringify('___\uD90D'));
+assertEquals('"___\\ud90e"', JSON.stringify('___\uD90E'));
+assertEquals('"___\\ud90f"', JSON.stringify('___\uD90F'));
+assertEquals('"___\\ud910"', JSON.stringify('___\uD910'));
+assertEquals('"___\\ud911"', JSON.stringify('___\uD911'));
+assertEquals('"___\\ud912"', JSON.stringify('___\uD912'));
+assertEquals('"___\\ud913"', JSON.stringify('___\uD913'));
+assertEquals('"___\\ud914"', JSON.stringify('___\uD914'));
+assertEquals('"___\\ud915"', JSON.stringify('___\uD915'));
+assertEquals('"___\\ud916"', JSON.stringify('___\uD916'));
+assertEquals('"___\\ud917"', JSON.stringify('___\uD917'));
+assertEquals('"___\\ud918"', JSON.stringify('___\uD918'));
+assertEquals('"___\\ud919"', JSON.stringify('___\uD919'));
+assertEquals('"___\\ud91a"', JSON.stringify('___\uD91A'));
+assertEquals('"___\\ud91b"', JSON.stringify('___\uD91B'));
+assertEquals('"___\\ud91c"', JSON.stringify('___\uD91C'));
+assertEquals('"___\\ud91d"', JSON.stringify('___\uD91D'));
+assertEquals('"___\\ud91e"', JSON.stringify('___\uD91E'));
+assertEquals('"___\\ud91f"', JSON.stringify('___\uD91F'));
+assertEquals('"___\\ud920"', JSON.stringify('___\uD920'));
+assertEquals('"___\\ud921"', JSON.stringify('___\uD921'));
+assertEquals('"___\\ud922"', JSON.stringify('___\uD922'));
+assertEquals('"___\\ud923"', JSON.stringify('___\uD923'));
+assertEquals('"___\\ud924"', JSON.stringify('___\uD924'));
+assertEquals('"___\\ud925"', JSON.stringify('___\uD925'));
+assertEquals('"___\\ud926"', JSON.stringify('___\uD926'));
+assertEquals('"___\\ud927"', JSON.stringify('___\uD927'));
+assertEquals('"___\\ud928"', JSON.stringify('___\uD928'));
+assertEquals('"___\\ud929"', JSON.stringify('___\uD929'));
+assertEquals('"___\\ud92a"', JSON.stringify('___\uD92A'));
+assertEquals('"___\\ud92b"', JSON.stringify('___\uD92B'));
+assertEquals('"___\\ud92c"', JSON.stringify('___\uD92C'));
+assertEquals('"___\\ud92d"', JSON.stringify('___\uD92D'));
+assertEquals('"___\\ud92e"', JSON.stringify('___\uD92E'));
+assertEquals('"___\\ud92f"', JSON.stringify('___\uD92F'));
+assertEquals('"___\\ud930"', JSON.stringify('___\uD930'));
+assertEquals('"___\\ud931"', JSON.stringify('___\uD931'));
+assertEquals('"___\\ud932"', JSON.stringify('___\uD932'));
+assertEquals('"___\\ud933"', JSON.stringify('___\uD933'));
+assertEquals('"___\\ud934"', JSON.stringify('___\uD934'));
+assertEquals('"___\\ud935"', JSON.stringify('___\uD935'));
+assertEquals('"___\\ud936"', JSON.stringify('___\uD936'));
+assertEquals('"___\\ud937"', JSON.stringify('___\uD937'));
+assertEquals('"___\\ud938"', JSON.stringify('___\uD938'));
+assertEquals('"___\\ud939"', JSON.stringify('___\uD939'));
+assertEquals('"___\\ud93a"', JSON.stringify('___\uD93A'));
+assertEquals('"___\\ud93b"', JSON.stringify('___\uD93B'));
+assertEquals('"___\\ud93c"', JSON.stringify('___\uD93C'));
+assertEquals('"___\\ud93d"', JSON.stringify('___\uD93D'));
+assertEquals('"___\\ud93e"', JSON.stringify('___\uD93E'));
+assertEquals('"___\\ud93f"', JSON.stringify('___\uD93F'));
+assertEquals('"___\\ud940"', JSON.stringify('___\uD940'));
+assertEquals('"___\\ud941"', JSON.stringify('___\uD941'));
+assertEquals('"___\\ud942"', JSON.stringify('___\uD942'));
+assertEquals('"___\\ud943"', JSON.stringify('___\uD943'));
+assertEquals('"___\\ud944"', JSON.stringify('___\uD944'));
+assertEquals('"___\\ud945"', JSON.stringify('___\uD945'));
+assertEquals('"___\\ud946"', JSON.stringify('___\uD946'));
+assertEquals('"___\\ud947"', JSON.stringify('___\uD947'));
+assertEquals('"___\\ud948"', JSON.stringify('___\uD948'));
+assertEquals('"___\\ud949"', JSON.stringify('___\uD949'));
+assertEquals('"___\\ud94a"', JSON.stringify('___\uD94A'));
+assertEquals('"___\\ud94b"', JSON.stringify('___\uD94B'));
+assertEquals('"___\\ud94c"', JSON.stringify('___\uD94C'));
+assertEquals('"___\\ud94d"', JSON.stringify('___\uD94D'));
+assertEquals('"___\\ud94e"', JSON.stringify('___\uD94E'));
+assertEquals('"___\\ud94f"', JSON.stringify('___\uD94F'));
+assertEquals('"___\\ud950"', JSON.stringify('___\uD950'));
+assertEquals('"___\\ud951"', JSON.stringify('___\uD951'));
+assertEquals('"___\\ud952"', JSON.stringify('___\uD952'));
+assertEquals('"___\\ud953"', JSON.stringify('___\uD953'));
+assertEquals('"___\\ud954"', JSON.stringify('___\uD954'));
+assertEquals('"___\\ud955"', JSON.stringify('___\uD955'));
+assertEquals('"___\\ud956"', JSON.stringify('___\uD956'));
+assertEquals('"___\\ud957"', JSON.stringify('___\uD957'));
+assertEquals('"___\\ud958"', JSON.stringify('___\uD958'));
+assertEquals('"___\\ud959"', JSON.stringify('___\uD959'));
+assertEquals('"___\\ud95a"', JSON.stringify('___\uD95A'));
+assertEquals('"___\\ud95b"', JSON.stringify('___\uD95B'));
+assertEquals('"___\\ud95c"', JSON.stringify('___\uD95C'));
+assertEquals('"___\\ud95d"', JSON.stringify('___\uD95D'));
+assertEquals('"___\\ud95e"', JSON.stringify('___\uD95E'));
+assertEquals('"___\\ud95f"', JSON.stringify('___\uD95F'));
+assertEquals('"___\\ud960"', JSON.stringify('___\uD960'));
+assertEquals('"___\\ud961"', JSON.stringify('___\uD961'));
+assertEquals('"___\\ud962"', JSON.stringify('___\uD962'));
+assertEquals('"___\\ud963"', JSON.stringify('___\uD963'));
+assertEquals('"___\\ud964"', JSON.stringify('___\uD964'));
+assertEquals('"___\\ud965"', JSON.stringify('___\uD965'));
+assertEquals('"___\\ud966"', JSON.stringify('___\uD966'));
+assertEquals('"___\\ud967"', JSON.stringify('___\uD967'));
+assertEquals('"___\\ud968"', JSON.stringify('___\uD968'));
+assertEquals('"___\\ud969"', JSON.stringify('___\uD969'));
+assertEquals('"___\\ud96a"', JSON.stringify('___\uD96A'));
+assertEquals('"___\\ud96b"', JSON.stringify('___\uD96B'));
+assertEquals('"___\\ud96c"', JSON.stringify('___\uD96C'));
+assertEquals('"___\\ud96d"', JSON.stringify('___\uD96D'));
+assertEquals('"___\\ud96e"', JSON.stringify('___\uD96E'));
+assertEquals('"___\\ud96f"', JSON.stringify('___\uD96F'));
+assertEquals('"___\\ud970"', JSON.stringify('___\uD970'));
+assertEquals('"___\\ud971"', JSON.stringify('___\uD971'));
+assertEquals('"___\\ud972"', JSON.stringify('___\uD972'));
+assertEquals('"___\\ud973"', JSON.stringify('___\uD973'));
+assertEquals('"___\\ud974"', JSON.stringify('___\uD974'));
+assertEquals('"___\\ud975"', JSON.stringify('___\uD975'));
+assertEquals('"___\\ud976"', JSON.stringify('___\uD976'));
+assertEquals('"___\\ud977"', JSON.stringify('___\uD977'));
+assertEquals('"___\\ud978"', JSON.stringify('___\uD978'));
+assertEquals('"___\\ud979"', JSON.stringify('___\uD979'));
+assertEquals('"___\\ud97a"', JSON.stringify('___\uD97A'));
+assertEquals('"___\\ud97b"', JSON.stringify('___\uD97B'));
+assertEquals('"___\\ud97c"', JSON.stringify('___\uD97C'));
+assertEquals('"___\\ud97d"', JSON.stringify('___\uD97D'));
+assertEquals('"___\\ud97e"', JSON.stringify('___\uD97E'));
+assertEquals('"___\\ud97f"', JSON.stringify('___\uD97F'));
+assertEquals('"___\\ud980"', JSON.stringify('___\uD980'));
+assertEquals('"___\\ud981"', JSON.stringify('___\uD981'));
+assertEquals('"___\\ud982"', JSON.stringify('___\uD982'));
+assertEquals('"___\\ud983"', JSON.stringify('___\uD983'));
+assertEquals('"___\\ud984"', JSON.stringify('___\uD984'));
+assertEquals('"___\\ud985"', JSON.stringify('___\uD985'));
+assertEquals('"___\\ud986"', JSON.stringify('___\uD986'));
+assertEquals('"___\\ud987"', JSON.stringify('___\uD987'));
+assertEquals('"___\\ud988"', JSON.stringify('___\uD988'));
+assertEquals('"___\\ud989"', JSON.stringify('___\uD989'));
+assertEquals('"___\\ud98a"', JSON.stringify('___\uD98A'));
+assertEquals('"___\\ud98b"', JSON.stringify('___\uD98B'));
+assertEquals('"___\\ud98c"', JSON.stringify('___\uD98C'));
+assertEquals('"___\\ud98d"', JSON.stringify('___\uD98D'));
+assertEquals('"___\\ud98e"', JSON.stringify('___\uD98E'));
+assertEquals('"___\\ud98f"', JSON.stringify('___\uD98F'));
+assertEquals('"___\\ud990"', JSON.stringify('___\uD990'));
+assertEquals('"___\\ud991"', JSON.stringify('___\uD991'));
+assertEquals('"___\\ud992"', JSON.stringify('___\uD992'));
+assertEquals('"___\\ud993"', JSON.stringify('___\uD993'));
+assertEquals('"___\\ud994"', JSON.stringify('___\uD994'));
+assertEquals('"___\\ud995"', JSON.stringify('___\uD995'));
+assertEquals('"___\\ud996"', JSON.stringify('___\uD996'));
+assertEquals('"___\\ud997"', JSON.stringify('___\uD997'));
+assertEquals('"___\\ud998"', JSON.stringify('___\uD998'));
+assertEquals('"___\\ud999"', JSON.stringify('___\uD999'));
+assertEquals('"___\\ud99a"', JSON.stringify('___\uD99A'));
+assertEquals('"___\\ud99b"', JSON.stringify('___\uD99B'));
+assertEquals('"___\\ud99c"', JSON.stringify('___\uD99C'));
+assertEquals('"___\\ud99d"', JSON.stringify('___\uD99D'));
+assertEquals('"___\\ud99e"', JSON.stringify('___\uD99E'));
+assertEquals('"___\\ud99f"', JSON.stringify('___\uD99F'));
+assertEquals('"___\\ud9a0"', JSON.stringify('___\uD9A0'));
+assertEquals('"___\\ud9a1"', JSON.stringify('___\uD9A1'));
+assertEquals('"___\\ud9a2"', JSON.stringify('___\uD9A2'));
+assertEquals('"___\\ud9a3"', JSON.stringify('___\uD9A3'));
+assertEquals('"___\\ud9a4"', JSON.stringify('___\uD9A4'));
+assertEquals('"___\\ud9a5"', JSON.stringify('___\uD9A5'));
+assertEquals('"___\\ud9a6"', JSON.stringify('___\uD9A6'));
+assertEquals('"___\\ud9a7"', JSON.stringify('___\uD9A7'));
+assertEquals('"___\\ud9a8"', JSON.stringify('___\uD9A8'));
+assertEquals('"___\\ud9a9"', JSON.stringify('___\uD9A9'));
+assertEquals('"___\\ud9aa"', JSON.stringify('___\uD9AA'));
+assertEquals('"___\\ud9ab"', JSON.stringify('___\uD9AB'));
+assertEquals('"___\\ud9ac"', JSON.stringify('___\uD9AC'));
+assertEquals('"___\\ud9ad"', JSON.stringify('___\uD9AD'));
+assertEquals('"___\\ud9ae"', JSON.stringify('___\uD9AE'));
+assertEquals('"___\\ud9af"', JSON.stringify('___\uD9AF'));
+assertEquals('"___\\ud9b0"', JSON.stringify('___\uD9B0'));
+assertEquals('"___\\ud9b1"', JSON.stringify('___\uD9B1'));
+assertEquals('"___\\ud9b2"', JSON.stringify('___\uD9B2'));
+assertEquals('"___\\ud9b3"', JSON.stringify('___\uD9B3'));
+assertEquals('"___\\ud9b4"', JSON.stringify('___\uD9B4'));
+assertEquals('"___\\ud9b5"', JSON.stringify('___\uD9B5'));
+assertEquals('"___\\ud9b6"', JSON.stringify('___\uD9B6'));
+assertEquals('"___\\ud9b7"', JSON.stringify('___\uD9B7'));
+assertEquals('"___\\ud9b8"', JSON.stringify('___\uD9B8'));
+assertEquals('"___\\ud9b9"', JSON.stringify('___\uD9B9'));
+assertEquals('"___\\ud9ba"', JSON.stringify('___\uD9BA'));
+assertEquals('"___\\ud9bb"', JSON.stringify('___\uD9BB'));
+assertEquals('"___\\ud9bc"', JSON.stringify('___\uD9BC'));
+assertEquals('"___\\ud9bd"', JSON.stringify('___\uD9BD'));
+assertEquals('"___\\ud9be"', JSON.stringify('___\uD9BE'));
+assertEquals('"___\\ud9bf"', JSON.stringify('___\uD9BF'));
+assertEquals('"___\\ud9c0"', JSON.stringify('___\uD9C0'));
+assertEquals('"___\\ud9c1"', JSON.stringify('___\uD9C1'));
+assertEquals('"___\\ud9c2"', JSON.stringify('___\uD9C2'));
+assertEquals('"___\\ud9c3"', JSON.stringify('___\uD9C3'));
+assertEquals('"___\\ud9c4"', JSON.stringify('___\uD9C4'));
+assertEquals('"___\\ud9c5"', JSON.stringify('___\uD9C5'));
+assertEquals('"___\\ud9c6"', JSON.stringify('___\uD9C6'));
+assertEquals('"___\\ud9c7"', JSON.stringify('___\uD9C7'));
+assertEquals('"___\\ud9c8"', JSON.stringify('___\uD9C8'));
+assertEquals('"___\\ud9c9"', JSON.stringify('___\uD9C9'));
+assertEquals('"___\\ud9ca"', JSON.stringify('___\uD9CA'));
+assertEquals('"___\\ud9cb"', JSON.stringify('___\uD9CB'));
+assertEquals('"___\\ud9cc"', JSON.stringify('___\uD9CC'));
+assertEquals('"___\\ud9cd"', JSON.stringify('___\uD9CD'));
+assertEquals('"___\\ud9ce"', JSON.stringify('___\uD9CE'));
+assertEquals('"___\\ud9cf"', JSON.stringify('___\uD9CF'));
+assertEquals('"___\\ud9d0"', JSON.stringify('___\uD9D0'));
+assertEquals('"___\\ud9d1"', JSON.stringify('___\uD9D1'));
+assertEquals('"___\\ud9d2"', JSON.stringify('___\uD9D2'));
+assertEquals('"___\\ud9d3"', JSON.stringify('___\uD9D3'));
+assertEquals('"___\\ud9d4"', JSON.stringify('___\uD9D4'));
+assertEquals('"___\\ud9d5"', JSON.stringify('___\uD9D5'));
+assertEquals('"___\\ud9d6"', JSON.stringify('___\uD9D6'));
+assertEquals('"___\\ud9d7"', JSON.stringify('___\uD9D7'));
+assertEquals('"___\\ud9d8"', JSON.stringify('___\uD9D8'));
+assertEquals('"___\\ud9d9"', JSON.stringify('___\uD9D9'));
+assertEquals('"___\\ud9da"', JSON.stringify('___\uD9DA'));
+assertEquals('"___\\ud9db"', JSON.stringify('___\uD9DB'));
+assertEquals('"___\\ud9dc"', JSON.stringify('___\uD9DC'));
+assertEquals('"___\\ud9dd"', JSON.stringify('___\uD9DD'));
+assertEquals('"___\\ud9de"', JSON.stringify('___\uD9DE'));
+assertEquals('"___\\ud9df"', JSON.stringify('___\uD9DF'));
+assertEquals('"___\\ud9e0"', JSON.stringify('___\uD9E0'));
+assertEquals('"___\\ud9e1"', JSON.stringify('___\uD9E1'));
+assertEquals('"___\\ud9e2"', JSON.stringify('___\uD9E2'));
+assertEquals('"___\\ud9e3"', JSON.stringify('___\uD9E3'));
+assertEquals('"___\\ud9e4"', JSON.stringify('___\uD9E4'));
+assertEquals('"___\\ud9e5"', JSON.stringify('___\uD9E5'));
+assertEquals('"___\\ud9e6"', JSON.stringify('___\uD9E6'));
+assertEquals('"___\\ud9e7"', JSON.stringify('___\uD9E7'));
+assertEquals('"___\\ud9e8"', JSON.stringify('___\uD9E8'));
+assertEquals('"___\\ud9e9"', JSON.stringify('___\uD9E9'));
+assertEquals('"___\\ud9ea"', JSON.stringify('___\uD9EA'));
+assertEquals('"___\\ud9eb"', JSON.stringify('___\uD9EB'));
+assertEquals('"___\\ud9ec"', JSON.stringify('___\uD9EC'));
+assertEquals('"___\\ud9ed"', JSON.stringify('___\uD9ED'));
+assertEquals('"___\\ud9ee"', JSON.stringify('___\uD9EE'));
+assertEquals('"___\\ud9ef"', JSON.stringify('___\uD9EF'));
+assertEquals('"___\\ud9f0"', JSON.stringify('___\uD9F0'));
+assertEquals('"___\\ud9f1"', JSON.stringify('___\uD9F1'));
+assertEquals('"___\\ud9f2"', JSON.stringify('___\uD9F2'));
+assertEquals('"___\\ud9f3"', JSON.stringify('___\uD9F3'));
+assertEquals('"___\\ud9f4"', JSON.stringify('___\uD9F4'));
+assertEquals('"___\\ud9f5"', JSON.stringify('___\uD9F5'));
+assertEquals('"___\\ud9f6"', JSON.stringify('___\uD9F6'));
+assertEquals('"___\\ud9f7"', JSON.stringify('___\uD9F7'));
+assertEquals('"___\\ud9f8"', JSON.stringify('___\uD9F8'));
+assertEquals('"___\\ud9f9"', JSON.stringify('___\uD9F9'));
+assertEquals('"___\\ud9fa"', JSON.stringify('___\uD9FA'));
+assertEquals('"___\\ud9fb"', JSON.stringify('___\uD9FB'));
+assertEquals('"___\\ud9fc"', JSON.stringify('___\uD9FC'));
+assertEquals('"___\\ud9fd"', JSON.stringify('___\uD9FD'));
+assertEquals('"___\\ud9fe"', JSON.stringify('___\uD9FE'));
+assertEquals('"___\\ud9ff"', JSON.stringify('___\uD9FF'));
+assertEquals('"___\\uda00"', JSON.stringify('___\uDA00'));
+assertEquals('"___\\uda01"', JSON.stringify('___\uDA01'));
+assertEquals('"___\\uda02"', JSON.stringify('___\uDA02'));
+assertEquals('"___\\uda03"', JSON.stringify('___\uDA03'));
+assertEquals('"___\\uda04"', JSON.stringify('___\uDA04'));
+assertEquals('"___\\uda05"', JSON.stringify('___\uDA05'));
+assertEquals('"___\\uda06"', JSON.stringify('___\uDA06'));
+assertEquals('"___\\uda07"', JSON.stringify('___\uDA07'));
+assertEquals('"___\\uda08"', JSON.stringify('___\uDA08'));
+assertEquals('"___\\uda09"', JSON.stringify('___\uDA09'));
+assertEquals('"___\\uda0a"', JSON.stringify('___\uDA0A'));
+assertEquals('"___\\uda0b"', JSON.stringify('___\uDA0B'));
+assertEquals('"___\\uda0c"', JSON.stringify('___\uDA0C'));
+assertEquals('"___\\uda0d"', JSON.stringify('___\uDA0D'));
+assertEquals('"___\\uda0e"', JSON.stringify('___\uDA0E'));
+assertEquals('"___\\uda0f"', JSON.stringify('___\uDA0F'));
+assertEquals('"___\\uda10"', JSON.stringify('___\uDA10'));
+assertEquals('"___\\uda11"', JSON.stringify('___\uDA11'));
+assertEquals('"___\\uda12"', JSON.stringify('___\uDA12'));
+assertEquals('"___\\uda13"', JSON.stringify('___\uDA13'));
+assertEquals('"___\\uda14"', JSON.stringify('___\uDA14'));
+assertEquals('"___\\uda15"', JSON.stringify('___\uDA15'));
+assertEquals('"___\\uda16"', JSON.stringify('___\uDA16'));
+assertEquals('"___\\uda17"', JSON.stringify('___\uDA17'));
+assertEquals('"___\\uda18"', JSON.stringify('___\uDA18'));
+assertEquals('"___\\uda19"', JSON.stringify('___\uDA19'));
+assertEquals('"___\\uda1a"', JSON.stringify('___\uDA1A'));
+assertEquals('"___\\uda1b"', JSON.stringify('___\uDA1B'));
+assertEquals('"___\\uda1c"', JSON.stringify('___\uDA1C'));
+assertEquals('"___\\uda1d"', JSON.stringify('___\uDA1D'));
+assertEquals('"___\\uda1e"', JSON.stringify('___\uDA1E'));
+assertEquals('"___\\uda1f"', JSON.stringify('___\uDA1F'));
+assertEquals('"___\\uda20"', JSON.stringify('___\uDA20'));
+assertEquals('"___\\uda21"', JSON.stringify('___\uDA21'));
+assertEquals('"___\\uda22"', JSON.stringify('___\uDA22'));
+assertEquals('"___\\uda23"', JSON.stringify('___\uDA23'));
+assertEquals('"___\\uda24"', JSON.stringify('___\uDA24'));
+assertEquals('"___\\uda25"', JSON.stringify('___\uDA25'));
+assertEquals('"___\\uda26"', JSON.stringify('___\uDA26'));
+assertEquals('"___\\uda27"', JSON.stringify('___\uDA27'));
+assertEquals('"___\\uda28"', JSON.stringify('___\uDA28'));
+assertEquals('"___\\uda29"', JSON.stringify('___\uDA29'));
+assertEquals('"___\\uda2a"', JSON.stringify('___\uDA2A'));
+assertEquals('"___\\uda2b"', JSON.stringify('___\uDA2B'));
+assertEquals('"___\\uda2c"', JSON.stringify('___\uDA2C'));
+assertEquals('"___\\uda2d"', JSON.stringify('___\uDA2D'));
+assertEquals('"___\\uda2e"', JSON.stringify('___\uDA2E'));
+assertEquals('"___\\uda2f"', JSON.stringify('___\uDA2F'));
+assertEquals('"___\\uda30"', JSON.stringify('___\uDA30'));
+assertEquals('"___\\uda31"', JSON.stringify('___\uDA31'));
+assertEquals('"___\\uda32"', JSON.stringify('___\uDA32'));
+assertEquals('"___\\uda33"', JSON.stringify('___\uDA33'));
+assertEquals('"___\\uda34"', JSON.stringify('___\uDA34'));
+assertEquals('"___\\uda35"', JSON.stringify('___\uDA35'));
+assertEquals('"___\\uda36"', JSON.stringify('___\uDA36'));
+assertEquals('"___\\uda37"', JSON.stringify('___\uDA37'));
+assertEquals('"___\\uda38"', JSON.stringify('___\uDA38'));
+assertEquals('"___\\uda39"', JSON.stringify('___\uDA39'));
+assertEquals('"___\\uda3a"', JSON.stringify('___\uDA3A'));
+assertEquals('"___\\uda3b"', JSON.stringify('___\uDA3B'));
+assertEquals('"___\\uda3c"', JSON.stringify('___\uDA3C'));
+assertEquals('"___\\uda3d"', JSON.stringify('___\uDA3D'));
+assertEquals('"___\\uda3e"', JSON.stringify('___\uDA3E'));
+assertEquals('"___\\uda3f"', JSON.stringify('___\uDA3F'));
+assertEquals('"___\\uda40"', JSON.stringify('___\uDA40'));
+assertEquals('"___\\uda41"', JSON.stringify('___\uDA41'));
+assertEquals('"___\\uda42"', JSON.stringify('___\uDA42'));
+assertEquals('"___\\uda43"', JSON.stringify('___\uDA43'));
+assertEquals('"___\\uda44"', JSON.stringify('___\uDA44'));
+assertEquals('"___\\uda45"', JSON.stringify('___\uDA45'));
+assertEquals('"___\\uda46"', JSON.stringify('___\uDA46'));
+assertEquals('"___\\uda47"', JSON.stringify('___\uDA47'));
+assertEquals('"___\\uda48"', JSON.stringify('___\uDA48'));
+assertEquals('"___\\uda49"', JSON.stringify('___\uDA49'));
+assertEquals('"___\\uda4a"', JSON.stringify('___\uDA4A'));
+assertEquals('"___\\uda4b"', JSON.stringify('___\uDA4B'));
+assertEquals('"___\\uda4c"', JSON.stringify('___\uDA4C'));
+assertEquals('"___\\uda4d"', JSON.stringify('___\uDA4D'));
+assertEquals('"___\\uda4e"', JSON.stringify('___\uDA4E'));
+assertEquals('"___\\uda4f"', JSON.stringify('___\uDA4F'));
+assertEquals('"___\\uda50"', JSON.stringify('___\uDA50'));
+assertEquals('"___\\uda51"', JSON.stringify('___\uDA51'));
+assertEquals('"___\\uda52"', JSON.stringify('___\uDA52'));
+assertEquals('"___\\uda53"', JSON.stringify('___\uDA53'));
+assertEquals('"___\\uda54"', JSON.stringify('___\uDA54'));
+assertEquals('"___\\uda55"', JSON.stringify('___\uDA55'));
+assertEquals('"___\\uda56"', JSON.stringify('___\uDA56'));
+assertEquals('"___\\uda57"', JSON.stringify('___\uDA57'));
+assertEquals('"___\\uda58"', JSON.stringify('___\uDA58'));
+assertEquals('"___\\uda59"', JSON.stringify('___\uDA59'));
+assertEquals('"___\\uda5a"', JSON.stringify('___\uDA5A'));
+assertEquals('"___\\uda5b"', JSON.stringify('___\uDA5B'));
+assertEquals('"___\\uda5c"', JSON.stringify('___\uDA5C'));
+assertEquals('"___\\uda5d"', JSON.stringify('___\uDA5D'));
+assertEquals('"___\\uda5e"', JSON.stringify('___\uDA5E'));
+assertEquals('"___\\uda5f"', JSON.stringify('___\uDA5F'));
+assertEquals('"___\\uda60"', JSON.stringify('___\uDA60'));
+assertEquals('"___\\uda61"', JSON.stringify('___\uDA61'));
+assertEquals('"___\\uda62"', JSON.stringify('___\uDA62'));
+assertEquals('"___\\uda63"', JSON.stringify('___\uDA63'));
+assertEquals('"___\\uda64"', JSON.stringify('___\uDA64'));
+assertEquals('"___\\uda65"', JSON.stringify('___\uDA65'));
+assertEquals('"___\\uda66"', JSON.stringify('___\uDA66'));
+assertEquals('"___\\uda67"', JSON.stringify('___\uDA67'));
+assertEquals('"___\\uda68"', JSON.stringify('___\uDA68'));
+assertEquals('"___\\uda69"', JSON.stringify('___\uDA69'));
+assertEquals('"___\\uda6a"', JSON.stringify('___\uDA6A'));
+assertEquals('"___\\uda6b"', JSON.stringify('___\uDA6B'));
+assertEquals('"___\\uda6c"', JSON.stringify('___\uDA6C'));
+assertEquals('"___\\uda6d"', JSON.stringify('___\uDA6D'));
+assertEquals('"___\\uda6e"', JSON.stringify('___\uDA6E'));
+assertEquals('"___\\uda6f"', JSON.stringify('___\uDA6F'));
+assertEquals('"___\\uda70"', JSON.stringify('___\uDA70'));
+assertEquals('"___\\uda71"', JSON.stringify('___\uDA71'));
+assertEquals('"___\\uda72"', JSON.stringify('___\uDA72'));
+assertEquals('"___\\uda73"', JSON.stringify('___\uDA73'));
+assertEquals('"___\\uda74"', JSON.stringify('___\uDA74'));
+assertEquals('"___\\uda75"', JSON.stringify('___\uDA75'));
+assertEquals('"___\\uda76"', JSON.stringify('___\uDA76'));
+assertEquals('"___\\uda77"', JSON.stringify('___\uDA77'));
+assertEquals('"___\\uda78"', JSON.stringify('___\uDA78'));
+assertEquals('"___\\uda79"', JSON.stringify('___\uDA79'));
+assertEquals('"___\\uda7a"', JSON.stringify('___\uDA7A'));
+assertEquals('"___\\uda7b"', JSON.stringify('___\uDA7B'));
+assertEquals('"___\\uda7c"', JSON.stringify('___\uDA7C'));
+assertEquals('"___\\uda7d"', JSON.stringify('___\uDA7D'));
+assertEquals('"___\\uda7e"', JSON.stringify('___\uDA7E'));
+assertEquals('"___\\uda7f"', JSON.stringify('___\uDA7F'));
+assertEquals('"___\\uda80"', JSON.stringify('___\uDA80'));
+assertEquals('"___\\uda81"', JSON.stringify('___\uDA81'));
+assertEquals('"___\\uda82"', JSON.stringify('___\uDA82'));
+assertEquals('"___\\uda83"', JSON.stringify('___\uDA83'));
+assertEquals('"___\\uda84"', JSON.stringify('___\uDA84'));
+assertEquals('"___\\uda85"', JSON.stringify('___\uDA85'));
+assertEquals('"___\\uda86"', JSON.stringify('___\uDA86'));
+assertEquals('"___\\uda87"', JSON.stringify('___\uDA87'));
+assertEquals('"___\\uda88"', JSON.stringify('___\uDA88'));
+assertEquals('"___\\uda89"', JSON.stringify('___\uDA89'));
+assertEquals('"___\\uda8a"', JSON.stringify('___\uDA8A'));
+assertEquals('"___\\uda8b"', JSON.stringify('___\uDA8B'));
+assertEquals('"___\\uda8c"', JSON.stringify('___\uDA8C'));
+assertEquals('"___\\uda8d"', JSON.stringify('___\uDA8D'));
+assertEquals('"___\\uda8e"', JSON.stringify('___\uDA8E'));
+assertEquals('"___\\uda8f"', JSON.stringify('___\uDA8F'));
+assertEquals('"___\\uda90"', JSON.stringify('___\uDA90'));
+assertEquals('"___\\uda91"', JSON.stringify('___\uDA91'));
+assertEquals('"___\\uda92"', JSON.stringify('___\uDA92'));
+assertEquals('"___\\uda93"', JSON.stringify('___\uDA93'));
+assertEquals('"___\\uda94"', JSON.stringify('___\uDA94'));
+assertEquals('"___\\uda95"', JSON.stringify('___\uDA95'));
+assertEquals('"___\\uda96"', JSON.stringify('___\uDA96'));
+assertEquals('"___\\uda97"', JSON.stringify('___\uDA97'));
+assertEquals('"___\\uda98"', JSON.stringify('___\uDA98'));
+assertEquals('"___\\uda99"', JSON.stringify('___\uDA99'));
+assertEquals('"___\\uda9a"', JSON.stringify('___\uDA9A'));
+assertEquals('"___\\uda9b"', JSON.stringify('___\uDA9B'));
+assertEquals('"___\\uda9c"', JSON.stringify('___\uDA9C'));
+assertEquals('"___\\uda9d"', JSON.stringify('___\uDA9D'));
+assertEquals('"___\\uda9e"', JSON.stringify('___\uDA9E'));
+assertEquals('"___\\uda9f"', JSON.stringify('___\uDA9F'));
+assertEquals('"___\\udaa0"', JSON.stringify('___\uDAA0'));
+assertEquals('"___\\udaa1"', JSON.stringify('___\uDAA1'));
+assertEquals('"___\\udaa2"', JSON.stringify('___\uDAA2'));
+assertEquals('"___\\udaa3"', JSON.stringify('___\uDAA3'));
+assertEquals('"___\\udaa4"', JSON.stringify('___\uDAA4'));
+assertEquals('"___\\udaa5"', JSON.stringify('___\uDAA5'));
+assertEquals('"___\\udaa6"', JSON.stringify('___\uDAA6'));
+assertEquals('"___\\udaa7"', JSON.stringify('___\uDAA7'));
+assertEquals('"___\\udaa8"', JSON.stringify('___\uDAA8'));
+assertEquals('"___\\udaa9"', JSON.stringify('___\uDAA9'));
+assertEquals('"___\\udaaa"', JSON.stringify('___\uDAAA'));
+assertEquals('"___\\udaab"', JSON.stringify('___\uDAAB'));
+assertEquals('"___\\udaac"', JSON.stringify('___\uDAAC'));
+assertEquals('"___\\udaad"', JSON.stringify('___\uDAAD'));
+assertEquals('"___\\udaae"', JSON.stringify('___\uDAAE'));
+assertEquals('"___\\udaaf"', JSON.stringify('___\uDAAF'));
+assertEquals('"___\\udab0"', JSON.stringify('___\uDAB0'));
+assertEquals('"___\\udab1"', JSON.stringify('___\uDAB1'));
+assertEquals('"___\\udab2"', JSON.stringify('___\uDAB2'));
+assertEquals('"___\\udab3"', JSON.stringify('___\uDAB3'));
+assertEquals('"___\\udab4"', JSON.stringify('___\uDAB4'));
+assertEquals('"___\\udab5"', JSON.stringify('___\uDAB5'));
+assertEquals('"___\\udab6"', JSON.stringify('___\uDAB6'));
+assertEquals('"___\\udab7"', JSON.stringify('___\uDAB7'));
+assertEquals('"___\\udab8"', JSON.stringify('___\uDAB8'));
+assertEquals('"___\\udab9"', JSON.stringify('___\uDAB9'));
+assertEquals('"___\\udaba"', JSON.stringify('___\uDABA'));
+assertEquals('"___\\udabb"', JSON.stringify('___\uDABB'));
+assertEquals('"___\\udabc"', JSON.stringify('___\uDABC'));
+assertEquals('"___\\udabd"', JSON.stringify('___\uDABD'));
+assertEquals('"___\\udabe"', JSON.stringify('___\uDABE'));
+assertEquals('"___\\udabf"', JSON.stringify('___\uDABF'));
+assertEquals('"___\\udac0"', JSON.stringify('___\uDAC0'));
+assertEquals('"___\\udac1"', JSON.stringify('___\uDAC1'));
+assertEquals('"___\\udac2"', JSON.stringify('___\uDAC2'));
+assertEquals('"___\\udac3"', JSON.stringify('___\uDAC3'));
+assertEquals('"___\\udac4"', JSON.stringify('___\uDAC4'));
+assertEquals('"___\\udac5"', JSON.stringify('___\uDAC5'));
+assertEquals('"___\\udac6"', JSON.stringify('___\uDAC6'));
+assertEquals('"___\\udac7"', JSON.stringify('___\uDAC7'));
+assertEquals('"___\\udac8"', JSON.stringify('___\uDAC8'));
+assertEquals('"___\\udac9"', JSON.stringify('___\uDAC9'));
+assertEquals('"___\\udaca"', JSON.stringify('___\uDACA'));
+assertEquals('"___\\udacb"', JSON.stringify('___\uDACB'));
+assertEquals('"___\\udacc"', JSON.stringify('___\uDACC'));
+assertEquals('"___\\udacd"', JSON.stringify('___\uDACD'));
+assertEquals('"___\\udace"', JSON.stringify('___\uDACE'));
+assertEquals('"___\\udacf"', JSON.stringify('___\uDACF'));
+assertEquals('"___\\udad0"', JSON.stringify('___\uDAD0'));
+assertEquals('"___\\udad1"', JSON.stringify('___\uDAD1'));
+assertEquals('"___\\udad2"', JSON.stringify('___\uDAD2'));
+assertEquals('"___\\udad3"', JSON.stringify('___\uDAD3'));
+assertEquals('"___\\udad4"', JSON.stringify('___\uDAD4'));
+assertEquals('"___\\udad5"', JSON.stringify('___\uDAD5'));
+assertEquals('"___\\udad6"', JSON.stringify('___\uDAD6'));
+assertEquals('"___\\udad7"', JSON.stringify('___\uDAD7'));
+assertEquals('"___\\udad8"', JSON.stringify('___\uDAD8'));
+assertEquals('"___\\udad9"', JSON.stringify('___\uDAD9'));
+assertEquals('"___\\udada"', JSON.stringify('___\uDADA'));
+assertEquals('"___\\udadb"', JSON.stringify('___\uDADB'));
+assertEquals('"___\\udadc"', JSON.stringify('___\uDADC'));
+assertEquals('"___\\udadd"', JSON.stringify('___\uDADD'));
+assertEquals('"___\\udade"', JSON.stringify('___\uDADE'));
+assertEquals('"___\\udadf"', JSON.stringify('___\uDADF'));
+assertEquals('"___\\udae0"', JSON.stringify('___\uDAE0'));
+assertEquals('"___\\udae1"', JSON.stringify('___\uDAE1'));
+assertEquals('"___\\udae2"', JSON.stringify('___\uDAE2'));
+assertEquals('"___\\udae3"', JSON.stringify('___\uDAE3'));
+assertEquals('"___\\udae4"', JSON.stringify('___\uDAE4'));
+assertEquals('"___\\udae5"', JSON.stringify('___\uDAE5'));
+assertEquals('"___\\udae6"', JSON.stringify('___\uDAE6'));
+assertEquals('"___\\udae7"', JSON.stringify('___\uDAE7'));
+assertEquals('"___\\udae8"', JSON.stringify('___\uDAE8'));
+assertEquals('"___\\udae9"', JSON.stringify('___\uDAE9'));
+assertEquals('"___\\udaea"', JSON.stringify('___\uDAEA'));
+assertEquals('"___\\udaeb"', JSON.stringify('___\uDAEB'));
+assertEquals('"___\\udaec"', JSON.stringify('___\uDAEC'));
+assertEquals('"___\\udaed"', JSON.stringify('___\uDAED'));
+assertEquals('"___\\udaee"', JSON.stringify('___\uDAEE'));
+assertEquals('"___\\udaef"', JSON.stringify('___\uDAEF'));
+assertEquals('"___\\udaf0"', JSON.stringify('___\uDAF0'));
+assertEquals('"___\\udaf1"', JSON.stringify('___\uDAF1'));
+assertEquals('"___\\udaf2"', JSON.stringify('___\uDAF2'));
+assertEquals('"___\\udaf3"', JSON.stringify('___\uDAF3'));
+assertEquals('"___\\udaf4"', JSON.stringify('___\uDAF4'));
+assertEquals('"___\\udaf5"', JSON.stringify('___\uDAF5'));
+assertEquals('"___\\udaf6"', JSON.stringify('___\uDAF6'));
+assertEquals('"___\\udaf7"', JSON.stringify('___\uDAF7'));
+assertEquals('"___\\udaf8"', JSON.stringify('___\uDAF8'));
+assertEquals('"___\\udaf9"', JSON.stringify('___\uDAF9'));
+assertEquals('"___\\udafa"', JSON.stringify('___\uDAFA'));
+assertEquals('"___\\udafb"', JSON.stringify('___\uDAFB'));
+assertEquals('"___\\udafc"', JSON.stringify('___\uDAFC'));
+assertEquals('"___\\udafd"', JSON.stringify('___\uDAFD'));
+assertEquals('"___\\udafe"', JSON.stringify('___\uDAFE'));
+assertEquals('"___\\udaff"', JSON.stringify('___\uDAFF'));
+assertEquals('"___\\udb00"', JSON.stringify('___\uDB00'));
+assertEquals('"___\\udb01"', JSON.stringify('___\uDB01'));
+assertEquals('"___\\udb02"', JSON.stringify('___\uDB02'));
+assertEquals('"___\\udb03"', JSON.stringify('___\uDB03'));
+assertEquals('"___\\udb04"', JSON.stringify('___\uDB04'));
+assertEquals('"___\\udb05"', JSON.stringify('___\uDB05'));
+assertEquals('"___\\udb06"', JSON.stringify('___\uDB06'));
+assertEquals('"___\\udb07"', JSON.stringify('___\uDB07'));
+assertEquals('"___\\udb08"', JSON.stringify('___\uDB08'));
+assertEquals('"___\\udb09"', JSON.stringify('___\uDB09'));
+assertEquals('"___\\udb0a"', JSON.stringify('___\uDB0A'));
+assertEquals('"___\\udb0b"', JSON.stringify('___\uDB0B'));
+assertEquals('"___\\udb0c"', JSON.stringify('___\uDB0C'));
+assertEquals('"___\\udb0d"', JSON.stringify('___\uDB0D'));
+assertEquals('"___\\udb0e"', JSON.stringify('___\uDB0E'));
+assertEquals('"___\\udb0f"', JSON.stringify('___\uDB0F'));
+assertEquals('"___\\udb10"', JSON.stringify('___\uDB10'));
+assertEquals('"___\\udb11"', JSON.stringify('___\uDB11'));
+assertEquals('"___\\udb12"', JSON.stringify('___\uDB12'));
+assertEquals('"___\\udb13"', JSON.stringify('___\uDB13'));
+assertEquals('"___\\udb14"', JSON.stringify('___\uDB14'));
+assertEquals('"___\\udb15"', JSON.stringify('___\uDB15'));
+assertEquals('"___\\udb16"', JSON.stringify('___\uDB16'));
+assertEquals('"___\\udb17"', JSON.stringify('___\uDB17'));
+assertEquals('"___\\udb18"', JSON.stringify('___\uDB18'));
+assertEquals('"___\\udb19"', JSON.stringify('___\uDB19'));
+assertEquals('"___\\udb1a"', JSON.stringify('___\uDB1A'));
+assertEquals('"___\\udb1b"', JSON.stringify('___\uDB1B'));
+assertEquals('"___\\udb1c"', JSON.stringify('___\uDB1C'));
+assertEquals('"___\\udb1d"', JSON.stringify('___\uDB1D'));
+assertEquals('"___\\udb1e"', JSON.stringify('___\uDB1E'));
+assertEquals('"___\\udb1f"', JSON.stringify('___\uDB1F'));
+assertEquals('"___\\udb20"', JSON.stringify('___\uDB20'));
+assertEquals('"___\\udb21"', JSON.stringify('___\uDB21'));
+assertEquals('"___\\udb22"', JSON.stringify('___\uDB22'));
+assertEquals('"___\\udb23"', JSON.stringify('___\uDB23'));
+assertEquals('"___\\udb24"', JSON.stringify('___\uDB24'));
+assertEquals('"___\\udb25"', JSON.stringify('___\uDB25'));
+assertEquals('"___\\udb26"', JSON.stringify('___\uDB26'));
+assertEquals('"___\\udb27"', JSON.stringify('___\uDB27'));
+assertEquals('"___\\udb28"', JSON.stringify('___\uDB28'));
+assertEquals('"___\\udb29"', JSON.stringify('___\uDB29'));
+assertEquals('"___\\udb2a"', JSON.stringify('___\uDB2A'));
+assertEquals('"___\\udb2b"', JSON.stringify('___\uDB2B'));
+assertEquals('"___\\udb2c"', JSON.stringify('___\uDB2C'));
+assertEquals('"___\\udb2d"', JSON.stringify('___\uDB2D'));
+assertEquals('"___\\udb2e"', JSON.stringify('___\uDB2E'));
+assertEquals('"___\\udb2f"', JSON.stringify('___\uDB2F'));
+assertEquals('"___\\udb30"', JSON.stringify('___\uDB30'));
+assertEquals('"___\\udb31"', JSON.stringify('___\uDB31'));
+assertEquals('"___\\udb32"', JSON.stringify('___\uDB32'));
+assertEquals('"___\\udb33"', JSON.stringify('___\uDB33'));
+assertEquals('"___\\udb34"', JSON.stringify('___\uDB34'));
+assertEquals('"___\\udb35"', JSON.stringify('___\uDB35'));
+assertEquals('"___\\udb36"', JSON.stringify('___\uDB36'));
+assertEquals('"___\\udb37"', JSON.stringify('___\uDB37'));
+assertEquals('"___\\udb38"', JSON.stringify('___\uDB38'));
+assertEquals('"___\\udb39"', JSON.stringify('___\uDB39'));
+assertEquals('"___\\udb3a"', JSON.stringify('___\uDB3A'));
+assertEquals('"___\\udb3b"', JSON.stringify('___\uDB3B'));
+assertEquals('"___\\udb3c"', JSON.stringify('___\uDB3C'));
+assertEquals('"___\\udb3d"', JSON.stringify('___\uDB3D'));
+assertEquals('"___\\udb3e"', JSON.stringify('___\uDB3E'));
+assertEquals('"___\\udb3f"', JSON.stringify('___\uDB3F'));
+assertEquals('"___\\udb40"', JSON.stringify('___\uDB40'));
+assertEquals('"___\\udb41"', JSON.stringify('___\uDB41'));
+assertEquals('"___\\udb42"', JSON.stringify('___\uDB42'));
+assertEquals('"___\\udb43"', JSON.stringify('___\uDB43'));
+assertEquals('"___\\udb44"', JSON.stringify('___\uDB44'));
+assertEquals('"___\\udb45"', JSON.stringify('___\uDB45'));
+assertEquals('"___\\udb46"', JSON.stringify('___\uDB46'));
+assertEquals('"___\\udb47"', JSON.stringify('___\uDB47'));
+assertEquals('"___\\udb48"', JSON.stringify('___\uDB48'));
+assertEquals('"___\\udb49"', JSON.stringify('___\uDB49'));
+assertEquals('"___\\udb4a"', JSON.stringify('___\uDB4A'));
+assertEquals('"___\\udb4b"', JSON.stringify('___\uDB4B'));
+assertEquals('"___\\udb4c"', JSON.stringify('___\uDB4C'));
+assertEquals('"___\\udb4d"', JSON.stringify('___\uDB4D'));
+assertEquals('"___\\udb4e"', JSON.stringify('___\uDB4E'));
+assertEquals('"___\\udb4f"', JSON.stringify('___\uDB4F'));
+assertEquals('"___\\udb50"', JSON.stringify('___\uDB50'));
+assertEquals('"___\\udb51"', JSON.stringify('___\uDB51'));
+assertEquals('"___\\udb52"', JSON.stringify('___\uDB52'));
+assertEquals('"___\\udb53"', JSON.stringify('___\uDB53'));
+assertEquals('"___\\udb54"', JSON.stringify('___\uDB54'));
+assertEquals('"___\\udb55"', JSON.stringify('___\uDB55'));
+assertEquals('"___\\udb56"', JSON.stringify('___\uDB56'));
+assertEquals('"___\\udb57"', JSON.stringify('___\uDB57'));
+assertEquals('"___\\udb58"', JSON.stringify('___\uDB58'));
+assertEquals('"___\\udb59"', JSON.stringify('___\uDB59'));
+assertEquals('"___\\udb5a"', JSON.stringify('___\uDB5A'));
+assertEquals('"___\\udb5b"', JSON.stringify('___\uDB5B'));
+assertEquals('"___\\udb5c"', JSON.stringify('___\uDB5C'));
+assertEquals('"___\\udb5d"', JSON.stringify('___\uDB5D'));
+assertEquals('"___\\udb5e"', JSON.stringify('___\uDB5E'));
+assertEquals('"___\\udb5f"', JSON.stringify('___\uDB5F'));
+assertEquals('"___\\udb60"', JSON.stringify('___\uDB60'));
+assertEquals('"___\\udb61"', JSON.stringify('___\uDB61'));
+assertEquals('"___\\udb62"', JSON.stringify('___\uDB62'));
+assertEquals('"___\\udb63"', JSON.stringify('___\uDB63'));
+assertEquals('"___\\udb64"', JSON.stringify('___\uDB64'));
+assertEquals('"___\\udb65"', JSON.stringify('___\uDB65'));
+assertEquals('"___\\udb66"', JSON.stringify('___\uDB66'));
+assertEquals('"___\\udb67"', JSON.stringify('___\uDB67'));
+assertEquals('"___\\udb68"', JSON.stringify('___\uDB68'));
+assertEquals('"___\\udb69"', JSON.stringify('___\uDB69'));
+assertEquals('"___\\udb6a"', JSON.stringify('___\uDB6A'));
+assertEquals('"___\\udb6b"', JSON.stringify('___\uDB6B'));
+assertEquals('"___\\udb6c"', JSON.stringify('___\uDB6C'));
+assertEquals('"___\\udb6d"', JSON.stringify('___\uDB6D'));
+assertEquals('"___\\udb6e"', JSON.stringify('___\uDB6E'));
+assertEquals('"___\\udb6f"', JSON.stringify('___\uDB6F'));
+assertEquals('"___\\udb70"', JSON.stringify('___\uDB70'));
+assertEquals('"___\\udb71"', JSON.stringify('___\uDB71'));
+assertEquals('"___\\udb72"', JSON.stringify('___\uDB72'));
+assertEquals('"___\\udb73"', JSON.stringify('___\uDB73'));
+assertEquals('"___\\udb74"', JSON.stringify('___\uDB74'));
+assertEquals('"___\\udb75"', JSON.stringify('___\uDB75'));
+assertEquals('"___\\udb76"', JSON.stringify('___\uDB76'));
+assertEquals('"___\\udb77"', JSON.stringify('___\uDB77'));
+assertEquals('"___\\udb78"', JSON.stringify('___\uDB78'));
+assertEquals('"___\\udb79"', JSON.stringify('___\uDB79'));
+assertEquals('"___\\udb7a"', JSON.stringify('___\uDB7A'));
+assertEquals('"___\\udb7b"', JSON.stringify('___\uDB7B'));
+assertEquals('"___\\udb7c"', JSON.stringify('___\uDB7C'));
+assertEquals('"___\\udb7d"', JSON.stringify('___\uDB7D'));
+assertEquals('"___\\udb7e"', JSON.stringify('___\uDB7E'));
+assertEquals('"___\\udb7f"', JSON.stringify('___\uDB7F'));
+assertEquals('"___\\udb80"', JSON.stringify('___\uDB80'));
+assertEquals('"___\\udb81"', JSON.stringify('___\uDB81'));
+assertEquals('"___\\udb82"', JSON.stringify('___\uDB82'));
+assertEquals('"___\\udb83"', JSON.stringify('___\uDB83'));
+assertEquals('"___\\udb84"', JSON.stringify('___\uDB84'));
+assertEquals('"___\\udb85"', JSON.stringify('___\uDB85'));
+assertEquals('"___\\udb86"', JSON.stringify('___\uDB86'));
+assertEquals('"___\\udb87"', JSON.stringify('___\uDB87'));
+assertEquals('"___\\udb88"', JSON.stringify('___\uDB88'));
+assertEquals('"___\\udb89"', JSON.stringify('___\uDB89'));
+assertEquals('"___\\udb8a"', JSON.stringify('___\uDB8A'));
+assertEquals('"___\\udb8b"', JSON.stringify('___\uDB8B'));
+assertEquals('"___\\udb8c"', JSON.stringify('___\uDB8C'));
+assertEquals('"___\\udb8d"', JSON.stringify('___\uDB8D'));
+assertEquals('"___\\udb8e"', JSON.stringify('___\uDB8E'));
+assertEquals('"___\\udb8f"', JSON.stringify('___\uDB8F'));
+assertEquals('"___\\udb90"', JSON.stringify('___\uDB90'));
+assertEquals('"___\\udb91"', JSON.stringify('___\uDB91'));
+assertEquals('"___\\udb92"', JSON.stringify('___\uDB92'));
+assertEquals('"___\\udb93"', JSON.stringify('___\uDB93'));
+assertEquals('"___\\udb94"', JSON.stringify('___\uDB94'));
+assertEquals('"___\\udb95"', JSON.stringify('___\uDB95'));
+assertEquals('"___\\udb96"', JSON.stringify('___\uDB96'));
+assertEquals('"___\\udb97"', JSON.stringify('___\uDB97'));
+assertEquals('"___\\udb98"', JSON.stringify('___\uDB98'));
+assertEquals('"___\\udb99"', JSON.stringify('___\uDB99'));
+assertEquals('"___\\udb9a"', JSON.stringify('___\uDB9A'));
+assertEquals('"___\\udb9b"', JSON.stringify('___\uDB9B'));
+assertEquals('"___\\udb9c"', JSON.stringify('___\uDB9C'));
+assertEquals('"___\\udb9d"', JSON.stringify('___\uDB9D'));
+assertEquals('"___\\udb9e"', JSON.stringify('___\uDB9E'));
+assertEquals('"___\\udb9f"', JSON.stringify('___\uDB9F'));
+assertEquals('"___\\udba0"', JSON.stringify('___\uDBA0'));
+assertEquals('"___\\udba1"', JSON.stringify('___\uDBA1'));
+assertEquals('"___\\udba2"', JSON.stringify('___\uDBA2'));
+assertEquals('"___\\udba3"', JSON.stringify('___\uDBA3'));
+assertEquals('"___\\udba4"', JSON.stringify('___\uDBA4'));
+assertEquals('"___\\udba5"', JSON.stringify('___\uDBA5'));
+assertEquals('"___\\udba6"', JSON.stringify('___\uDBA6'));
+assertEquals('"___\\udba7"', JSON.stringify('___\uDBA7'));
+assertEquals('"___\\udba8"', JSON.stringify('___\uDBA8'));
+assertEquals('"___\\udba9"', JSON.stringify('___\uDBA9'));
+assertEquals('"___\\udbaa"', JSON.stringify('___\uDBAA'));
+assertEquals('"___\\udbab"', JSON.stringify('___\uDBAB'));
+assertEquals('"___\\udbac"', JSON.stringify('___\uDBAC'));
+assertEquals('"___\\udbad"', JSON.stringify('___\uDBAD'));
+assertEquals('"___\\udbae"', JSON.stringify('___\uDBAE'));
+assertEquals('"___\\udbaf"', JSON.stringify('___\uDBAF'));
+assertEquals('"___\\udbb0"', JSON.stringify('___\uDBB0'));
+assertEquals('"___\\udbb1"', JSON.stringify('___\uDBB1'));
+assertEquals('"___\\udbb2"', JSON.stringify('___\uDBB2'));
+assertEquals('"___\\udbb3"', JSON.stringify('___\uDBB3'));
+assertEquals('"___\\udbb4"', JSON.stringify('___\uDBB4'));
+assertEquals('"___\\udbb5"', JSON.stringify('___\uDBB5'));
+assertEquals('"___\\udbb6"', JSON.stringify('___\uDBB6'));
+assertEquals('"___\\udbb7"', JSON.stringify('___\uDBB7'));
+assertEquals('"___\\udbb8"', JSON.stringify('___\uDBB8'));
+assertEquals('"___\\udbb9"', JSON.stringify('___\uDBB9'));
+assertEquals('"___\\udbba"', JSON.stringify('___\uDBBA'));
+assertEquals('"___\\udbbb"', JSON.stringify('___\uDBBB'));
+assertEquals('"___\\udbbc"', JSON.stringify('___\uDBBC'));
+assertEquals('"___\\udbbd"', JSON.stringify('___\uDBBD'));
+assertEquals('"___\\udbbe"', JSON.stringify('___\uDBBE'));
+assertEquals('"___\\udbbf"', JSON.stringify('___\uDBBF'));
+assertEquals('"___\\udbc0"', JSON.stringify('___\uDBC0'));
+assertEquals('"___\\udbc1"', JSON.stringify('___\uDBC1'));
+assertEquals('"___\\udbc2"', JSON.stringify('___\uDBC2'));
+assertEquals('"___\\udbc3"', JSON.stringify('___\uDBC3'));
+assertEquals('"___\\udbc4"', JSON.stringify('___\uDBC4'));
+assertEquals('"___\\udbc5"', JSON.stringify('___\uDBC5'));
+assertEquals('"___\\udbc6"', JSON.stringify('___\uDBC6'));
+assertEquals('"___\\udbc7"', JSON.stringify('___\uDBC7'));
+assertEquals('"___\\udbc8"', JSON.stringify('___\uDBC8'));
+assertEquals('"___\\udbc9"', JSON.stringify('___\uDBC9'));
+assertEquals('"___\\udbca"', JSON.stringify('___\uDBCA'));
+assertEquals('"___\\udbcb"', JSON.stringify('___\uDBCB'));
+assertEquals('"___\\udbcc"', JSON.stringify('___\uDBCC'));
+assertEquals('"___\\udbcd"', JSON.stringify('___\uDBCD'));
+assertEquals('"___\\udbce"', JSON.stringify('___\uDBCE'));
+assertEquals('"___\\udbcf"', JSON.stringify('___\uDBCF'));
+assertEquals('"___\\udbd0"', JSON.stringify('___\uDBD0'));
+assertEquals('"___\\udbd1"', JSON.stringify('___\uDBD1'));
+assertEquals('"___\\udbd2"', JSON.stringify('___\uDBD2'));
+assertEquals('"___\\udbd3"', JSON.stringify('___\uDBD3'));
+assertEquals('"___\\udbd4"', JSON.stringify('___\uDBD4'));
+assertEquals('"___\\udbd5"', JSON.stringify('___\uDBD5'));
+assertEquals('"___\\udbd6"', JSON.stringify('___\uDBD6'));
+assertEquals('"___\\udbd7"', JSON.stringify('___\uDBD7'));
+assertEquals('"___\\udbd8"', JSON.stringify('___\uDBD8'));
+assertEquals('"___\\udbd9"', JSON.stringify('___\uDBD9'));
+assertEquals('"___\\udbda"', JSON.stringify('___\uDBDA'));
+assertEquals('"___\\udbdb"', JSON.stringify('___\uDBDB'));
+assertEquals('"___\\udbdc"', JSON.stringify('___\uDBDC'));
+assertEquals('"___\\udbdd"', JSON.stringify('___\uDBDD'));
+assertEquals('"___\\udbde"', JSON.stringify('___\uDBDE'));
+assertEquals('"___\\udbdf"', JSON.stringify('___\uDBDF'));
+assertEquals('"___\\udbe0"', JSON.stringify('___\uDBE0'));
+assertEquals('"___\\udbe1"', JSON.stringify('___\uDBE1'));
+assertEquals('"___\\udbe2"', JSON.stringify('___\uDBE2'));
+assertEquals('"___\\udbe3"', JSON.stringify('___\uDBE3'));
+assertEquals('"___\\udbe4"', JSON.stringify('___\uDBE4'));
+assertEquals('"___\\udbe5"', JSON.stringify('___\uDBE5'));
+assertEquals('"___\\udbe6"', JSON.stringify('___\uDBE6'));
+assertEquals('"___\\udbe7"', JSON.stringify('___\uDBE7'));
+assertEquals('"___\\udbe8"', JSON.stringify('___\uDBE8'));
+assertEquals('"___\\udbe9"', JSON.stringify('___\uDBE9'));
+assertEquals('"___\\udbea"', JSON.stringify('___\uDBEA'));
+assertEquals('"___\\udbeb"', JSON.stringify('___\uDBEB'));
+assertEquals('"___\\udbec"', JSON.stringify('___\uDBEC'));
+assertEquals('"___\\udbed"', JSON.stringify('___\uDBED'));
+assertEquals('"___\\udbee"', JSON.stringify('___\uDBEE'));
+assertEquals('"___\\udbef"', JSON.stringify('___\uDBEF'));
+assertEquals('"___\\udbf0"', JSON.stringify('___\uDBF0'));
+assertEquals('"___\\udbf1"', JSON.stringify('___\uDBF1'));
+assertEquals('"___\\udbf2"', JSON.stringify('___\uDBF2'));
+assertEquals('"___\\udbf3"', JSON.stringify('___\uDBF3'));
+assertEquals('"___\\udbf4"', JSON.stringify('___\uDBF4'));
+assertEquals('"___\\udbf5"', JSON.stringify('___\uDBF5'));
+assertEquals('"___\\udbf6"', JSON.stringify('___\uDBF6'));
+assertEquals('"___\\udbf7"', JSON.stringify('___\uDBF7'));
+assertEquals('"___\\udbf8"', JSON.stringify('___\uDBF8'));
+assertEquals('"___\\udbf9"', JSON.stringify('___\uDBF9'));
+assertEquals('"___\\udbfa"', JSON.stringify('___\uDBFA'));
+assertEquals('"___\\udbfb"', JSON.stringify('___\uDBFB'));
+assertEquals('"___\\udbfc"', JSON.stringify('___\uDBFC'));
+assertEquals('"___\\udbfd"', JSON.stringify('___\uDBFD'));
+assertEquals('"___\\udbfe"', JSON.stringify('___\uDBFE'));
+assertEquals('"___\\udbff"', JSON.stringify('___\uDBFF'));
+assertEquals('"___\\udc00"', JSON.stringify('___\uDC00'));
+assertEquals('"___\\udc01"', JSON.stringify('___\uDC01'));
+assertEquals('"___\\udc02"', JSON.stringify('___\uDC02'));
+assertEquals('"___\\udc03"', JSON.stringify('___\uDC03'));
+assertEquals('"___\\udc04"', JSON.stringify('___\uDC04'));
+assertEquals('"___\\udc05"', JSON.stringify('___\uDC05'));
+assertEquals('"___\\udc06"', JSON.stringify('___\uDC06'));
+assertEquals('"___\\udc07"', JSON.stringify('___\uDC07'));
+assertEquals('"___\\udc08"', JSON.stringify('___\uDC08'));
+assertEquals('"___\\udc09"', JSON.stringify('___\uDC09'));
+assertEquals('"___\\udc0a"', JSON.stringify('___\uDC0A'));
+assertEquals('"___\\udc0b"', JSON.stringify('___\uDC0B'));
+assertEquals('"___\\udc0c"', JSON.stringify('___\uDC0C'));
+assertEquals('"___\\udc0d"', JSON.stringify('___\uDC0D'));
+assertEquals('"___\\udc0e"', JSON.stringify('___\uDC0E'));
+assertEquals('"___\\udc0f"', JSON.stringify('___\uDC0F'));
+assertEquals('"___\\udc10"', JSON.stringify('___\uDC10'));
+assertEquals('"___\\udc11"', JSON.stringify('___\uDC11'));
+assertEquals('"___\\udc12"', JSON.stringify('___\uDC12'));
+assertEquals('"___\\udc13"', JSON.stringify('___\uDC13'));
+assertEquals('"___\\udc14"', JSON.stringify('___\uDC14'));
+assertEquals('"___\\udc15"', JSON.stringify('___\uDC15'));
+assertEquals('"___\\udc16"', JSON.stringify('___\uDC16'));
+assertEquals('"___\\udc17"', JSON.stringify('___\uDC17'));
+assertEquals('"___\\udc18"', JSON.stringify('___\uDC18'));
+assertEquals('"___\\udc19"', JSON.stringify('___\uDC19'));
+assertEquals('"___\\udc1a"', JSON.stringify('___\uDC1A'));
+assertEquals('"___\\udc1b"', JSON.stringify('___\uDC1B'));
+assertEquals('"___\\udc1c"', JSON.stringify('___\uDC1C'));
+assertEquals('"___\\udc1d"', JSON.stringify('___\uDC1D'));
+assertEquals('"___\\udc1e"', JSON.stringify('___\uDC1E'));
+assertEquals('"___\\udc1f"', JSON.stringify('___\uDC1F'));
+assertEquals('"___\\udc20"', JSON.stringify('___\uDC20'));
+assertEquals('"___\\udc21"', JSON.stringify('___\uDC21'));
+assertEquals('"___\\udc22"', JSON.stringify('___\uDC22'));
+assertEquals('"___\\udc23"', JSON.stringify('___\uDC23'));
+assertEquals('"___\\udc24"', JSON.stringify('___\uDC24'));
+assertEquals('"___\\udc25"', JSON.stringify('___\uDC25'));
+assertEquals('"___\\udc26"', JSON.stringify('___\uDC26'));
+assertEquals('"___\\udc27"', JSON.stringify('___\uDC27'));
+assertEquals('"___\\udc28"', JSON.stringify('___\uDC28'));
+assertEquals('"___\\udc29"', JSON.stringify('___\uDC29'));
+assertEquals('"___\\udc2a"', JSON.stringify('___\uDC2A'));
+assertEquals('"___\\udc2b"', JSON.stringify('___\uDC2B'));
+assertEquals('"___\\udc2c"', JSON.stringify('___\uDC2C'));
+assertEquals('"___\\udc2d"', JSON.stringify('___\uDC2D'));
+assertEquals('"___\\udc2e"', JSON.stringify('___\uDC2E'));
+assertEquals('"___\\udc2f"', JSON.stringify('___\uDC2F'));
+assertEquals('"___\\udc30"', JSON.stringify('___\uDC30'));
+assertEquals('"___\\udc31"', JSON.stringify('___\uDC31'));
+assertEquals('"___\\udc32"', JSON.stringify('___\uDC32'));
+assertEquals('"___\\udc33"', JSON.stringify('___\uDC33'));
+assertEquals('"___\\udc34"', JSON.stringify('___\uDC34'));
+assertEquals('"___\\udc35"', JSON.stringify('___\uDC35'));
+assertEquals('"___\\udc36"', JSON.stringify('___\uDC36'));
+assertEquals('"___\\udc37"', JSON.stringify('___\uDC37'));
+assertEquals('"___\\udc38"', JSON.stringify('___\uDC38'));
+assertEquals('"___\\udc39"', JSON.stringify('___\uDC39'));
+assertEquals('"___\\udc3a"', JSON.stringify('___\uDC3A'));
+assertEquals('"___\\udc3b"', JSON.stringify('___\uDC3B'));
+assertEquals('"___\\udc3c"', JSON.stringify('___\uDC3C'));
+assertEquals('"___\\udc3d"', JSON.stringify('___\uDC3D'));
+assertEquals('"___\\udc3e"', JSON.stringify('___\uDC3E'));
+assertEquals('"___\\udc3f"', JSON.stringify('___\uDC3F'));
+assertEquals('"___\\udc40"', JSON.stringify('___\uDC40'));
+assertEquals('"___\\udc41"', JSON.stringify('___\uDC41'));
+assertEquals('"___\\udc42"', JSON.stringify('___\uDC42'));
+assertEquals('"___\\udc43"', JSON.stringify('___\uDC43'));
+assertEquals('"___\\udc44"', JSON.stringify('___\uDC44'));
+assertEquals('"___\\udc45"', JSON.stringify('___\uDC45'));
+assertEquals('"___\\udc46"', JSON.stringify('___\uDC46'));
+assertEquals('"___\\udc47"', JSON.stringify('___\uDC47'));
+assertEquals('"___\\udc48"', JSON.stringify('___\uDC48'));
+assertEquals('"___\\udc49"', JSON.stringify('___\uDC49'));
+assertEquals('"___\\udc4a"', JSON.stringify('___\uDC4A'));
+assertEquals('"___\\udc4b"', JSON.stringify('___\uDC4B'));
+assertEquals('"___\\udc4c"', JSON.stringify('___\uDC4C'));
+assertEquals('"___\\udc4d"', JSON.stringify('___\uDC4D'));
+assertEquals('"___\\udc4e"', JSON.stringify('___\uDC4E'));
+assertEquals('"___\\udc4f"', JSON.stringify('___\uDC4F'));
+assertEquals('"___\\udc50"', JSON.stringify('___\uDC50'));
+assertEquals('"___\\udc51"', JSON.stringify('___\uDC51'));
+assertEquals('"___\\udc52"', JSON.stringify('___\uDC52'));
+assertEquals('"___\\udc53"', JSON.stringify('___\uDC53'));
+assertEquals('"___\\udc54"', JSON.stringify('___\uDC54'));
+assertEquals('"___\\udc55"', JSON.stringify('___\uDC55'));
+assertEquals('"___\\udc56"', JSON.stringify('___\uDC56'));
+assertEquals('"___\\udc57"', JSON.stringify('___\uDC57'));
+assertEquals('"___\\udc58"', JSON.stringify('___\uDC58'));
+assertEquals('"___\\udc59"', JSON.stringify('___\uDC59'));
+assertEquals('"___\\udc5a"', JSON.stringify('___\uDC5A'));
+assertEquals('"___\\udc5b"', JSON.stringify('___\uDC5B'));
+assertEquals('"___\\udc5c"', JSON.stringify('___\uDC5C'));
+assertEquals('"___\\udc5d"', JSON.stringify('___\uDC5D'));
+assertEquals('"___\\udc5e"', JSON.stringify('___\uDC5E'));
+assertEquals('"___\\udc5f"', JSON.stringify('___\uDC5F'));
+assertEquals('"___\\udc60"', JSON.stringify('___\uDC60'));
+assertEquals('"___\\udc61"', JSON.stringify('___\uDC61'));
+assertEquals('"___\\udc62"', JSON.stringify('___\uDC62'));
+assertEquals('"___\\udc63"', JSON.stringify('___\uDC63'));
+assertEquals('"___\\udc64"', JSON.stringify('___\uDC64'));
+assertEquals('"___\\udc65"', JSON.stringify('___\uDC65'));
+assertEquals('"___\\udc66"', JSON.stringify('___\uDC66'));
+assertEquals('"___\\udc67"', JSON.stringify('___\uDC67'));
+assertEquals('"___\\udc68"', JSON.stringify('___\uDC68'));
+assertEquals('"___\\udc69"', JSON.stringify('___\uDC69'));
+assertEquals('"___\\udc6a"', JSON.stringify('___\uDC6A'));
+assertEquals('"___\\udc6b"', JSON.stringify('___\uDC6B'));
+assertEquals('"___\\udc6c"', JSON.stringify('___\uDC6C'));
+assertEquals('"___\\udc6d"', JSON.stringify('___\uDC6D'));
+assertEquals('"___\\udc6e"', JSON.stringify('___\uDC6E'));
+assertEquals('"___\\udc6f"', JSON.stringify('___\uDC6F'));
+assertEquals('"___\\udc70"', JSON.stringify('___\uDC70'));
+assertEquals('"___\\udc71"', JSON.stringify('___\uDC71'));
+assertEquals('"___\\udc72"', JSON.stringify('___\uDC72'));
+assertEquals('"___\\udc73"', JSON.stringify('___\uDC73'));
+assertEquals('"___\\udc74"', JSON.stringify('___\uDC74'));
+assertEquals('"___\\udc75"', JSON.stringify('___\uDC75'));
+assertEquals('"___\\udc76"', JSON.stringify('___\uDC76'));
+assertEquals('"___\\udc77"', JSON.stringify('___\uDC77'));
+assertEquals('"___\\udc78"', JSON.stringify('___\uDC78'));
+assertEquals('"___\\udc79"', JSON.stringify('___\uDC79'));
+assertEquals('"___\\udc7a"', JSON.stringify('___\uDC7A'));
+assertEquals('"___\\udc7b"', JSON.stringify('___\uDC7B'));
+assertEquals('"___\\udc7c"', JSON.stringify('___\uDC7C'));
+assertEquals('"___\\udc7d"', JSON.stringify('___\uDC7D'));
+assertEquals('"___\\udc7e"', JSON.stringify('___\uDC7E'));
+assertEquals('"___\\udc7f"', JSON.stringify('___\uDC7F'));
+assertEquals('"___\\udc80"', JSON.stringify('___\uDC80'));
+assertEquals('"___\\udc81"', JSON.stringify('___\uDC81'));
+assertEquals('"___\\udc82"', JSON.stringify('___\uDC82'));
+assertEquals('"___\\udc83"', JSON.stringify('___\uDC83'));
+assertEquals('"___\\udc84"', JSON.stringify('___\uDC84'));
+assertEquals('"___\\udc85"', JSON.stringify('___\uDC85'));
+assertEquals('"___\\udc86"', JSON.stringify('___\uDC86'));
+assertEquals('"___\\udc87"', JSON.stringify('___\uDC87'));
+assertEquals('"___\\udc88"', JSON.stringify('___\uDC88'));
+assertEquals('"___\\udc89"', JSON.stringify('___\uDC89'));
+assertEquals('"___\\udc8a"', JSON.stringify('___\uDC8A'));
+assertEquals('"___\\udc8b"', JSON.stringify('___\uDC8B'));
+assertEquals('"___\\udc8c"', JSON.stringify('___\uDC8C'));
+assertEquals('"___\\udc8d"', JSON.stringify('___\uDC8D'));
+assertEquals('"___\\udc8e"', JSON.stringify('___\uDC8E'));
+assertEquals('"___\\udc8f"', JSON.stringify('___\uDC8F'));
+assertEquals('"___\\udc90"', JSON.stringify('___\uDC90'));
+assertEquals('"___\\udc91"', JSON.stringify('___\uDC91'));
+assertEquals('"___\\udc92"', JSON.stringify('___\uDC92'));
+assertEquals('"___\\udc93"', JSON.stringify('___\uDC93'));
+assertEquals('"___\\udc94"', JSON.stringify('___\uDC94'));
+assertEquals('"___\\udc95"', JSON.stringify('___\uDC95'));
+assertEquals('"___\\udc96"', JSON.stringify('___\uDC96'));
+assertEquals('"___\\udc97"', JSON.stringify('___\uDC97'));
+assertEquals('"___\\udc98"', JSON.stringify('___\uDC98'));
+assertEquals('"___\\udc99"', JSON.stringify('___\uDC99'));
+assertEquals('"___\\udc9a"', JSON.stringify('___\uDC9A'));
+assertEquals('"___\\udc9b"', JSON.stringify('___\uDC9B'));
+assertEquals('"___\\udc9c"', JSON.stringify('___\uDC9C'));
+assertEquals('"___\\udc9d"', JSON.stringify('___\uDC9D'));
+assertEquals('"___\\udc9e"', JSON.stringify('___\uDC9E'));
+assertEquals('"___\\udc9f"', JSON.stringify('___\uDC9F'));
+assertEquals('"___\\udca0"', JSON.stringify('___\uDCA0'));
+assertEquals('"___\\udca1"', JSON.stringify('___\uDCA1'));
+assertEquals('"___\\udca2"', JSON.stringify('___\uDCA2'));
+assertEquals('"___\\udca3"', JSON.stringify('___\uDCA3'));
+assertEquals('"___\\udca4"', JSON.stringify('___\uDCA4'));
+assertEquals('"___\\udca5"', JSON.stringify('___\uDCA5'));
+assertEquals('"___\\udca6"', JSON.stringify('___\uDCA6'));
+assertEquals('"___\\udca7"', JSON.stringify('___\uDCA7'));
+assertEquals('"___\\udca8"', JSON.stringify('___\uDCA8'));
+assertEquals('"___\\udca9"', JSON.stringify('___\uDCA9'));
+assertEquals('"___\\udcaa"', JSON.stringify('___\uDCAA'));
+assertEquals('"___\\udcab"', JSON.stringify('___\uDCAB'));
+assertEquals('"___\\udcac"', JSON.stringify('___\uDCAC'));
+assertEquals('"___\\udcad"', JSON.stringify('___\uDCAD'));
+assertEquals('"___\\udcae"', JSON.stringify('___\uDCAE'));
+assertEquals('"___\\udcaf"', JSON.stringify('___\uDCAF'));
+assertEquals('"___\\udcb0"', JSON.stringify('___\uDCB0'));
+assertEquals('"___\\udcb1"', JSON.stringify('___\uDCB1'));
+assertEquals('"___\\udcb2"', JSON.stringify('___\uDCB2'));
+assertEquals('"___\\udcb3"', JSON.stringify('___\uDCB3'));
+assertEquals('"___\\udcb4"', JSON.stringify('___\uDCB4'));
+assertEquals('"___\\udcb5"', JSON.stringify('___\uDCB5'));
+assertEquals('"___\\udcb6"', JSON.stringify('___\uDCB6'));
+assertEquals('"___\\udcb7"', JSON.stringify('___\uDCB7'));
+assertEquals('"___\\udcb8"', JSON.stringify('___\uDCB8'));
+assertEquals('"___\\udcb9"', JSON.stringify('___\uDCB9'));
+assertEquals('"___\\udcba"', JSON.stringify('___\uDCBA'));
+assertEquals('"___\\udcbb"', JSON.stringify('___\uDCBB'));
+assertEquals('"___\\udcbc"', JSON.stringify('___\uDCBC'));
+assertEquals('"___\\udcbd"', JSON.stringify('___\uDCBD'));
+assertEquals('"___\\udcbe"', JSON.stringify('___\uDCBE'));
+assertEquals('"___\\udcbf"', JSON.stringify('___\uDCBF'));
+assertEquals('"___\\udcc0"', JSON.stringify('___\uDCC0'));
+assertEquals('"___\\udcc1"', JSON.stringify('___\uDCC1'));
+assertEquals('"___\\udcc2"', JSON.stringify('___\uDCC2'));
+assertEquals('"___\\udcc3"', JSON.stringify('___\uDCC3'));
+assertEquals('"___\\udcc4"', JSON.stringify('___\uDCC4'));
+assertEquals('"___\\udcc5"', JSON.stringify('___\uDCC5'));
+assertEquals('"___\\udcc6"', JSON.stringify('___\uDCC6'));
+assertEquals('"___\\udcc7"', JSON.stringify('___\uDCC7'));
+assertEquals('"___\\udcc8"', JSON.stringify('___\uDCC8'));
+assertEquals('"___\\udcc9"', JSON.stringify('___\uDCC9'));
+assertEquals('"___\\udcca"', JSON.stringify('___\uDCCA'));
+assertEquals('"___\\udccb"', JSON.stringify('___\uDCCB'));
+assertEquals('"___\\udccc"', JSON.stringify('___\uDCCC'));
+assertEquals('"___\\udccd"', JSON.stringify('___\uDCCD'));
+assertEquals('"___\\udcce"', JSON.stringify('___\uDCCE'));
+assertEquals('"___\\udccf"', JSON.stringify('___\uDCCF'));
+assertEquals('"___\\udcd0"', JSON.stringify('___\uDCD0'));
+assertEquals('"___\\udcd1"', JSON.stringify('___\uDCD1'));
+assertEquals('"___\\udcd2"', JSON.stringify('___\uDCD2'));
+assertEquals('"___\\udcd3"', JSON.stringify('___\uDCD3'));
+assertEquals('"___\\udcd4"', JSON.stringify('___\uDCD4'));
+assertEquals('"___\\udcd5"', JSON.stringify('___\uDCD5'));
+assertEquals('"___\\udcd6"', JSON.stringify('___\uDCD6'));
+assertEquals('"___\\udcd7"', JSON.stringify('___\uDCD7'));
+assertEquals('"___\\udcd8"', JSON.stringify('___\uDCD8'));
+assertEquals('"___\\udcd9"', JSON.stringify('___\uDCD9'));
+assertEquals('"___\\udcda"', JSON.stringify('___\uDCDA'));
+assertEquals('"___\\udcdb"', JSON.stringify('___\uDCDB'));
+assertEquals('"___\\udcdc"', JSON.stringify('___\uDCDC'));
+assertEquals('"___\\udcdd"', JSON.stringify('___\uDCDD'));
+assertEquals('"___\\udcde"', JSON.stringify('___\uDCDE'));
+assertEquals('"___\\udcdf"', JSON.stringify('___\uDCDF'));
+assertEquals('"___\\udce0"', JSON.stringify('___\uDCE0'));
+assertEquals('"___\\udce1"', JSON.stringify('___\uDCE1'));
+assertEquals('"___\\udce2"', JSON.stringify('___\uDCE2'));
+assertEquals('"___\\udce3"', JSON.stringify('___\uDCE3'));
+assertEquals('"___\\udce4"', JSON.stringify('___\uDCE4'));
+assertEquals('"___\\udce5"', JSON.stringify('___\uDCE5'));
+assertEquals('"___\\udce6"', JSON.stringify('___\uDCE6'));
+assertEquals('"___\\udce7"', JSON.stringify('___\uDCE7'));
+assertEquals('"___\\udce8"', JSON.stringify('___\uDCE8'));
+assertEquals('"___\\udce9"', JSON.stringify('___\uDCE9'));
+assertEquals('"___\\udcea"', JSON.stringify('___\uDCEA'));
+assertEquals('"___\\udceb"', JSON.stringify('___\uDCEB'));
+assertEquals('"___\\udcec"', JSON.stringify('___\uDCEC'));
+assertEquals('"___\\udced"', JSON.stringify('___\uDCED'));
+assertEquals('"___\\udcee"', JSON.stringify('___\uDCEE'));
+assertEquals('"___\\udcef"', JSON.stringify('___\uDCEF'));
+assertEquals('"___\\udcf0"', JSON.stringify('___\uDCF0'));
+assertEquals('"___\\udcf1"', JSON.stringify('___\uDCF1'));
+assertEquals('"___\\udcf2"', JSON.stringify('___\uDCF2'));
+assertEquals('"___\\udcf3"', JSON.stringify('___\uDCF3'));
+assertEquals('"___\\udcf4"', JSON.stringify('___\uDCF4'));
+assertEquals('"___\\udcf5"', JSON.stringify('___\uDCF5'));
+assertEquals('"___\\udcf6"', JSON.stringify('___\uDCF6'));
+assertEquals('"___\\udcf7"', JSON.stringify('___\uDCF7'));
+assertEquals('"___\\udcf8"', JSON.stringify('___\uDCF8'));
+assertEquals('"___\\udcf9"', JSON.stringify('___\uDCF9'));
+assertEquals('"___\\udcfa"', JSON.stringify('___\uDCFA'));
+assertEquals('"___\\udcfb"', JSON.stringify('___\uDCFB'));
+assertEquals('"___\\udcfc"', JSON.stringify('___\uDCFC'));
+assertEquals('"___\\udcfd"', JSON.stringify('___\uDCFD'));
+assertEquals('"___\\udcfe"', JSON.stringify('___\uDCFE'));
+assertEquals('"___\\udcff"', JSON.stringify('___\uDCFF'));
+assertEquals('"___\\udd00"', JSON.stringify('___\uDD00'));
+assertEquals('"___\\udd01"', JSON.stringify('___\uDD01'));
+assertEquals('"___\\udd02"', JSON.stringify('___\uDD02'));
+assertEquals('"___\\udd03"', JSON.stringify('___\uDD03'));
+assertEquals('"___\\udd04"', JSON.stringify('___\uDD04'));
+assertEquals('"___\\udd05"', JSON.stringify('___\uDD05'));
+assertEquals('"___\\udd06"', JSON.stringify('___\uDD06'));
+assertEquals('"___\\udd07"', JSON.stringify('___\uDD07'));
+assertEquals('"___\\udd08"', JSON.stringify('___\uDD08'));
+assertEquals('"___\\udd09"', JSON.stringify('___\uDD09'));
+assertEquals('"___\\udd0a"', JSON.stringify('___\uDD0A'));
+assertEquals('"___\\udd0b"', JSON.stringify('___\uDD0B'));
+assertEquals('"___\\udd0c"', JSON.stringify('___\uDD0C'));
+assertEquals('"___\\udd0d"', JSON.stringify('___\uDD0D'));
+assertEquals('"___\\udd0e"', JSON.stringify('___\uDD0E'));
+assertEquals('"___\\udd0f"', JSON.stringify('___\uDD0F'));
+assertEquals('"___\\udd10"', JSON.stringify('___\uDD10'));
+assertEquals('"___\\udd11"', JSON.stringify('___\uDD11'));
+assertEquals('"___\\udd12"', JSON.stringify('___\uDD12'));
+assertEquals('"___\\udd13"', JSON.stringify('___\uDD13'));
+assertEquals('"___\\udd14"', JSON.stringify('___\uDD14'));
+assertEquals('"___\\udd15"', JSON.stringify('___\uDD15'));
+assertEquals('"___\\udd16"', JSON.stringify('___\uDD16'));
+assertEquals('"___\\udd17"', JSON.stringify('___\uDD17'));
+assertEquals('"___\\udd18"', JSON.stringify('___\uDD18'));
+assertEquals('"___\\udd19"', JSON.stringify('___\uDD19'));
+assertEquals('"___\\udd1a"', JSON.stringify('___\uDD1A'));
+assertEquals('"___\\udd1b"', JSON.stringify('___\uDD1B'));
+assertEquals('"___\\udd1c"', JSON.stringify('___\uDD1C'));
+assertEquals('"___\\udd1d"', JSON.stringify('___\uDD1D'));
+assertEquals('"___\\udd1e"', JSON.stringify('___\uDD1E'));
+assertEquals('"___\\udd1f"', JSON.stringify('___\uDD1F'));
+assertEquals('"___\\udd20"', JSON.stringify('___\uDD20'));
+assertEquals('"___\\udd21"', JSON.stringify('___\uDD21'));
+assertEquals('"___\\udd22"', JSON.stringify('___\uDD22'));
+assertEquals('"___\\udd23"', JSON.stringify('___\uDD23'));
+assertEquals('"___\\udd24"', JSON.stringify('___\uDD24'));
+assertEquals('"___\\udd25"', JSON.stringify('___\uDD25'));
+assertEquals('"___\\udd26"', JSON.stringify('___\uDD26'));
+assertEquals('"___\\udd27"', JSON.stringify('___\uDD27'));
+assertEquals('"___\\udd28"', JSON.stringify('___\uDD28'));
+assertEquals('"___\\udd29"', JSON.stringify('___\uDD29'));
+assertEquals('"___\\udd2a"', JSON.stringify('___\uDD2A'));
+assertEquals('"___\\udd2b"', JSON.stringify('___\uDD2B'));
+assertEquals('"___\\udd2c"', JSON.stringify('___\uDD2C'));
+assertEquals('"___\\udd2d"', JSON.stringify('___\uDD2D'));
+assertEquals('"___\\udd2e"', JSON.stringify('___\uDD2E'));
+assertEquals('"___\\udd2f"', JSON.stringify('___\uDD2F'));
+assertEquals('"___\\udd30"', JSON.stringify('___\uDD30'));
+assertEquals('"___\\udd31"', JSON.stringify('___\uDD31'));
+assertEquals('"___\\udd32"', JSON.stringify('___\uDD32'));
+assertEquals('"___\\udd33"', JSON.stringify('___\uDD33'));
+assertEquals('"___\\udd34"', JSON.stringify('___\uDD34'));
+assertEquals('"___\\udd35"', JSON.stringify('___\uDD35'));
+assertEquals('"___\\udd36"', JSON.stringify('___\uDD36'));
+assertEquals('"___\\udd37"', JSON.stringify('___\uDD37'));
+assertEquals('"___\\udd38"', JSON.stringify('___\uDD38'));
+assertEquals('"___\\udd39"', JSON.stringify('___\uDD39'));
+assertEquals('"___\\udd3a"', JSON.stringify('___\uDD3A'));
+assertEquals('"___\\udd3b"', JSON.stringify('___\uDD3B'));
+assertEquals('"___\\udd3c"', JSON.stringify('___\uDD3C'));
+assertEquals('"___\\udd3d"', JSON.stringify('___\uDD3D'));
+assertEquals('"___\\udd3e"', JSON.stringify('___\uDD3E'));
+assertEquals('"___\\udd3f"', JSON.stringify('___\uDD3F'));
+assertEquals('"___\\udd40"', JSON.stringify('___\uDD40'));
+assertEquals('"___\\udd41"', JSON.stringify('___\uDD41'));
+assertEquals('"___\\udd42"', JSON.stringify('___\uDD42'));
+assertEquals('"___\\udd43"', JSON.stringify('___\uDD43'));
+assertEquals('"___\\udd44"', JSON.stringify('___\uDD44'));
+assertEquals('"___\\udd45"', JSON.stringify('___\uDD45'));
+assertEquals('"___\\udd46"', JSON.stringify('___\uDD46'));
+assertEquals('"___\\udd47"', JSON.stringify('___\uDD47'));
+assertEquals('"___\\udd48"', JSON.stringify('___\uDD48'));
+assertEquals('"___\\udd49"', JSON.stringify('___\uDD49'));
+assertEquals('"___\\udd4a"', JSON.stringify('___\uDD4A'));
+assertEquals('"___\\udd4b"', JSON.stringify('___\uDD4B'));
+assertEquals('"___\\udd4c"', JSON.stringify('___\uDD4C'));
+assertEquals('"___\\udd4d"', JSON.stringify('___\uDD4D'));
+assertEquals('"___\\udd4e"', JSON.stringify('___\uDD4E'));
+assertEquals('"___\\udd4f"', JSON.stringify('___\uDD4F'));
+assertEquals('"___\\udd50"', JSON.stringify('___\uDD50'));
+assertEquals('"___\\udd51"', JSON.stringify('___\uDD51'));
+assertEquals('"___\\udd52"', JSON.stringify('___\uDD52'));
+assertEquals('"___\\udd53"', JSON.stringify('___\uDD53'));
+assertEquals('"___\\udd54"', JSON.stringify('___\uDD54'));
+assertEquals('"___\\udd55"', JSON.stringify('___\uDD55'));
+assertEquals('"___\\udd56"', JSON.stringify('___\uDD56'));
+assertEquals('"___\\udd57"', JSON.stringify('___\uDD57'));
+assertEquals('"___\\udd58"', JSON.stringify('___\uDD58'));
+assertEquals('"___\\udd59"', JSON.stringify('___\uDD59'));
+assertEquals('"___\\udd5a"', JSON.stringify('___\uDD5A'));
+assertEquals('"___\\udd5b"', JSON.stringify('___\uDD5B'));
+assertEquals('"___\\udd5c"', JSON.stringify('___\uDD5C'));
+assertEquals('"___\\udd5d"', JSON.stringify('___\uDD5D'));
+assertEquals('"___\\udd5e"', JSON.stringify('___\uDD5E'));
+assertEquals('"___\\udd5f"', JSON.stringify('___\uDD5F'));
+assertEquals('"___\\udd60"', JSON.stringify('___\uDD60'));
+assertEquals('"___\\udd61"', JSON.stringify('___\uDD61'));
+assertEquals('"___\\udd62"', JSON.stringify('___\uDD62'));
+assertEquals('"___\\udd63"', JSON.stringify('___\uDD63'));
+assertEquals('"___\\udd64"', JSON.stringify('___\uDD64'));
+assertEquals('"___\\udd65"', JSON.stringify('___\uDD65'));
+assertEquals('"___\\udd66"', JSON.stringify('___\uDD66'));
+assertEquals('"___\\udd67"', JSON.stringify('___\uDD67'));
+assertEquals('"___\\udd68"', JSON.stringify('___\uDD68'));
+assertEquals('"___\\udd69"', JSON.stringify('___\uDD69'));
+assertEquals('"___\\udd6a"', JSON.stringify('___\uDD6A'));
+assertEquals('"___\\udd6b"', JSON.stringify('___\uDD6B'));
+assertEquals('"___\\udd6c"', JSON.stringify('___\uDD6C'));
+assertEquals('"___\\udd6d"', JSON.stringify('___\uDD6D'));
+assertEquals('"___\\udd6e"', JSON.stringify('___\uDD6E'));
+assertEquals('"___\\udd6f"', JSON.stringify('___\uDD6F'));
+assertEquals('"___\\udd70"', JSON.stringify('___\uDD70'));
+assertEquals('"___\\udd71"', JSON.stringify('___\uDD71'));
+assertEquals('"___\\udd72"', JSON.stringify('___\uDD72'));
+assertEquals('"___\\udd73"', JSON.stringify('___\uDD73'));
+assertEquals('"___\\udd74"', JSON.stringify('___\uDD74'));
+assertEquals('"___\\udd75"', JSON.stringify('___\uDD75'));
+assertEquals('"___\\udd76"', JSON.stringify('___\uDD76'));
+assertEquals('"___\\udd77"', JSON.stringify('___\uDD77'));
+assertEquals('"___\\udd78"', JSON.stringify('___\uDD78'));
+assertEquals('"___\\udd79"', JSON.stringify('___\uDD79'));
+assertEquals('"___\\udd7a"', JSON.stringify('___\uDD7A'));
+assertEquals('"___\\udd7b"', JSON.stringify('___\uDD7B'));
+assertEquals('"___\\udd7c"', JSON.stringify('___\uDD7C'));
+assertEquals('"___\\udd7d"', JSON.stringify('___\uDD7D'));
+assertEquals('"___\\udd7e"', JSON.stringify('___\uDD7E'));
+assertEquals('"___\\udd7f"', JSON.stringify('___\uDD7F'));
+assertEquals('"___\\udd80"', JSON.stringify('___\uDD80'));
+assertEquals('"___\\udd81"', JSON.stringify('___\uDD81'));
+assertEquals('"___\\udd82"', JSON.stringify('___\uDD82'));
+assertEquals('"___\\udd83"', JSON.stringify('___\uDD83'));
+assertEquals('"___\\udd84"', JSON.stringify('___\uDD84'));
+assertEquals('"___\\udd85"', JSON.stringify('___\uDD85'));
+assertEquals('"___\\udd86"', JSON.stringify('___\uDD86'));
+assertEquals('"___\\udd87"', JSON.stringify('___\uDD87'));
+assertEquals('"___\\udd88"', JSON.stringify('___\uDD88'));
+assertEquals('"___\\udd89"', JSON.stringify('___\uDD89'));
+assertEquals('"___\\udd8a"', JSON.stringify('___\uDD8A'));
+assertEquals('"___\\udd8b"', JSON.stringify('___\uDD8B'));
+assertEquals('"___\\udd8c"', JSON.stringify('___\uDD8C'));
+assertEquals('"___\\udd8d"', JSON.stringify('___\uDD8D'));
+assertEquals('"___\\udd8e"', JSON.stringify('___\uDD8E'));
+assertEquals('"___\\udd8f"', JSON.stringify('___\uDD8F'));
+assertEquals('"___\\udd90"', JSON.stringify('___\uDD90'));
+assertEquals('"___\\udd91"', JSON.stringify('___\uDD91'));
+assertEquals('"___\\udd92"', JSON.stringify('___\uDD92'));
+assertEquals('"___\\udd93"', JSON.stringify('___\uDD93'));
+assertEquals('"___\\udd94"', JSON.stringify('___\uDD94'));
+assertEquals('"___\\udd95"', JSON.stringify('___\uDD95'));
+assertEquals('"___\\udd96"', JSON.stringify('___\uDD96'));
+assertEquals('"___\\udd97"', JSON.stringify('___\uDD97'));
+assertEquals('"___\\udd98"', JSON.stringify('___\uDD98'));
+assertEquals('"___\\udd99"', JSON.stringify('___\uDD99'));
+assertEquals('"___\\udd9a"', JSON.stringify('___\uDD9A'));
+assertEquals('"___\\udd9b"', JSON.stringify('___\uDD9B'));
+assertEquals('"___\\udd9c"', JSON.stringify('___\uDD9C'));
+assertEquals('"___\\udd9d"', JSON.stringify('___\uDD9D'));
+assertEquals('"___\\udd9e"', JSON.stringify('___\uDD9E'));
+assertEquals('"___\\udd9f"', JSON.stringify('___\uDD9F'));
+assertEquals('"___\\udda0"', JSON.stringify('___\uDDA0'));
+assertEquals('"___\\udda1"', JSON.stringify('___\uDDA1'));
+assertEquals('"___\\udda2"', JSON.stringify('___\uDDA2'));
+assertEquals('"___\\udda3"', JSON.stringify('___\uDDA3'));
+assertEquals('"___\\udda4"', JSON.stringify('___\uDDA4'));
+assertEquals('"___\\udda5"', JSON.stringify('___\uDDA5'));
+assertEquals('"___\\udda6"', JSON.stringify('___\uDDA6'));
+assertEquals('"___\\udda7"', JSON.stringify('___\uDDA7'));
+assertEquals('"___\\udda8"', JSON.stringify('___\uDDA8'));
+assertEquals('"___\\udda9"', JSON.stringify('___\uDDA9'));
+assertEquals('"___\\uddaa"', JSON.stringify('___\uDDAA'));
+assertEquals('"___\\uddab"', JSON.stringify('___\uDDAB'));
+assertEquals('"___\\uddac"', JSON.stringify('___\uDDAC'));
+assertEquals('"___\\uddad"', JSON.stringify('___\uDDAD'));
+assertEquals('"___\\uddae"', JSON.stringify('___\uDDAE'));
+assertEquals('"___\\uddaf"', JSON.stringify('___\uDDAF'));
+assertEquals('"___\\uddb0"', JSON.stringify('___\uDDB0'));
+assertEquals('"___\\uddb1"', JSON.stringify('___\uDDB1'));
+assertEquals('"___\\uddb2"', JSON.stringify('___\uDDB2'));
+assertEquals('"___\\uddb3"', JSON.stringify('___\uDDB3'));
+assertEquals('"___\\uddb4"', JSON.stringify('___\uDDB4'));
+assertEquals('"___\\uddb5"', JSON.stringify('___\uDDB5'));
+assertEquals('"___\\uddb6"', JSON.stringify('___\uDDB6'));
+assertEquals('"___\\uddb7"', JSON.stringify('___\uDDB7'));
+assertEquals('"___\\uddb8"', JSON.stringify('___\uDDB8'));
+assertEquals('"___\\uddb9"', JSON.stringify('___\uDDB9'));
+assertEquals('"___\\uddba"', JSON.stringify('___\uDDBA'));
+assertEquals('"___\\uddbb"', JSON.stringify('___\uDDBB'));
+assertEquals('"___\\uddbc"', JSON.stringify('___\uDDBC'));
+assertEquals('"___\\uddbd"', JSON.stringify('___\uDDBD'));
+assertEquals('"___\\uddbe"', JSON.stringify('___\uDDBE'));
+assertEquals('"___\\uddbf"', JSON.stringify('___\uDDBF'));
+assertEquals('"___\\uddc0"', JSON.stringify('___\uDDC0'));
+assertEquals('"___\\uddc1"', JSON.stringify('___\uDDC1'));
+assertEquals('"___\\uddc2"', JSON.stringify('___\uDDC2'));
+assertEquals('"___\\uddc3"', JSON.stringify('___\uDDC3'));
+assertEquals('"___\\uddc4"', JSON.stringify('___\uDDC4'));
+assertEquals('"___\\uddc5"', JSON.stringify('___\uDDC5'));
+assertEquals('"___\\uddc6"', JSON.stringify('___\uDDC6'));
+assertEquals('"___\\uddc7"', JSON.stringify('___\uDDC7'));
+assertEquals('"___\\uddc8"', JSON.stringify('___\uDDC8'));
+assertEquals('"___\\uddc9"', JSON.stringify('___\uDDC9'));
+assertEquals('"___\\uddca"', JSON.stringify('___\uDDCA'));
+assertEquals('"___\\uddcb"', JSON.stringify('___\uDDCB'));
+assertEquals('"___\\uddcc"', JSON.stringify('___\uDDCC'));
+assertEquals('"___\\uddcd"', JSON.stringify('___\uDDCD'));
+assertEquals('"___\\uddce"', JSON.stringify('___\uDDCE'));
+assertEquals('"___\\uddcf"', JSON.stringify('___\uDDCF'));
+assertEquals('"___\\uddd0"', JSON.stringify('___\uDDD0'));
+assertEquals('"___\\uddd1"', JSON.stringify('___\uDDD1'));
+assertEquals('"___\\uddd2"', JSON.stringify('___\uDDD2'));
+assertEquals('"___\\uddd3"', JSON.stringify('___\uDDD3'));
+assertEquals('"___\\uddd4"', JSON.stringify('___\uDDD4'));
+assertEquals('"___\\uddd5"', JSON.stringify('___\uDDD5'));
+assertEquals('"___\\uddd6"', JSON.stringify('___\uDDD6'));
+assertEquals('"___\\uddd7"', JSON.stringify('___\uDDD7'));
+assertEquals('"___\\uddd8"', JSON.stringify('___\uDDD8'));
+assertEquals('"___\\uddd9"', JSON.stringify('___\uDDD9'));
+assertEquals('"___\\uddda"', JSON.stringify('___\uDDDA'));
+assertEquals('"___\\udddb"', JSON.stringify('___\uDDDB'));
+assertEquals('"___\\udddc"', JSON.stringify('___\uDDDC'));
+assertEquals('"___\\udddd"', JSON.stringify('___\uDDDD'));
+assertEquals('"___\\uddde"', JSON.stringify('___\uDDDE'));
+assertEquals('"___\\udddf"', JSON.stringify('___\uDDDF'));
+assertEquals('"___\\udde0"', JSON.stringify('___\uDDE0'));
+assertEquals('"___\\udde1"', JSON.stringify('___\uDDE1'));
+assertEquals('"___\\udde2"', JSON.stringify('___\uDDE2'));
+assertEquals('"___\\udde3"', JSON.stringify('___\uDDE3'));
+assertEquals('"___\\udde4"', JSON.stringify('___\uDDE4'));
+assertEquals('"___\\udde5"', JSON.stringify('___\uDDE5'));
+assertEquals('"___\\udde6"', JSON.stringify('___\uDDE6'));
+assertEquals('"___\\udde7"', JSON.stringify('___\uDDE7'));
+assertEquals('"___\\udde8"', JSON.stringify('___\uDDE8'));
+assertEquals('"___\\udde9"', JSON.stringify('___\uDDE9'));
+assertEquals('"___\\uddea"', JSON.stringify('___\uDDEA'));
+assertEquals('"___\\uddeb"', JSON.stringify('___\uDDEB'));
+assertEquals('"___\\uddec"', JSON.stringify('___\uDDEC'));
+assertEquals('"___\\udded"', JSON.stringify('___\uDDED'));
+assertEquals('"___\\uddee"', JSON.stringify('___\uDDEE'));
+assertEquals('"___\\uddef"', JSON.stringify('___\uDDEF'));
+assertEquals('"___\\uddf0"', JSON.stringify('___\uDDF0'));
+assertEquals('"___\\uddf1"', JSON.stringify('___\uDDF1'));
+assertEquals('"___\\uddf2"', JSON.stringify('___\uDDF2'));
+assertEquals('"___\\uddf3"', JSON.stringify('___\uDDF3'));
+assertEquals('"___\\uddf4"', JSON.stringify('___\uDDF4'));
+assertEquals('"___\\uddf5"', JSON.stringify('___\uDDF5'));
+assertEquals('"___\\uddf6"', JSON.stringify('___\uDDF6'));
+assertEquals('"___\\uddf7"', JSON.stringify('___\uDDF7'));
+assertEquals('"___\\uddf8"', JSON.stringify('___\uDDF8'));
+assertEquals('"___\\uddf9"', JSON.stringify('___\uDDF9'));
+assertEquals('"___\\uddfa"', JSON.stringify('___\uDDFA'));
+assertEquals('"___\\uddfb"', JSON.stringify('___\uDDFB'));
+assertEquals('"___\\uddfc"', JSON.stringify('___\uDDFC'));
+assertEquals('"___\\uddfd"', JSON.stringify('___\uDDFD'));
+assertEquals('"___\\uddfe"', JSON.stringify('___\uDDFE'));
+assertEquals('"___\\uddff"', JSON.stringify('___\uDDFF'));
+assertEquals('"___\\ude00"', JSON.stringify('___\uDE00'));
+assertEquals('"___\\ude01"', JSON.stringify('___\uDE01'));
+assertEquals('"___\\ude02"', JSON.stringify('___\uDE02'));
+assertEquals('"___\\ude03"', JSON.stringify('___\uDE03'));
+assertEquals('"___\\ude04"', JSON.stringify('___\uDE04'));
+assertEquals('"___\\ude05"', JSON.stringify('___\uDE05'));
+assertEquals('"___\\ude06"', JSON.stringify('___\uDE06'));
+assertEquals('"___\\ude07"', JSON.stringify('___\uDE07'));
+assertEquals('"___\\ude08"', JSON.stringify('___\uDE08'));
+assertEquals('"___\\ude09"', JSON.stringify('___\uDE09'));
+assertEquals('"___\\ude0a"', JSON.stringify('___\uDE0A'));
+assertEquals('"___\\ude0b"', JSON.stringify('___\uDE0B'));
+assertEquals('"___\\ude0c"', JSON.stringify('___\uDE0C'));
+assertEquals('"___\\ude0d"', JSON.stringify('___\uDE0D'));
+assertEquals('"___\\ude0e"', JSON.stringify('___\uDE0E'));
+assertEquals('"___\\ude0f"', JSON.stringify('___\uDE0F'));
+assertEquals('"___\\ude10"', JSON.stringify('___\uDE10'));
+assertEquals('"___\\ude11"', JSON.stringify('___\uDE11'));
+assertEquals('"___\\ude12"', JSON.stringify('___\uDE12'));
+assertEquals('"___\\ude13"', JSON.stringify('___\uDE13'));
+assertEquals('"___\\ude14"', JSON.stringify('___\uDE14'));
+assertEquals('"___\\ude15"', JSON.stringify('___\uDE15'));
+assertEquals('"___\\ude16"', JSON.stringify('___\uDE16'));
+assertEquals('"___\\ude17"', JSON.stringify('___\uDE17'));
+assertEquals('"___\\ude18"', JSON.stringify('___\uDE18'));
+assertEquals('"___\\ude19"', JSON.stringify('___\uDE19'));
+assertEquals('"___\\ude1a"', JSON.stringify('___\uDE1A'));
+assertEquals('"___\\ude1b"', JSON.stringify('___\uDE1B'));
+assertEquals('"___\\ude1c"', JSON.stringify('___\uDE1C'));
+assertEquals('"___\\ude1d"', JSON.stringify('___\uDE1D'));
+assertEquals('"___\\ude1e"', JSON.stringify('___\uDE1E'));
+assertEquals('"___\\ude1f"', JSON.stringify('___\uDE1F'));
+assertEquals('"___\\ude20"', JSON.stringify('___\uDE20'));
+assertEquals('"___\\ude21"', JSON.stringify('___\uDE21'));
+assertEquals('"___\\ude22"', JSON.stringify('___\uDE22'));
+assertEquals('"___\\ude23"', JSON.stringify('___\uDE23'));
+assertEquals('"___\\ude24"', JSON.stringify('___\uDE24'));
+assertEquals('"___\\ude25"', JSON.stringify('___\uDE25'));
+assertEquals('"___\\ude26"', JSON.stringify('___\uDE26'));
+assertEquals('"___\\ude27"', JSON.stringify('___\uDE27'));
+assertEquals('"___\\ude28"', JSON.stringify('___\uDE28'));
+assertEquals('"___\\ude29"', JSON.stringify('___\uDE29'));
+assertEquals('"___\\ude2a"', JSON.stringify('___\uDE2A'));
+assertEquals('"___\\ude2b"', JSON.stringify('___\uDE2B'));
+assertEquals('"___\\ude2c"', JSON.stringify('___\uDE2C'));
+assertEquals('"___\\ude2d"', JSON.stringify('___\uDE2D'));
+assertEquals('"___\\ude2e"', JSON.stringify('___\uDE2E'));
+assertEquals('"___\\ude2f"', JSON.stringify('___\uDE2F'));
+assertEquals('"___\\ude30"', JSON.stringify('___\uDE30'));
+assertEquals('"___\\ude31"', JSON.stringify('___\uDE31'));
+assertEquals('"___\\ude32"', JSON.stringify('___\uDE32'));
+assertEquals('"___\\ude33"', JSON.stringify('___\uDE33'));
+assertEquals('"___\\ude34"', JSON.stringify('___\uDE34'));
+assertEquals('"___\\ude35"', JSON.stringify('___\uDE35'));
+assertEquals('"___\\ude36"', JSON.stringify('___\uDE36'));
+assertEquals('"___\\ude37"', JSON.stringify('___\uDE37'));
+assertEquals('"___\\ude38"', JSON.stringify('___\uDE38'));
+assertEquals('"___\\ude39"', JSON.stringify('___\uDE39'));
+assertEquals('"___\\ude3a"', JSON.stringify('___\uDE3A'));
+assertEquals('"___\\ude3b"', JSON.stringify('___\uDE3B'));
+assertEquals('"___\\ude3c"', JSON.stringify('___\uDE3C'));
+assertEquals('"___\\ude3d"', JSON.stringify('___\uDE3D'));
+assertEquals('"___\\ude3e"', JSON.stringify('___\uDE3E'));
+assertEquals('"___\\ude3f"', JSON.stringify('___\uDE3F'));
+assertEquals('"___\\ude40"', JSON.stringify('___\uDE40'));
+assertEquals('"___\\ude41"', JSON.stringify('___\uDE41'));
+assertEquals('"___\\ude42"', JSON.stringify('___\uDE42'));
+assertEquals('"___\\ude43"', JSON.stringify('___\uDE43'));
+assertEquals('"___\\ude44"', JSON.stringify('___\uDE44'));
+assertEquals('"___\\ude45"', JSON.stringify('___\uDE45'));
+assertEquals('"___\\ude46"', JSON.stringify('___\uDE46'));
+assertEquals('"___\\ude47"', JSON.stringify('___\uDE47'));
+assertEquals('"___\\ude48"', JSON.stringify('___\uDE48'));
+assertEquals('"___\\ude49"', JSON.stringify('___\uDE49'));
+assertEquals('"___\\ude4a"', JSON.stringify('___\uDE4A'));
+assertEquals('"___\\ude4b"', JSON.stringify('___\uDE4B'));
+assertEquals('"___\\ude4c"', JSON.stringify('___\uDE4C'));
+assertEquals('"___\\ude4d"', JSON.stringify('___\uDE4D'));
+assertEquals('"___\\ude4e"', JSON.stringify('___\uDE4E'));
+assertEquals('"___\\ude4f"', JSON.stringify('___\uDE4F'));
+assertEquals('"___\\ude50"', JSON.stringify('___\uDE50'));
+assertEquals('"___\\ude51"', JSON.stringify('___\uDE51'));
+assertEquals('"___\\ude52"', JSON.stringify('___\uDE52'));
+assertEquals('"___\\ude53"', JSON.stringify('___\uDE53'));
+assertEquals('"___\\ude54"', JSON.stringify('___\uDE54'));
+assertEquals('"___\\ude55"', JSON.stringify('___\uDE55'));
+assertEquals('"___\\ude56"', JSON.stringify('___\uDE56'));
+assertEquals('"___\\ude57"', JSON.stringify('___\uDE57'));
+assertEquals('"___\\ude58"', JSON.stringify('___\uDE58'));
+assertEquals('"___\\ude59"', JSON.stringify('___\uDE59'));
+assertEquals('"___\\ude5a"', JSON.stringify('___\uDE5A'));
+assertEquals('"___\\ude5b"', JSON.stringify('___\uDE5B'));
+assertEquals('"___\\ude5c"', JSON.stringify('___\uDE5C'));
+assertEquals('"___\\ude5d"', JSON.stringify('___\uDE5D'));
+assertEquals('"___\\ude5e"', JSON.stringify('___\uDE5E'));
+assertEquals('"___\\ude5f"', JSON.stringify('___\uDE5F'));
+assertEquals('"___\\ude60"', JSON.stringify('___\uDE60'));
+assertEquals('"___\\ude61"', JSON.stringify('___\uDE61'));
+assertEquals('"___\\ude62"', JSON.stringify('___\uDE62'));
+assertEquals('"___\\ude63"', JSON.stringify('___\uDE63'));
+assertEquals('"___\\ude64"', JSON.stringify('___\uDE64'));
+assertEquals('"___\\ude65"', JSON.stringify('___\uDE65'));
+assertEquals('"___\\ude66"', JSON.stringify('___\uDE66'));
+assertEquals('"___\\ude67"', JSON.stringify('___\uDE67'));
+assertEquals('"___\\ude68"', JSON.stringify('___\uDE68'));
+assertEquals('"___\\ude69"', JSON.stringify('___\uDE69'));
+assertEquals('"___\\ude6a"', JSON.stringify('___\uDE6A'));
+assertEquals('"___\\ude6b"', JSON.stringify('___\uDE6B'));
+assertEquals('"___\\ude6c"', JSON.stringify('___\uDE6C'));
+assertEquals('"___\\ude6d"', JSON.stringify('___\uDE6D'));
+assertEquals('"___\\ude6e"', JSON.stringify('___\uDE6E'));
+assertEquals('"___\\ude6f"', JSON.stringify('___\uDE6F'));
+assertEquals('"___\\ude70"', JSON.stringify('___\uDE70'));
+assertEquals('"___\\ude71"', JSON.stringify('___\uDE71'));
+assertEquals('"___\\ude72"', JSON.stringify('___\uDE72'));
+assertEquals('"___\\ude73"', JSON.stringify('___\uDE73'));
+assertEquals('"___\\ude74"', JSON.stringify('___\uDE74'));
+assertEquals('"___\\ude75"', JSON.stringify('___\uDE75'));
+assertEquals('"___\\ude76"', JSON.stringify('___\uDE76'));
+assertEquals('"___\\ude77"', JSON.stringify('___\uDE77'));
+assertEquals('"___\\ude78"', JSON.stringify('___\uDE78'));
+assertEquals('"___\\ude79"', JSON.stringify('___\uDE79'));
+assertEquals('"___\\ude7a"', JSON.stringify('___\uDE7A'));
+assertEquals('"___\\ude7b"', JSON.stringify('___\uDE7B'));
+assertEquals('"___\\ude7c"', JSON.stringify('___\uDE7C'));
+assertEquals('"___\\ude7d"', JSON.stringify('___\uDE7D'));
+assertEquals('"___\\ude7e"', JSON.stringify('___\uDE7E'));
+assertEquals('"___\\ude7f"', JSON.stringify('___\uDE7F'));
+assertEquals('"___\\ude80"', JSON.stringify('___\uDE80'));
+assertEquals('"___\\ude81"', JSON.stringify('___\uDE81'));
+assertEquals('"___\\ude82"', JSON.stringify('___\uDE82'));
+assertEquals('"___\\ude83"', JSON.stringify('___\uDE83'));
+assertEquals('"___\\ude84"', JSON.stringify('___\uDE84'));
+assertEquals('"___\\ude85"', JSON.stringify('___\uDE85'));
+assertEquals('"___\\ude86"', JSON.stringify('___\uDE86'));
+assertEquals('"___\\ude87"', JSON.stringify('___\uDE87'));
+assertEquals('"___\\ude88"', JSON.stringify('___\uDE88'));
+assertEquals('"___\\ude89"', JSON.stringify('___\uDE89'));
+assertEquals('"___\\ude8a"', JSON.stringify('___\uDE8A'));
+assertEquals('"___\\ude8b"', JSON.stringify('___\uDE8B'));
+assertEquals('"___\\ude8c"', JSON.stringify('___\uDE8C'));
+assertEquals('"___\\ude8d"', JSON.stringify('___\uDE8D'));
+assertEquals('"___\\ude8e"', JSON.stringify('___\uDE8E'));
+assertEquals('"___\\ude8f"', JSON.stringify('___\uDE8F'));
+assertEquals('"___\\ude90"', JSON.stringify('___\uDE90'));
+assertEquals('"___\\ude91"', JSON.stringify('___\uDE91'));
+assertEquals('"___\\ude92"', JSON.stringify('___\uDE92'));
+assertEquals('"___\\ude93"', JSON.stringify('___\uDE93'));
+assertEquals('"___\\ude94"', JSON.stringify('___\uDE94'));
+assertEquals('"___\\ude95"', JSON.stringify('___\uDE95'));
+assertEquals('"___\\ude96"', JSON.stringify('___\uDE96'));
+assertEquals('"___\\ude97"', JSON.stringify('___\uDE97'));
+assertEquals('"___\\ude98"', JSON.stringify('___\uDE98'));
+assertEquals('"___\\ude99"', JSON.stringify('___\uDE99'));
+assertEquals('"___\\ude9a"', JSON.stringify('___\uDE9A'));
+assertEquals('"___\\ude9b"', JSON.stringify('___\uDE9B'));
+assertEquals('"___\\ude9c"', JSON.stringify('___\uDE9C'));
+assertEquals('"___\\ude9d"', JSON.stringify('___\uDE9D'));
+assertEquals('"___\\ude9e"', JSON.stringify('___\uDE9E'));
+assertEquals('"___\\ude9f"', JSON.stringify('___\uDE9F'));
+assertEquals('"___\\udea0"', JSON.stringify('___\uDEA0'));
+assertEquals('"___\\udea1"', JSON.stringify('___\uDEA1'));
+assertEquals('"___\\udea2"', JSON.stringify('___\uDEA2'));
+assertEquals('"___\\udea3"', JSON.stringify('___\uDEA3'));
+assertEquals('"___\\udea4"', JSON.stringify('___\uDEA4'));
+assertEquals('"___\\udea5"', JSON.stringify('___\uDEA5'));
+assertEquals('"___\\udea6"', JSON.stringify('___\uDEA6'));
+assertEquals('"___\\udea7"', JSON.stringify('___\uDEA7'));
+assertEquals('"___\\udea8"', JSON.stringify('___\uDEA8'));
+assertEquals('"___\\udea9"', JSON.stringify('___\uDEA9'));
+assertEquals('"___\\udeaa"', JSON.stringify('___\uDEAA'));
+assertEquals('"___\\udeab"', JSON.stringify('___\uDEAB'));
+assertEquals('"___\\udeac"', JSON.stringify('___\uDEAC'));
+assertEquals('"___\\udead"', JSON.stringify('___\uDEAD'));
+assertEquals('"___\\udeae"', JSON.stringify('___\uDEAE'));
+assertEquals('"___\\udeaf"', JSON.stringify('___\uDEAF'));
+assertEquals('"___\\udeb0"', JSON.stringify('___\uDEB0'));
+assertEquals('"___\\udeb1"', JSON.stringify('___\uDEB1'));
+assertEquals('"___\\udeb2"', JSON.stringify('___\uDEB2'));
+assertEquals('"___\\udeb3"', JSON.stringify('___\uDEB3'));
+assertEquals('"___\\udeb4"', JSON.stringify('___\uDEB4'));
+assertEquals('"___\\udeb5"', JSON.stringify('___\uDEB5'));
+assertEquals('"___\\udeb6"', JSON.stringify('___\uDEB6'));
+assertEquals('"___\\udeb7"', JSON.stringify('___\uDEB7'));
+assertEquals('"___\\udeb8"', JSON.stringify('___\uDEB8'));
+assertEquals('"___\\udeb9"', JSON.stringify('___\uDEB9'));
+assertEquals('"___\\udeba"', JSON.stringify('___\uDEBA'));
+assertEquals('"___\\udebb"', JSON.stringify('___\uDEBB'));
+assertEquals('"___\\udebc"', JSON.stringify('___\uDEBC'));
+assertEquals('"___\\udebd"', JSON.stringify('___\uDEBD'));
+assertEquals('"___\\udebe"', JSON.stringify('___\uDEBE'));
+assertEquals('"___\\udebf"', JSON.stringify('___\uDEBF'));
+assertEquals('"___\\udec0"', JSON.stringify('___\uDEC0'));
+assertEquals('"___\\udec1"', JSON.stringify('___\uDEC1'));
+assertEquals('"___\\udec2"', JSON.stringify('___\uDEC2'));
+assertEquals('"___\\udec3"', JSON.stringify('___\uDEC3'));
+assertEquals('"___\\udec4"', JSON.stringify('___\uDEC4'));
+assertEquals('"___\\udec5"', JSON.stringify('___\uDEC5'));
+assertEquals('"___\\udec6"', JSON.stringify('___\uDEC6'));
+assertEquals('"___\\udec7"', JSON.stringify('___\uDEC7'));
+assertEquals('"___\\udec8"', JSON.stringify('___\uDEC8'));
+assertEquals('"___\\udec9"', JSON.stringify('___\uDEC9'));
+assertEquals('"___\\udeca"', JSON.stringify('___\uDECA'));
+assertEquals('"___\\udecb"', JSON.stringify('___\uDECB'));
+assertEquals('"___\\udecc"', JSON.stringify('___\uDECC'));
+assertEquals('"___\\udecd"', JSON.stringify('___\uDECD'));
+assertEquals('"___\\udece"', JSON.stringify('___\uDECE'));
+assertEquals('"___\\udecf"', JSON.stringify('___\uDECF'));
+assertEquals('"___\\uded0"', JSON.stringify('___\uDED0'));
+assertEquals('"___\\uded1"', JSON.stringify('___\uDED1'));
+assertEquals('"___\\uded2"', JSON.stringify('___\uDED2'));
+assertEquals('"___\\uded3"', JSON.stringify('___\uDED3'));
+assertEquals('"___\\uded4"', JSON.stringify('___\uDED4'));
+assertEquals('"___\\uded5"', JSON.stringify('___\uDED5'));
+assertEquals('"___\\uded6"', JSON.stringify('___\uDED6'));
+assertEquals('"___\\uded7"', JSON.stringify('___\uDED7'));
+assertEquals('"___\\uded8"', JSON.stringify('___\uDED8'));
+assertEquals('"___\\uded9"', JSON.stringify('___\uDED9'));
+assertEquals('"___\\udeda"', JSON.stringify('___\uDEDA'));
+assertEquals('"___\\udedb"', JSON.stringify('___\uDEDB'));
+assertEquals('"___\\udedc"', JSON.stringify('___\uDEDC'));
+assertEquals('"___\\udedd"', JSON.stringify('___\uDEDD'));
+assertEquals('"___\\udede"', JSON.stringify('___\uDEDE'));
+assertEquals('"___\\udedf"', JSON.stringify('___\uDEDF'));
+assertEquals('"___\\udee0"', JSON.stringify('___\uDEE0'));
+assertEquals('"___\\udee1"', JSON.stringify('___\uDEE1'));
+assertEquals('"___\\udee2"', JSON.stringify('___\uDEE2'));
+assertEquals('"___\\udee3"', JSON.stringify('___\uDEE3'));
+assertEquals('"___\\udee4"', JSON.stringify('___\uDEE4'));
+assertEquals('"___\\udee5"', JSON.stringify('___\uDEE5'));
+assertEquals('"___\\udee6"', JSON.stringify('___\uDEE6'));
+assertEquals('"___\\udee7"', JSON.stringify('___\uDEE7'));
+assertEquals('"___\\udee8"', JSON.stringify('___\uDEE8'));
+assertEquals('"___\\udee9"', JSON.stringify('___\uDEE9'));
+assertEquals('"___\\udeea"', JSON.stringify('___\uDEEA'));
+assertEquals('"___\\udeeb"', JSON.stringify('___\uDEEB'));
+assertEquals('"___\\udeec"', JSON.stringify('___\uDEEC'));
+assertEquals('"___\\udeed"', JSON.stringify('___\uDEED'));
+assertEquals('"___\\udeee"', JSON.stringify('___\uDEEE'));
+assertEquals('"___\\udeef"', JSON.stringify('___\uDEEF'));
+assertEquals('"___\\udef0"', JSON.stringify('___\uDEF0'));
+assertEquals('"___\\udef1"', JSON.stringify('___\uDEF1'));
+assertEquals('"___\\udef2"', JSON.stringify('___\uDEF2'));
+assertEquals('"___\\udef3"', JSON.stringify('___\uDEF3'));
+assertEquals('"___\\udef4"', JSON.stringify('___\uDEF4'));
+assertEquals('"___\\udef5"', JSON.stringify('___\uDEF5'));
+assertEquals('"___\\udef6"', JSON.stringify('___\uDEF6'));
+assertEquals('"___\\udef7"', JSON.stringify('___\uDEF7'));
+assertEquals('"___\\udef8"', JSON.stringify('___\uDEF8'));
+assertEquals('"___\\udef9"', JSON.stringify('___\uDEF9'));
+assertEquals('"___\\udefa"', JSON.stringify('___\uDEFA'));
+assertEquals('"___\\udefb"', JSON.stringify('___\uDEFB'));
+assertEquals('"___\\udefc"', JSON.stringify('___\uDEFC'));
+assertEquals('"___\\udefd"', JSON.stringify('___\uDEFD'));
+assertEquals('"___\\udefe"', JSON.stringify('___\uDEFE'));
+assertEquals('"___\\udeff"', JSON.stringify('___\uDEFF'));
+assertEquals('"___\\udf00"', JSON.stringify('___\uDF00'));
+assertEquals('"___\\udf01"', JSON.stringify('___\uDF01'));
+assertEquals('"___\\udf02"', JSON.stringify('___\uDF02'));
+assertEquals('"___\\udf03"', JSON.stringify('___\uDF03'));
+assertEquals('"___\\udf04"', JSON.stringify('___\uDF04'));
+assertEquals('"___\\udf05"', JSON.stringify('___\uDF05'));
+assertEquals('"___\\udf06"', JSON.stringify('___\uDF06'));
+assertEquals('"___\\udf07"', JSON.stringify('___\uDF07'));
+assertEquals('"___\\udf08"', JSON.stringify('___\uDF08'));
+assertEquals('"___\\udf09"', JSON.stringify('___\uDF09'));
+assertEquals('"___\\udf0a"', JSON.stringify('___\uDF0A'));
+assertEquals('"___\\udf0b"', JSON.stringify('___\uDF0B'));
+assertEquals('"___\\udf0c"', JSON.stringify('___\uDF0C'));
+assertEquals('"___\\udf0d"', JSON.stringify('___\uDF0D'));
+assertEquals('"___\\udf0e"', JSON.stringify('___\uDF0E'));
+assertEquals('"___\\udf0f"', JSON.stringify('___\uDF0F'));
+assertEquals('"___\\udf10"', JSON.stringify('___\uDF10'));
+assertEquals('"___\\udf11"', JSON.stringify('___\uDF11'));
+assertEquals('"___\\udf12"', JSON.stringify('___\uDF12'));
+assertEquals('"___\\udf13"', JSON.stringify('___\uDF13'));
+assertEquals('"___\\udf14"', JSON.stringify('___\uDF14'));
+assertEquals('"___\\udf15"', JSON.stringify('___\uDF15'));
+assertEquals('"___\\udf16"', JSON.stringify('___\uDF16'));
+assertEquals('"___\\udf17"', JSON.stringify('___\uDF17'));
+assertEquals('"___\\udf18"', JSON.stringify('___\uDF18'));
+assertEquals('"___\\udf19"', JSON.stringify('___\uDF19'));
+assertEquals('"___\\udf1a"', JSON.stringify('___\uDF1A'));
+assertEquals('"___\\udf1b"', JSON.stringify('___\uDF1B'));
+assertEquals('"___\\udf1c"', JSON.stringify('___\uDF1C'));
+assertEquals('"___\\udf1d"', JSON.stringify('___\uDF1D'));
+assertEquals('"___\\udf1e"', JSON.stringify('___\uDF1E'));
+assertEquals('"___\\udf1f"', JSON.stringify('___\uDF1F'));
+assertEquals('"___\\udf20"', JSON.stringify('___\uDF20'));
+assertEquals('"___\\udf21"', JSON.stringify('___\uDF21'));
+assertEquals('"___\\udf22"', JSON.stringify('___\uDF22'));
+assertEquals('"___\\udf23"', JSON.stringify('___\uDF23'));
+assertEquals('"___\\udf24"', JSON.stringify('___\uDF24'));
+assertEquals('"___\\udf25"', JSON.stringify('___\uDF25'));
+assertEquals('"___\\udf26"', JSON.stringify('___\uDF26'));
+assertEquals('"___\\udf27"', JSON.stringify('___\uDF27'));
+assertEquals('"___\\udf28"', JSON.stringify('___\uDF28'));
+assertEquals('"___\\udf29"', JSON.stringify('___\uDF29'));
+assertEquals('"___\\udf2a"', JSON.stringify('___\uDF2A'));
+assertEquals('"___\\udf2b"', JSON.stringify('___\uDF2B'));
+assertEquals('"___\\udf2c"', JSON.stringify('___\uDF2C'));
+assertEquals('"___\\udf2d"', JSON.stringify('___\uDF2D'));
+assertEquals('"___\\udf2e"', JSON.stringify('___\uDF2E'));
+assertEquals('"___\\udf2f"', JSON.stringify('___\uDF2F'));
+assertEquals('"___\\udf30"', JSON.stringify('___\uDF30'));
+assertEquals('"___\\udf31"', JSON.stringify('___\uDF31'));
+assertEquals('"___\\udf32"', JSON.stringify('___\uDF32'));
+assertEquals('"___\\udf33"', JSON.stringify('___\uDF33'));
+assertEquals('"___\\udf34"', JSON.stringify('___\uDF34'));
+assertEquals('"___\\udf35"', JSON.stringify('___\uDF35'));
+assertEquals('"___\\udf36"', JSON.stringify('___\uDF36'));
+assertEquals('"___\\udf37"', JSON.stringify('___\uDF37'));
+assertEquals('"___\\udf38"', JSON.stringify('___\uDF38'));
+assertEquals('"___\\udf39"', JSON.stringify('___\uDF39'));
+assertEquals('"___\\udf3a"', JSON.stringify('___\uDF3A'));
+assertEquals('"___\\udf3b"', JSON.stringify('___\uDF3B'));
+assertEquals('"___\\udf3c"', JSON.stringify('___\uDF3C'));
+assertEquals('"___\\udf3d"', JSON.stringify('___\uDF3D'));
+assertEquals('"___\\udf3e"', JSON.stringify('___\uDF3E'));
+assertEquals('"___\\udf3f"', JSON.stringify('___\uDF3F'));
+assertEquals('"___\\udf40"', JSON.stringify('___\uDF40'));
+assertEquals('"___\\udf41"', JSON.stringify('___\uDF41'));
+assertEquals('"___\\udf42"', JSON.stringify('___\uDF42'));
+assertEquals('"___\\udf43"', JSON.stringify('___\uDF43'));
+assertEquals('"___\\udf44"', JSON.stringify('___\uDF44'));
+assertEquals('"___\\udf45"', JSON.stringify('___\uDF45'));
+assertEquals('"___\\udf46"', JSON.stringify('___\uDF46'));
+assertEquals('"___\\udf47"', JSON.stringify('___\uDF47'));
+assertEquals('"___\\udf48"', JSON.stringify('___\uDF48'));
+assertEquals('"___\\udf49"', JSON.stringify('___\uDF49'));
+assertEquals('"___\\udf4a"', JSON.stringify('___\uDF4A'));
+assertEquals('"___\\udf4b"', JSON.stringify('___\uDF4B'));
+assertEquals('"___\\udf4c"', JSON.stringify('___\uDF4C'));
+assertEquals('"___\\udf4d"', JSON.stringify('___\uDF4D'));
+assertEquals('"___\\udf4e"', JSON.stringify('___\uDF4E'));
+assertEquals('"___\\udf4f"', JSON.stringify('___\uDF4F'));
+assertEquals('"___\\udf50"', JSON.stringify('___\uDF50'));
+assertEquals('"___\\udf51"', JSON.stringify('___\uDF51'));
+assertEquals('"___\\udf52"', JSON.stringify('___\uDF52'));
+assertEquals('"___\\udf53"', JSON.stringify('___\uDF53'));
+assertEquals('"___\\udf54"', JSON.stringify('___\uDF54'));
+assertEquals('"___\\udf55"', JSON.stringify('___\uDF55'));
+assertEquals('"___\\udf56"', JSON.stringify('___\uDF56'));
+assertEquals('"___\\udf57"', JSON.stringify('___\uDF57'));
+assertEquals('"___\\udf58"', JSON.stringify('___\uDF58'));
+assertEquals('"___\\udf59"', JSON.stringify('___\uDF59'));
+assertEquals('"___\\udf5a"', JSON.stringify('___\uDF5A'));
+assertEquals('"___\\udf5b"', JSON.stringify('___\uDF5B'));
+assertEquals('"___\\udf5c"', JSON.stringify('___\uDF5C'));
+assertEquals('"___\\udf5d"', JSON.stringify('___\uDF5D'));
+assertEquals('"___\\udf5e"', JSON.stringify('___\uDF5E'));
+assertEquals('"___\\udf5f"', JSON.stringify('___\uDF5F'));
+assertEquals('"___\\udf60"', JSON.stringify('___\uDF60'));
+assertEquals('"___\\udf61"', JSON.stringify('___\uDF61'));
+assertEquals('"___\\udf62"', JSON.stringify('___\uDF62'));
+assertEquals('"___\\udf63"', JSON.stringify('___\uDF63'));
+assertEquals('"___\\udf64"', JSON.stringify('___\uDF64'));
+assertEquals('"___\\udf65"', JSON.stringify('___\uDF65'));
+assertEquals('"___\\udf66"', JSON.stringify('___\uDF66'));
+assertEquals('"___\\udf67"', JSON.stringify('___\uDF67'));
+assertEquals('"___\\udf68"', JSON.stringify('___\uDF68'));
+assertEquals('"___\\udf69"', JSON.stringify('___\uDF69'));
+assertEquals('"___\\udf6a"', JSON.stringify('___\uDF6A'));
+assertEquals('"___\\udf6b"', JSON.stringify('___\uDF6B'));
+assertEquals('"___\\udf6c"', JSON.stringify('___\uDF6C'));
+assertEquals('"___\\udf6d"', JSON.stringify('___\uDF6D'));
+assertEquals('"___\\udf6e"', JSON.stringify('___\uDF6E'));
+assertEquals('"___\\udf6f"', JSON.stringify('___\uDF6F'));
+assertEquals('"___\\udf70"', JSON.stringify('___\uDF70'));
+assertEquals('"___\\udf71"', JSON.stringify('___\uDF71'));
+assertEquals('"___\\udf72"', JSON.stringify('___\uDF72'));
+assertEquals('"___\\udf73"', JSON.stringify('___\uDF73'));
+assertEquals('"___\\udf74"', JSON.stringify('___\uDF74'));
+assertEquals('"___\\udf75"', JSON.stringify('___\uDF75'));
+assertEquals('"___\\udf76"', JSON.stringify('___\uDF76'));
+assertEquals('"___\\udf77"', JSON.stringify('___\uDF77'));
+assertEquals('"___\\udf78"', JSON.stringify('___\uDF78'));
+assertEquals('"___\\udf79"', JSON.stringify('___\uDF79'));
+assertEquals('"___\\udf7a"', JSON.stringify('___\uDF7A'));
+assertEquals('"___\\udf7b"', JSON.stringify('___\uDF7B'));
+assertEquals('"___\\udf7c"', JSON.stringify('___\uDF7C'));
+assertEquals('"___\\udf7d"', JSON.stringify('___\uDF7D'));
+assertEquals('"___\\udf7e"', JSON.stringify('___\uDF7E'));
+assertEquals('"___\\udf7f"', JSON.stringify('___\uDF7F'));
+assertEquals('"___\\udf80"', JSON.stringify('___\uDF80'));
+assertEquals('"___\\udf81"', JSON.stringify('___\uDF81'));
+assertEquals('"___\\udf82"', JSON.stringify('___\uDF82'));
+assertEquals('"___\\udf83"', JSON.stringify('___\uDF83'));
+assertEquals('"___\\udf84"', JSON.stringify('___\uDF84'));
+assertEquals('"___\\udf85"', JSON.stringify('___\uDF85'));
+assertEquals('"___\\udf86"', JSON.stringify('___\uDF86'));
+assertEquals('"___\\udf87"', JSON.stringify('___\uDF87'));
+assertEquals('"___\\udf88"', JSON.stringify('___\uDF88'));
+assertEquals('"___\\udf89"', JSON.stringify('___\uDF89'));
+assertEquals('"___\\udf8a"', JSON.stringify('___\uDF8A'));
+assertEquals('"___\\udf8b"', JSON.stringify('___\uDF8B'));
+assertEquals('"___\\udf8c"', JSON.stringify('___\uDF8C'));
+assertEquals('"___\\udf8d"', JSON.stringify('___\uDF8D'));
+assertEquals('"___\\udf8e"', JSON.stringify('___\uDF8E'));
+assertEquals('"___\\udf8f"', JSON.stringify('___\uDF8F'));
+assertEquals('"___\\udf90"', JSON.stringify('___\uDF90'));
+assertEquals('"___\\udf91"', JSON.stringify('___\uDF91'));
+assertEquals('"___\\udf92"', JSON.stringify('___\uDF92'));
+assertEquals('"___\\udf93"', JSON.stringify('___\uDF93'));
+assertEquals('"___\\udf94"', JSON.stringify('___\uDF94'));
+assertEquals('"___\\udf95"', JSON.stringify('___\uDF95'));
+assertEquals('"___\\udf96"', JSON.stringify('___\uDF96'));
+assertEquals('"___\\udf97"', JSON.stringify('___\uDF97'));
+assertEquals('"___\\udf98"', JSON.stringify('___\uDF98'));
+assertEquals('"___\\udf99"', JSON.stringify('___\uDF99'));
+assertEquals('"___\\udf9a"', JSON.stringify('___\uDF9A'));
+assertEquals('"___\\udf9b"', JSON.stringify('___\uDF9B'));
+assertEquals('"___\\udf9c"', JSON.stringify('___\uDF9C'));
+assertEquals('"___\\udf9d"', JSON.stringify('___\uDF9D'));
+assertEquals('"___\\udf9e"', JSON.stringify('___\uDF9E'));
+assertEquals('"___\\udf9f"', JSON.stringify('___\uDF9F'));
+assertEquals('"___\\udfa0"', JSON.stringify('___\uDFA0'));
+assertEquals('"___\\udfa1"', JSON.stringify('___\uDFA1'));
+assertEquals('"___\\udfa2"', JSON.stringify('___\uDFA2'));
+assertEquals('"___\\udfa3"', JSON.stringify('___\uDFA3'));
+assertEquals('"___\\udfa4"', JSON.stringify('___\uDFA4'));
+assertEquals('"___\\udfa5"', JSON.stringify('___\uDFA5'));
+assertEquals('"___\\udfa6"', JSON.stringify('___\uDFA6'));
+assertEquals('"___\\udfa7"', JSON.stringify('___\uDFA7'));
+assertEquals('"___\\udfa8"', JSON.stringify('___\uDFA8'));
+assertEquals('"___\\udfa9"', JSON.stringify('___\uDFA9'));
+assertEquals('"___\\udfaa"', JSON.stringify('___\uDFAA'));
+assertEquals('"___\\udfab"', JSON.stringify('___\uDFAB'));
+assertEquals('"___\\udfac"', JSON.stringify('___\uDFAC'));
+assertEquals('"___\\udfad"', JSON.stringify('___\uDFAD'));
+assertEquals('"___\\udfae"', JSON.stringify('___\uDFAE'));
+assertEquals('"___\\udfaf"', JSON.stringify('___\uDFAF'));
+assertEquals('"___\\udfb0"', JSON.stringify('___\uDFB0'));
+assertEquals('"___\\udfb1"', JSON.stringify('___\uDFB1'));
+assertEquals('"___\\udfb2"', JSON.stringify('___\uDFB2'));
+assertEquals('"___\\udfb3"', JSON.stringify('___\uDFB3'));
+assertEquals('"___\\udfb4"', JSON.stringify('___\uDFB4'));
+assertEquals('"___\\udfb5"', JSON.stringify('___\uDFB5'));
+assertEquals('"___\\udfb6"', JSON.stringify('___\uDFB6'));
+assertEquals('"___\\udfb7"', JSON.stringify('___\uDFB7'));
+assertEquals('"___\\udfb8"', JSON.stringify('___\uDFB8'));
+assertEquals('"___\\udfb9"', JSON.stringify('___\uDFB9'));
+assertEquals('"___\\udfba"', JSON.stringify('___\uDFBA'));
+assertEquals('"___\\udfbb"', JSON.stringify('___\uDFBB'));
+assertEquals('"___\\udfbc"', JSON.stringify('___\uDFBC'));
+assertEquals('"___\\udfbd"', JSON.stringify('___\uDFBD'));
+assertEquals('"___\\udfbe"', JSON.stringify('___\uDFBE'));
+assertEquals('"___\\udfbf"', JSON.stringify('___\uDFBF'));
+assertEquals('"___\\udfc0"', JSON.stringify('___\uDFC0'));
+assertEquals('"___\\udfc1"', JSON.stringify('___\uDFC1'));
+assertEquals('"___\\udfc2"', JSON.stringify('___\uDFC2'));
+assertEquals('"___\\udfc3"', JSON.stringify('___\uDFC3'));
+assertEquals('"___\\udfc4"', JSON.stringify('___\uDFC4'));
+assertEquals('"___\\udfc5"', JSON.stringify('___\uDFC5'));
+assertEquals('"___\\udfc6"', JSON.stringify('___\uDFC6'));
+assertEquals('"___\\udfc7"', JSON.stringify('___\uDFC7'));
+assertEquals('"___\\udfc8"', JSON.stringify('___\uDFC8'));
+assertEquals('"___\\udfc9"', JSON.stringify('___\uDFC9'));
+assertEquals('"___\\udfca"', JSON.stringify('___\uDFCA'));
+assertEquals('"___\\udfcb"', JSON.stringify('___\uDFCB'));
+assertEquals('"___\\udfcc"', JSON.stringify('___\uDFCC'));
+assertEquals('"___\\udfcd"', JSON.stringify('___\uDFCD'));
+assertEquals('"___\\udfce"', JSON.stringify('___\uDFCE'));
+assertEquals('"___\\udfcf"', JSON.stringify('___\uDFCF'));
+assertEquals('"___\\udfd0"', JSON.stringify('___\uDFD0'));
+assertEquals('"___\\udfd1"', JSON.stringify('___\uDFD1'));
+assertEquals('"___\\udfd2"', JSON.stringify('___\uDFD2'));
+assertEquals('"___\\udfd3"', JSON.stringify('___\uDFD3'));
+assertEquals('"___\\udfd4"', JSON.stringify('___\uDFD4'));
+assertEquals('"___\\udfd5"', JSON.stringify('___\uDFD5'));
+assertEquals('"___\\udfd6"', JSON.stringify('___\uDFD6'));
+assertEquals('"___\\udfd7"', JSON.stringify('___\uDFD7'));
+assertEquals('"___\\udfd8"', JSON.stringify('___\uDFD8'));
+assertEquals('"___\\udfd9"', JSON.stringify('___\uDFD9'));
+assertEquals('"___\\udfda"', JSON.stringify('___\uDFDA'));
+assertEquals('"___\\udfdb"', JSON.stringify('___\uDFDB'));
+assertEquals('"___\\udfdc"', JSON.stringify('___\uDFDC'));
+assertEquals('"___\\udfdd"', JSON.stringify('___\uDFDD'));
+assertEquals('"___\\udfde"', JSON.stringify('___\uDFDE'));
+assertEquals('"___\\udfdf"', JSON.stringify('___\uDFDF'));
+assertEquals('"___\\udfe0"', JSON.stringify('___\uDFE0'));
+assertEquals('"___\\udfe1"', JSON.stringify('___\uDFE1'));
+assertEquals('"___\\udfe2"', JSON.stringify('___\uDFE2'));
+assertEquals('"___\\udfe3"', JSON.stringify('___\uDFE3'));
+assertEquals('"___\\udfe4"', JSON.stringify('___\uDFE4'));
+assertEquals('"___\\udfe5"', JSON.stringify('___\uDFE5'));
+assertEquals('"___\\udfe6"', JSON.stringify('___\uDFE6'));
+assertEquals('"___\\udfe7"', JSON.stringify('___\uDFE7'));
+assertEquals('"___\\udfe8"', JSON.stringify('___\uDFE8'));
+assertEquals('"___\\udfe9"', JSON.stringify('___\uDFE9'));
+assertEquals('"___\\udfea"', JSON.stringify('___\uDFEA'));
+assertEquals('"___\\udfeb"', JSON.stringify('___\uDFEB'));
+assertEquals('"___\\udfec"', JSON.stringify('___\uDFEC'));
+assertEquals('"___\\udfed"', JSON.stringify('___\uDFED'));
+assertEquals('"___\\udfee"', JSON.stringify('___\uDFEE'));
+assertEquals('"___\\udfef"', JSON.stringify('___\uDFEF'));
+assertEquals('"___\\udff0"', JSON.stringify('___\uDFF0'));
+assertEquals('"___\\udff1"', JSON.stringify('___\uDFF1'));
+assertEquals('"___\\udff2"', JSON.stringify('___\uDFF2'));
+assertEquals('"___\\udff3"', JSON.stringify('___\uDFF3'));
+assertEquals('"___\\udff4"', JSON.stringify('___\uDFF4'));
+assertEquals('"___\\udff5"', JSON.stringify('___\uDFF5'));
+assertEquals('"___\\udff6"', JSON.stringify('___\uDFF6'));
+assertEquals('"___\\udff7"', JSON.stringify('___\uDFF7'));
+assertEquals('"___\\udff8"', JSON.stringify('___\uDFF8'));
+assertEquals('"___\\udff9"', JSON.stringify('___\uDFF9'));
+assertEquals('"___\\udffa"', JSON.stringify('___\uDFFA'));
+assertEquals('"___\\udffb"', JSON.stringify('___\uDFFB'));
+assertEquals('"___\\udffc"', JSON.stringify('___\uDFFC'));
+assertEquals('"___\\udffd"', JSON.stringify('___\uDFFD'));
+assertEquals('"___\\udffe"', JSON.stringify('___\uDFFE'));
+assertEquals('"___\\udfff"', JSON.stringify('___\uDFFF'));
+
+// A random selection of code points from U+E000 to U+FFFF.
+assertEquals('"___\uE000"', JSON.stringify('___\uE000'));
+assertEquals('"___\uE00B"', JSON.stringify('___\uE00B'));
+assertEquals('"___\uE0CC"', JSON.stringify('___\uE0CC'));
+assertEquals('"___\uE0FD"', JSON.stringify('___\uE0FD'));
+assertEquals('"___\uE19E"', JSON.stringify('___\uE19E'));
+assertEquals('"___\uE1B1"', JSON.stringify('___\uE1B1'));
+assertEquals('"___\uE24F"', JSON.stringify('___\uE24F'));
+assertEquals('"___\uE262"', JSON.stringify('___\uE262'));
+assertEquals('"___\uE2C9"', JSON.stringify('___\uE2C9'));
+assertEquals('"___\uE2DF"', JSON.stringify('___\uE2DF'));
+assertEquals('"___\uE389"', JSON.stringify('___\uE389'));
+assertEquals('"___\uE413"', JSON.stringify('___\uE413'));
+assertEquals('"___\uE546"', JSON.stringify('___\uE546'));
+assertEquals('"___\uE5E4"', JSON.stringify('___\uE5E4'));
+assertEquals('"___\uE66B"', JSON.stringify('___\uE66B'));
+assertEquals('"___\uE73D"', JSON.stringify('___\uE73D'));
+assertEquals('"___\uE74F"', JSON.stringify('___\uE74F'));
+assertEquals('"___\uE759"', JSON.stringify('___\uE759'));
+assertEquals('"___\uE795"', JSON.stringify('___\uE795'));
+assertEquals('"___\uE836"', JSON.stringify('___\uE836'));
+assertEquals('"___\uE85D"', JSON.stringify('___\uE85D'));
+assertEquals('"___\uE909"', JSON.stringify('___\uE909'));
+assertEquals('"___\uE990"', JSON.stringify('___\uE990'));
+assertEquals('"___\uE99F"', JSON.stringify('___\uE99F'));
+assertEquals('"___\uE9AC"', JSON.stringify('___\uE9AC'));
+assertEquals('"___\uE9C2"', JSON.stringify('___\uE9C2'));
+assertEquals('"___\uEB11"', JSON.stringify('___\uEB11'));
+assertEquals('"___\uED33"', JSON.stringify('___\uED33'));
+assertEquals('"___\uED7D"', JSON.stringify('___\uED7D'));
+assertEquals('"___\uEDA9"', JSON.stringify('___\uEDA9'));
+assertEquals('"___\uEDFB"', JSON.stringify('___\uEDFB'));
+assertEquals('"___\uEE09"', JSON.stringify('___\uEE09'));
+assertEquals('"___\uEE0D"', JSON.stringify('___\uEE0D'));
+assertEquals('"___\uEE34"', JSON.stringify('___\uEE34'));
+assertEquals('"___\uEE37"', JSON.stringify('___\uEE37'));
+assertEquals('"___\uEE38"', JSON.stringify('___\uEE38'));
+assertEquals('"___\uEF80"', JSON.stringify('___\uEF80'));
+assertEquals('"___\uEFE2"', JSON.stringify('___\uEFE2'));
+assertEquals('"___\uF02C"', JSON.stringify('___\uF02C'));
+assertEquals('"___\uF09A"', JSON.stringify('___\uF09A'));
+assertEquals('"___\uF0C1"', JSON.stringify('___\uF0C1'));
+assertEquals('"___\uF12C"', JSON.stringify('___\uF12C'));
+assertEquals('"___\uF250"', JSON.stringify('___\uF250'));
+assertEquals('"___\uF2A3"', JSON.stringify('___\uF2A3'));
+assertEquals('"___\uF340"', JSON.stringify('___\uF340'));
+assertEquals('"___\uF3C9"', JSON.stringify('___\uF3C9'));
+assertEquals('"___\uF3F5"', JSON.stringify('___\uF3F5'));
+assertEquals('"___\uF41B"', JSON.stringify('___\uF41B'));
+assertEquals('"___\uF420"', JSON.stringify('___\uF420'));
+assertEquals('"___\uF440"', JSON.stringify('___\uF440'));
+assertEquals('"___\uF4AE"', JSON.stringify('___\uF4AE'));
+assertEquals('"___\uF4B0"', JSON.stringify('___\uF4B0'));
+assertEquals('"___\uF50D"', JSON.stringify('___\uF50D'));
+assertEquals('"___\uF55D"', JSON.stringify('___\uF55D'));
+assertEquals('"___\uF55E"', JSON.stringify('___\uF55E'));
+assertEquals('"___\uF5CD"', JSON.stringify('___\uF5CD'));
+assertEquals('"___\uF657"', JSON.stringify('___\uF657'));
+assertEquals('"___\uF66D"', JSON.stringify('___\uF66D'));
+assertEquals('"___\uF68F"', JSON.stringify('___\uF68F'));
+assertEquals('"___\uF6A6"', JSON.stringify('___\uF6A6'));
+assertEquals('"___\uF6AA"', JSON.stringify('___\uF6AA'));
+assertEquals('"___\uF6EB"', JSON.stringify('___\uF6EB'));
+assertEquals('"___\uF79A"', JSON.stringify('___\uF79A'));
+assertEquals('"___\uF7E7"', JSON.stringify('___\uF7E7'));
+assertEquals('"___\uF7E8"', JSON.stringify('___\uF7E8'));
+assertEquals('"___\uF834"', JSON.stringify('___\uF834'));
+assertEquals('"___\uF88B"', JSON.stringify('___\uF88B'));
+assertEquals('"___\uF8D5"', JSON.stringify('___\uF8D5'));
+assertEquals('"___\uF8F1"', JSON.stringify('___\uF8F1'));
+assertEquals('"___\uF905"', JSON.stringify('___\uF905'));
+assertEquals('"___\uF927"', JSON.stringify('___\uF927'));
+assertEquals('"___\uF943"', JSON.stringify('___\uF943'));
+assertEquals('"___\uF949"', JSON.stringify('___\uF949'));
+assertEquals('"___\uF9A1"', JSON.stringify('___\uF9A1'));
+assertEquals('"___\uF9C7"', JSON.stringify('___\uF9C7'));
+assertEquals('"___\uFA0F"', JSON.stringify('___\uFA0F'));
+assertEquals('"___\uFA20"', JSON.stringify('___\uFA20'));
+assertEquals('"___\uFAA7"', JSON.stringify('___\uFAA7'));
+assertEquals('"___\uFBCD"', JSON.stringify('___\uFBCD'));
+assertEquals('"___\uFBF7"', JSON.stringify('___\uFBF7'));
+assertEquals('"___\uFC40"', JSON.stringify('___\uFC40'));
+assertEquals('"___\uFC4B"', JSON.stringify('___\uFC4B'));
+assertEquals('"___\uFC51"', JSON.stringify('___\uFC51'));
+assertEquals('"___\uFC5E"', JSON.stringify('___\uFC5E'));
+assertEquals('"___\uFC67"', JSON.stringify('___\uFC67'));
+assertEquals('"___\uFC8B"', JSON.stringify('___\uFC8B'));
+assertEquals('"___\uFE32"', JSON.stringify('___\uFE32'));
+assertEquals('"___\uFFC4"', JSON.stringify('___\uFFC4'));
+assertEquals('"___\uFFFD"', JSON.stringify('___\uFFFD'));
+assertEquals('"___\uFFFE"', JSON.stringify('___\uFFFE'));
+assertEquals('"___\uFFFF"', JSON.stringify('___\uFFFF'));
+
+// A random selection of astral symbols, i.e. surrogate pairs encoding
+// code points from U+010000 to U+10FFFF.
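+// Well-formed surrogate pairs are not escaped.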
+assertEquals('"___\u{10000}"', JSON.stringify('___\u{10000}'));
+assertEquals('"___\u{11DE7}"', JSON.stringify('___\u{11DE7}'));
+assertEquals('"___\u{15997}"', JSON.stringify('___\u{15997}'));
+assertEquals('"___\u{187B0}"', JSON.stringify('___\u{187B0}'));
+assertEquals('"___\u{190B2}"', JSON.stringify('___\u{190B2}'));
+assertEquals('"___\u{1BF79}"', JSON.stringify('___\u{1BF79}'));
+assertEquals('"___\u{1C624}"', JSON.stringify('___\u{1C624}'));
+assertEquals('"___\u{1D9F4}"', JSON.stringify('___\u{1D9F4}'));
+assertEquals('"___\u{24149}"', JSON.stringify('___\u{24149}'));
+assertEquals('"___\u{2521C}"', JSON.stringify('___\u{2521C}'));
+assertEquals('"___\u{2762D}"', JSON.stringify('___\u{2762D}'));
+assertEquals('"___\u{2930B}"', JSON.stringify('___\u{2930B}'));
+assertEquals('"___\u{29EC4}"', JSON.stringify('___\u{29EC4}'));
+assertEquals('"___\u{29F9A}"', JSON.stringify('___\u{29F9A}'));
+assertEquals('"___\u{2A27D}"', JSON.stringify('___\u{2A27D}'));
+assertEquals('"___\u{2B363}"', JSON.stringify('___\u{2B363}'));
+assertEquals('"___\u{2C037}"', JSON.stringify('___\u{2C037}'));
+assertEquals('"___\u{2FAE0}"', JSON.stringify('___\u{2FAE0}'));
+assertEquals('"___\u{2FFCF}"', JSON.stringify('___\u{2FFCF}'));
+assertEquals('"___\u{32C1C}"', JSON.stringify('___\u{32C1C}'));
+assertEquals('"___\u{33DA8}"', JSON.stringify('___\u{33DA8}'));
+assertEquals('"___\u{3DCA4}"', JSON.stringify('___\u{3DCA4}'));
+assertEquals('"___\u{44FA0}"', JSON.stringify('___\u{44FA0}'));
+assertEquals('"___\u{45618}"', JSON.stringify('___\u{45618}'));
+assertEquals('"___\u{47395}"', JSON.stringify('___\u{47395}'));
+assertEquals('"___\u{4752C}"', JSON.stringify('___\u{4752C}'));
+assertEquals('"___\u{483FE}"', JSON.stringify('___\u{483FE}'));
+assertEquals('"___\u{49D35}"', JSON.stringify('___\u{49D35}'));
+assertEquals('"___\u{4CE3B}"', JSON.stringify('___\u{4CE3B}'));
+assertEquals('"___\u{55196}"', JSON.stringify('___\u{55196}'));
+assertEquals('"___\u{58B3E}"', JSON.stringify('___\u{58B3E}'));
+assertEquals('"___\u{5AA47}"', JSON.stringify('___\u{5AA47}'));
+assertEquals('"___\u{5C4B8}"', JSON.stringify('___\u{5C4B8}'));
+assertEquals('"___\u{5DD1B}"', JSON.stringify('___\u{5DD1B}'));
+assertEquals('"___\u{5FDCB}"', JSON.stringify('___\u{5FDCB}'));
+assertEquals('"___\u{611BA}"', JSON.stringify('___\u{611BA}'));
+assertEquals('"___\u{66433}"', JSON.stringify('___\u{66433}'));
+assertEquals('"___\u{690D7}"', JSON.stringify('___\u{690D7}'));
+assertEquals('"___\u{6F617}"', JSON.stringify('___\u{6F617}'));
+assertEquals('"___\u{711E4}"', JSON.stringify('___\u{711E4}'));
+assertEquals('"___\u{758D2}"', JSON.stringify('___\u{758D2}'));
+assertEquals('"___\u{780AC}"', JSON.stringify('___\u{780AC}'));
+assertEquals('"___\u{7AE5F}"', JSON.stringify('___\u{7AE5F}'));
+assertEquals('"___\u{7C2FB}"', JSON.stringify('___\u{7C2FB}'));
+assertEquals('"___\u{7D25F}"', JSON.stringify('___\u{7D25F}'));
+assertEquals('"___\u{8027A}"', JSON.stringify('___\u{8027A}'));
+assertEquals('"___\u{84817}"', JSON.stringify('___\u{84817}'));
+assertEquals('"___\u{8B070}"', JSON.stringify('___\u{8B070}'));
+assertEquals('"___\u{8B390}"', JSON.stringify('___\u{8B390}'));
+assertEquals('"___\u{8BC03}"', JSON.stringify('___\u{8BC03}'));
+assertEquals('"___\u{8BE63}"', JSON.stringify('___\u{8BE63}'));
+assertEquals('"___\u{8F12A}"', JSON.stringify('___\u{8F12A}'));
+assertEquals('"___\u{9345D}"', JSON.stringify('___\u{9345D}'));
+assertEquals('"___\u{937A9}"', JSON.stringify('___\u{937A9}'));
+assertEquals('"___\u{94596}"', JSON.stringify('___\u{94596}'));
+assertEquals('"___\u{967BB}"', JSON.stringify('___\u{967BB}'));
+assertEquals('"___\u{A19D1}"', JSON.stringify('___\u{A19D1}'));
+assertEquals('"___\u{A4FC5}"', JSON.stringify('___\u{A4FC5}'));
+assertEquals('"___\u{AC9CF}"', JSON.stringify('___\u{AC9CF}'));
+assertEquals('"___\u{B1366}"', JSON.stringify('___\u{B1366}'));
+assertEquals('"___\u{B3D32}"', JSON.stringify('___\u{B3D32}'));
+assertEquals('"___\u{B74BA}"', JSON.stringify('___\u{B74BA}'));
+assertEquals('"___\u{B8FB0}"', JSON.stringify('___\u{B8FB0}'));
+assertEquals('"___\u{BA0A5}"', JSON.stringify('___\u{BA0A5}'));
+assertEquals('"___\u{BB48E}"', JSON.stringify('___\u{BB48E}'));
+assertEquals('"___\u{C0B60}"', JSON.stringify('___\u{C0B60}'));
+assertEquals('"___\u{C2D34}"', JSON.stringify('___\u{C2D34}'));
+assertEquals('"___\u{C6C75}"', JSON.stringify('___\u{C6C75}'));
+assertEquals('"___\u{C9F26}"', JSON.stringify('___\u{C9F26}'));
+assertEquals('"___\u{CDBD0}"', JSON.stringify('___\u{CDBD0}'));
+assertEquals('"___\u{D1E28}"', JSON.stringify('___\u{D1E28}'));
+assertEquals('"___\u{D4A80}"', JSON.stringify('___\u{D4A80}'));
+assertEquals('"___\u{D947F}"', JSON.stringify('___\u{D947F}'));
+assertEquals('"___\u{D9B8A}"', JSON.stringify('___\u{D9B8A}'));
+assertEquals('"___\u{DA203}"', JSON.stringify('___\u{DA203}'));
+assertEquals('"___\u{DEFD3}"', JSON.stringify('___\u{DEFD3}'));
+assertEquals('"___\u{E4F7C}"', JSON.stringify('___\u{E4F7C}'));
+assertEquals('"___\u{E6BB3}"', JSON.stringify('___\u{E6BB3}'));
+assertEquals('"___\u{E972D}"', JSON.stringify('___\u{E972D}'));
+assertEquals('"___\u{EB335}"', JSON.stringify('___\u{EB335}'));
+assertEquals('"___\u{ED3F8}"', JSON.stringify('___\u{ED3F8}'));
+assertEquals('"___\u{ED940}"', JSON.stringify('___\u{ED940}'));
+assertEquals('"___\u{EF6F8}"', JSON.stringify('___\u{EF6F8}'));
+assertEquals('"___\u{F1F57}"', JSON.stringify('___\u{F1F57}'));
+assertEquals('"___\u{F33B5}"', JSON.stringify('___\u{F33B5}'));
+assertEquals('"___\u{F4D2A}"', JSON.stringify('___\u{F4D2A}'));
+assertEquals('"___\u{F70BA}"', JSON.stringify('___\u{F70BA}'));
+assertEquals('"___\u{F899F}"', JSON.stringify('___\u{F899F}'));
+assertEquals('"___\u{1034BF}"', JSON.stringify('___\u{1034BF}'));
+assertEquals('"___\u{107ACF}"', JSON.stringify('___\u{107ACF}'));
+assertEquals('"___\u{10881F}"', JSON.stringify('___\u{10881F}'));
+assertEquals('"___\u{1098A5}"', JSON.stringify('___\u{1098A5}'));
+assertEquals('"___\u{10ABD1}"', JSON.stringify('___\u{10ABD1}'));
+assertEquals('"___\u{10B5C5}"', JSON.stringify('___\u{10B5C5}'));
+assertEquals('"___\u{10CC79}"', JSON.stringify('___\u{10CC79}'));
+assertEquals('"___\u{10CD19}"', JSON.stringify('___\u{10CD19}'));
+assertEquals('"___\u{10FFFF}"', JSON.stringify('___\u{10FFFF}'));
diff --git a/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-unchecked.js b/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-unchecked.js
new file mode 100644
index 0000000000..260d748ece
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-unchecked.js
@@ -0,0 +1,2575 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-json-stringify
+
+// Test JSON.stringify for cases that hit
+// JsonStringifier::SerializeStringUnchecked_.
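+// (Judging by its name, SerializeStringUnchecked_ is the serializer's fast
+// path; exactly which inputs reach it is a V8 implementation detail, hence
+// the broad code-point coverage below.)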
+
+// All code points from U+0000 to U+00FF.
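+// (Per JSON.stringify's QuoteJSONString: U+0000..U+001F are escaped, using
+// the short forms \b \t \n \f \r where defined and lowercase \u00xx
+// otherwise; '"' and '\' are backslash-escaped; every other code point in
+// this range is emitted verbatim.)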
+assertEquals('"\\u0000"', JSON.stringify('\0'));
+assertEquals('"\\u0001"', JSON.stringify('\x01'));
+assertEquals('"\\u0002"', JSON.stringify('\x02'));
+assertEquals('"\\u0003"', JSON.stringify('\x03'));
+assertEquals('"\\u0004"', JSON.stringify('\x04'));
+assertEquals('"\\u0005"', JSON.stringify('\x05'));
+assertEquals('"\\u0006"', JSON.stringify('\x06'));
+assertEquals('"\\u0007"', JSON.stringify('\x07'));
+assertEquals('"\\b"', JSON.stringify('\b'));
+assertEquals('"\\t"', JSON.stringify('\t'));
+assertEquals('"\\n"', JSON.stringify('\n'));
+assertEquals('"\\u000b"', JSON.stringify('\x0B'));
+assertEquals('"\\f"', JSON.stringify('\f'));
+assertEquals('"\\r"', JSON.stringify('\r'));
+assertEquals('"\\u000e"', JSON.stringify('\x0E'));
+assertEquals('"\\u000f"', JSON.stringify('\x0F'));
+assertEquals('"\\u0010"', JSON.stringify('\x10'));
+assertEquals('"\\u0011"', JSON.stringify('\x11'));
+assertEquals('"\\u0012"', JSON.stringify('\x12'));
+assertEquals('"\\u0013"', JSON.stringify('\x13'));
+assertEquals('"\\u0014"', JSON.stringify('\x14'));
+assertEquals('"\\u0015"', JSON.stringify('\x15'));
+assertEquals('"\\u0016"', JSON.stringify('\x16'));
+assertEquals('"\\u0017"', JSON.stringify('\x17'));
+assertEquals('"\\u0018"', JSON.stringify('\x18'));
+assertEquals('"\\u0019"', JSON.stringify('\x19'));
+assertEquals('"\\u001a"', JSON.stringify('\x1A'));
+assertEquals('"\\u001b"', JSON.stringify('\x1B'));
+assertEquals('"\\u001c"', JSON.stringify('\x1C'));
+assertEquals('"\\u001d"', JSON.stringify('\x1D'));
+assertEquals('"\\u001e"', JSON.stringify('\x1E'));
+assertEquals('"\\u001f"', JSON.stringify('\x1F'));
+assertEquals('" "', JSON.stringify(' '));
+assertEquals('"!"', JSON.stringify('!'));
+assertEquals('"\\""', JSON.stringify('"'));
+assertEquals('"#"', JSON.stringify('#'));
+assertEquals('"$"', JSON.stringify('$'));
+assertEquals('"%"', JSON.stringify('%'));
+assertEquals('"&"', JSON.stringify('&'));
+assertEquals('"\'"', JSON.stringify('\''));
+assertEquals('"("', JSON.stringify('('));
+assertEquals('")"', JSON.stringify(')'));
+assertEquals('"*"', JSON.stringify('*'));
+assertEquals('"+"', JSON.stringify('+'));
+assertEquals('","', JSON.stringify(','));
+assertEquals('"-"', JSON.stringify('-'));
+assertEquals('"."', JSON.stringify('.'));
+assertEquals('"/"', JSON.stringify('/'));
+assertEquals('"0"', JSON.stringify('0'));
+assertEquals('"1"', JSON.stringify('1'));
+assertEquals('"2"', JSON.stringify('2'));
+assertEquals('"3"', JSON.stringify('3'));
+assertEquals('"4"', JSON.stringify('4'));
+assertEquals('"5"', JSON.stringify('5'));
+assertEquals('"6"', JSON.stringify('6'));
+assertEquals('"7"', JSON.stringify('7'));
+assertEquals('"8"', JSON.stringify('8'));
+assertEquals('"9"', JSON.stringify('9'));
+assertEquals('":"', JSON.stringify(':'));
+assertEquals('";"', JSON.stringify(';'));
+assertEquals('"<"', JSON.stringify('<'));
+assertEquals('"="', JSON.stringify('='));
+assertEquals('">"', JSON.stringify('>'));
+assertEquals('"?"', JSON.stringify('?'));
+assertEquals('"@"', JSON.stringify('@'));
+assertEquals('"A"', JSON.stringify('A'));
+assertEquals('"B"', JSON.stringify('B'));
+assertEquals('"C"', JSON.stringify('C'));
+assertEquals('"D"', JSON.stringify('D'));
+assertEquals('"E"', JSON.stringify('E'));
+assertEquals('"F"', JSON.stringify('F'));
+assertEquals('"G"', JSON.stringify('G'));
+assertEquals('"H"', JSON.stringify('H'));
+assertEquals('"I"', JSON.stringify('I'));
+assertEquals('"J"', JSON.stringify('J'));
+assertEquals('"K"', JSON.stringify('K'));
+assertEquals('"L"', JSON.stringify('L'));
+assertEquals('"M"', JSON.stringify('M'));
+assertEquals('"N"', JSON.stringify('N'));
+assertEquals('"O"', JSON.stringify('O'));
+assertEquals('"P"', JSON.stringify('P'));
+assertEquals('"Q"', JSON.stringify('Q'));
+assertEquals('"R"', JSON.stringify('R'));
+assertEquals('"S"', JSON.stringify('S'));
+assertEquals('"T"', JSON.stringify('T'));
+assertEquals('"U"', JSON.stringify('U'));
+assertEquals('"V"', JSON.stringify('V'));
+assertEquals('"W"', JSON.stringify('W'));
+assertEquals('"X"', JSON.stringify('X'));
+assertEquals('"Y"', JSON.stringify('Y'));
+assertEquals('"Z"', JSON.stringify('Z'));
+assertEquals('"["', JSON.stringify('['));
+assertEquals('"\\\\"', JSON.stringify('\\'));
+assertEquals('"]"', JSON.stringify(']'));
+assertEquals('"^"', JSON.stringify('^'));
+assertEquals('"_"', JSON.stringify('_'));
+assertEquals('"`"', JSON.stringify('`'));
+assertEquals('"a"', JSON.stringify('a'));
+assertEquals('"b"', JSON.stringify('b'));
+assertEquals('"c"', JSON.stringify('c'));
+assertEquals('"d"', JSON.stringify('d'));
+assertEquals('"e"', JSON.stringify('e'));
+assertEquals('"f"', JSON.stringify('f'));
+assertEquals('"g"', JSON.stringify('g'));
+assertEquals('"h"', JSON.stringify('h'));
+assertEquals('"i"', JSON.stringify('i'));
+assertEquals('"j"', JSON.stringify('j'));
+assertEquals('"k"', JSON.stringify('k'));
+assertEquals('"l"', JSON.stringify('l'));
+assertEquals('"m"', JSON.stringify('m'));
+assertEquals('"n"', JSON.stringify('n'));
+assertEquals('"o"', JSON.stringify('o'));
+assertEquals('"p"', JSON.stringify('p'));
+assertEquals('"q"', JSON.stringify('q'));
+assertEquals('"r"', JSON.stringify('r'));
+assertEquals('"s"', JSON.stringify('s'));
+assertEquals('"t"', JSON.stringify('t'));
+assertEquals('"u"', JSON.stringify('u'));
+assertEquals('"v"', JSON.stringify('v'));
+assertEquals('"w"', JSON.stringify('w'));
+assertEquals('"x"', JSON.stringify('x'));
+assertEquals('"y"', JSON.stringify('y'));
+assertEquals('"z"', JSON.stringify('z'));
+assertEquals('"{"', JSON.stringify('{'));
+assertEquals('"|"', JSON.stringify('|'));
+assertEquals('"}"', JSON.stringify('}'));
+assertEquals('"~"', JSON.stringify('~'));
+assertEquals('"\x7F"', JSON.stringify('\x7F'));
+assertEquals('"\x80"', JSON.stringify('\x80'));
+assertEquals('"\x81"', JSON.stringify('\x81'));
+assertEquals('"\x82"', JSON.stringify('\x82'));
+assertEquals('"\x83"', JSON.stringify('\x83'));
+assertEquals('"\x84"', JSON.stringify('\x84'));
+assertEquals('"\x85"', JSON.stringify('\x85'));
+assertEquals('"\x86"', JSON.stringify('\x86'));
+assertEquals('"\x87"', JSON.stringify('\x87'));
+assertEquals('"\x88"', JSON.stringify('\x88'));
+assertEquals('"\x89"', JSON.stringify('\x89'));
+assertEquals('"\x8A"', JSON.stringify('\x8A'));
+assertEquals('"\x8B"', JSON.stringify('\x8B'));
+assertEquals('"\x8C"', JSON.stringify('\x8C'));
+assertEquals('"\x8D"', JSON.stringify('\x8D'));
+assertEquals('"\x8E"', JSON.stringify('\x8E'));
+assertEquals('"\x8F"', JSON.stringify('\x8F'));
+assertEquals('"\x90"', JSON.stringify('\x90'));
+assertEquals('"\x91"', JSON.stringify('\x91'));
+assertEquals('"\x92"', JSON.stringify('\x92'));
+assertEquals('"\x93"', JSON.stringify('\x93'));
+assertEquals('"\x94"', JSON.stringify('\x94'));
+assertEquals('"\x95"', JSON.stringify('\x95'));
+assertEquals('"\x96"', JSON.stringify('\x96'));
+assertEquals('"\x97"', JSON.stringify('\x97'));
+assertEquals('"\x98"', JSON.stringify('\x98'));
+assertEquals('"\x99"', JSON.stringify('\x99'));
+assertEquals('"\x9A"', JSON.stringify('\x9A'));
+assertEquals('"\x9B"', JSON.stringify('\x9B'));
+assertEquals('"\x9C"', JSON.stringify('\x9C'));
+assertEquals('"\x9D"', JSON.stringify('\x9D'));
+assertEquals('"\x9E"', JSON.stringify('\x9E'));
+assertEquals('"\x9F"', JSON.stringify('\x9F'));
+assertEquals('"\xA0"', JSON.stringify('\xA0'));
+assertEquals('"\xA1"', JSON.stringify('\xA1'));
+assertEquals('"\xA2"', JSON.stringify('\xA2'));
+assertEquals('"\xA3"', JSON.stringify('\xA3'));
+assertEquals('"\xA4"', JSON.stringify('\xA4'));
+assertEquals('"\xA5"', JSON.stringify('\xA5'));
+assertEquals('"\xA6"', JSON.stringify('\xA6'));
+assertEquals('"\xA7"', JSON.stringify('\xA7'));
+assertEquals('"\xA8"', JSON.stringify('\xA8'));
+assertEquals('"\xA9"', JSON.stringify('\xA9'));
+assertEquals('"\xAA"', JSON.stringify('\xAA'));
+assertEquals('"\xAB"', JSON.stringify('\xAB'));
+assertEquals('"\xAC"', JSON.stringify('\xAC'));
+assertEquals('"\xAD"', JSON.stringify('\xAD'));
+assertEquals('"\xAE"', JSON.stringify('\xAE'));
+assertEquals('"\xAF"', JSON.stringify('\xAF'));
+assertEquals('"\xB0"', JSON.stringify('\xB0'));
+assertEquals('"\xB1"', JSON.stringify('\xB1'));
+assertEquals('"\xB2"', JSON.stringify('\xB2'));
+assertEquals('"\xB3"', JSON.stringify('\xB3'));
+assertEquals('"\xB4"', JSON.stringify('\xB4'));
+assertEquals('"\xB5"', JSON.stringify('\xB5'));
+assertEquals('"\xB6"', JSON.stringify('\xB6'));
+assertEquals('"\xB7"', JSON.stringify('\xB7'));
+assertEquals('"\xB8"', JSON.stringify('\xB8'));
+assertEquals('"\xB9"', JSON.stringify('\xB9'));
+assertEquals('"\xBA"', JSON.stringify('\xBA'));
+assertEquals('"\xBB"', JSON.stringify('\xBB'));
+assertEquals('"\xBC"', JSON.stringify('\xBC'));
+assertEquals('"\xBD"', JSON.stringify('\xBD'));
+assertEquals('"\xBE"', JSON.stringify('\xBE'));
+assertEquals('"\xBF"', JSON.stringify('\xBF'));
+assertEquals('"\xC0"', JSON.stringify('\xC0'));
+assertEquals('"\xC1"', JSON.stringify('\xC1'));
+assertEquals('"\xC2"', JSON.stringify('\xC2'));
+assertEquals('"\xC3"', JSON.stringify('\xC3'));
+assertEquals('"\xC4"', JSON.stringify('\xC4'));
+assertEquals('"\xC5"', JSON.stringify('\xC5'));
+assertEquals('"\xC6"', JSON.stringify('\xC6'));
+assertEquals('"\xC7"', JSON.stringify('\xC7'));
+assertEquals('"\xC8"', JSON.stringify('\xC8'));
+assertEquals('"\xC9"', JSON.stringify('\xC9'));
+assertEquals('"\xCA"', JSON.stringify('\xCA'));
+assertEquals('"\xCB"', JSON.stringify('\xCB'));
+assertEquals('"\xCC"', JSON.stringify('\xCC'));
+assertEquals('"\xCD"', JSON.stringify('\xCD'));
+assertEquals('"\xCE"', JSON.stringify('\xCE'));
+assertEquals('"\xCF"', JSON.stringify('\xCF'));
+assertEquals('"\xD0"', JSON.stringify('\xD0'));
+assertEquals('"\xD1"', JSON.stringify('\xD1'));
+assertEquals('"\xD2"', JSON.stringify('\xD2'));
+assertEquals('"\xD3"', JSON.stringify('\xD3'));
+assertEquals('"\xD4"', JSON.stringify('\xD4'));
+assertEquals('"\xD5"', JSON.stringify('\xD5'));
+assertEquals('"\xD6"', JSON.stringify('\xD6'));
+assertEquals('"\xD7"', JSON.stringify('\xD7'));
+assertEquals('"\xD8"', JSON.stringify('\xD8'));
+assertEquals('"\xD9"', JSON.stringify('\xD9'));
+assertEquals('"\xDA"', JSON.stringify('\xDA'));
+assertEquals('"\xDB"', JSON.stringify('\xDB'));
+assertEquals('"\xDC"', JSON.stringify('\xDC'));
+assertEquals('"\xDD"', JSON.stringify('\xDD'));
+assertEquals('"\xDE"', JSON.stringify('\xDE'));
+assertEquals('"\xDF"', JSON.stringify('\xDF'));
+assertEquals('"\xE0"', JSON.stringify('\xE0'));
+assertEquals('"\xE1"', JSON.stringify('\xE1'));
+assertEquals('"\xE2"', JSON.stringify('\xE2'));
+assertEquals('"\xE3"', JSON.stringify('\xE3'));
+assertEquals('"\xE4"', JSON.stringify('\xE4'));
+assertEquals('"\xE5"', JSON.stringify('\xE5'));
+assertEquals('"\xE6"', JSON.stringify('\xE6'));
+assertEquals('"\xE7"', JSON.stringify('\xE7'));
+assertEquals('"\xE8"', JSON.stringify('\xE8'));
+assertEquals('"\xE9"', JSON.stringify('\xE9'));
+assertEquals('"\xEA"', JSON.stringify('\xEA'));
+assertEquals('"\xEB"', JSON.stringify('\xEB'));
+assertEquals('"\xEC"', JSON.stringify('\xEC'));
+assertEquals('"\xED"', JSON.stringify('\xED'));
+assertEquals('"\xEE"', JSON.stringify('\xEE'));
+assertEquals('"\xEF"', JSON.stringify('\xEF'));
+assertEquals('"\xF0"', JSON.stringify('\xF0'));
+assertEquals('"\xF1"', JSON.stringify('\xF1'));
+assertEquals('"\xF2"', JSON.stringify('\xF2'));
+assertEquals('"\xF3"', JSON.stringify('\xF3'));
+assertEquals('"\xF4"', JSON.stringify('\xF4'));
+assertEquals('"\xF5"', JSON.stringify('\xF5'));
+assertEquals('"\xF6"', JSON.stringify('\xF6'));
+assertEquals('"\xF7"', JSON.stringify('\xF7'));
+assertEquals('"\xF8"', JSON.stringify('\xF8'));
+assertEquals('"\xF9"', JSON.stringify('\xF9'));
+assertEquals('"\xFA"', JSON.stringify('\xFA'));
+assertEquals('"\xFB"', JSON.stringify('\xFB'));
+assertEquals('"\xFC"', JSON.stringify('\xFC'));
+assertEquals('"\xFD"', JSON.stringify('\xFD'));
+assertEquals('"\xFE"', JSON.stringify('\xFE'));
+assertEquals('"\xFF"', JSON.stringify('\xFF'));
+
+// A random selection of code points from U+0100 to U+D7FF.
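+// (None of these require escaping: they are neither control characters,
+// quote, backslash, nor surrogates, so they pass through unescaped.)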
+assertEquals('"\u0100"', JSON.stringify('\u0100'));
+assertEquals('"\u0120"', JSON.stringify('\u0120'));
+assertEquals('"\u07D3"', JSON.stringify('\u07D3'));
+assertEquals('"\u0B8B"', JSON.stringify('\u0B8B'));
+assertEquals('"\u0C4C"', JSON.stringify('\u0C4C'));
+assertEquals('"\u178D"', JSON.stringify('\u178D'));
+assertEquals('"\u18B8"', JSON.stringify('\u18B8'));
+assertEquals('"\u193E"', JSON.stringify('\u193E'));
+assertEquals('"\u198A"', JSON.stringify('\u198A'));
+assertEquals('"\u1AF5"', JSON.stringify('\u1AF5'));
+assertEquals('"\u1D38"', JSON.stringify('\u1D38'));
+assertEquals('"\u1E37"', JSON.stringify('\u1E37'));
+assertEquals('"\u1FC2"', JSON.stringify('\u1FC2'));
+assertEquals('"\u22C7"', JSON.stringify('\u22C7'));
+assertEquals('"\u2619"', JSON.stringify('\u2619'));
+assertEquals('"\u272A"', JSON.stringify('\u272A'));
+assertEquals('"\u2B7F"', JSON.stringify('\u2B7F'));
+assertEquals('"\u2DFF"', JSON.stringify('\u2DFF'));
+assertEquals('"\u341B"', JSON.stringify('\u341B'));
+assertEquals('"\u3A3C"', JSON.stringify('\u3A3C'));
+assertEquals('"\u3E53"', JSON.stringify('\u3E53'));
+assertEquals('"\u3EC2"', JSON.stringify('\u3EC2'));
+assertEquals('"\u3F76"', JSON.stringify('\u3F76'));
+assertEquals('"\u3F85"', JSON.stringify('\u3F85'));
+assertEquals('"\u43C7"', JSON.stringify('\u43C7'));
+assertEquals('"\u4A19"', JSON.stringify('\u4A19'));
+assertEquals('"\u4A1C"', JSON.stringify('\u4A1C'));
+assertEquals('"\u4F80"', JSON.stringify('\u4F80'));
+assertEquals('"\u5A30"', JSON.stringify('\u5A30'));
+assertEquals('"\u5B55"', JSON.stringify('\u5B55'));
+assertEquals('"\u5C74"', JSON.stringify('\u5C74'));
+assertEquals('"\u6006"', JSON.stringify('\u6006'));
+assertEquals('"\u63CC"', JSON.stringify('\u63CC'));
+assertEquals('"\u6608"', JSON.stringify('\u6608'));
+assertEquals('"\u6ABF"', JSON.stringify('\u6ABF'));
+assertEquals('"\u6AE9"', JSON.stringify('\u6AE9'));
+assertEquals('"\u6C91"', JSON.stringify('\u6C91'));
+assertEquals('"\u714B"', JSON.stringify('\u714B'));
+assertEquals('"\u728A"', JSON.stringify('\u728A'));
+assertEquals('"\u7485"', JSON.stringify('\u7485'));
+assertEquals('"\u77C8"', JSON.stringify('\u77C8'));
+assertEquals('"\u7BE9"', JSON.stringify('\u7BE9'));
+assertEquals('"\u7CEF"', JSON.stringify('\u7CEF'));
+assertEquals('"\u7DD5"', JSON.stringify('\u7DD5'));
+assertEquals('"\u8DF1"', JSON.stringify('\u8DF1'));
+assertEquals('"\u94A9"', JSON.stringify('\u94A9'));
+assertEquals('"\u94F2"', JSON.stringify('\u94F2'));
+assertEquals('"\u9A7A"', JSON.stringify('\u9A7A'));
+assertEquals('"\u9AA6"', JSON.stringify('\u9AA6'));
+assertEquals('"\uA2B0"', JSON.stringify('\uA2B0'));
+assertEquals('"\uB711"', JSON.stringify('\uB711'));
+assertEquals('"\uBC01"', JSON.stringify('\uBC01'));
+assertEquals('"\uBCB6"', JSON.stringify('\uBCB6'));
+assertEquals('"\uBD70"', JSON.stringify('\uBD70'));
+assertEquals('"\uC3CD"', JSON.stringify('\uC3CD'));
+assertEquals('"\uC451"', JSON.stringify('\uC451'));
+assertEquals('"\uC677"', JSON.stringify('\uC677'));
+assertEquals('"\uC89B"', JSON.stringify('\uC89B'));
+assertEquals('"\uCBEF"', JSON.stringify('\uCBEF'));
+assertEquals('"\uCEF8"', JSON.stringify('\uCEF8'));
+assertEquals('"\uD089"', JSON.stringify('\uD089'));
+assertEquals('"\uD24D"', JSON.stringify('\uD24D'));
+assertEquals('"\uD3A7"', JSON.stringify('\uD3A7'));
+assertEquals('"\uD7FF"', JSON.stringify('\uD7FF'));
+
+// All lone surrogates, i.e. code points from U+D800 to U+DFFF.
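+// (Per the Well-Formed JSON.stringify proposal, enabled here via
+// --harmony-json-stringify, unpaired surrogates are emitted as lowercase
+// \uXXXX escape sequences so the output is always well-formed Unicode.)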
+assertEquals('"\\ud800"', JSON.stringify('\uD800'));
+assertEquals('"\\ud801"', JSON.stringify('\uD801'));
+assertEquals('"\\ud802"', JSON.stringify('\uD802'));
+assertEquals('"\\ud803"', JSON.stringify('\uD803'));
+assertEquals('"\\ud804"', JSON.stringify('\uD804'));
+assertEquals('"\\ud805"', JSON.stringify('\uD805'));
+assertEquals('"\\ud806"', JSON.stringify('\uD806'));
+assertEquals('"\\ud807"', JSON.stringify('\uD807'));
+assertEquals('"\\ud808"', JSON.stringify('\uD808'));
+assertEquals('"\\ud809"', JSON.stringify('\uD809'));
+assertEquals('"\\ud80a"', JSON.stringify('\uD80A'));
+assertEquals('"\\ud80b"', JSON.stringify('\uD80B'));
+assertEquals('"\\ud80c"', JSON.stringify('\uD80C'));
+assertEquals('"\\ud80d"', JSON.stringify('\uD80D'));
+assertEquals('"\\ud80e"', JSON.stringify('\uD80E'));
+assertEquals('"\\ud80f"', JSON.stringify('\uD80F'));
+assertEquals('"\\ud810"', JSON.stringify('\uD810'));
+assertEquals('"\\ud811"', JSON.stringify('\uD811'));
+assertEquals('"\\ud812"', JSON.stringify('\uD812'));
+assertEquals('"\\ud813"', JSON.stringify('\uD813'));
+assertEquals('"\\ud814"', JSON.stringify('\uD814'));
+assertEquals('"\\ud815"', JSON.stringify('\uD815'));
+assertEquals('"\\ud816"', JSON.stringify('\uD816'));
+assertEquals('"\\ud817"', JSON.stringify('\uD817'));
+assertEquals('"\\ud818"', JSON.stringify('\uD818'));
+assertEquals('"\\ud819"', JSON.stringify('\uD819'));
+assertEquals('"\\ud81a"', JSON.stringify('\uD81A'));
+assertEquals('"\\ud81b"', JSON.stringify('\uD81B'));
+assertEquals('"\\ud81c"', JSON.stringify('\uD81C'));
+assertEquals('"\\ud81d"', JSON.stringify('\uD81D'));
+assertEquals('"\\ud81e"', JSON.stringify('\uD81E'));
+assertEquals('"\\ud81f"', JSON.stringify('\uD81F'));
+assertEquals('"\\ud820"', JSON.stringify('\uD820'));
+assertEquals('"\\ud821"', JSON.stringify('\uD821'));
+assertEquals('"\\ud822"', JSON.stringify('\uD822'));
+assertEquals('"\\ud823"', JSON.stringify('\uD823'));
+assertEquals('"\\ud824"', JSON.stringify('\uD824'));
+assertEquals('"\\ud825"', JSON.stringify('\uD825'));
+assertEquals('"\\ud826"', JSON.stringify('\uD826'));
+assertEquals('"\\ud827"', JSON.stringify('\uD827'));
+assertEquals('"\\ud828"', JSON.stringify('\uD828'));
+assertEquals('"\\ud829"', JSON.stringify('\uD829'));
+assertEquals('"\\ud82a"', JSON.stringify('\uD82A'));
+assertEquals('"\\ud82b"', JSON.stringify('\uD82B'));
+assertEquals('"\\ud82c"', JSON.stringify('\uD82C'));
+assertEquals('"\\ud82d"', JSON.stringify('\uD82D'));
+assertEquals('"\\ud82e"', JSON.stringify('\uD82E'));
+assertEquals('"\\ud82f"', JSON.stringify('\uD82F'));
+assertEquals('"\\ud830"', JSON.stringify('\uD830'));
+assertEquals('"\\ud831"', JSON.stringify('\uD831'));
+assertEquals('"\\ud832"', JSON.stringify('\uD832'));
+assertEquals('"\\ud833"', JSON.stringify('\uD833'));
+assertEquals('"\\ud834"', JSON.stringify('\uD834'));
+assertEquals('"\\ud835"', JSON.stringify('\uD835'));
+assertEquals('"\\ud836"', JSON.stringify('\uD836'));
+assertEquals('"\\ud837"', JSON.stringify('\uD837'));
+assertEquals('"\\ud838"', JSON.stringify('\uD838'));
+assertEquals('"\\ud839"', JSON.stringify('\uD839'));
+assertEquals('"\\ud83a"', JSON.stringify('\uD83A'));
+assertEquals('"\\ud83b"', JSON.stringify('\uD83B'));
+assertEquals('"\\ud83c"', JSON.stringify('\uD83C'));
+assertEquals('"\\ud83d"', JSON.stringify('\uD83D'));
+assertEquals('"\\ud83e"', JSON.stringify('\uD83E'));
+assertEquals('"\\ud83f"', JSON.stringify('\uD83F'));
+assertEquals('"\\ud840"', JSON.stringify('\uD840'));
+assertEquals('"\\ud841"', JSON.stringify('\uD841'));
+assertEquals('"\\ud842"', JSON.stringify('\uD842'));
+assertEquals('"\\ud843"', JSON.stringify('\uD843'));
+assertEquals('"\\ud844"', JSON.stringify('\uD844'));
+assertEquals('"\\ud845"', JSON.stringify('\uD845'));
+assertEquals('"\\ud846"', JSON.stringify('\uD846'));
+assertEquals('"\\ud847"', JSON.stringify('\uD847'));
+assertEquals('"\\ud848"', JSON.stringify('\uD848'));
+assertEquals('"\\ud849"', JSON.stringify('\uD849'));
+assertEquals('"\\ud84a"', JSON.stringify('\uD84A'));
+assertEquals('"\\ud84b"', JSON.stringify('\uD84B'));
+assertEquals('"\\ud84c"', JSON.stringify('\uD84C'));
+assertEquals('"\\ud84d"', JSON.stringify('\uD84D'));
+assertEquals('"\\ud84e"', JSON.stringify('\uD84E'));
+assertEquals('"\\ud84f"', JSON.stringify('\uD84F'));
+assertEquals('"\\ud850"', JSON.stringify('\uD850'));
+assertEquals('"\\ud851"', JSON.stringify('\uD851'));
+assertEquals('"\\ud852"', JSON.stringify('\uD852'));
+assertEquals('"\\ud853"', JSON.stringify('\uD853'));
+assertEquals('"\\ud854"', JSON.stringify('\uD854'));
+assertEquals('"\\ud855"', JSON.stringify('\uD855'));
+assertEquals('"\\ud856"', JSON.stringify('\uD856'));
+assertEquals('"\\ud857"', JSON.stringify('\uD857'));
+assertEquals('"\\ud858"', JSON.stringify('\uD858'));
+assertEquals('"\\ud859"', JSON.stringify('\uD859'));
+assertEquals('"\\ud85a"', JSON.stringify('\uD85A'));
+assertEquals('"\\ud85b"', JSON.stringify('\uD85B'));
+assertEquals('"\\ud85c"', JSON.stringify('\uD85C'));
+assertEquals('"\\ud85d"', JSON.stringify('\uD85D'));
+assertEquals('"\\ud85e"', JSON.stringify('\uD85E'));
+assertEquals('"\\ud85f"', JSON.stringify('\uD85F'));
+assertEquals('"\\ud860"', JSON.stringify('\uD860'));
+assertEquals('"\\ud861"', JSON.stringify('\uD861'));
+assertEquals('"\\ud862"', JSON.stringify('\uD862'));
+assertEquals('"\\ud863"', JSON.stringify('\uD863'));
+assertEquals('"\\ud864"', JSON.stringify('\uD864'));
+assertEquals('"\\ud865"', JSON.stringify('\uD865'));
+assertEquals('"\\ud866"', JSON.stringify('\uD866'));
+assertEquals('"\\ud867"', JSON.stringify('\uD867'));
+assertEquals('"\\ud868"', JSON.stringify('\uD868'));
+assertEquals('"\\ud869"', JSON.stringify('\uD869'));
+assertEquals('"\\ud86a"', JSON.stringify('\uD86A'));
+assertEquals('"\\ud86b"', JSON.stringify('\uD86B'));
+assertEquals('"\\ud86c"', JSON.stringify('\uD86C'));
+assertEquals('"\\ud86d"', JSON.stringify('\uD86D'));
+assertEquals('"\\ud86e"', JSON.stringify('\uD86E'));
+assertEquals('"\\ud86f"', JSON.stringify('\uD86F'));
+assertEquals('"\\ud870"', JSON.stringify('\uD870'));
+assertEquals('"\\ud871"', JSON.stringify('\uD871'));
+assertEquals('"\\ud872"', JSON.stringify('\uD872'));
+assertEquals('"\\ud873"', JSON.stringify('\uD873'));
+assertEquals('"\\ud874"', JSON.stringify('\uD874'));
+assertEquals('"\\ud875"', JSON.stringify('\uD875'));
+assertEquals('"\\ud876"', JSON.stringify('\uD876'));
+assertEquals('"\\ud877"', JSON.stringify('\uD877'));
+assertEquals('"\\ud878"', JSON.stringify('\uD878'));
+assertEquals('"\\ud879"', JSON.stringify('\uD879'));
+assertEquals('"\\ud87a"', JSON.stringify('\uD87A'));
+assertEquals('"\\ud87b"', JSON.stringify('\uD87B'));
+assertEquals('"\\ud87c"', JSON.stringify('\uD87C'));
+assertEquals('"\\ud87d"', JSON.stringify('\uD87D'));
+assertEquals('"\\ud87e"', JSON.stringify('\uD87E'));
+assertEquals('"\\ud87f"', JSON.stringify('\uD87F'));
+assertEquals('"\\ud880"', JSON.stringify('\uD880'));
+assertEquals('"\\ud881"', JSON.stringify('\uD881'));
+assertEquals('"\\ud882"', JSON.stringify('\uD882'));
+assertEquals('"\\ud883"', JSON.stringify('\uD883'));
+assertEquals('"\\ud884"', JSON.stringify('\uD884'));
+assertEquals('"\\ud885"', JSON.stringify('\uD885'));
+assertEquals('"\\ud886"', JSON.stringify('\uD886'));
+assertEquals('"\\ud887"', JSON.stringify('\uD887'));
+assertEquals('"\\ud888"', JSON.stringify('\uD888'));
+assertEquals('"\\ud889"', JSON.stringify('\uD889'));
+assertEquals('"\\ud88a"', JSON.stringify('\uD88A'));
+assertEquals('"\\ud88b"', JSON.stringify('\uD88B'));
+assertEquals('"\\ud88c"', JSON.stringify('\uD88C'));
+assertEquals('"\\ud88d"', JSON.stringify('\uD88D'));
+assertEquals('"\\ud88e"', JSON.stringify('\uD88E'));
+assertEquals('"\\ud88f"', JSON.stringify('\uD88F'));
+assertEquals('"\\ud890"', JSON.stringify('\uD890'));
+assertEquals('"\\ud891"', JSON.stringify('\uD891'));
+assertEquals('"\\ud892"', JSON.stringify('\uD892'));
+assertEquals('"\\ud893"', JSON.stringify('\uD893'));
+assertEquals('"\\ud894"', JSON.stringify('\uD894'));
+assertEquals('"\\ud895"', JSON.stringify('\uD895'));
+assertEquals('"\\ud896"', JSON.stringify('\uD896'));
+assertEquals('"\\ud897"', JSON.stringify('\uD897'));
+assertEquals('"\\ud898"', JSON.stringify('\uD898'));
+assertEquals('"\\ud899"', JSON.stringify('\uD899'));
+assertEquals('"\\ud89a"', JSON.stringify('\uD89A'));
+assertEquals('"\\ud89b"', JSON.stringify('\uD89B'));
+assertEquals('"\\ud89c"', JSON.stringify('\uD89C'));
+assertEquals('"\\ud89d"', JSON.stringify('\uD89D'));
+assertEquals('"\\ud89e"', JSON.stringify('\uD89E'));
+assertEquals('"\\ud89f"', JSON.stringify('\uD89F'));
+assertEquals('"\\ud8a0"', JSON.stringify('\uD8A0'));
+assertEquals('"\\ud8a1"', JSON.stringify('\uD8A1'));
+assertEquals('"\\ud8a2"', JSON.stringify('\uD8A2'));
+assertEquals('"\\ud8a3"', JSON.stringify('\uD8A3'));
+assertEquals('"\\ud8a4"', JSON.stringify('\uD8A4'));
+assertEquals('"\\ud8a5"', JSON.stringify('\uD8A5'));
+assertEquals('"\\ud8a6"', JSON.stringify('\uD8A6'));
+assertEquals('"\\ud8a7"', JSON.stringify('\uD8A7'));
+assertEquals('"\\ud8a8"', JSON.stringify('\uD8A8'));
+assertEquals('"\\ud8a9"', JSON.stringify('\uD8A9'));
+assertEquals('"\\ud8aa"', JSON.stringify('\uD8AA'));
+assertEquals('"\\ud8ab"', JSON.stringify('\uD8AB'));
+assertEquals('"\\ud8ac"', JSON.stringify('\uD8AC'));
+assertEquals('"\\ud8ad"', JSON.stringify('\uD8AD'));
+assertEquals('"\\ud8ae"', JSON.stringify('\uD8AE'));
+assertEquals('"\\ud8af"', JSON.stringify('\uD8AF'));
+assertEquals('"\\ud8b0"', JSON.stringify('\uD8B0'));
+assertEquals('"\\ud8b1"', JSON.stringify('\uD8B1'));
+assertEquals('"\\ud8b2"', JSON.stringify('\uD8B2'));
+assertEquals('"\\ud8b3"', JSON.stringify('\uD8B3'));
+assertEquals('"\\ud8b4"', JSON.stringify('\uD8B4'));
+assertEquals('"\\ud8b5"', JSON.stringify('\uD8B5'));
+assertEquals('"\\ud8b6"', JSON.stringify('\uD8B6'));
+assertEquals('"\\ud8b7"', JSON.stringify('\uD8B7'));
+assertEquals('"\\ud8b8"', JSON.stringify('\uD8B8'));
+assertEquals('"\\ud8b9"', JSON.stringify('\uD8B9'));
+assertEquals('"\\ud8ba"', JSON.stringify('\uD8BA'));
+assertEquals('"\\ud8bb"', JSON.stringify('\uD8BB'));
+assertEquals('"\\ud8bc"', JSON.stringify('\uD8BC'));
+assertEquals('"\\ud8bd"', JSON.stringify('\uD8BD'));
+assertEquals('"\\ud8be"', JSON.stringify('\uD8BE'));
+assertEquals('"\\ud8bf"', JSON.stringify('\uD8BF'));
+assertEquals('"\\ud8c0"', JSON.stringify('\uD8C0'));
+assertEquals('"\\ud8c1"', JSON.stringify('\uD8C1'));
+assertEquals('"\\ud8c2"', JSON.stringify('\uD8C2'));
+assertEquals('"\\ud8c3"', JSON.stringify('\uD8C3'));
+assertEquals('"\\ud8c4"', JSON.stringify('\uD8C4'));
+assertEquals('"\\ud8c5"', JSON.stringify('\uD8C5'));
+assertEquals('"\\ud8c6"', JSON.stringify('\uD8C6'));
+assertEquals('"\\ud8c7"', JSON.stringify('\uD8C7'));
+assertEquals('"\\ud8c8"', JSON.stringify('\uD8C8'));
+assertEquals('"\\ud8c9"', JSON.stringify('\uD8C9'));
+assertEquals('"\\ud8ca"', JSON.stringify('\uD8CA'));
+assertEquals('"\\ud8cb"', JSON.stringify('\uD8CB'));
+assertEquals('"\\ud8cc"', JSON.stringify('\uD8CC'));
+assertEquals('"\\ud8cd"', JSON.stringify('\uD8CD'));
+assertEquals('"\\ud8ce"', JSON.stringify('\uD8CE'));
+assertEquals('"\\ud8cf"', JSON.stringify('\uD8CF'));
+assertEquals('"\\ud8d0"', JSON.stringify('\uD8D0'));
+assertEquals('"\\ud8d1"', JSON.stringify('\uD8D1'));
+assertEquals('"\\ud8d2"', JSON.stringify('\uD8D2'));
+assertEquals('"\\ud8d3"', JSON.stringify('\uD8D3'));
+assertEquals('"\\ud8d4"', JSON.stringify('\uD8D4'));
+assertEquals('"\\ud8d5"', JSON.stringify('\uD8D5'));
+assertEquals('"\\ud8d6"', JSON.stringify('\uD8D6'));
+assertEquals('"\\ud8d7"', JSON.stringify('\uD8D7'));
+assertEquals('"\\ud8d8"', JSON.stringify('\uD8D8'));
+assertEquals('"\\ud8d9"', JSON.stringify('\uD8D9'));
+assertEquals('"\\ud8da"', JSON.stringify('\uD8DA'));
+assertEquals('"\\ud8db"', JSON.stringify('\uD8DB'));
+assertEquals('"\\ud8dc"', JSON.stringify('\uD8DC'));
+assertEquals('"\\ud8dd"', JSON.stringify('\uD8DD'));
+assertEquals('"\\ud8de"', JSON.stringify('\uD8DE'));
+assertEquals('"\\ud8df"', JSON.stringify('\uD8DF'));
+assertEquals('"\\ud8e0"', JSON.stringify('\uD8E0'));
+assertEquals('"\\ud8e1"', JSON.stringify('\uD8E1'));
+assertEquals('"\\ud8e2"', JSON.stringify('\uD8E2'));
+assertEquals('"\\ud8e3"', JSON.stringify('\uD8E3'));
+assertEquals('"\\ud8e4"', JSON.stringify('\uD8E4'));
+assertEquals('"\\ud8e5"', JSON.stringify('\uD8E5'));
+assertEquals('"\\ud8e6"', JSON.stringify('\uD8E6'));
+assertEquals('"\\ud8e7"', JSON.stringify('\uD8E7'));
+assertEquals('"\\ud8e8"', JSON.stringify('\uD8E8'));
+assertEquals('"\\ud8e9"', JSON.stringify('\uD8E9'));
+assertEquals('"\\ud8ea"', JSON.stringify('\uD8EA'));
+assertEquals('"\\ud8eb"', JSON.stringify('\uD8EB'));
+assertEquals('"\\ud8ec"', JSON.stringify('\uD8EC'));
+assertEquals('"\\ud8ed"', JSON.stringify('\uD8ED'));
+assertEquals('"\\ud8ee"', JSON.stringify('\uD8EE'));
+assertEquals('"\\ud8ef"', JSON.stringify('\uD8EF'));
+assertEquals('"\\ud8f0"', JSON.stringify('\uD8F0'));
+assertEquals('"\\ud8f1"', JSON.stringify('\uD8F1'));
+assertEquals('"\\ud8f2"', JSON.stringify('\uD8F2'));
+assertEquals('"\\ud8f3"', JSON.stringify('\uD8F3'));
+assertEquals('"\\ud8f4"', JSON.stringify('\uD8F4'));
+assertEquals('"\\ud8f5"', JSON.stringify('\uD8F5'));
+assertEquals('"\\ud8f6"', JSON.stringify('\uD8F6'));
+assertEquals('"\\ud8f7"', JSON.stringify('\uD8F7'));
+assertEquals('"\\ud8f8"', JSON.stringify('\uD8F8'));
+assertEquals('"\\ud8f9"', JSON.stringify('\uD8F9'));
+assertEquals('"\\ud8fa"', JSON.stringify('\uD8FA'));
+assertEquals('"\\ud8fb"', JSON.stringify('\uD8FB'));
+assertEquals('"\\ud8fc"', JSON.stringify('\uD8FC'));
+assertEquals('"\\ud8fd"', JSON.stringify('\uD8FD'));
+assertEquals('"\\ud8fe"', JSON.stringify('\uD8FE'));
+assertEquals('"\\ud8ff"', JSON.stringify('\uD8FF'));
+assertEquals('"\\ud900"', JSON.stringify('\uD900'));
+assertEquals('"\\ud901"', JSON.stringify('\uD901'));
+assertEquals('"\\ud902"', JSON.stringify('\uD902'));
+assertEquals('"\\ud903"', JSON.stringify('\uD903'));
+assertEquals('"\\ud904"', JSON.stringify('\uD904'));
+assertEquals('"\\ud905"', JSON.stringify('\uD905'));
+assertEquals('"\\ud906"', JSON.stringify('\uD906'));
+assertEquals('"\\ud907"', JSON.stringify('\uD907'));
+assertEquals('"\\ud908"', JSON.stringify('\uD908'));
+assertEquals('"\\ud909"', JSON.stringify('\uD909'));
+assertEquals('"\\ud90a"', JSON.stringify('\uD90A'));
+assertEquals('"\\ud90b"', JSON.stringify('\uD90B'));
+assertEquals('"\\ud90c"', JSON.stringify('\uD90C'));
+assertEquals('"\\ud90d"', JSON.stringify('\uD90D'));
+assertEquals('"\\ud90e"', JSON.stringify('\uD90E'));
+assertEquals('"\\ud90f"', JSON.stringify('\uD90F'));
+assertEquals('"\\ud910"', JSON.stringify('\uD910'));
+assertEquals('"\\ud911"', JSON.stringify('\uD911'));
+assertEquals('"\\ud912"', JSON.stringify('\uD912'));
+assertEquals('"\\ud913"', JSON.stringify('\uD913'));
+assertEquals('"\\ud914"', JSON.stringify('\uD914'));
+assertEquals('"\\ud915"', JSON.stringify('\uD915'));
+assertEquals('"\\ud916"', JSON.stringify('\uD916'));
+assertEquals('"\\ud917"', JSON.stringify('\uD917'));
+assertEquals('"\\ud918"', JSON.stringify('\uD918'));
+assertEquals('"\\ud919"', JSON.stringify('\uD919'));
+assertEquals('"\\ud91a"', JSON.stringify('\uD91A'));
+assertEquals('"\\ud91b"', JSON.stringify('\uD91B'));
+assertEquals('"\\ud91c"', JSON.stringify('\uD91C'));
+assertEquals('"\\ud91d"', JSON.stringify('\uD91D'));
+assertEquals('"\\ud91e"', JSON.stringify('\uD91E'));
+assertEquals('"\\ud91f"', JSON.stringify('\uD91F'));
+assertEquals('"\\ud920"', JSON.stringify('\uD920'));
+assertEquals('"\\ud921"', JSON.stringify('\uD921'));
+assertEquals('"\\ud922"', JSON.stringify('\uD922'));
+assertEquals('"\\ud923"', JSON.stringify('\uD923'));
+assertEquals('"\\ud924"', JSON.stringify('\uD924'));
+assertEquals('"\\ud925"', JSON.stringify('\uD925'));
+assertEquals('"\\ud926"', JSON.stringify('\uD926'));
+assertEquals('"\\ud927"', JSON.stringify('\uD927'));
+assertEquals('"\\ud928"', JSON.stringify('\uD928'));
+assertEquals('"\\ud929"', JSON.stringify('\uD929'));
+assertEquals('"\\ud92a"', JSON.stringify('\uD92A'));
+assertEquals('"\\ud92b"', JSON.stringify('\uD92B'));
+assertEquals('"\\ud92c"', JSON.stringify('\uD92C'));
+assertEquals('"\\ud92d"', JSON.stringify('\uD92D'));
+assertEquals('"\\ud92e"', JSON.stringify('\uD92E'));
+assertEquals('"\\ud92f"', JSON.stringify('\uD92F'));
+assertEquals('"\\ud930"', JSON.stringify('\uD930'));
+assertEquals('"\\ud931"', JSON.stringify('\uD931'));
+assertEquals('"\\ud932"', JSON.stringify('\uD932'));
+assertEquals('"\\ud933"', JSON.stringify('\uD933'));
+assertEquals('"\\ud934"', JSON.stringify('\uD934'));
+assertEquals('"\\ud935"', JSON.stringify('\uD935'));
+assertEquals('"\\ud936"', JSON.stringify('\uD936'));
+assertEquals('"\\ud937"', JSON.stringify('\uD937'));
+assertEquals('"\\ud938"', JSON.stringify('\uD938'));
+assertEquals('"\\ud939"', JSON.stringify('\uD939'));
+assertEquals('"\\ud93a"', JSON.stringify('\uD93A'));
+assertEquals('"\\ud93b"', JSON.stringify('\uD93B'));
+assertEquals('"\\ud93c"', JSON.stringify('\uD93C'));
+assertEquals('"\\ud93d"', JSON.stringify('\uD93D'));
+assertEquals('"\\ud93e"', JSON.stringify('\uD93E'));
+assertEquals('"\\ud93f"', JSON.stringify('\uD93F'));
+assertEquals('"\\ud940"', JSON.stringify('\uD940'));
+assertEquals('"\\ud941"', JSON.stringify('\uD941'));
+assertEquals('"\\ud942"', JSON.stringify('\uD942'));
+assertEquals('"\\ud943"', JSON.stringify('\uD943'));
+assertEquals('"\\ud944"', JSON.stringify('\uD944'));
+assertEquals('"\\ud945"', JSON.stringify('\uD945'));
+assertEquals('"\\ud946"', JSON.stringify('\uD946'));
+assertEquals('"\\ud947"', JSON.stringify('\uD947'));
+assertEquals('"\\ud948"', JSON.stringify('\uD948'));
+assertEquals('"\\ud949"', JSON.stringify('\uD949'));
+assertEquals('"\\ud94a"', JSON.stringify('\uD94A'));
+assertEquals('"\\ud94b"', JSON.stringify('\uD94B'));
+assertEquals('"\\ud94c"', JSON.stringify('\uD94C'));
+assertEquals('"\\ud94d"', JSON.stringify('\uD94D'));
+assertEquals('"\\ud94e"', JSON.stringify('\uD94E'));
+assertEquals('"\\ud94f"', JSON.stringify('\uD94F'));
+assertEquals('"\\ud950"', JSON.stringify('\uD950'));
+assertEquals('"\\ud951"', JSON.stringify('\uD951'));
+assertEquals('"\\ud952"', JSON.stringify('\uD952'));
+assertEquals('"\\ud953"', JSON.stringify('\uD953'));
+assertEquals('"\\ud954"', JSON.stringify('\uD954'));
+assertEquals('"\\ud955"', JSON.stringify('\uD955'));
+assertEquals('"\\ud956"', JSON.stringify('\uD956'));
+assertEquals('"\\ud957"', JSON.stringify('\uD957'));
+assertEquals('"\\ud958"', JSON.stringify('\uD958'));
+assertEquals('"\\ud959"', JSON.stringify('\uD959'));
+assertEquals('"\\ud95a"', JSON.stringify('\uD95A'));
+assertEquals('"\\ud95b"', JSON.stringify('\uD95B'));
+assertEquals('"\\ud95c"', JSON.stringify('\uD95C'));
+assertEquals('"\\ud95d"', JSON.stringify('\uD95D'));
+assertEquals('"\\ud95e"', JSON.stringify('\uD95E'));
+assertEquals('"\\ud95f"', JSON.stringify('\uD95F'));
+assertEquals('"\\ud960"', JSON.stringify('\uD960'));
+assertEquals('"\\ud961"', JSON.stringify('\uD961'));
+assertEquals('"\\ud962"', JSON.stringify('\uD962'));
+assertEquals('"\\ud963"', JSON.stringify('\uD963'));
+assertEquals('"\\ud964"', JSON.stringify('\uD964'));
+assertEquals('"\\ud965"', JSON.stringify('\uD965'));
+assertEquals('"\\ud966"', JSON.stringify('\uD966'));
+assertEquals('"\\ud967"', JSON.stringify('\uD967'));
+assertEquals('"\\ud968"', JSON.stringify('\uD968'));
+assertEquals('"\\ud969"', JSON.stringify('\uD969'));
+assertEquals('"\\ud96a"', JSON.stringify('\uD96A'));
+assertEquals('"\\ud96b"', JSON.stringify('\uD96B'));
+assertEquals('"\\ud96c"', JSON.stringify('\uD96C'));
+assertEquals('"\\ud96d"', JSON.stringify('\uD96D'));
+assertEquals('"\\ud96e"', JSON.stringify('\uD96E'));
+assertEquals('"\\ud96f"', JSON.stringify('\uD96F'));
+assertEquals('"\\ud970"', JSON.stringify('\uD970'));
+assertEquals('"\\ud971"', JSON.stringify('\uD971'));
+assertEquals('"\\ud972"', JSON.stringify('\uD972'));
+assertEquals('"\\ud973"', JSON.stringify('\uD973'));
+assertEquals('"\\ud974"', JSON.stringify('\uD974'));
+assertEquals('"\\ud975"', JSON.stringify('\uD975'));
+assertEquals('"\\ud976"', JSON.stringify('\uD976'));
+assertEquals('"\\ud977"', JSON.stringify('\uD977'));
+assertEquals('"\\ud978"', JSON.stringify('\uD978'));
+assertEquals('"\\ud979"', JSON.stringify('\uD979'));
+assertEquals('"\\ud97a"', JSON.stringify('\uD97A'));
+assertEquals('"\\ud97b"', JSON.stringify('\uD97B'));
+assertEquals('"\\ud97c"', JSON.stringify('\uD97C'));
+assertEquals('"\\ud97d"', JSON.stringify('\uD97D'));
+assertEquals('"\\ud97e"', JSON.stringify('\uD97E'));
+assertEquals('"\\ud97f"', JSON.stringify('\uD97F'));
+assertEquals('"\\ud980"', JSON.stringify('\uD980'));
+assertEquals('"\\ud981"', JSON.stringify('\uD981'));
+assertEquals('"\\ud982"', JSON.stringify('\uD982'));
+assertEquals('"\\ud983"', JSON.stringify('\uD983'));
+assertEquals('"\\ud984"', JSON.stringify('\uD984'));
+assertEquals('"\\ud985"', JSON.stringify('\uD985'));
+assertEquals('"\\ud986"', JSON.stringify('\uD986'));
+assertEquals('"\\ud987"', JSON.stringify('\uD987'));
+assertEquals('"\\ud988"', JSON.stringify('\uD988'));
+assertEquals('"\\ud989"', JSON.stringify('\uD989'));
+assertEquals('"\\ud98a"', JSON.stringify('\uD98A'));
+assertEquals('"\\ud98b"', JSON.stringify('\uD98B'));
+assertEquals('"\\ud98c"', JSON.stringify('\uD98C'));
+assertEquals('"\\ud98d"', JSON.stringify('\uD98D'));
+assertEquals('"\\ud98e"', JSON.stringify('\uD98E'));
+assertEquals('"\\ud98f"', JSON.stringify('\uD98F'));
+assertEquals('"\\ud990"', JSON.stringify('\uD990'));
+assertEquals('"\\ud991"', JSON.stringify('\uD991'));
+assertEquals('"\\ud992"', JSON.stringify('\uD992'));
+assertEquals('"\\ud993"', JSON.stringify('\uD993'));
+assertEquals('"\\ud994"', JSON.stringify('\uD994'));
+assertEquals('"\\ud995"', JSON.stringify('\uD995'));
+assertEquals('"\\ud996"', JSON.stringify('\uD996'));
+assertEquals('"\\ud997"', JSON.stringify('\uD997'));
+assertEquals('"\\ud998"', JSON.stringify('\uD998'));
+assertEquals('"\\ud999"', JSON.stringify('\uD999'));
+assertEquals('"\\ud99a"', JSON.stringify('\uD99A'));
+assertEquals('"\\ud99b"', JSON.stringify('\uD99B'));
+assertEquals('"\\ud99c"', JSON.stringify('\uD99C'));
+assertEquals('"\\ud99d"', JSON.stringify('\uD99D'));
+assertEquals('"\\ud99e"', JSON.stringify('\uD99E'));
+assertEquals('"\\ud99f"', JSON.stringify('\uD99F'));
+assertEquals('"\\ud9a0"', JSON.stringify('\uD9A0'));
+assertEquals('"\\ud9a1"', JSON.stringify('\uD9A1'));
+assertEquals('"\\ud9a2"', JSON.stringify('\uD9A2'));
+assertEquals('"\\ud9a3"', JSON.stringify('\uD9A3'));
+assertEquals('"\\ud9a4"', JSON.stringify('\uD9A4'));
+assertEquals('"\\ud9a5"', JSON.stringify('\uD9A5'));
+assertEquals('"\\ud9a6"', JSON.stringify('\uD9A6'));
+assertEquals('"\\ud9a7"', JSON.stringify('\uD9A7'));
+assertEquals('"\\ud9a8"', JSON.stringify('\uD9A8'));
+assertEquals('"\\ud9a9"', JSON.stringify('\uD9A9'));
+assertEquals('"\\ud9aa"', JSON.stringify('\uD9AA'));
+assertEquals('"\\ud9ab"', JSON.stringify('\uD9AB'));
+assertEquals('"\\ud9ac"', JSON.stringify('\uD9AC'));
+assertEquals('"\\ud9ad"', JSON.stringify('\uD9AD'));
+assertEquals('"\\ud9ae"', JSON.stringify('\uD9AE'));
+assertEquals('"\\ud9af"', JSON.stringify('\uD9AF'));
+assertEquals('"\\ud9b0"', JSON.stringify('\uD9B0'));
+assertEquals('"\\ud9b1"', JSON.stringify('\uD9B1'));
+assertEquals('"\\ud9b2"', JSON.stringify('\uD9B2'));
+assertEquals('"\\ud9b3"', JSON.stringify('\uD9B3'));
+assertEquals('"\\ud9b4"', JSON.stringify('\uD9B4'));
+assertEquals('"\\ud9b5"', JSON.stringify('\uD9B5'));
+assertEquals('"\\ud9b6"', JSON.stringify('\uD9B6'));
+assertEquals('"\\ud9b7"', JSON.stringify('\uD9B7'));
+assertEquals('"\\ud9b8"', JSON.stringify('\uD9B8'));
+assertEquals('"\\ud9b9"', JSON.stringify('\uD9B9'));
+assertEquals('"\\ud9ba"', JSON.stringify('\uD9BA'));
+assertEquals('"\\ud9bb"', JSON.stringify('\uD9BB'));
+assertEquals('"\\ud9bc"', JSON.stringify('\uD9BC'));
+assertEquals('"\\ud9bd"', JSON.stringify('\uD9BD'));
+assertEquals('"\\ud9be"', JSON.stringify('\uD9BE'));
+assertEquals('"\\ud9bf"', JSON.stringify('\uD9BF'));
+assertEquals('"\\ud9c0"', JSON.stringify('\uD9C0'));
+assertEquals('"\\ud9c1"', JSON.stringify('\uD9C1'));
+assertEquals('"\\ud9c2"', JSON.stringify('\uD9C2'));
+assertEquals('"\\ud9c3"', JSON.stringify('\uD9C3'));
+assertEquals('"\\ud9c4"', JSON.stringify('\uD9C4'));
+assertEquals('"\\ud9c5"', JSON.stringify('\uD9C5'));
+assertEquals('"\\ud9c6"', JSON.stringify('\uD9C6'));
+assertEquals('"\\ud9c7"', JSON.stringify('\uD9C7'));
+assertEquals('"\\ud9c8"', JSON.stringify('\uD9C8'));
+assertEquals('"\\ud9c9"', JSON.stringify('\uD9C9'));
+assertEquals('"\\ud9ca"', JSON.stringify('\uD9CA'));
+assertEquals('"\\ud9cb"', JSON.stringify('\uD9CB'));
+assertEquals('"\\ud9cc"', JSON.stringify('\uD9CC'));
+assertEquals('"\\ud9cd"', JSON.stringify('\uD9CD'));
+assertEquals('"\\ud9ce"', JSON.stringify('\uD9CE'));
+assertEquals('"\\ud9cf"', JSON.stringify('\uD9CF'));
+assertEquals('"\\ud9d0"', JSON.stringify('\uD9D0'));
+assertEquals('"\\ud9d1"', JSON.stringify('\uD9D1'));
+assertEquals('"\\ud9d2"', JSON.stringify('\uD9D2'));
+assertEquals('"\\ud9d3"', JSON.stringify('\uD9D3'));
+assertEquals('"\\ud9d4"', JSON.stringify('\uD9D4'));
+assertEquals('"\\ud9d5"', JSON.stringify('\uD9D5'));
+assertEquals('"\\ud9d6"', JSON.stringify('\uD9D6'));
+assertEquals('"\\ud9d7"', JSON.stringify('\uD9D7'));
+assertEquals('"\\ud9d8"', JSON.stringify('\uD9D8'));
+assertEquals('"\\ud9d9"', JSON.stringify('\uD9D9'));
+assertEquals('"\\ud9da"', JSON.stringify('\uD9DA'));
+assertEquals('"\\ud9db"', JSON.stringify('\uD9DB'));
+assertEquals('"\\ud9dc"', JSON.stringify('\uD9DC'));
+assertEquals('"\\ud9dd"', JSON.stringify('\uD9DD'));
+assertEquals('"\\ud9de"', JSON.stringify('\uD9DE'));
+assertEquals('"\\ud9df"', JSON.stringify('\uD9DF'));
+assertEquals('"\\ud9e0"', JSON.stringify('\uD9E0'));
+assertEquals('"\\ud9e1"', JSON.stringify('\uD9E1'));
+assertEquals('"\\ud9e2"', JSON.stringify('\uD9E2'));
+assertEquals('"\\ud9e3"', JSON.stringify('\uD9E3'));
+assertEquals('"\\ud9e4"', JSON.stringify('\uD9E4'));
+assertEquals('"\\ud9e5"', JSON.stringify('\uD9E5'));
+assertEquals('"\\ud9e6"', JSON.stringify('\uD9E6'));
+assertEquals('"\\ud9e7"', JSON.stringify('\uD9E7'));
+assertEquals('"\\ud9e8"', JSON.stringify('\uD9E8'));
+assertEquals('"\\ud9e9"', JSON.stringify('\uD9E9'));
+assertEquals('"\\ud9ea"', JSON.stringify('\uD9EA'));
+assertEquals('"\\ud9eb"', JSON.stringify('\uD9EB'));
+assertEquals('"\\ud9ec"', JSON.stringify('\uD9EC'));
+assertEquals('"\\ud9ed"', JSON.stringify('\uD9ED'));
+assertEquals('"\\ud9ee"', JSON.stringify('\uD9EE'));
+assertEquals('"\\ud9ef"', JSON.stringify('\uD9EF'));
+assertEquals('"\\ud9f0"', JSON.stringify('\uD9F0'));
+assertEquals('"\\ud9f1"', JSON.stringify('\uD9F1'));
+assertEquals('"\\ud9f2"', JSON.stringify('\uD9F2'));
+assertEquals('"\\ud9f3"', JSON.stringify('\uD9F3'));
+assertEquals('"\\ud9f4"', JSON.stringify('\uD9F4'));
+assertEquals('"\\ud9f5"', JSON.stringify('\uD9F5'));
+assertEquals('"\\ud9f6"', JSON.stringify('\uD9F6'));
+assertEquals('"\\ud9f7"', JSON.stringify('\uD9F7'));
+assertEquals('"\\ud9f8"', JSON.stringify('\uD9F8'));
+assertEquals('"\\ud9f9"', JSON.stringify('\uD9F9'));
+assertEquals('"\\ud9fa"', JSON.stringify('\uD9FA'));
+assertEquals('"\\ud9fb"', JSON.stringify('\uD9FB'));
+assertEquals('"\\ud9fc"', JSON.stringify('\uD9FC'));
+assertEquals('"\\ud9fd"', JSON.stringify('\uD9FD'));
+assertEquals('"\\ud9fe"', JSON.stringify('\uD9FE'));
+assertEquals('"\\ud9ff"', JSON.stringify('\uD9FF'));
+assertEquals('"\\uda00"', JSON.stringify('\uDA00'));
+assertEquals('"\\uda01"', JSON.stringify('\uDA01'));
+assertEquals('"\\uda02"', JSON.stringify('\uDA02'));
+assertEquals('"\\uda03"', JSON.stringify('\uDA03'));
+assertEquals('"\\uda04"', JSON.stringify('\uDA04'));
+assertEquals('"\\uda05"', JSON.stringify('\uDA05'));
+assertEquals('"\\uda06"', JSON.stringify('\uDA06'));
+assertEquals('"\\uda07"', JSON.stringify('\uDA07'));
+assertEquals('"\\uda08"', JSON.stringify('\uDA08'));
+assertEquals('"\\uda09"', JSON.stringify('\uDA09'));
+assertEquals('"\\uda0a"', JSON.stringify('\uDA0A'));
+assertEquals('"\\uda0b"', JSON.stringify('\uDA0B'));
+assertEquals('"\\uda0c"', JSON.stringify('\uDA0C'));
+assertEquals('"\\uda0d"', JSON.stringify('\uDA0D'));
+assertEquals('"\\uda0e"', JSON.stringify('\uDA0E'));
+assertEquals('"\\uda0f"', JSON.stringify('\uDA0F'));
+assertEquals('"\\uda10"', JSON.stringify('\uDA10'));
+assertEquals('"\\uda11"', JSON.stringify('\uDA11'));
+assertEquals('"\\uda12"', JSON.stringify('\uDA12'));
+assertEquals('"\\uda13"', JSON.stringify('\uDA13'));
+assertEquals('"\\uda14"', JSON.stringify('\uDA14'));
+assertEquals('"\\uda15"', JSON.stringify('\uDA15'));
+assertEquals('"\\uda16"', JSON.stringify('\uDA16'));
+assertEquals('"\\uda17"', JSON.stringify('\uDA17'));
+assertEquals('"\\uda18"', JSON.stringify('\uDA18'));
+assertEquals('"\\uda19"', JSON.stringify('\uDA19'));
+assertEquals('"\\uda1a"', JSON.stringify('\uDA1A'));
+assertEquals('"\\uda1b"', JSON.stringify('\uDA1B'));
+assertEquals('"\\uda1c"', JSON.stringify('\uDA1C'));
+assertEquals('"\\uda1d"', JSON.stringify('\uDA1D'));
+assertEquals('"\\uda1e"', JSON.stringify('\uDA1E'));
+assertEquals('"\\uda1f"', JSON.stringify('\uDA1F'));
+assertEquals('"\\uda20"', JSON.stringify('\uDA20'));
+assertEquals('"\\uda21"', JSON.stringify('\uDA21'));
+assertEquals('"\\uda22"', JSON.stringify('\uDA22'));
+assertEquals('"\\uda23"', JSON.stringify('\uDA23'));
+assertEquals('"\\uda24"', JSON.stringify('\uDA24'));
+assertEquals('"\\uda25"', JSON.stringify('\uDA25'));
+assertEquals('"\\uda26"', JSON.stringify('\uDA26'));
+assertEquals('"\\uda27"', JSON.stringify('\uDA27'));
+assertEquals('"\\uda28"', JSON.stringify('\uDA28'));
+assertEquals('"\\uda29"', JSON.stringify('\uDA29'));
+assertEquals('"\\uda2a"', JSON.stringify('\uDA2A'));
+assertEquals('"\\uda2b"', JSON.stringify('\uDA2B'));
+assertEquals('"\\uda2c"', JSON.stringify('\uDA2C'));
+assertEquals('"\\uda2d"', JSON.stringify('\uDA2D'));
+assertEquals('"\\uda2e"', JSON.stringify('\uDA2E'));
+assertEquals('"\\uda2f"', JSON.stringify('\uDA2F'));
+assertEquals('"\\uda30"', JSON.stringify('\uDA30'));
+assertEquals('"\\uda31"', JSON.stringify('\uDA31'));
+assertEquals('"\\uda32"', JSON.stringify('\uDA32'));
+assertEquals('"\\uda33"', JSON.stringify('\uDA33'));
+assertEquals('"\\uda34"', JSON.stringify('\uDA34'));
+assertEquals('"\\uda35"', JSON.stringify('\uDA35'));
+assertEquals('"\\uda36"', JSON.stringify('\uDA36'));
+assertEquals('"\\uda37"', JSON.stringify('\uDA37'));
+assertEquals('"\\uda38"', JSON.stringify('\uDA38'));
+assertEquals('"\\uda39"', JSON.stringify('\uDA39'));
+assertEquals('"\\uda3a"', JSON.stringify('\uDA3A'));
+assertEquals('"\\uda3b"', JSON.stringify('\uDA3B'));
+assertEquals('"\\uda3c"', JSON.stringify('\uDA3C'));
+assertEquals('"\\uda3d"', JSON.stringify('\uDA3D'));
+assertEquals('"\\uda3e"', JSON.stringify('\uDA3E'));
+assertEquals('"\\uda3f"', JSON.stringify('\uDA3F'));
+assertEquals('"\\uda40"', JSON.stringify('\uDA40'));
+assertEquals('"\\uda41"', JSON.stringify('\uDA41'));
+assertEquals('"\\uda42"', JSON.stringify('\uDA42'));
+assertEquals('"\\uda43"', JSON.stringify('\uDA43'));
+assertEquals('"\\uda44"', JSON.stringify('\uDA44'));
+assertEquals('"\\uda45"', JSON.stringify('\uDA45'));
+assertEquals('"\\uda46"', JSON.stringify('\uDA46'));
+assertEquals('"\\uda47"', JSON.stringify('\uDA47'));
+assertEquals('"\\uda48"', JSON.stringify('\uDA48'));
+assertEquals('"\\uda49"', JSON.stringify('\uDA49'));
+assertEquals('"\\uda4a"', JSON.stringify('\uDA4A'));
+assertEquals('"\\uda4b"', JSON.stringify('\uDA4B'));
+assertEquals('"\\uda4c"', JSON.stringify('\uDA4C'));
+assertEquals('"\\uda4d"', JSON.stringify('\uDA4D'));
+assertEquals('"\\uda4e"', JSON.stringify('\uDA4E'));
+assertEquals('"\\uda4f"', JSON.stringify('\uDA4F'));
+assertEquals('"\\uda50"', JSON.stringify('\uDA50'));
+assertEquals('"\\uda51"', JSON.stringify('\uDA51'));
+assertEquals('"\\uda52"', JSON.stringify('\uDA52'));
+assertEquals('"\\uda53"', JSON.stringify('\uDA53'));
+assertEquals('"\\uda54"', JSON.stringify('\uDA54'));
+assertEquals('"\\uda55"', JSON.stringify('\uDA55'));
+assertEquals('"\\uda56"', JSON.stringify('\uDA56'));
+assertEquals('"\\uda57"', JSON.stringify('\uDA57'));
+assertEquals('"\\uda58"', JSON.stringify('\uDA58'));
+assertEquals('"\\uda59"', JSON.stringify('\uDA59'));
+assertEquals('"\\uda5a"', JSON.stringify('\uDA5A'));
+assertEquals('"\\uda5b"', JSON.stringify('\uDA5B'));
+assertEquals('"\\uda5c"', JSON.stringify('\uDA5C'));
+assertEquals('"\\uda5d"', JSON.stringify('\uDA5D'));
+assertEquals('"\\uda5e"', JSON.stringify('\uDA5E'));
+assertEquals('"\\uda5f"', JSON.stringify('\uDA5F'));
+assertEquals('"\\uda60"', JSON.stringify('\uDA60'));
+assertEquals('"\\uda61"', JSON.stringify('\uDA61'));
+assertEquals('"\\uda62"', JSON.stringify('\uDA62'));
+assertEquals('"\\uda63"', JSON.stringify('\uDA63'));
+assertEquals('"\\uda64"', JSON.stringify('\uDA64'));
+assertEquals('"\\uda65"', JSON.stringify('\uDA65'));
+assertEquals('"\\uda66"', JSON.stringify('\uDA66'));
+assertEquals('"\\uda67"', JSON.stringify('\uDA67'));
+assertEquals('"\\uda68"', JSON.stringify('\uDA68'));
+assertEquals('"\\uda69"', JSON.stringify('\uDA69'));
+assertEquals('"\\uda6a"', JSON.stringify('\uDA6A'));
+assertEquals('"\\uda6b"', JSON.stringify('\uDA6B'));
+assertEquals('"\\uda6c"', JSON.stringify('\uDA6C'));
+assertEquals('"\\uda6d"', JSON.stringify('\uDA6D'));
+assertEquals('"\\uda6e"', JSON.stringify('\uDA6E'));
+assertEquals('"\\uda6f"', JSON.stringify('\uDA6F'));
+assertEquals('"\\uda70"', JSON.stringify('\uDA70'));
+assertEquals('"\\uda71"', JSON.stringify('\uDA71'));
+assertEquals('"\\uda72"', JSON.stringify('\uDA72'));
+assertEquals('"\\uda73"', JSON.stringify('\uDA73'));
+assertEquals('"\\uda74"', JSON.stringify('\uDA74'));
+assertEquals('"\\uda75"', JSON.stringify('\uDA75'));
+assertEquals('"\\uda76"', JSON.stringify('\uDA76'));
+assertEquals('"\\uda77"', JSON.stringify('\uDA77'));
+assertEquals('"\\uda78"', JSON.stringify('\uDA78'));
+assertEquals('"\\uda79"', JSON.stringify('\uDA79'));
+assertEquals('"\\uda7a"', JSON.stringify('\uDA7A'));
+assertEquals('"\\uda7b"', JSON.stringify('\uDA7B'));
+assertEquals('"\\uda7c"', JSON.stringify('\uDA7C'));
+assertEquals('"\\uda7d"', JSON.stringify('\uDA7D'));
+assertEquals('"\\uda7e"', JSON.stringify('\uDA7E'));
+assertEquals('"\\uda7f"', JSON.stringify('\uDA7F'));
+assertEquals('"\\uda80"', JSON.stringify('\uDA80'));
+assertEquals('"\\uda81"', JSON.stringify('\uDA81'));
+assertEquals('"\\uda82"', JSON.stringify('\uDA82'));
+assertEquals('"\\uda83"', JSON.stringify('\uDA83'));
+assertEquals('"\\uda84"', JSON.stringify('\uDA84'));
+assertEquals('"\\uda85"', JSON.stringify('\uDA85'));
+assertEquals('"\\uda86"', JSON.stringify('\uDA86'));
+assertEquals('"\\uda87"', JSON.stringify('\uDA87'));
+assertEquals('"\\uda88"', JSON.stringify('\uDA88'));
+assertEquals('"\\uda89"', JSON.stringify('\uDA89'));
+assertEquals('"\\uda8a"', JSON.stringify('\uDA8A'));
+assertEquals('"\\uda8b"', JSON.stringify('\uDA8B'));
+assertEquals('"\\uda8c"', JSON.stringify('\uDA8C'));
+assertEquals('"\\uda8d"', JSON.stringify('\uDA8D'));
+assertEquals('"\\uda8e"', JSON.stringify('\uDA8E'));
+assertEquals('"\\uda8f"', JSON.stringify('\uDA8F'));
+assertEquals('"\\uda90"', JSON.stringify('\uDA90'));
+assertEquals('"\\uda91"', JSON.stringify('\uDA91'));
+assertEquals('"\\uda92"', JSON.stringify('\uDA92'));
+assertEquals('"\\uda93"', JSON.stringify('\uDA93'));
+assertEquals('"\\uda94"', JSON.stringify('\uDA94'));
+assertEquals('"\\uda95"', JSON.stringify('\uDA95'));
+assertEquals('"\\uda96"', JSON.stringify('\uDA96'));
+assertEquals('"\\uda97"', JSON.stringify('\uDA97'));
+assertEquals('"\\uda98"', JSON.stringify('\uDA98'));
+assertEquals('"\\uda99"', JSON.stringify('\uDA99'));
+assertEquals('"\\uda9a"', JSON.stringify('\uDA9A'));
+assertEquals('"\\uda9b"', JSON.stringify('\uDA9B'));
+assertEquals('"\\uda9c"', JSON.stringify('\uDA9C'));
+assertEquals('"\\uda9d"', JSON.stringify('\uDA9D'));
+assertEquals('"\\uda9e"', JSON.stringify('\uDA9E'));
+assertEquals('"\\uda9f"', JSON.stringify('\uDA9F'));
+assertEquals('"\\udaa0"', JSON.stringify('\uDAA0'));
+assertEquals('"\\udaa1"', JSON.stringify('\uDAA1'));
+assertEquals('"\\udaa2"', JSON.stringify('\uDAA2'));
+assertEquals('"\\udaa3"', JSON.stringify('\uDAA3'));
+assertEquals('"\\udaa4"', JSON.stringify('\uDAA4'));
+assertEquals('"\\udaa5"', JSON.stringify('\uDAA5'));
+assertEquals('"\\udaa6"', JSON.stringify('\uDAA6'));
+assertEquals('"\\udaa7"', JSON.stringify('\uDAA7'));
+assertEquals('"\\udaa8"', JSON.stringify('\uDAA8'));
+assertEquals('"\\udaa9"', JSON.stringify('\uDAA9'));
+assertEquals('"\\udaaa"', JSON.stringify('\uDAAA'));
+assertEquals('"\\udaab"', JSON.stringify('\uDAAB'));
+assertEquals('"\\udaac"', JSON.stringify('\uDAAC'));
+assertEquals('"\\udaad"', JSON.stringify('\uDAAD'));
+assertEquals('"\\udaae"', JSON.stringify('\uDAAE'));
+assertEquals('"\\udaaf"', JSON.stringify('\uDAAF'));
+assertEquals('"\\udab0"', JSON.stringify('\uDAB0'));
+assertEquals('"\\udab1"', JSON.stringify('\uDAB1'));
+assertEquals('"\\udab2"', JSON.stringify('\uDAB2'));
+assertEquals('"\\udab3"', JSON.stringify('\uDAB3'));
+assertEquals('"\\udab4"', JSON.stringify('\uDAB4'));
+assertEquals('"\\udab5"', JSON.stringify('\uDAB5'));
+assertEquals('"\\udab6"', JSON.stringify('\uDAB6'));
+assertEquals('"\\udab7"', JSON.stringify('\uDAB7'));
+assertEquals('"\\udab8"', JSON.stringify('\uDAB8'));
+assertEquals('"\\udab9"', JSON.stringify('\uDAB9'));
+assertEquals('"\\udaba"', JSON.stringify('\uDABA'));
+assertEquals('"\\udabb"', JSON.stringify('\uDABB'));
+assertEquals('"\\udabc"', JSON.stringify('\uDABC'));
+assertEquals('"\\udabd"', JSON.stringify('\uDABD'));
+assertEquals('"\\udabe"', JSON.stringify('\uDABE'));
+assertEquals('"\\udabf"', JSON.stringify('\uDABF'));
+assertEquals('"\\udac0"', JSON.stringify('\uDAC0'));
+assertEquals('"\\udac1"', JSON.stringify('\uDAC1'));
+assertEquals('"\\udac2"', JSON.stringify('\uDAC2'));
+assertEquals('"\\udac3"', JSON.stringify('\uDAC3'));
+assertEquals('"\\udac4"', JSON.stringify('\uDAC4'));
+assertEquals('"\\udac5"', JSON.stringify('\uDAC5'));
+assertEquals('"\\udac6"', JSON.stringify('\uDAC6'));
+assertEquals('"\\udac7"', JSON.stringify('\uDAC7'));
+assertEquals('"\\udac8"', JSON.stringify('\uDAC8'));
+assertEquals('"\\udac9"', JSON.stringify('\uDAC9'));
+assertEquals('"\\udaca"', JSON.stringify('\uDACA'));
+assertEquals('"\\udacb"', JSON.stringify('\uDACB'));
+assertEquals('"\\udacc"', JSON.stringify('\uDACC'));
+assertEquals('"\\udacd"', JSON.stringify('\uDACD'));
+assertEquals('"\\udace"', JSON.stringify('\uDACE'));
+assertEquals('"\\udacf"', JSON.stringify('\uDACF'));
+assertEquals('"\\udad0"', JSON.stringify('\uDAD0'));
+assertEquals('"\\udad1"', JSON.stringify('\uDAD1'));
+assertEquals('"\\udad2"', JSON.stringify('\uDAD2'));
+assertEquals('"\\udad3"', JSON.stringify('\uDAD3'));
+assertEquals('"\\udad4"', JSON.stringify('\uDAD4'));
+assertEquals('"\\udad5"', JSON.stringify('\uDAD5'));
+assertEquals('"\\udad6"', JSON.stringify('\uDAD6'));
+assertEquals('"\\udad7"', JSON.stringify('\uDAD7'));
+assertEquals('"\\udad8"', JSON.stringify('\uDAD8'));
+assertEquals('"\\udad9"', JSON.stringify('\uDAD9'));
+assertEquals('"\\udada"', JSON.stringify('\uDADA'));
+assertEquals('"\\udadb"', JSON.stringify('\uDADB'));
+assertEquals('"\\udadc"', JSON.stringify('\uDADC'));
+assertEquals('"\\udadd"', JSON.stringify('\uDADD'));
+assertEquals('"\\udade"', JSON.stringify('\uDADE'));
+assertEquals('"\\udadf"', JSON.stringify('\uDADF'));
+assertEquals('"\\udae0"', JSON.stringify('\uDAE0'));
+assertEquals('"\\udae1"', JSON.stringify('\uDAE1'));
+assertEquals('"\\udae2"', JSON.stringify('\uDAE2'));
+assertEquals('"\\udae3"', JSON.stringify('\uDAE3'));
+assertEquals('"\\udae4"', JSON.stringify('\uDAE4'));
+assertEquals('"\\udae5"', JSON.stringify('\uDAE5'));
+assertEquals('"\\udae6"', JSON.stringify('\uDAE6'));
+assertEquals('"\\udae7"', JSON.stringify('\uDAE7'));
+assertEquals('"\\udae8"', JSON.stringify('\uDAE8'));
+assertEquals('"\\udae9"', JSON.stringify('\uDAE9'));
+assertEquals('"\\udaea"', JSON.stringify('\uDAEA'));
+assertEquals('"\\udaeb"', JSON.stringify('\uDAEB'));
+assertEquals('"\\udaec"', JSON.stringify('\uDAEC'));
+assertEquals('"\\udaed"', JSON.stringify('\uDAED'));
+assertEquals('"\\udaee"', JSON.stringify('\uDAEE'));
+assertEquals('"\\udaef"', JSON.stringify('\uDAEF'));
+assertEquals('"\\udaf0"', JSON.stringify('\uDAF0'));
+assertEquals('"\\udaf1"', JSON.stringify('\uDAF1'));
+assertEquals('"\\udaf2"', JSON.stringify('\uDAF2'));
+assertEquals('"\\udaf3"', JSON.stringify('\uDAF3'));
+assertEquals('"\\udaf4"', JSON.stringify('\uDAF4'));
+assertEquals('"\\udaf5"', JSON.stringify('\uDAF5'));
+assertEquals('"\\udaf6"', JSON.stringify('\uDAF6'));
+assertEquals('"\\udaf7"', JSON.stringify('\uDAF7'));
+assertEquals('"\\udaf8"', JSON.stringify('\uDAF8'));
+assertEquals('"\\udaf9"', JSON.stringify('\uDAF9'));
+assertEquals('"\\udafa"', JSON.stringify('\uDAFA'));
+assertEquals('"\\udafb"', JSON.stringify('\uDAFB'));
+assertEquals('"\\udafc"', JSON.stringify('\uDAFC'));
+assertEquals('"\\udafd"', JSON.stringify('\uDAFD'));
+assertEquals('"\\udafe"', JSON.stringify('\uDAFE'));
+assertEquals('"\\udaff"', JSON.stringify('\uDAFF'));
+assertEquals('"\\udb00"', JSON.stringify('\uDB00'));
+assertEquals('"\\udb01"', JSON.stringify('\uDB01'));
+assertEquals('"\\udb02"', JSON.stringify('\uDB02'));
+assertEquals('"\\udb03"', JSON.stringify('\uDB03'));
+assertEquals('"\\udb04"', JSON.stringify('\uDB04'));
+assertEquals('"\\udb05"', JSON.stringify('\uDB05'));
+assertEquals('"\\udb06"', JSON.stringify('\uDB06'));
+assertEquals('"\\udb07"', JSON.stringify('\uDB07'));
+assertEquals('"\\udb08"', JSON.stringify('\uDB08'));
+assertEquals('"\\udb09"', JSON.stringify('\uDB09'));
+assertEquals('"\\udb0a"', JSON.stringify('\uDB0A'));
+assertEquals('"\\udb0b"', JSON.stringify('\uDB0B'));
+assertEquals('"\\udb0c"', JSON.stringify('\uDB0C'));
+assertEquals('"\\udb0d"', JSON.stringify('\uDB0D'));
+assertEquals('"\\udb0e"', JSON.stringify('\uDB0E'));
+assertEquals('"\\udb0f"', JSON.stringify('\uDB0F'));
+assertEquals('"\\udb10"', JSON.stringify('\uDB10'));
+assertEquals('"\\udb11"', JSON.stringify('\uDB11'));
+assertEquals('"\\udb12"', JSON.stringify('\uDB12'));
+assertEquals('"\\udb13"', JSON.stringify('\uDB13'));
+assertEquals('"\\udb14"', JSON.stringify('\uDB14'));
+assertEquals('"\\udb15"', JSON.stringify('\uDB15'));
+assertEquals('"\\udb16"', JSON.stringify('\uDB16'));
+assertEquals('"\\udb17"', JSON.stringify('\uDB17'));
+assertEquals('"\\udb18"', JSON.stringify('\uDB18'));
+assertEquals('"\\udb19"', JSON.stringify('\uDB19'));
+assertEquals('"\\udb1a"', JSON.stringify('\uDB1A'));
+assertEquals('"\\udb1b"', JSON.stringify('\uDB1B'));
+assertEquals('"\\udb1c"', JSON.stringify('\uDB1C'));
+assertEquals('"\\udb1d"', JSON.stringify('\uDB1D'));
+assertEquals('"\\udb1e"', JSON.stringify('\uDB1E'));
+assertEquals('"\\udb1f"', JSON.stringify('\uDB1F'));
+assertEquals('"\\udb20"', JSON.stringify('\uDB20'));
+assertEquals('"\\udb21"', JSON.stringify('\uDB21'));
+assertEquals('"\\udb22"', JSON.stringify('\uDB22'));
+assertEquals('"\\udb23"', JSON.stringify('\uDB23'));
+assertEquals('"\\udb24"', JSON.stringify('\uDB24'));
+assertEquals('"\\udb25"', JSON.stringify('\uDB25'));
+assertEquals('"\\udb26"', JSON.stringify('\uDB26'));
+assertEquals('"\\udb27"', JSON.stringify('\uDB27'));
+assertEquals('"\\udb28"', JSON.stringify('\uDB28'));
+assertEquals('"\\udb29"', JSON.stringify('\uDB29'));
+assertEquals('"\\udb2a"', JSON.stringify('\uDB2A'));
+assertEquals('"\\udb2b"', JSON.stringify('\uDB2B'));
+assertEquals('"\\udb2c"', JSON.stringify('\uDB2C'));
+assertEquals('"\\udb2d"', JSON.stringify('\uDB2D'));
+assertEquals('"\\udb2e"', JSON.stringify('\uDB2E'));
+assertEquals('"\\udb2f"', JSON.stringify('\uDB2F'));
+assertEquals('"\\udb30"', JSON.stringify('\uDB30'));
+assertEquals('"\\udb31"', JSON.stringify('\uDB31'));
+assertEquals('"\\udb32"', JSON.stringify('\uDB32'));
+assertEquals('"\\udb33"', JSON.stringify('\uDB33'));
+assertEquals('"\\udb34"', JSON.stringify('\uDB34'));
+assertEquals('"\\udb35"', JSON.stringify('\uDB35'));
+assertEquals('"\\udb36"', JSON.stringify('\uDB36'));
+assertEquals('"\\udb37"', JSON.stringify('\uDB37'));
+assertEquals('"\\udb38"', JSON.stringify('\uDB38'));
+assertEquals('"\\udb39"', JSON.stringify('\uDB39'));
+assertEquals('"\\udb3a"', JSON.stringify('\uDB3A'));
+assertEquals('"\\udb3b"', JSON.stringify('\uDB3B'));
+assertEquals('"\\udb3c"', JSON.stringify('\uDB3C'));
+assertEquals('"\\udb3d"', JSON.stringify('\uDB3D'));
+assertEquals('"\\udb3e"', JSON.stringify('\uDB3E'));
+assertEquals('"\\udb3f"', JSON.stringify('\uDB3F'));
+assertEquals('"\\udb40"', JSON.stringify('\uDB40'));
+assertEquals('"\\udb41"', JSON.stringify('\uDB41'));
+assertEquals('"\\udb42"', JSON.stringify('\uDB42'));
+assertEquals('"\\udb43"', JSON.stringify('\uDB43'));
+assertEquals('"\\udb44"', JSON.stringify('\uDB44'));
+assertEquals('"\\udb45"', JSON.stringify('\uDB45'));
+assertEquals('"\\udb46"', JSON.stringify('\uDB46'));
+assertEquals('"\\udb47"', JSON.stringify('\uDB47'));
+assertEquals('"\\udb48"', JSON.stringify('\uDB48'));
+assertEquals('"\\udb49"', JSON.stringify('\uDB49'));
+assertEquals('"\\udb4a"', JSON.stringify('\uDB4A'));
+assertEquals('"\\udb4b"', JSON.stringify('\uDB4B'));
+assertEquals('"\\udb4c"', JSON.stringify('\uDB4C'));
+assertEquals('"\\udb4d"', JSON.stringify('\uDB4D'));
+assertEquals('"\\udb4e"', JSON.stringify('\uDB4E'));
+assertEquals('"\\udb4f"', JSON.stringify('\uDB4F'));
+assertEquals('"\\udb50"', JSON.stringify('\uDB50'));
+assertEquals('"\\udb51"', JSON.stringify('\uDB51'));
+assertEquals('"\\udb52"', JSON.stringify('\uDB52'));
+assertEquals('"\\udb53"', JSON.stringify('\uDB53'));
+assertEquals('"\\udb54"', JSON.stringify('\uDB54'));
+assertEquals('"\\udb55"', JSON.stringify('\uDB55'));
+assertEquals('"\\udb56"', JSON.stringify('\uDB56'));
+assertEquals('"\\udb57"', JSON.stringify('\uDB57'));
+assertEquals('"\\udb58"', JSON.stringify('\uDB58'));
+assertEquals('"\\udb59"', JSON.stringify('\uDB59'));
+assertEquals('"\\udb5a"', JSON.stringify('\uDB5A'));
+assertEquals('"\\udb5b"', JSON.stringify('\uDB5B'));
+assertEquals('"\\udb5c"', JSON.stringify('\uDB5C'));
+assertEquals('"\\udb5d"', JSON.stringify('\uDB5D'));
+assertEquals('"\\udb5e"', JSON.stringify('\uDB5E'));
+assertEquals('"\\udb5f"', JSON.stringify('\uDB5F'));
+assertEquals('"\\udb60"', JSON.stringify('\uDB60'));
+assertEquals('"\\udb61"', JSON.stringify('\uDB61'));
+assertEquals('"\\udb62"', JSON.stringify('\uDB62'));
+assertEquals('"\\udb63"', JSON.stringify('\uDB63'));
+assertEquals('"\\udb64"', JSON.stringify('\uDB64'));
+assertEquals('"\\udb65"', JSON.stringify('\uDB65'));
+assertEquals('"\\udb66"', JSON.stringify('\uDB66'));
+assertEquals('"\\udb67"', JSON.stringify('\uDB67'));
+assertEquals('"\\udb68"', JSON.stringify('\uDB68'));
+assertEquals('"\\udb69"', JSON.stringify('\uDB69'));
+assertEquals('"\\udb6a"', JSON.stringify('\uDB6A'));
+assertEquals('"\\udb6b"', JSON.stringify('\uDB6B'));
+assertEquals('"\\udb6c"', JSON.stringify('\uDB6C'));
+assertEquals('"\\udb6d"', JSON.stringify('\uDB6D'));
+assertEquals('"\\udb6e"', JSON.stringify('\uDB6E'));
+assertEquals('"\\udb6f"', JSON.stringify('\uDB6F'));
+assertEquals('"\\udb70"', JSON.stringify('\uDB70'));
+assertEquals('"\\udb71"', JSON.stringify('\uDB71'));
+assertEquals('"\\udb72"', JSON.stringify('\uDB72'));
+assertEquals('"\\udb73"', JSON.stringify('\uDB73'));
+assertEquals('"\\udb74"', JSON.stringify('\uDB74'));
+assertEquals('"\\udb75"', JSON.stringify('\uDB75'));
+assertEquals('"\\udb76"', JSON.stringify('\uDB76'));
+assertEquals('"\\udb77"', JSON.stringify('\uDB77'));
+assertEquals('"\\udb78"', JSON.stringify('\uDB78'));
+assertEquals('"\\udb79"', JSON.stringify('\uDB79'));
+assertEquals('"\\udb7a"', JSON.stringify('\uDB7A'));
+assertEquals('"\\udb7b"', JSON.stringify('\uDB7B'));
+assertEquals('"\\udb7c"', JSON.stringify('\uDB7C'));
+assertEquals('"\\udb7d"', JSON.stringify('\uDB7D'));
+assertEquals('"\\udb7e"', JSON.stringify('\uDB7E'));
+assertEquals('"\\udb7f"', JSON.stringify('\uDB7F'));
+assertEquals('"\\udb80"', JSON.stringify('\uDB80'));
+assertEquals('"\\udb81"', JSON.stringify('\uDB81'));
+assertEquals('"\\udb82"', JSON.stringify('\uDB82'));
+assertEquals('"\\udb83"', JSON.stringify('\uDB83'));
+assertEquals('"\\udb84"', JSON.stringify('\uDB84'));
+assertEquals('"\\udb85"', JSON.stringify('\uDB85'));
+assertEquals('"\\udb86"', JSON.stringify('\uDB86'));
+assertEquals('"\\udb87"', JSON.stringify('\uDB87'));
+assertEquals('"\\udb88"', JSON.stringify('\uDB88'));
+assertEquals('"\\udb89"', JSON.stringify('\uDB89'));
+assertEquals('"\\udb8a"', JSON.stringify('\uDB8A'));
+assertEquals('"\\udb8b"', JSON.stringify('\uDB8B'));
+assertEquals('"\\udb8c"', JSON.stringify('\uDB8C'));
+assertEquals('"\\udb8d"', JSON.stringify('\uDB8D'));
+assertEquals('"\\udb8e"', JSON.stringify('\uDB8E'));
+assertEquals('"\\udb8f"', JSON.stringify('\uDB8F'));
+assertEquals('"\\udb90"', JSON.stringify('\uDB90'));
+assertEquals('"\\udb91"', JSON.stringify('\uDB91'));
+assertEquals('"\\udb92"', JSON.stringify('\uDB92'));
+assertEquals('"\\udb93"', JSON.stringify('\uDB93'));
+assertEquals('"\\udb94"', JSON.stringify('\uDB94'));
+assertEquals('"\\udb95"', JSON.stringify('\uDB95'));
+assertEquals('"\\udb96"', JSON.stringify('\uDB96'));
+assertEquals('"\\udb97"', JSON.stringify('\uDB97'));
+assertEquals('"\\udb98"', JSON.stringify('\uDB98'));
+assertEquals('"\\udb99"', JSON.stringify('\uDB99'));
+assertEquals('"\\udb9a"', JSON.stringify('\uDB9A'));
+assertEquals('"\\udb9b"', JSON.stringify('\uDB9B'));
+assertEquals('"\\udb9c"', JSON.stringify('\uDB9C'));
+assertEquals('"\\udb9d"', JSON.stringify('\uDB9D'));
+assertEquals('"\\udb9e"', JSON.stringify('\uDB9E'));
+assertEquals('"\\udb9f"', JSON.stringify('\uDB9F'));
+assertEquals('"\\udba0"', JSON.stringify('\uDBA0'));
+assertEquals('"\\udba1"', JSON.stringify('\uDBA1'));
+assertEquals('"\\udba2"', JSON.stringify('\uDBA2'));
+assertEquals('"\\udba3"', JSON.stringify('\uDBA3'));
+assertEquals('"\\udba4"', JSON.stringify('\uDBA4'));
+assertEquals('"\\udba5"', JSON.stringify('\uDBA5'));
+assertEquals('"\\udba6"', JSON.stringify('\uDBA6'));
+assertEquals('"\\udba7"', JSON.stringify('\uDBA7'));
+assertEquals('"\\udba8"', JSON.stringify('\uDBA8'));
+assertEquals('"\\udba9"', JSON.stringify('\uDBA9'));
+assertEquals('"\\udbaa"', JSON.stringify('\uDBAA'));
+assertEquals('"\\udbab"', JSON.stringify('\uDBAB'));
+assertEquals('"\\udbac"', JSON.stringify('\uDBAC'));
+assertEquals('"\\udbad"', JSON.stringify('\uDBAD'));
+assertEquals('"\\udbae"', JSON.stringify('\uDBAE'));
+assertEquals('"\\udbaf"', JSON.stringify('\uDBAF'));
+assertEquals('"\\udbb0"', JSON.stringify('\uDBB0'));
+assertEquals('"\\udbb1"', JSON.stringify('\uDBB1'));
+assertEquals('"\\udbb2"', JSON.stringify('\uDBB2'));
+assertEquals('"\\udbb3"', JSON.stringify('\uDBB3'));
+assertEquals('"\\udbb4"', JSON.stringify('\uDBB4'));
+assertEquals('"\\udbb5"', JSON.stringify('\uDBB5'));
+assertEquals('"\\udbb6"', JSON.stringify('\uDBB6'));
+assertEquals('"\\udbb7"', JSON.stringify('\uDBB7'));
+assertEquals('"\\udbb8"', JSON.stringify('\uDBB8'));
+assertEquals('"\\udbb9"', JSON.stringify('\uDBB9'));
+assertEquals('"\\udbba"', JSON.stringify('\uDBBA'));
+assertEquals('"\\udbbb"', JSON.stringify('\uDBBB'));
+assertEquals('"\\udbbc"', JSON.stringify('\uDBBC'));
+assertEquals('"\\udbbd"', JSON.stringify('\uDBBD'));
+assertEquals('"\\udbbe"', JSON.stringify('\uDBBE'));
+assertEquals('"\\udbbf"', JSON.stringify('\uDBBF'));
+assertEquals('"\\udbc0"', JSON.stringify('\uDBC0'));
+assertEquals('"\\udbc1"', JSON.stringify('\uDBC1'));
+assertEquals('"\\udbc2"', JSON.stringify('\uDBC2'));
+assertEquals('"\\udbc3"', JSON.stringify('\uDBC3'));
+assertEquals('"\\udbc4"', JSON.stringify('\uDBC4'));
+assertEquals('"\\udbc5"', JSON.stringify('\uDBC5'));
+assertEquals('"\\udbc6"', JSON.stringify('\uDBC6'));
+assertEquals('"\\udbc7"', JSON.stringify('\uDBC7'));
+assertEquals('"\\udbc8"', JSON.stringify('\uDBC8'));
+assertEquals('"\\udbc9"', JSON.stringify('\uDBC9'));
+assertEquals('"\\udbca"', JSON.stringify('\uDBCA'));
+assertEquals('"\\udbcb"', JSON.stringify('\uDBCB'));
+assertEquals('"\\udbcc"', JSON.stringify('\uDBCC'));
+assertEquals('"\\udbcd"', JSON.stringify('\uDBCD'));
+assertEquals('"\\udbce"', JSON.stringify('\uDBCE'));
+assertEquals('"\\udbcf"', JSON.stringify('\uDBCF'));
+assertEquals('"\\udbd0"', JSON.stringify('\uDBD0'));
+assertEquals('"\\udbd1"', JSON.stringify('\uDBD1'));
+assertEquals('"\\udbd2"', JSON.stringify('\uDBD2'));
+assertEquals('"\\udbd3"', JSON.stringify('\uDBD3'));
+assertEquals('"\\udbd4"', JSON.stringify('\uDBD4'));
+assertEquals('"\\udbd5"', JSON.stringify('\uDBD5'));
+assertEquals('"\\udbd6"', JSON.stringify('\uDBD6'));
+assertEquals('"\\udbd7"', JSON.stringify('\uDBD7'));
+assertEquals('"\\udbd8"', JSON.stringify('\uDBD8'));
+assertEquals('"\\udbd9"', JSON.stringify('\uDBD9'));
+assertEquals('"\\udbda"', JSON.stringify('\uDBDA'));
+assertEquals('"\\udbdb"', JSON.stringify('\uDBDB'));
+assertEquals('"\\udbdc"', JSON.stringify('\uDBDC'));
+assertEquals('"\\udbdd"', JSON.stringify('\uDBDD'));
+assertEquals('"\\udbde"', JSON.stringify('\uDBDE'));
+assertEquals('"\\udbdf"', JSON.stringify('\uDBDF'));
+assertEquals('"\\udbe0"', JSON.stringify('\uDBE0'));
+assertEquals('"\\udbe1"', JSON.stringify('\uDBE1'));
+assertEquals('"\\udbe2"', JSON.stringify('\uDBE2'));
+assertEquals('"\\udbe3"', JSON.stringify('\uDBE3'));
+assertEquals('"\\udbe4"', JSON.stringify('\uDBE4'));
+assertEquals('"\\udbe5"', JSON.stringify('\uDBE5'));
+assertEquals('"\\udbe6"', JSON.stringify('\uDBE6'));
+assertEquals('"\\udbe7"', JSON.stringify('\uDBE7'));
+assertEquals('"\\udbe8"', JSON.stringify('\uDBE8'));
+assertEquals('"\\udbe9"', JSON.stringify('\uDBE9'));
+assertEquals('"\\udbea"', JSON.stringify('\uDBEA'));
+assertEquals('"\\udbeb"', JSON.stringify('\uDBEB'));
+assertEquals('"\\udbec"', JSON.stringify('\uDBEC'));
+assertEquals('"\\udbed"', JSON.stringify('\uDBED'));
+assertEquals('"\\udbee"', JSON.stringify('\uDBEE'));
+assertEquals('"\\udbef"', JSON.stringify('\uDBEF'));
+assertEquals('"\\udbf0"', JSON.stringify('\uDBF0'));
+assertEquals('"\\udbf1"', JSON.stringify('\uDBF1'));
+assertEquals('"\\udbf2"', JSON.stringify('\uDBF2'));
+assertEquals('"\\udbf3"', JSON.stringify('\uDBF3'));
+assertEquals('"\\udbf4"', JSON.stringify('\uDBF4'));
+assertEquals('"\\udbf5"', JSON.stringify('\uDBF5'));
+assertEquals('"\\udbf6"', JSON.stringify('\uDBF6'));
+assertEquals('"\\udbf7"', JSON.stringify('\uDBF7'));
+assertEquals('"\\udbf8"', JSON.stringify('\uDBF8'));
+assertEquals('"\\udbf9"', JSON.stringify('\uDBF9'));
+assertEquals('"\\udbfa"', JSON.stringify('\uDBFA'));
+assertEquals('"\\udbfb"', JSON.stringify('\uDBFB'));
+assertEquals('"\\udbfc"', JSON.stringify('\uDBFC'));
+assertEquals('"\\udbfd"', JSON.stringify('\uDBFD'));
+assertEquals('"\\udbfe"', JSON.stringify('\uDBFE'));
+assertEquals('"\\udbff"', JSON.stringify('\uDBFF'));
+assertEquals('"\\udc00"', JSON.stringify('\uDC00'));
+assertEquals('"\\udc01"', JSON.stringify('\uDC01'));
+assertEquals('"\\udc02"', JSON.stringify('\uDC02'));
+assertEquals('"\\udc03"', JSON.stringify('\uDC03'));
+assertEquals('"\\udc04"', JSON.stringify('\uDC04'));
+assertEquals('"\\udc05"', JSON.stringify('\uDC05'));
+assertEquals('"\\udc06"', JSON.stringify('\uDC06'));
+assertEquals('"\\udc07"', JSON.stringify('\uDC07'));
+assertEquals('"\\udc08"', JSON.stringify('\uDC08'));
+assertEquals('"\\udc09"', JSON.stringify('\uDC09'));
+assertEquals('"\\udc0a"', JSON.stringify('\uDC0A'));
+assertEquals('"\\udc0b"', JSON.stringify('\uDC0B'));
+assertEquals('"\\udc0c"', JSON.stringify('\uDC0C'));
+assertEquals('"\\udc0d"', JSON.stringify('\uDC0D'));
+assertEquals('"\\udc0e"', JSON.stringify('\uDC0E'));
+assertEquals('"\\udc0f"', JSON.stringify('\uDC0F'));
+assertEquals('"\\udc10"', JSON.stringify('\uDC10'));
+assertEquals('"\\udc11"', JSON.stringify('\uDC11'));
+assertEquals('"\\udc12"', JSON.stringify('\uDC12'));
+assertEquals('"\\udc13"', JSON.stringify('\uDC13'));
+assertEquals('"\\udc14"', JSON.stringify('\uDC14'));
+assertEquals('"\\udc15"', JSON.stringify('\uDC15'));
+assertEquals('"\\udc16"', JSON.stringify('\uDC16'));
+assertEquals('"\\udc17"', JSON.stringify('\uDC17'));
+assertEquals('"\\udc18"', JSON.stringify('\uDC18'));
+assertEquals('"\\udc19"', JSON.stringify('\uDC19'));
+assertEquals('"\\udc1a"', JSON.stringify('\uDC1A'));
+assertEquals('"\\udc1b"', JSON.stringify('\uDC1B'));
+assertEquals('"\\udc1c"', JSON.stringify('\uDC1C'));
+assertEquals('"\\udc1d"', JSON.stringify('\uDC1D'));
+assertEquals('"\\udc1e"', JSON.stringify('\uDC1E'));
+assertEquals('"\\udc1f"', JSON.stringify('\uDC1F'));
+assertEquals('"\\udc20"', JSON.stringify('\uDC20'));
+assertEquals('"\\udc21"', JSON.stringify('\uDC21'));
+assertEquals('"\\udc22"', JSON.stringify('\uDC22'));
+assertEquals('"\\udc23"', JSON.stringify('\uDC23'));
+assertEquals('"\\udc24"', JSON.stringify('\uDC24'));
+assertEquals('"\\udc25"', JSON.stringify('\uDC25'));
+assertEquals('"\\udc26"', JSON.stringify('\uDC26'));
+assertEquals('"\\udc27"', JSON.stringify('\uDC27'));
+assertEquals('"\\udc28"', JSON.stringify('\uDC28'));
+assertEquals('"\\udc29"', JSON.stringify('\uDC29'));
+assertEquals('"\\udc2a"', JSON.stringify('\uDC2A'));
+assertEquals('"\\udc2b"', JSON.stringify('\uDC2B'));
+assertEquals('"\\udc2c"', JSON.stringify('\uDC2C'));
+assertEquals('"\\udc2d"', JSON.stringify('\uDC2D'));
+assertEquals('"\\udc2e"', JSON.stringify('\uDC2E'));
+assertEquals('"\\udc2f"', JSON.stringify('\uDC2F'));
+assertEquals('"\\udc30"', JSON.stringify('\uDC30'));
+assertEquals('"\\udc31"', JSON.stringify('\uDC31'));
+assertEquals('"\\udc32"', JSON.stringify('\uDC32'));
+assertEquals('"\\udc33"', JSON.stringify('\uDC33'));
+assertEquals('"\\udc34"', JSON.stringify('\uDC34'));
+assertEquals('"\\udc35"', JSON.stringify('\uDC35'));
+assertEquals('"\\udc36"', JSON.stringify('\uDC36'));
+assertEquals('"\\udc37"', JSON.stringify('\uDC37'));
+assertEquals('"\\udc38"', JSON.stringify('\uDC38'));
+assertEquals('"\\udc39"', JSON.stringify('\uDC39'));
+assertEquals('"\\udc3a"', JSON.stringify('\uDC3A'));
+assertEquals('"\\udc3b"', JSON.stringify('\uDC3B'));
+assertEquals('"\\udc3c"', JSON.stringify('\uDC3C'));
+assertEquals('"\\udc3d"', JSON.stringify('\uDC3D'));
+assertEquals('"\\udc3e"', JSON.stringify('\uDC3E'));
+assertEquals('"\\udc3f"', JSON.stringify('\uDC3F'));
+assertEquals('"\\udc40"', JSON.stringify('\uDC40'));
+assertEquals('"\\udc41"', JSON.stringify('\uDC41'));
+assertEquals('"\\udc42"', JSON.stringify('\uDC42'));
+assertEquals('"\\udc43"', JSON.stringify('\uDC43'));
+assertEquals('"\\udc44"', JSON.stringify('\uDC44'));
+assertEquals('"\\udc45"', JSON.stringify('\uDC45'));
+assertEquals('"\\udc46"', JSON.stringify('\uDC46'));
+assertEquals('"\\udc47"', JSON.stringify('\uDC47'));
+assertEquals('"\\udc48"', JSON.stringify('\uDC48'));
+assertEquals('"\\udc49"', JSON.stringify('\uDC49'));
+assertEquals('"\\udc4a"', JSON.stringify('\uDC4A'));
+assertEquals('"\\udc4b"', JSON.stringify('\uDC4B'));
+assertEquals('"\\udc4c"', JSON.stringify('\uDC4C'));
+assertEquals('"\\udc4d"', JSON.stringify('\uDC4D'));
+assertEquals('"\\udc4e"', JSON.stringify('\uDC4E'));
+assertEquals('"\\udc4f"', JSON.stringify('\uDC4F'));
+assertEquals('"\\udc50"', JSON.stringify('\uDC50'));
+assertEquals('"\\udc51"', JSON.stringify('\uDC51'));
+assertEquals('"\\udc52"', JSON.stringify('\uDC52'));
+assertEquals('"\\udc53"', JSON.stringify('\uDC53'));
+assertEquals('"\\udc54"', JSON.stringify('\uDC54'));
+assertEquals('"\\udc55"', JSON.stringify('\uDC55'));
+assertEquals('"\\udc56"', JSON.stringify('\uDC56'));
+assertEquals('"\\udc57"', JSON.stringify('\uDC57'));
+assertEquals('"\\udc58"', JSON.stringify('\uDC58'));
+assertEquals('"\\udc59"', JSON.stringify('\uDC59'));
+assertEquals('"\\udc5a"', JSON.stringify('\uDC5A'));
+assertEquals('"\\udc5b"', JSON.stringify('\uDC5B'));
+assertEquals('"\\udc5c"', JSON.stringify('\uDC5C'));
+assertEquals('"\\udc5d"', JSON.stringify('\uDC5D'));
+assertEquals('"\\udc5e"', JSON.stringify('\uDC5E'));
+assertEquals('"\\udc5f"', JSON.stringify('\uDC5F'));
+assertEquals('"\\udc60"', JSON.stringify('\uDC60'));
+assertEquals('"\\udc61"', JSON.stringify('\uDC61'));
+assertEquals('"\\udc62"', JSON.stringify('\uDC62'));
+assertEquals('"\\udc63"', JSON.stringify('\uDC63'));
+assertEquals('"\\udc64"', JSON.stringify('\uDC64'));
+assertEquals('"\\udc65"', JSON.stringify('\uDC65'));
+assertEquals('"\\udc66"', JSON.stringify('\uDC66'));
+assertEquals('"\\udc67"', JSON.stringify('\uDC67'));
+assertEquals('"\\udc68"', JSON.stringify('\uDC68'));
+assertEquals('"\\udc69"', JSON.stringify('\uDC69'));
+assertEquals('"\\udc6a"', JSON.stringify('\uDC6A'));
+assertEquals('"\\udc6b"', JSON.stringify('\uDC6B'));
+assertEquals('"\\udc6c"', JSON.stringify('\uDC6C'));
+assertEquals('"\\udc6d"', JSON.stringify('\uDC6D'));
+assertEquals('"\\udc6e"', JSON.stringify('\uDC6E'));
+assertEquals('"\\udc6f"', JSON.stringify('\uDC6F'));
+assertEquals('"\\udc70"', JSON.stringify('\uDC70'));
+assertEquals('"\\udc71"', JSON.stringify('\uDC71'));
+assertEquals('"\\udc72"', JSON.stringify('\uDC72'));
+assertEquals('"\\udc73"', JSON.stringify('\uDC73'));
+assertEquals('"\\udc74"', JSON.stringify('\uDC74'));
+assertEquals('"\\udc75"', JSON.stringify('\uDC75'));
+assertEquals('"\\udc76"', JSON.stringify('\uDC76'));
+assertEquals('"\\udc77"', JSON.stringify('\uDC77'));
+assertEquals('"\\udc78"', JSON.stringify('\uDC78'));
+assertEquals('"\\udc79"', JSON.stringify('\uDC79'));
+assertEquals('"\\udc7a"', JSON.stringify('\uDC7A'));
+assertEquals('"\\udc7b"', JSON.stringify('\uDC7B'));
+assertEquals('"\\udc7c"', JSON.stringify('\uDC7C'));
+assertEquals('"\\udc7d"', JSON.stringify('\uDC7D'));
+assertEquals('"\\udc7e"', JSON.stringify('\uDC7E'));
+assertEquals('"\\udc7f"', JSON.stringify('\uDC7F'));
+assertEquals('"\\udc80"', JSON.stringify('\uDC80'));
+assertEquals('"\\udc81"', JSON.stringify('\uDC81'));
+assertEquals('"\\udc82"', JSON.stringify('\uDC82'));
+assertEquals('"\\udc83"', JSON.stringify('\uDC83'));
+assertEquals('"\\udc84"', JSON.stringify('\uDC84'));
+assertEquals('"\\udc85"', JSON.stringify('\uDC85'));
+assertEquals('"\\udc86"', JSON.stringify('\uDC86'));
+assertEquals('"\\udc87"', JSON.stringify('\uDC87'));
+assertEquals('"\\udc88"', JSON.stringify('\uDC88'));
+assertEquals('"\\udc89"', JSON.stringify('\uDC89'));
+assertEquals('"\\udc8a"', JSON.stringify('\uDC8A'));
+assertEquals('"\\udc8b"', JSON.stringify('\uDC8B'));
+assertEquals('"\\udc8c"', JSON.stringify('\uDC8C'));
+assertEquals('"\\udc8d"', JSON.stringify('\uDC8D'));
+assertEquals('"\\udc8e"', JSON.stringify('\uDC8E'));
+assertEquals('"\\udc8f"', JSON.stringify('\uDC8F'));
+assertEquals('"\\udc90"', JSON.stringify('\uDC90'));
+assertEquals('"\\udc91"', JSON.stringify('\uDC91'));
+assertEquals('"\\udc92"', JSON.stringify('\uDC92'));
+assertEquals('"\\udc93"', JSON.stringify('\uDC93'));
+assertEquals('"\\udc94"', JSON.stringify('\uDC94'));
+assertEquals('"\\udc95"', JSON.stringify('\uDC95'));
+assertEquals('"\\udc96"', JSON.stringify('\uDC96'));
+assertEquals('"\\udc97"', JSON.stringify('\uDC97'));
+assertEquals('"\\udc98"', JSON.stringify('\uDC98'));
+assertEquals('"\\udc99"', JSON.stringify('\uDC99'));
+assertEquals('"\\udc9a"', JSON.stringify('\uDC9A'));
+assertEquals('"\\udc9b"', JSON.stringify('\uDC9B'));
+assertEquals('"\\udc9c"', JSON.stringify('\uDC9C'));
+assertEquals('"\\udc9d"', JSON.stringify('\uDC9D'));
+assertEquals('"\\udc9e"', JSON.stringify('\uDC9E'));
+assertEquals('"\\udc9f"', JSON.stringify('\uDC9F'));
+assertEquals('"\\udca0"', JSON.stringify('\uDCA0'));
+assertEquals('"\\udca1"', JSON.stringify('\uDCA1'));
+assertEquals('"\\udca2"', JSON.stringify('\uDCA2'));
+assertEquals('"\\udca3"', JSON.stringify('\uDCA3'));
+assertEquals('"\\udca4"', JSON.stringify('\uDCA4'));
+assertEquals('"\\udca5"', JSON.stringify('\uDCA5'));
+assertEquals('"\\udca6"', JSON.stringify('\uDCA6'));
+assertEquals('"\\udca7"', JSON.stringify('\uDCA7'));
+assertEquals('"\\udca8"', JSON.stringify('\uDCA8'));
+assertEquals('"\\udca9"', JSON.stringify('\uDCA9'));
+assertEquals('"\\udcaa"', JSON.stringify('\uDCAA'));
+assertEquals('"\\udcab"', JSON.stringify('\uDCAB'));
+assertEquals('"\\udcac"', JSON.stringify('\uDCAC'));
+assertEquals('"\\udcad"', JSON.stringify('\uDCAD'));
+assertEquals('"\\udcae"', JSON.stringify('\uDCAE'));
+assertEquals('"\\udcaf"', JSON.stringify('\uDCAF'));
+assertEquals('"\\udcb0"', JSON.stringify('\uDCB0'));
+assertEquals('"\\udcb1"', JSON.stringify('\uDCB1'));
+assertEquals('"\\udcb2"', JSON.stringify('\uDCB2'));
+assertEquals('"\\udcb3"', JSON.stringify('\uDCB3'));
+assertEquals('"\\udcb4"', JSON.stringify('\uDCB4'));
+assertEquals('"\\udcb5"', JSON.stringify('\uDCB5'));
+assertEquals('"\\udcb6"', JSON.stringify('\uDCB6'));
+assertEquals('"\\udcb7"', JSON.stringify('\uDCB7'));
+assertEquals('"\\udcb8"', JSON.stringify('\uDCB8'));
+assertEquals('"\\udcb9"', JSON.stringify('\uDCB9'));
+assertEquals('"\\udcba"', JSON.stringify('\uDCBA'));
+assertEquals('"\\udcbb"', JSON.stringify('\uDCBB'));
+assertEquals('"\\udcbc"', JSON.stringify('\uDCBC'));
+assertEquals('"\\udcbd"', JSON.stringify('\uDCBD'));
+assertEquals('"\\udcbe"', JSON.stringify('\uDCBE'));
+assertEquals('"\\udcbf"', JSON.stringify('\uDCBF'));
+assertEquals('"\\udcc0"', JSON.stringify('\uDCC0'));
+assertEquals('"\\udcc1"', JSON.stringify('\uDCC1'));
+assertEquals('"\\udcc2"', JSON.stringify('\uDCC2'));
+assertEquals('"\\udcc3"', JSON.stringify('\uDCC3'));
+assertEquals('"\\udcc4"', JSON.stringify('\uDCC4'));
+assertEquals('"\\udcc5"', JSON.stringify('\uDCC5'));
+assertEquals('"\\udcc6"', JSON.stringify('\uDCC6'));
+assertEquals('"\\udcc7"', JSON.stringify('\uDCC7'));
+assertEquals('"\\udcc8"', JSON.stringify('\uDCC8'));
+assertEquals('"\\udcc9"', JSON.stringify('\uDCC9'));
+assertEquals('"\\udcca"', JSON.stringify('\uDCCA'));
+assertEquals('"\\udccb"', JSON.stringify('\uDCCB'));
+assertEquals('"\\udccc"', JSON.stringify('\uDCCC'));
+assertEquals('"\\udccd"', JSON.stringify('\uDCCD'));
+assertEquals('"\\udcce"', JSON.stringify('\uDCCE'));
+assertEquals('"\\udccf"', JSON.stringify('\uDCCF'));
+assertEquals('"\\udcd0"', JSON.stringify('\uDCD0'));
+assertEquals('"\\udcd1"', JSON.stringify('\uDCD1'));
+assertEquals('"\\udcd2"', JSON.stringify('\uDCD2'));
+assertEquals('"\\udcd3"', JSON.stringify('\uDCD3'));
+assertEquals('"\\udcd4"', JSON.stringify('\uDCD4'));
+assertEquals('"\\udcd5"', JSON.stringify('\uDCD5'));
+assertEquals('"\\udcd6"', JSON.stringify('\uDCD6'));
+assertEquals('"\\udcd7"', JSON.stringify('\uDCD7'));
+assertEquals('"\\udcd8"', JSON.stringify('\uDCD8'));
+assertEquals('"\\udcd9"', JSON.stringify('\uDCD9'));
+assertEquals('"\\udcda"', JSON.stringify('\uDCDA'));
+assertEquals('"\\udcdb"', JSON.stringify('\uDCDB'));
+assertEquals('"\\udcdc"', JSON.stringify('\uDCDC'));
+assertEquals('"\\udcdd"', JSON.stringify('\uDCDD'));
+assertEquals('"\\udcde"', JSON.stringify('\uDCDE'));
+assertEquals('"\\udcdf"', JSON.stringify('\uDCDF'));
+assertEquals('"\\udce0"', JSON.stringify('\uDCE0'));
+assertEquals('"\\udce1"', JSON.stringify('\uDCE1'));
+assertEquals('"\\udce2"', JSON.stringify('\uDCE2'));
+assertEquals('"\\udce3"', JSON.stringify('\uDCE3'));
+assertEquals('"\\udce4"', JSON.stringify('\uDCE4'));
+assertEquals('"\\udce5"', JSON.stringify('\uDCE5'));
+assertEquals('"\\udce6"', JSON.stringify('\uDCE6'));
+assertEquals('"\\udce7"', JSON.stringify('\uDCE7'));
+assertEquals('"\\udce8"', JSON.stringify('\uDCE8'));
+assertEquals('"\\udce9"', JSON.stringify('\uDCE9'));
+assertEquals('"\\udcea"', JSON.stringify('\uDCEA'));
+assertEquals('"\\udceb"', JSON.stringify('\uDCEB'));
+assertEquals('"\\udcec"', JSON.stringify('\uDCEC'));
+assertEquals('"\\udced"', JSON.stringify('\uDCED'));
+assertEquals('"\\udcee"', JSON.stringify('\uDCEE'));
+assertEquals('"\\udcef"', JSON.stringify('\uDCEF'));
+assertEquals('"\\udcf0"', JSON.stringify('\uDCF0'));
+assertEquals('"\\udcf1"', JSON.stringify('\uDCF1'));
+assertEquals('"\\udcf2"', JSON.stringify('\uDCF2'));
+assertEquals('"\\udcf3"', JSON.stringify('\uDCF3'));
+assertEquals('"\\udcf4"', JSON.stringify('\uDCF4'));
+assertEquals('"\\udcf5"', JSON.stringify('\uDCF5'));
+assertEquals('"\\udcf6"', JSON.stringify('\uDCF6'));
+assertEquals('"\\udcf7"', JSON.stringify('\uDCF7'));
+assertEquals('"\\udcf8"', JSON.stringify('\uDCF8'));
+assertEquals('"\\udcf9"', JSON.stringify('\uDCF9'));
+assertEquals('"\\udcfa"', JSON.stringify('\uDCFA'));
+assertEquals('"\\udcfb"', JSON.stringify('\uDCFB'));
+assertEquals('"\\udcfc"', JSON.stringify('\uDCFC'));
+assertEquals('"\\udcfd"', JSON.stringify('\uDCFD'));
+assertEquals('"\\udcfe"', JSON.stringify('\uDCFE'));
+assertEquals('"\\udcff"', JSON.stringify('\uDCFF'));
+assertEquals('"\\udd00"', JSON.stringify('\uDD00'));
+assertEquals('"\\udd01"', JSON.stringify('\uDD01'));
+assertEquals('"\\udd02"', JSON.stringify('\uDD02'));
+assertEquals('"\\udd03"', JSON.stringify('\uDD03'));
+assertEquals('"\\udd04"', JSON.stringify('\uDD04'));
+assertEquals('"\\udd05"', JSON.stringify('\uDD05'));
+assertEquals('"\\udd06"', JSON.stringify('\uDD06'));
+assertEquals('"\\udd07"', JSON.stringify('\uDD07'));
+assertEquals('"\\udd08"', JSON.stringify('\uDD08'));
+assertEquals('"\\udd09"', JSON.stringify('\uDD09'));
+assertEquals('"\\udd0a"', JSON.stringify('\uDD0A'));
+assertEquals('"\\udd0b"', JSON.stringify('\uDD0B'));
+assertEquals('"\\udd0c"', JSON.stringify('\uDD0C'));
+assertEquals('"\\udd0d"', JSON.stringify('\uDD0D'));
+assertEquals('"\\udd0e"', JSON.stringify('\uDD0E'));
+assertEquals('"\\udd0f"', JSON.stringify('\uDD0F'));
+assertEquals('"\\udd10"', JSON.stringify('\uDD10'));
+assertEquals('"\\udd11"', JSON.stringify('\uDD11'));
+assertEquals('"\\udd12"', JSON.stringify('\uDD12'));
+assertEquals('"\\udd13"', JSON.stringify('\uDD13'));
+assertEquals('"\\udd14"', JSON.stringify('\uDD14'));
+assertEquals('"\\udd15"', JSON.stringify('\uDD15'));
+assertEquals('"\\udd16"', JSON.stringify('\uDD16'));
+assertEquals('"\\udd17"', JSON.stringify('\uDD17'));
+assertEquals('"\\udd18"', JSON.stringify('\uDD18'));
+assertEquals('"\\udd19"', JSON.stringify('\uDD19'));
+assertEquals('"\\udd1a"', JSON.stringify('\uDD1A'));
+assertEquals('"\\udd1b"', JSON.stringify('\uDD1B'));
+assertEquals('"\\udd1c"', JSON.stringify('\uDD1C'));
+assertEquals('"\\udd1d"', JSON.stringify('\uDD1D'));
+assertEquals('"\\udd1e"', JSON.stringify('\uDD1E'));
+assertEquals('"\\udd1f"', JSON.stringify('\uDD1F'));
+assertEquals('"\\udd20"', JSON.stringify('\uDD20'));
+assertEquals('"\\udd21"', JSON.stringify('\uDD21'));
+assertEquals('"\\udd22"', JSON.stringify('\uDD22'));
+assertEquals('"\\udd23"', JSON.stringify('\uDD23'));
+assertEquals('"\\udd24"', JSON.stringify('\uDD24'));
+assertEquals('"\\udd25"', JSON.stringify('\uDD25'));
+assertEquals('"\\udd26"', JSON.stringify('\uDD26'));
+assertEquals('"\\udd27"', JSON.stringify('\uDD27'));
+assertEquals('"\\udd28"', JSON.stringify('\uDD28'));
+assertEquals('"\\udd29"', JSON.stringify('\uDD29'));
+assertEquals('"\\udd2a"', JSON.stringify('\uDD2A'));
+assertEquals('"\\udd2b"', JSON.stringify('\uDD2B'));
+assertEquals('"\\udd2c"', JSON.stringify('\uDD2C'));
+assertEquals('"\\udd2d"', JSON.stringify('\uDD2D'));
+assertEquals('"\\udd2e"', JSON.stringify('\uDD2E'));
+assertEquals('"\\udd2f"', JSON.stringify('\uDD2F'));
+assertEquals('"\\udd30"', JSON.stringify('\uDD30'));
+assertEquals('"\\udd31"', JSON.stringify('\uDD31'));
+assertEquals('"\\udd32"', JSON.stringify('\uDD32'));
+assertEquals('"\\udd33"', JSON.stringify('\uDD33'));
+assertEquals('"\\udd34"', JSON.stringify('\uDD34'));
+assertEquals('"\\udd35"', JSON.stringify('\uDD35'));
+assertEquals('"\\udd36"', JSON.stringify('\uDD36'));
+assertEquals('"\\udd37"', JSON.stringify('\uDD37'));
+assertEquals('"\\udd38"', JSON.stringify('\uDD38'));
+assertEquals('"\\udd39"', JSON.stringify('\uDD39'));
+assertEquals('"\\udd3a"', JSON.stringify('\uDD3A'));
+assertEquals('"\\udd3b"', JSON.stringify('\uDD3B'));
+assertEquals('"\\udd3c"', JSON.stringify('\uDD3C'));
+assertEquals('"\\udd3d"', JSON.stringify('\uDD3D'));
+assertEquals('"\\udd3e"', JSON.stringify('\uDD3E'));
+assertEquals('"\\udd3f"', JSON.stringify('\uDD3F'));
+assertEquals('"\\udd40"', JSON.stringify('\uDD40'));
+assertEquals('"\\udd41"', JSON.stringify('\uDD41'));
+assertEquals('"\\udd42"', JSON.stringify('\uDD42'));
+assertEquals('"\\udd43"', JSON.stringify('\uDD43'));
+assertEquals('"\\udd44"', JSON.stringify('\uDD44'));
+assertEquals('"\\udd45"', JSON.stringify('\uDD45'));
+assertEquals('"\\udd46"', JSON.stringify('\uDD46'));
+assertEquals('"\\udd47"', JSON.stringify('\uDD47'));
+assertEquals('"\\udd48"', JSON.stringify('\uDD48'));
+assertEquals('"\\udd49"', JSON.stringify('\uDD49'));
+assertEquals('"\\udd4a"', JSON.stringify('\uDD4A'));
+assertEquals('"\\udd4b"', JSON.stringify('\uDD4B'));
+assertEquals('"\\udd4c"', JSON.stringify('\uDD4C'));
+assertEquals('"\\udd4d"', JSON.stringify('\uDD4D'));
+assertEquals('"\\udd4e"', JSON.stringify('\uDD4E'));
+assertEquals('"\\udd4f"', JSON.stringify('\uDD4F'));
+assertEquals('"\\udd50"', JSON.stringify('\uDD50'));
+assertEquals('"\\udd51"', JSON.stringify('\uDD51'));
+assertEquals('"\\udd52"', JSON.stringify('\uDD52'));
+assertEquals('"\\udd53"', JSON.stringify('\uDD53'));
+assertEquals('"\\udd54"', JSON.stringify('\uDD54'));
+assertEquals('"\\udd55"', JSON.stringify('\uDD55'));
+assertEquals('"\\udd56"', JSON.stringify('\uDD56'));
+assertEquals('"\\udd57"', JSON.stringify('\uDD57'));
+assertEquals('"\\udd58"', JSON.stringify('\uDD58'));
+assertEquals('"\\udd59"', JSON.stringify('\uDD59'));
+assertEquals('"\\udd5a"', JSON.stringify('\uDD5A'));
+assertEquals('"\\udd5b"', JSON.stringify('\uDD5B'));
+assertEquals('"\\udd5c"', JSON.stringify('\uDD5C'));
+assertEquals('"\\udd5d"', JSON.stringify('\uDD5D'));
+assertEquals('"\\udd5e"', JSON.stringify('\uDD5E'));
+assertEquals('"\\udd5f"', JSON.stringify('\uDD5F'));
+assertEquals('"\\udd60"', JSON.stringify('\uDD60'));
+assertEquals('"\\udd61"', JSON.stringify('\uDD61'));
+assertEquals('"\\udd62"', JSON.stringify('\uDD62'));
+assertEquals('"\\udd63"', JSON.stringify('\uDD63'));
+assertEquals('"\\udd64"', JSON.stringify('\uDD64'));
+assertEquals('"\\udd65"', JSON.stringify('\uDD65'));
+assertEquals('"\\udd66"', JSON.stringify('\uDD66'));
+assertEquals('"\\udd67"', JSON.stringify('\uDD67'));
+assertEquals('"\\udd68"', JSON.stringify('\uDD68'));
+assertEquals('"\\udd69"', JSON.stringify('\uDD69'));
+assertEquals('"\\udd6a"', JSON.stringify('\uDD6A'));
+assertEquals('"\\udd6b"', JSON.stringify('\uDD6B'));
+assertEquals('"\\udd6c"', JSON.stringify('\uDD6C'));
+assertEquals('"\\udd6d"', JSON.stringify('\uDD6D'));
+assertEquals('"\\udd6e"', JSON.stringify('\uDD6E'));
+assertEquals('"\\udd6f"', JSON.stringify('\uDD6F'));
+assertEquals('"\\udd70"', JSON.stringify('\uDD70'));
+assertEquals('"\\udd71"', JSON.stringify('\uDD71'));
+assertEquals('"\\udd72"', JSON.stringify('\uDD72'));
+assertEquals('"\\udd73"', JSON.stringify('\uDD73'));
+assertEquals('"\\udd74"', JSON.stringify('\uDD74'));
+assertEquals('"\\udd75"', JSON.stringify('\uDD75'));
+assertEquals('"\\udd76"', JSON.stringify('\uDD76'));
+assertEquals('"\\udd77"', JSON.stringify('\uDD77'));
+assertEquals('"\\udd78"', JSON.stringify('\uDD78'));
+assertEquals('"\\udd79"', JSON.stringify('\uDD79'));
+assertEquals('"\\udd7a"', JSON.stringify('\uDD7A'));
+assertEquals('"\\udd7b"', JSON.stringify('\uDD7B'));
+assertEquals('"\\udd7c"', JSON.stringify('\uDD7C'));
+assertEquals('"\\udd7d"', JSON.stringify('\uDD7D'));
+assertEquals('"\\udd7e"', JSON.stringify('\uDD7E'));
+assertEquals('"\\udd7f"', JSON.stringify('\uDD7F'));
+assertEquals('"\\udd80"', JSON.stringify('\uDD80'));
+assertEquals('"\\udd81"', JSON.stringify('\uDD81'));
+assertEquals('"\\udd82"', JSON.stringify('\uDD82'));
+assertEquals('"\\udd83"', JSON.stringify('\uDD83'));
+assertEquals('"\\udd84"', JSON.stringify('\uDD84'));
+assertEquals('"\\udd85"', JSON.stringify('\uDD85'));
+assertEquals('"\\udd86"', JSON.stringify('\uDD86'));
+assertEquals('"\\udd87"', JSON.stringify('\uDD87'));
+assertEquals('"\\udd88"', JSON.stringify('\uDD88'));
+assertEquals('"\\udd89"', JSON.stringify('\uDD89'));
+assertEquals('"\\udd8a"', JSON.stringify('\uDD8A'));
+assertEquals('"\\udd8b"', JSON.stringify('\uDD8B'));
+assertEquals('"\\udd8c"', JSON.stringify('\uDD8C'));
+assertEquals('"\\udd8d"', JSON.stringify('\uDD8D'));
+assertEquals('"\\udd8e"', JSON.stringify('\uDD8E'));
+assertEquals('"\\udd8f"', JSON.stringify('\uDD8F'));
+assertEquals('"\\udd90"', JSON.stringify('\uDD90'));
+assertEquals('"\\udd91"', JSON.stringify('\uDD91'));
+assertEquals('"\\udd92"', JSON.stringify('\uDD92'));
+assertEquals('"\\udd93"', JSON.stringify('\uDD93'));
+assertEquals('"\\udd94"', JSON.stringify('\uDD94'));
+assertEquals('"\\udd95"', JSON.stringify('\uDD95'));
+assertEquals('"\\udd96"', JSON.stringify('\uDD96'));
+assertEquals('"\\udd97"', JSON.stringify('\uDD97'));
+assertEquals('"\\udd98"', JSON.stringify('\uDD98'));
+assertEquals('"\\udd99"', JSON.stringify('\uDD99'));
+assertEquals('"\\udd9a"', JSON.stringify('\uDD9A'));
+assertEquals('"\\udd9b"', JSON.stringify('\uDD9B'));
+assertEquals('"\\udd9c"', JSON.stringify('\uDD9C'));
+assertEquals('"\\udd9d"', JSON.stringify('\uDD9D'));
+assertEquals('"\\udd9e"', JSON.stringify('\uDD9E'));
+assertEquals('"\\udd9f"', JSON.stringify('\uDD9F'));
+assertEquals('"\\udda0"', JSON.stringify('\uDDA0'));
+assertEquals('"\\udda1"', JSON.stringify('\uDDA1'));
+assertEquals('"\\udda2"', JSON.stringify('\uDDA2'));
+assertEquals('"\\udda3"', JSON.stringify('\uDDA3'));
+assertEquals('"\\udda4"', JSON.stringify('\uDDA4'));
+assertEquals('"\\udda5"', JSON.stringify('\uDDA5'));
+assertEquals('"\\udda6"', JSON.stringify('\uDDA6'));
+assertEquals('"\\udda7"', JSON.stringify('\uDDA7'));
+assertEquals('"\\udda8"', JSON.stringify('\uDDA8'));
+assertEquals('"\\udda9"', JSON.stringify('\uDDA9'));
+assertEquals('"\\uddaa"', JSON.stringify('\uDDAA'));
+assertEquals('"\\uddab"', JSON.stringify('\uDDAB'));
+assertEquals('"\\uddac"', JSON.stringify('\uDDAC'));
+assertEquals('"\\uddad"', JSON.stringify('\uDDAD'));
+assertEquals('"\\uddae"', JSON.stringify('\uDDAE'));
+assertEquals('"\\uddaf"', JSON.stringify('\uDDAF'));
+assertEquals('"\\uddb0"', JSON.stringify('\uDDB0'));
+assertEquals('"\\uddb1"', JSON.stringify('\uDDB1'));
+assertEquals('"\\uddb2"', JSON.stringify('\uDDB2'));
+assertEquals('"\\uddb3"', JSON.stringify('\uDDB3'));
+assertEquals('"\\uddb4"', JSON.stringify('\uDDB4'));
+assertEquals('"\\uddb5"', JSON.stringify('\uDDB5'));
+assertEquals('"\\uddb6"', JSON.stringify('\uDDB6'));
+assertEquals('"\\uddb7"', JSON.stringify('\uDDB7'));
+assertEquals('"\\uddb8"', JSON.stringify('\uDDB8'));
+assertEquals('"\\uddb9"', JSON.stringify('\uDDB9'));
+assertEquals('"\\uddba"', JSON.stringify('\uDDBA'));
+assertEquals('"\\uddbb"', JSON.stringify('\uDDBB'));
+assertEquals('"\\uddbc"', JSON.stringify('\uDDBC'));
+assertEquals('"\\uddbd"', JSON.stringify('\uDDBD'));
+assertEquals('"\\uddbe"', JSON.stringify('\uDDBE'));
+assertEquals('"\\uddbf"', JSON.stringify('\uDDBF'));
+assertEquals('"\\uddc0"', JSON.stringify('\uDDC0'));
+assertEquals('"\\uddc1"', JSON.stringify('\uDDC1'));
+assertEquals('"\\uddc2"', JSON.stringify('\uDDC2'));
+assertEquals('"\\uddc3"', JSON.stringify('\uDDC3'));
+assertEquals('"\\uddc4"', JSON.stringify('\uDDC4'));
+assertEquals('"\\uddc5"', JSON.stringify('\uDDC5'));
+assertEquals('"\\uddc6"', JSON.stringify('\uDDC6'));
+assertEquals('"\\uddc7"', JSON.stringify('\uDDC7'));
+assertEquals('"\\uddc8"', JSON.stringify('\uDDC8'));
+assertEquals('"\\uddc9"', JSON.stringify('\uDDC9'));
+assertEquals('"\\uddca"', JSON.stringify('\uDDCA'));
+assertEquals('"\\uddcb"', JSON.stringify('\uDDCB'));
+assertEquals('"\\uddcc"', JSON.stringify('\uDDCC'));
+assertEquals('"\\uddcd"', JSON.stringify('\uDDCD'));
+assertEquals('"\\uddce"', JSON.stringify('\uDDCE'));
+assertEquals('"\\uddcf"', JSON.stringify('\uDDCF'));
+assertEquals('"\\uddd0"', JSON.stringify('\uDDD0'));
+assertEquals('"\\uddd1"', JSON.stringify('\uDDD1'));
+assertEquals('"\\uddd2"', JSON.stringify('\uDDD2'));
+assertEquals('"\\uddd3"', JSON.stringify('\uDDD3'));
+assertEquals('"\\uddd4"', JSON.stringify('\uDDD4'));
+assertEquals('"\\uddd5"', JSON.stringify('\uDDD5'));
+assertEquals('"\\uddd6"', JSON.stringify('\uDDD6'));
+assertEquals('"\\uddd7"', JSON.stringify('\uDDD7'));
+assertEquals('"\\uddd8"', JSON.stringify('\uDDD8'));
+assertEquals('"\\uddd9"', JSON.stringify('\uDDD9'));
+assertEquals('"\\uddda"', JSON.stringify('\uDDDA'));
+assertEquals('"\\udddb"', JSON.stringify('\uDDDB'));
+assertEquals('"\\udddc"', JSON.stringify('\uDDDC'));
+assertEquals('"\\udddd"', JSON.stringify('\uDDDD'));
+assertEquals('"\\uddde"', JSON.stringify('\uDDDE'));
+assertEquals('"\\udddf"', JSON.stringify('\uDDDF'));
+assertEquals('"\\udde0"', JSON.stringify('\uDDE0'));
+assertEquals('"\\udde1"', JSON.stringify('\uDDE1'));
+assertEquals('"\\udde2"', JSON.stringify('\uDDE2'));
+assertEquals('"\\udde3"', JSON.stringify('\uDDE3'));
+assertEquals('"\\udde4"', JSON.stringify('\uDDE4'));
+assertEquals('"\\udde5"', JSON.stringify('\uDDE5'));
+assertEquals('"\\udde6"', JSON.stringify('\uDDE6'));
+assertEquals('"\\udde7"', JSON.stringify('\uDDE7'));
+assertEquals('"\\udde8"', JSON.stringify('\uDDE8'));
+assertEquals('"\\udde9"', JSON.stringify('\uDDE9'));
+assertEquals('"\\uddea"', JSON.stringify('\uDDEA'));
+assertEquals('"\\uddeb"', JSON.stringify('\uDDEB'));
+assertEquals('"\\uddec"', JSON.stringify('\uDDEC'));
+assertEquals('"\\udded"', JSON.stringify('\uDDED'));
+assertEquals('"\\uddee"', JSON.stringify('\uDDEE'));
+assertEquals('"\\uddef"', JSON.stringify('\uDDEF'));
+assertEquals('"\\uddf0"', JSON.stringify('\uDDF0'));
+assertEquals('"\\uddf1"', JSON.stringify('\uDDF1'));
+assertEquals('"\\uddf2"', JSON.stringify('\uDDF2'));
+assertEquals('"\\uddf3"', JSON.stringify('\uDDF3'));
+assertEquals('"\\uddf4"', JSON.stringify('\uDDF4'));
+assertEquals('"\\uddf5"', JSON.stringify('\uDDF5'));
+assertEquals('"\\uddf6"', JSON.stringify('\uDDF6'));
+assertEquals('"\\uddf7"', JSON.stringify('\uDDF7'));
+assertEquals('"\\uddf8"', JSON.stringify('\uDDF8'));
+assertEquals('"\\uddf9"', JSON.stringify('\uDDF9'));
+assertEquals('"\\uddfa"', JSON.stringify('\uDDFA'));
+assertEquals('"\\uddfb"', JSON.stringify('\uDDFB'));
+assertEquals('"\\uddfc"', JSON.stringify('\uDDFC'));
+assertEquals('"\\uddfd"', JSON.stringify('\uDDFD'));
+assertEquals('"\\uddfe"', JSON.stringify('\uDDFE'));
+assertEquals('"\\uddff"', JSON.stringify('\uDDFF'));
+assertEquals('"\\ude00"', JSON.stringify('\uDE00'));
+assertEquals('"\\ude01"', JSON.stringify('\uDE01'));
+assertEquals('"\\ude02"', JSON.stringify('\uDE02'));
+assertEquals('"\\ude03"', JSON.stringify('\uDE03'));
+assertEquals('"\\ude04"', JSON.stringify('\uDE04'));
+assertEquals('"\\ude05"', JSON.stringify('\uDE05'));
+assertEquals('"\\ude06"', JSON.stringify('\uDE06'));
+assertEquals('"\\ude07"', JSON.stringify('\uDE07'));
+assertEquals('"\\ude08"', JSON.stringify('\uDE08'));
+assertEquals('"\\ude09"', JSON.stringify('\uDE09'));
+assertEquals('"\\ude0a"', JSON.stringify('\uDE0A'));
+assertEquals('"\\ude0b"', JSON.stringify('\uDE0B'));
+assertEquals('"\\ude0c"', JSON.stringify('\uDE0C'));
+assertEquals('"\\ude0d"', JSON.stringify('\uDE0D'));
+assertEquals('"\\ude0e"', JSON.stringify('\uDE0E'));
+assertEquals('"\\ude0f"', JSON.stringify('\uDE0F'));
+assertEquals('"\\ude10"', JSON.stringify('\uDE10'));
+assertEquals('"\\ude11"', JSON.stringify('\uDE11'));
+assertEquals('"\\ude12"', JSON.stringify('\uDE12'));
+assertEquals('"\\ude13"', JSON.stringify('\uDE13'));
+assertEquals('"\\ude14"', JSON.stringify('\uDE14'));
+assertEquals('"\\ude15"', JSON.stringify('\uDE15'));
+assertEquals('"\\ude16"', JSON.stringify('\uDE16'));
+assertEquals('"\\ude17"', JSON.stringify('\uDE17'));
+assertEquals('"\\ude18"', JSON.stringify('\uDE18'));
+assertEquals('"\\ude19"', JSON.stringify('\uDE19'));
+assertEquals('"\\ude1a"', JSON.stringify('\uDE1A'));
+assertEquals('"\\ude1b"', JSON.stringify('\uDE1B'));
+assertEquals('"\\ude1c"', JSON.stringify('\uDE1C'));
+assertEquals('"\\ude1d"', JSON.stringify('\uDE1D'));
+assertEquals('"\\ude1e"', JSON.stringify('\uDE1E'));
+assertEquals('"\\ude1f"', JSON.stringify('\uDE1F'));
+assertEquals('"\\ude20"', JSON.stringify('\uDE20'));
+assertEquals('"\\ude21"', JSON.stringify('\uDE21'));
+assertEquals('"\\ude22"', JSON.stringify('\uDE22'));
+assertEquals('"\\ude23"', JSON.stringify('\uDE23'));
+assertEquals('"\\ude24"', JSON.stringify('\uDE24'));
+assertEquals('"\\ude25"', JSON.stringify('\uDE25'));
+assertEquals('"\\ude26"', JSON.stringify('\uDE26'));
+assertEquals('"\\ude27"', JSON.stringify('\uDE27'));
+assertEquals('"\\ude28"', JSON.stringify('\uDE28'));
+assertEquals('"\\ude29"', JSON.stringify('\uDE29'));
+assertEquals('"\\ude2a"', JSON.stringify('\uDE2A'));
+assertEquals('"\\ude2b"', JSON.stringify('\uDE2B'));
+assertEquals('"\\ude2c"', JSON.stringify('\uDE2C'));
+assertEquals('"\\ude2d"', JSON.stringify('\uDE2D'));
+assertEquals('"\\ude2e"', JSON.stringify('\uDE2E'));
+assertEquals('"\\ude2f"', JSON.stringify('\uDE2F'));
+assertEquals('"\\ude30"', JSON.stringify('\uDE30'));
+assertEquals('"\\ude31"', JSON.stringify('\uDE31'));
+assertEquals('"\\ude32"', JSON.stringify('\uDE32'));
+assertEquals('"\\ude33"', JSON.stringify('\uDE33'));
+assertEquals('"\\ude34"', JSON.stringify('\uDE34'));
+assertEquals('"\\ude35"', JSON.stringify('\uDE35'));
+assertEquals('"\\ude36"', JSON.stringify('\uDE36'));
+assertEquals('"\\ude37"', JSON.stringify('\uDE37'));
+assertEquals('"\\ude38"', JSON.stringify('\uDE38'));
+assertEquals('"\\ude39"', JSON.stringify('\uDE39'));
+assertEquals('"\\ude3a"', JSON.stringify('\uDE3A'));
+assertEquals('"\\ude3b"', JSON.stringify('\uDE3B'));
+assertEquals('"\\ude3c"', JSON.stringify('\uDE3C'));
+assertEquals('"\\ude3d"', JSON.stringify('\uDE3D'));
+assertEquals('"\\ude3e"', JSON.stringify('\uDE3E'));
+assertEquals('"\\ude3f"', JSON.stringify('\uDE3F'));
+assertEquals('"\\ude40"', JSON.stringify('\uDE40'));
+assertEquals('"\\ude41"', JSON.stringify('\uDE41'));
+assertEquals('"\\ude42"', JSON.stringify('\uDE42'));
+assertEquals('"\\ude43"', JSON.stringify('\uDE43'));
+assertEquals('"\\ude44"', JSON.stringify('\uDE44'));
+assertEquals('"\\ude45"', JSON.stringify('\uDE45'));
+assertEquals('"\\ude46"', JSON.stringify('\uDE46'));
+assertEquals('"\\ude47"', JSON.stringify('\uDE47'));
+assertEquals('"\\ude48"', JSON.stringify('\uDE48'));
+assertEquals('"\\ude49"', JSON.stringify('\uDE49'));
+assertEquals('"\\ude4a"', JSON.stringify('\uDE4A'));
+assertEquals('"\\ude4b"', JSON.stringify('\uDE4B'));
+assertEquals('"\\ude4c"', JSON.stringify('\uDE4C'));
+assertEquals('"\\ude4d"', JSON.stringify('\uDE4D'));
+assertEquals('"\\ude4e"', JSON.stringify('\uDE4E'));
+assertEquals('"\\ude4f"', JSON.stringify('\uDE4F'));
+assertEquals('"\\ude50"', JSON.stringify('\uDE50'));
+assertEquals('"\\ude51"', JSON.stringify('\uDE51'));
+assertEquals('"\\ude52"', JSON.stringify('\uDE52'));
+assertEquals('"\\ude53"', JSON.stringify('\uDE53'));
+assertEquals('"\\ude54"', JSON.stringify('\uDE54'));
+assertEquals('"\\ude55"', JSON.stringify('\uDE55'));
+assertEquals('"\\ude56"', JSON.stringify('\uDE56'));
+assertEquals('"\\ude57"', JSON.stringify('\uDE57'));
+assertEquals('"\\ude58"', JSON.stringify('\uDE58'));
+assertEquals('"\\ude59"', JSON.stringify('\uDE59'));
+assertEquals('"\\ude5a"', JSON.stringify('\uDE5A'));
+assertEquals('"\\ude5b"', JSON.stringify('\uDE5B'));
+assertEquals('"\\ude5c"', JSON.stringify('\uDE5C'));
+assertEquals('"\\ude5d"', JSON.stringify('\uDE5D'));
+assertEquals('"\\ude5e"', JSON.stringify('\uDE5E'));
+assertEquals('"\\ude5f"', JSON.stringify('\uDE5F'));
+assertEquals('"\\ude60"', JSON.stringify('\uDE60'));
+assertEquals('"\\ude61"', JSON.stringify('\uDE61'));
+assertEquals('"\\ude62"', JSON.stringify('\uDE62'));
+assertEquals('"\\ude63"', JSON.stringify('\uDE63'));
+assertEquals('"\\ude64"', JSON.stringify('\uDE64'));
+assertEquals('"\\ude65"', JSON.stringify('\uDE65'));
+assertEquals('"\\ude66"', JSON.stringify('\uDE66'));
+assertEquals('"\\ude67"', JSON.stringify('\uDE67'));
+assertEquals('"\\ude68"', JSON.stringify('\uDE68'));
+assertEquals('"\\ude69"', JSON.stringify('\uDE69'));
+assertEquals('"\\ude6a"', JSON.stringify('\uDE6A'));
+assertEquals('"\\ude6b"', JSON.stringify('\uDE6B'));
+assertEquals('"\\ude6c"', JSON.stringify('\uDE6C'));
+assertEquals('"\\ude6d"', JSON.stringify('\uDE6D'));
+assertEquals('"\\ude6e"', JSON.stringify('\uDE6E'));
+assertEquals('"\\ude6f"', JSON.stringify('\uDE6F'));
+assertEquals('"\\ude70"', JSON.stringify('\uDE70'));
+assertEquals('"\\ude71"', JSON.stringify('\uDE71'));
+assertEquals('"\\ude72"', JSON.stringify('\uDE72'));
+assertEquals('"\\ude73"', JSON.stringify('\uDE73'));
+assertEquals('"\\ude74"', JSON.stringify('\uDE74'));
+assertEquals('"\\ude75"', JSON.stringify('\uDE75'));
+assertEquals('"\\ude76"', JSON.stringify('\uDE76'));
+assertEquals('"\\ude77"', JSON.stringify('\uDE77'));
+assertEquals('"\\ude78"', JSON.stringify('\uDE78'));
+assertEquals('"\\ude79"', JSON.stringify('\uDE79'));
+assertEquals('"\\ude7a"', JSON.stringify('\uDE7A'));
+assertEquals('"\\ude7b"', JSON.stringify('\uDE7B'));
+assertEquals('"\\ude7c"', JSON.stringify('\uDE7C'));
+assertEquals('"\\ude7d"', JSON.stringify('\uDE7D'));
+assertEquals('"\\ude7e"', JSON.stringify('\uDE7E'));
+assertEquals('"\\ude7f"', JSON.stringify('\uDE7F'));
+assertEquals('"\\ude80"', JSON.stringify('\uDE80'));
+assertEquals('"\\ude81"', JSON.stringify('\uDE81'));
+assertEquals('"\\ude82"', JSON.stringify('\uDE82'));
+assertEquals('"\\ude83"', JSON.stringify('\uDE83'));
+assertEquals('"\\ude84"', JSON.stringify('\uDE84'));
+assertEquals('"\\ude85"', JSON.stringify('\uDE85'));
+assertEquals('"\\ude86"', JSON.stringify('\uDE86'));
+assertEquals('"\\ude87"', JSON.stringify('\uDE87'));
+assertEquals('"\\ude88"', JSON.stringify('\uDE88'));
+assertEquals('"\\ude89"', JSON.stringify('\uDE89'));
+assertEquals('"\\ude8a"', JSON.stringify('\uDE8A'));
+assertEquals('"\\ude8b"', JSON.stringify('\uDE8B'));
+assertEquals('"\\ude8c"', JSON.stringify('\uDE8C'));
+assertEquals('"\\ude8d"', JSON.stringify('\uDE8D'));
+assertEquals('"\\ude8e"', JSON.stringify('\uDE8E'));
+assertEquals('"\\ude8f"', JSON.stringify('\uDE8F'));
+assertEquals('"\\ude90"', JSON.stringify('\uDE90'));
+assertEquals('"\\ude91"', JSON.stringify('\uDE91'));
+assertEquals('"\\ude92"', JSON.stringify('\uDE92'));
+assertEquals('"\\ude93"', JSON.stringify('\uDE93'));
+assertEquals('"\\ude94"', JSON.stringify('\uDE94'));
+assertEquals('"\\ude95"', JSON.stringify('\uDE95'));
+assertEquals('"\\ude96"', JSON.stringify('\uDE96'));
+assertEquals('"\\ude97"', JSON.stringify('\uDE97'));
+assertEquals('"\\ude98"', JSON.stringify('\uDE98'));
+assertEquals('"\\ude99"', JSON.stringify('\uDE99'));
+assertEquals('"\\ude9a"', JSON.stringify('\uDE9A'));
+assertEquals('"\\ude9b"', JSON.stringify('\uDE9B'));
+assertEquals('"\\ude9c"', JSON.stringify('\uDE9C'));
+assertEquals('"\\ude9d"', JSON.stringify('\uDE9D'));
+assertEquals('"\\ude9e"', JSON.stringify('\uDE9E'));
+assertEquals('"\\ude9f"', JSON.stringify('\uDE9F'));
+assertEquals('"\\udea0"', JSON.stringify('\uDEA0'));
+assertEquals('"\\udea1"', JSON.stringify('\uDEA1'));
+assertEquals('"\\udea2"', JSON.stringify('\uDEA2'));
+assertEquals('"\\udea3"', JSON.stringify('\uDEA3'));
+assertEquals('"\\udea4"', JSON.stringify('\uDEA4'));
+assertEquals('"\\udea5"', JSON.stringify('\uDEA5'));
+assertEquals('"\\udea6"', JSON.stringify('\uDEA6'));
+assertEquals('"\\udea7"', JSON.stringify('\uDEA7'));
+assertEquals('"\\udea8"', JSON.stringify('\uDEA8'));
+assertEquals('"\\udea9"', JSON.stringify('\uDEA9'));
+assertEquals('"\\udeaa"', JSON.stringify('\uDEAA'));
+assertEquals('"\\udeab"', JSON.stringify('\uDEAB'));
+assertEquals('"\\udeac"', JSON.stringify('\uDEAC'));
+assertEquals('"\\udead"', JSON.stringify('\uDEAD'));
+assertEquals('"\\udeae"', JSON.stringify('\uDEAE'));
+assertEquals('"\\udeaf"', JSON.stringify('\uDEAF'));
+assertEquals('"\\udeb0"', JSON.stringify('\uDEB0'));
+assertEquals('"\\udeb1"', JSON.stringify('\uDEB1'));
+assertEquals('"\\udeb2"', JSON.stringify('\uDEB2'));
+assertEquals('"\\udeb3"', JSON.stringify('\uDEB3'));
+assertEquals('"\\udeb4"', JSON.stringify('\uDEB4'));
+assertEquals('"\\udeb5"', JSON.stringify('\uDEB5'));
+assertEquals('"\\udeb6"', JSON.stringify('\uDEB6'));
+assertEquals('"\\udeb7"', JSON.stringify('\uDEB7'));
+assertEquals('"\\udeb8"', JSON.stringify('\uDEB8'));
+assertEquals('"\\udeb9"', JSON.stringify('\uDEB9'));
+assertEquals('"\\udeba"', JSON.stringify('\uDEBA'));
+assertEquals('"\\udebb"', JSON.stringify('\uDEBB'));
+assertEquals('"\\udebc"', JSON.stringify('\uDEBC'));
+assertEquals('"\\udebd"', JSON.stringify('\uDEBD'));
+assertEquals('"\\udebe"', JSON.stringify('\uDEBE'));
+assertEquals('"\\udebf"', JSON.stringify('\uDEBF'));
+assertEquals('"\\udec0"', JSON.stringify('\uDEC0'));
+assertEquals('"\\udec1"', JSON.stringify('\uDEC1'));
+assertEquals('"\\udec2"', JSON.stringify('\uDEC2'));
+assertEquals('"\\udec3"', JSON.stringify('\uDEC3'));
+assertEquals('"\\udec4"', JSON.stringify('\uDEC4'));
+assertEquals('"\\udec5"', JSON.stringify('\uDEC5'));
+assertEquals('"\\udec6"', JSON.stringify('\uDEC6'));
+assertEquals('"\\udec7"', JSON.stringify('\uDEC7'));
+assertEquals('"\\udec8"', JSON.stringify('\uDEC8'));
+assertEquals('"\\udec9"', JSON.stringify('\uDEC9'));
+assertEquals('"\\udeca"', JSON.stringify('\uDECA'));
+assertEquals('"\\udecb"', JSON.stringify('\uDECB'));
+assertEquals('"\\udecc"', JSON.stringify('\uDECC'));
+assertEquals('"\\udecd"', JSON.stringify('\uDECD'));
+assertEquals('"\\udece"', JSON.stringify('\uDECE'));
+assertEquals('"\\udecf"', JSON.stringify('\uDECF'));
+assertEquals('"\\uded0"', JSON.stringify('\uDED0'));
+assertEquals('"\\uded1"', JSON.stringify('\uDED1'));
+assertEquals('"\\uded2"', JSON.stringify('\uDED2'));
+assertEquals('"\\uded3"', JSON.stringify('\uDED3'));
+assertEquals('"\\uded4"', JSON.stringify('\uDED4'));
+assertEquals('"\\uded5"', JSON.stringify('\uDED5'));
+assertEquals('"\\uded6"', JSON.stringify('\uDED6'));
+assertEquals('"\\uded7"', JSON.stringify('\uDED7'));
+assertEquals('"\\uded8"', JSON.stringify('\uDED8'));
+assertEquals('"\\uded9"', JSON.stringify('\uDED9'));
+assertEquals('"\\udeda"', JSON.stringify('\uDEDA'));
+assertEquals('"\\udedb"', JSON.stringify('\uDEDB'));
+assertEquals('"\\udedc"', JSON.stringify('\uDEDC'));
+assertEquals('"\\udedd"', JSON.stringify('\uDEDD'));
+assertEquals('"\\udede"', JSON.stringify('\uDEDE'));
+assertEquals('"\\udedf"', JSON.stringify('\uDEDF'));
+assertEquals('"\\udee0"', JSON.stringify('\uDEE0'));
+assertEquals('"\\udee1"', JSON.stringify('\uDEE1'));
+assertEquals('"\\udee2"', JSON.stringify('\uDEE2'));
+assertEquals('"\\udee3"', JSON.stringify('\uDEE3'));
+assertEquals('"\\udee4"', JSON.stringify('\uDEE4'));
+assertEquals('"\\udee5"', JSON.stringify('\uDEE5'));
+assertEquals('"\\udee6"', JSON.stringify('\uDEE6'));
+assertEquals('"\\udee7"', JSON.stringify('\uDEE7'));
+assertEquals('"\\udee8"', JSON.stringify('\uDEE8'));
+assertEquals('"\\udee9"', JSON.stringify('\uDEE9'));
+assertEquals('"\\udeea"', JSON.stringify('\uDEEA'));
+assertEquals('"\\udeeb"', JSON.stringify('\uDEEB'));
+assertEquals('"\\udeec"', JSON.stringify('\uDEEC'));
+assertEquals('"\\udeed"', JSON.stringify('\uDEED'));
+assertEquals('"\\udeee"', JSON.stringify('\uDEEE'));
+assertEquals('"\\udeef"', JSON.stringify('\uDEEF'));
+assertEquals('"\\udef0"', JSON.stringify('\uDEF0'));
+assertEquals('"\\udef1"', JSON.stringify('\uDEF1'));
+assertEquals('"\\udef2"', JSON.stringify('\uDEF2'));
+assertEquals('"\\udef3"', JSON.stringify('\uDEF3'));
+assertEquals('"\\udef4"', JSON.stringify('\uDEF4'));
+assertEquals('"\\udef5"', JSON.stringify('\uDEF5'));
+assertEquals('"\\udef6"', JSON.stringify('\uDEF6'));
+assertEquals('"\\udef7"', JSON.stringify('\uDEF7'));
+assertEquals('"\\udef8"', JSON.stringify('\uDEF8'));
+assertEquals('"\\udef9"', JSON.stringify('\uDEF9'));
+assertEquals('"\\udefa"', JSON.stringify('\uDEFA'));
+assertEquals('"\\udefb"', JSON.stringify('\uDEFB'));
+assertEquals('"\\udefc"', JSON.stringify('\uDEFC'));
+assertEquals('"\\udefd"', JSON.stringify('\uDEFD'));
+assertEquals('"\\udefe"', JSON.stringify('\uDEFE'));
+assertEquals('"\\udeff"', JSON.stringify('\uDEFF'));
+assertEquals('"\\udf00"', JSON.stringify('\uDF00'));
+assertEquals('"\\udf01"', JSON.stringify('\uDF01'));
+assertEquals('"\\udf02"', JSON.stringify('\uDF02'));
+assertEquals('"\\udf03"', JSON.stringify('\uDF03'));
+assertEquals('"\\udf04"', JSON.stringify('\uDF04'));
+assertEquals('"\\udf05"', JSON.stringify('\uDF05'));
+assertEquals('"\\udf06"', JSON.stringify('\uDF06'));
+assertEquals('"\\udf07"', JSON.stringify('\uDF07'));
+assertEquals('"\\udf08"', JSON.stringify('\uDF08'));
+assertEquals('"\\udf09"', JSON.stringify('\uDF09'));
+assertEquals('"\\udf0a"', JSON.stringify('\uDF0A'));
+assertEquals('"\\udf0b"', JSON.stringify('\uDF0B'));
+assertEquals('"\\udf0c"', JSON.stringify('\uDF0C'));
+assertEquals('"\\udf0d"', JSON.stringify('\uDF0D'));
+assertEquals('"\\udf0e"', JSON.stringify('\uDF0E'));
+assertEquals('"\\udf0f"', JSON.stringify('\uDF0F'));
+assertEquals('"\\udf10"', JSON.stringify('\uDF10'));
+assertEquals('"\\udf11"', JSON.stringify('\uDF11'));
+assertEquals('"\\udf12"', JSON.stringify('\uDF12'));
+assertEquals('"\\udf13"', JSON.stringify('\uDF13'));
+assertEquals('"\\udf14"', JSON.stringify('\uDF14'));
+assertEquals('"\\udf15"', JSON.stringify('\uDF15'));
+assertEquals('"\\udf16"', JSON.stringify('\uDF16'));
+assertEquals('"\\udf17"', JSON.stringify('\uDF17'));
+assertEquals('"\\udf18"', JSON.stringify('\uDF18'));
+assertEquals('"\\udf19"', JSON.stringify('\uDF19'));
+assertEquals('"\\udf1a"', JSON.stringify('\uDF1A'));
+assertEquals('"\\udf1b"', JSON.stringify('\uDF1B'));
+assertEquals('"\\udf1c"', JSON.stringify('\uDF1C'));
+assertEquals('"\\udf1d"', JSON.stringify('\uDF1D'));
+assertEquals('"\\udf1e"', JSON.stringify('\uDF1E'));
+assertEquals('"\\udf1f"', JSON.stringify('\uDF1F'));
+assertEquals('"\\udf20"', JSON.stringify('\uDF20'));
+assertEquals('"\\udf21"', JSON.stringify('\uDF21'));
+assertEquals('"\\udf22"', JSON.stringify('\uDF22'));
+assertEquals('"\\udf23"', JSON.stringify('\uDF23'));
+assertEquals('"\\udf24"', JSON.stringify('\uDF24'));
+assertEquals('"\\udf25"', JSON.stringify('\uDF25'));
+assertEquals('"\\udf26"', JSON.stringify('\uDF26'));
+assertEquals('"\\udf27"', JSON.stringify('\uDF27'));
+assertEquals('"\\udf28"', JSON.stringify('\uDF28'));
+assertEquals('"\\udf29"', JSON.stringify('\uDF29'));
+assertEquals('"\\udf2a"', JSON.stringify('\uDF2A'));
+assertEquals('"\\udf2b"', JSON.stringify('\uDF2B'));
+assertEquals('"\\udf2c"', JSON.stringify('\uDF2C'));
+assertEquals('"\\udf2d"', JSON.stringify('\uDF2D'));
+assertEquals('"\\udf2e"', JSON.stringify('\uDF2E'));
+assertEquals('"\\udf2f"', JSON.stringify('\uDF2F'));
+assertEquals('"\\udf30"', JSON.stringify('\uDF30'));
+assertEquals('"\\udf31"', JSON.stringify('\uDF31'));
+assertEquals('"\\udf32"', JSON.stringify('\uDF32'));
+assertEquals('"\\udf33"', JSON.stringify('\uDF33'));
+assertEquals('"\\udf34"', JSON.stringify('\uDF34'));
+assertEquals('"\\udf35"', JSON.stringify('\uDF35'));
+assertEquals('"\\udf36"', JSON.stringify('\uDF36'));
+assertEquals('"\\udf37"', JSON.stringify('\uDF37'));
+assertEquals('"\\udf38"', JSON.stringify('\uDF38'));
+assertEquals('"\\udf39"', JSON.stringify('\uDF39'));
+assertEquals('"\\udf3a"', JSON.stringify('\uDF3A'));
+assertEquals('"\\udf3b"', JSON.stringify('\uDF3B'));
+assertEquals('"\\udf3c"', JSON.stringify('\uDF3C'));
+assertEquals('"\\udf3d"', JSON.stringify('\uDF3D'));
+assertEquals('"\\udf3e"', JSON.stringify('\uDF3E'));
+assertEquals('"\\udf3f"', JSON.stringify('\uDF3F'));
+assertEquals('"\\udf40"', JSON.stringify('\uDF40'));
+assertEquals('"\\udf41"', JSON.stringify('\uDF41'));
+assertEquals('"\\udf42"', JSON.stringify('\uDF42'));
+assertEquals('"\\udf43"', JSON.stringify('\uDF43'));
+assertEquals('"\\udf44"', JSON.stringify('\uDF44'));
+assertEquals('"\\udf45"', JSON.stringify('\uDF45'));
+assertEquals('"\\udf46"', JSON.stringify('\uDF46'));
+assertEquals('"\\udf47"', JSON.stringify('\uDF47'));
+assertEquals('"\\udf48"', JSON.stringify('\uDF48'));
+assertEquals('"\\udf49"', JSON.stringify('\uDF49'));
+assertEquals('"\\udf4a"', JSON.stringify('\uDF4A'));
+assertEquals('"\\udf4b"', JSON.stringify('\uDF4B'));
+assertEquals('"\\udf4c"', JSON.stringify('\uDF4C'));
+assertEquals('"\\udf4d"', JSON.stringify('\uDF4D'));
+assertEquals('"\\udf4e"', JSON.stringify('\uDF4E'));
+assertEquals('"\\udf4f"', JSON.stringify('\uDF4F'));
+assertEquals('"\\udf50"', JSON.stringify('\uDF50'));
+assertEquals('"\\udf51"', JSON.stringify('\uDF51'));
+assertEquals('"\\udf52"', JSON.stringify('\uDF52'));
+assertEquals('"\\udf53"', JSON.stringify('\uDF53'));
+assertEquals('"\\udf54"', JSON.stringify('\uDF54'));
+assertEquals('"\\udf55"', JSON.stringify('\uDF55'));
+assertEquals('"\\udf56"', JSON.stringify('\uDF56'));
+assertEquals('"\\udf57"', JSON.stringify('\uDF57'));
+assertEquals('"\\udf58"', JSON.stringify('\uDF58'));
+assertEquals('"\\udf59"', JSON.stringify('\uDF59'));
+assertEquals('"\\udf5a"', JSON.stringify('\uDF5A'));
+assertEquals('"\\udf5b"', JSON.stringify('\uDF5B'));
+assertEquals('"\\udf5c"', JSON.stringify('\uDF5C'));
+assertEquals('"\\udf5d"', JSON.stringify('\uDF5D'));
+assertEquals('"\\udf5e"', JSON.stringify('\uDF5E'));
+assertEquals('"\\udf5f"', JSON.stringify('\uDF5F'));
+assertEquals('"\\udf60"', JSON.stringify('\uDF60'));
+assertEquals('"\\udf61"', JSON.stringify('\uDF61'));
+assertEquals('"\\udf62"', JSON.stringify('\uDF62'));
+assertEquals('"\\udf63"', JSON.stringify('\uDF63'));
+assertEquals('"\\udf64"', JSON.stringify('\uDF64'));
+assertEquals('"\\udf65"', JSON.stringify('\uDF65'));
+assertEquals('"\\udf66"', JSON.stringify('\uDF66'));
+assertEquals('"\\udf67"', JSON.stringify('\uDF67'));
+assertEquals('"\\udf68"', JSON.stringify('\uDF68'));
+assertEquals('"\\udf69"', JSON.stringify('\uDF69'));
+assertEquals('"\\udf6a"', JSON.stringify('\uDF6A'));
+assertEquals('"\\udf6b"', JSON.stringify('\uDF6B'));
+assertEquals('"\\udf6c"', JSON.stringify('\uDF6C'));
+assertEquals('"\\udf6d"', JSON.stringify('\uDF6D'));
+assertEquals('"\\udf6e"', JSON.stringify('\uDF6E'));
+assertEquals('"\\udf6f"', JSON.stringify('\uDF6F'));
+assertEquals('"\\udf70"', JSON.stringify('\uDF70'));
+assertEquals('"\\udf71"', JSON.stringify('\uDF71'));
+assertEquals('"\\udf72"', JSON.stringify('\uDF72'));
+assertEquals('"\\udf73"', JSON.stringify('\uDF73'));
+assertEquals('"\\udf74"', JSON.stringify('\uDF74'));
+assertEquals('"\\udf75"', JSON.stringify('\uDF75'));
+assertEquals('"\\udf76"', JSON.stringify('\uDF76'));
+assertEquals('"\\udf77"', JSON.stringify('\uDF77'));
+assertEquals('"\\udf78"', JSON.stringify('\uDF78'));
+assertEquals('"\\udf79"', JSON.stringify('\uDF79'));
+assertEquals('"\\udf7a"', JSON.stringify('\uDF7A'));
+assertEquals('"\\udf7b"', JSON.stringify('\uDF7B'));
+assertEquals('"\\udf7c"', JSON.stringify('\uDF7C'));
+assertEquals('"\\udf7d"', JSON.stringify('\uDF7D'));
+assertEquals('"\\udf7e"', JSON.stringify('\uDF7E'));
+assertEquals('"\\udf7f"', JSON.stringify('\uDF7F'));
+assertEquals('"\\udf80"', JSON.stringify('\uDF80'));
+assertEquals('"\\udf81"', JSON.stringify('\uDF81'));
+assertEquals('"\\udf82"', JSON.stringify('\uDF82'));
+assertEquals('"\\udf83"', JSON.stringify('\uDF83'));
+assertEquals('"\\udf84"', JSON.stringify('\uDF84'));
+assertEquals('"\\udf85"', JSON.stringify('\uDF85'));
+assertEquals('"\\udf86"', JSON.stringify('\uDF86'));
+assertEquals('"\\udf87"', JSON.stringify('\uDF87'));
+assertEquals('"\\udf88"', JSON.stringify('\uDF88'));
+assertEquals('"\\udf89"', JSON.stringify('\uDF89'));
+assertEquals('"\\udf8a"', JSON.stringify('\uDF8A'));
+assertEquals('"\\udf8b"', JSON.stringify('\uDF8B'));
+assertEquals('"\\udf8c"', JSON.stringify('\uDF8C'));
+assertEquals('"\\udf8d"', JSON.stringify('\uDF8D'));
+assertEquals('"\\udf8e"', JSON.stringify('\uDF8E'));
+assertEquals('"\\udf8f"', JSON.stringify('\uDF8F'));
+assertEquals('"\\udf90"', JSON.stringify('\uDF90'));
+assertEquals('"\\udf91"', JSON.stringify('\uDF91'));
+assertEquals('"\\udf92"', JSON.stringify('\uDF92'));
+assertEquals('"\\udf93"', JSON.stringify('\uDF93'));
+assertEquals('"\\udf94"', JSON.stringify('\uDF94'));
+assertEquals('"\\udf95"', JSON.stringify('\uDF95'));
+assertEquals('"\\udf96"', JSON.stringify('\uDF96'));
+assertEquals('"\\udf97"', JSON.stringify('\uDF97'));
+assertEquals('"\\udf98"', JSON.stringify('\uDF98'));
+assertEquals('"\\udf99"', JSON.stringify('\uDF99'));
+assertEquals('"\\udf9a"', JSON.stringify('\uDF9A'));
+assertEquals('"\\udf9b"', JSON.stringify('\uDF9B'));
+assertEquals('"\\udf9c"', JSON.stringify('\uDF9C'));
+assertEquals('"\\udf9d"', JSON.stringify('\uDF9D'));
+assertEquals('"\\udf9e"', JSON.stringify('\uDF9E'));
+assertEquals('"\\udf9f"', JSON.stringify('\uDF9F'));
+assertEquals('"\\udfa0"', JSON.stringify('\uDFA0'));
+assertEquals('"\\udfa1"', JSON.stringify('\uDFA1'));
+assertEquals('"\\udfa2"', JSON.stringify('\uDFA2'));
+assertEquals('"\\udfa3"', JSON.stringify('\uDFA3'));
+assertEquals('"\\udfa4"', JSON.stringify('\uDFA4'));
+assertEquals('"\\udfa5"', JSON.stringify('\uDFA5'));
+assertEquals('"\\udfa6"', JSON.stringify('\uDFA6'));
+assertEquals('"\\udfa7"', JSON.stringify('\uDFA7'));
+assertEquals('"\\udfa8"', JSON.stringify('\uDFA8'));
+assertEquals('"\\udfa9"', JSON.stringify('\uDFA9'));
+assertEquals('"\\udfaa"', JSON.stringify('\uDFAA'));
+assertEquals('"\\udfab"', JSON.stringify('\uDFAB'));
+assertEquals('"\\udfac"', JSON.stringify('\uDFAC'));
+assertEquals('"\\udfad"', JSON.stringify('\uDFAD'));
+assertEquals('"\\udfae"', JSON.stringify('\uDFAE'));
+assertEquals('"\\udfaf"', JSON.stringify('\uDFAF'));
+assertEquals('"\\udfb0"', JSON.stringify('\uDFB0'));
+assertEquals('"\\udfb1"', JSON.stringify('\uDFB1'));
+assertEquals('"\\udfb2"', JSON.stringify('\uDFB2'));
+assertEquals('"\\udfb3"', JSON.stringify('\uDFB3'));
+assertEquals('"\\udfb4"', JSON.stringify('\uDFB4'));
+assertEquals('"\\udfb5"', JSON.stringify('\uDFB5'));
+assertEquals('"\\udfb6"', JSON.stringify('\uDFB6'));
+assertEquals('"\\udfb7"', JSON.stringify('\uDFB7'));
+assertEquals('"\\udfb8"', JSON.stringify('\uDFB8'));
+assertEquals('"\\udfb9"', JSON.stringify('\uDFB9'));
+assertEquals('"\\udfba"', JSON.stringify('\uDFBA'));
+assertEquals('"\\udfbb"', JSON.stringify('\uDFBB'));
+assertEquals('"\\udfbc"', JSON.stringify('\uDFBC'));
+assertEquals('"\\udfbd"', JSON.stringify('\uDFBD'));
+assertEquals('"\\udfbe"', JSON.stringify('\uDFBE'));
+assertEquals('"\\udfbf"', JSON.stringify('\uDFBF'));
+assertEquals('"\\udfc0"', JSON.stringify('\uDFC0'));
+assertEquals('"\\udfc1"', JSON.stringify('\uDFC1'));
+assertEquals('"\\udfc2"', JSON.stringify('\uDFC2'));
+assertEquals('"\\udfc3"', JSON.stringify('\uDFC3'));
+assertEquals('"\\udfc4"', JSON.stringify('\uDFC4'));
+assertEquals('"\\udfc5"', JSON.stringify('\uDFC5'));
+assertEquals('"\\udfc6"', JSON.stringify('\uDFC6'));
+assertEquals('"\\udfc7"', JSON.stringify('\uDFC7'));
+assertEquals('"\\udfc8"', JSON.stringify('\uDFC8'));
+assertEquals('"\\udfc9"', JSON.stringify('\uDFC9'));
+assertEquals('"\\udfca"', JSON.stringify('\uDFCA'));
+assertEquals('"\\udfcb"', JSON.stringify('\uDFCB'));
+assertEquals('"\\udfcc"', JSON.stringify('\uDFCC'));
+assertEquals('"\\udfcd"', JSON.stringify('\uDFCD'));
+assertEquals('"\\udfce"', JSON.stringify('\uDFCE'));
+assertEquals('"\\udfcf"', JSON.stringify('\uDFCF'));
+assertEquals('"\\udfd0"', JSON.stringify('\uDFD0'));
+assertEquals('"\\udfd1"', JSON.stringify('\uDFD1'));
+assertEquals('"\\udfd2"', JSON.stringify('\uDFD2'));
+assertEquals('"\\udfd3"', JSON.stringify('\uDFD3'));
+assertEquals('"\\udfd4"', JSON.stringify('\uDFD4'));
+assertEquals('"\\udfd5"', JSON.stringify('\uDFD5'));
+assertEquals('"\\udfd6"', JSON.stringify('\uDFD6'));
+assertEquals('"\\udfd7"', JSON.stringify('\uDFD7'));
+assertEquals('"\\udfd8"', JSON.stringify('\uDFD8'));
+assertEquals('"\\udfd9"', JSON.stringify('\uDFD9'));
+assertEquals('"\\udfda"', JSON.stringify('\uDFDA'));
+assertEquals('"\\udfdb"', JSON.stringify('\uDFDB'));
+assertEquals('"\\udfdc"', JSON.stringify('\uDFDC'));
+assertEquals('"\\udfdd"', JSON.stringify('\uDFDD'));
+assertEquals('"\\udfde"', JSON.stringify('\uDFDE'));
+assertEquals('"\\udfdf"', JSON.stringify('\uDFDF'));
+assertEquals('"\\udfe0"', JSON.stringify('\uDFE0'));
+assertEquals('"\\udfe1"', JSON.stringify('\uDFE1'));
+assertEquals('"\\udfe2"', JSON.stringify('\uDFE2'));
+assertEquals('"\\udfe3"', JSON.stringify('\uDFE3'));
+assertEquals('"\\udfe4"', JSON.stringify('\uDFE4'));
+assertEquals('"\\udfe5"', JSON.stringify('\uDFE5'));
+assertEquals('"\\udfe6"', JSON.stringify('\uDFE6'));
+assertEquals('"\\udfe7"', JSON.stringify('\uDFE7'));
+assertEquals('"\\udfe8"', JSON.stringify('\uDFE8'));
+assertEquals('"\\udfe9"', JSON.stringify('\uDFE9'));
+assertEquals('"\\udfea"', JSON.stringify('\uDFEA'));
+assertEquals('"\\udfeb"', JSON.stringify('\uDFEB'));
+assertEquals('"\\udfec"', JSON.stringify('\uDFEC'));
+assertEquals('"\\udfed"', JSON.stringify('\uDFED'));
+assertEquals('"\\udfee"', JSON.stringify('\uDFEE'));
+assertEquals('"\\udfef"', JSON.stringify('\uDFEF'));
+assertEquals('"\\udff0"', JSON.stringify('\uDFF0'));
+assertEquals('"\\udff1"', JSON.stringify('\uDFF1'));
+assertEquals('"\\udff2"', JSON.stringify('\uDFF2'));
+assertEquals('"\\udff3"', JSON.stringify('\uDFF3'));
+assertEquals('"\\udff4"', JSON.stringify('\uDFF4'));
+assertEquals('"\\udff5"', JSON.stringify('\uDFF5'));
+assertEquals('"\\udff6"', JSON.stringify('\uDFF6'));
+assertEquals('"\\udff7"', JSON.stringify('\uDFF7'));
+assertEquals('"\\udff8"', JSON.stringify('\uDFF8'));
+assertEquals('"\\udff9"', JSON.stringify('\uDFF9'));
+assertEquals('"\\udffa"', JSON.stringify('\uDFFA'));
+assertEquals('"\\udffb"', JSON.stringify('\uDFFB'));
+assertEquals('"\\udffc"', JSON.stringify('\uDFFC'));
+assertEquals('"\\udffd"', JSON.stringify('\uDFFD'));
+assertEquals('"\\udffe"', JSON.stringify('\uDFFE'));
+assertEquals('"\\udfff"', JSON.stringify('\uDFFF'));
+
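A note on the block above: lone surrogates (U+D800 through U+DFFF) are not valid on their own in well-formed UTF-16, so the well-formed JSON.stringify semantics exercised here emit them as lowercase \uXXXX escapes rather than as raw code units (compare the --harmony-json-stringify TODO in json.js further down in this patch). A minimal sketch of that mapping, assuming the proposal's semantics; the helper name is hypothetical:

// Hypothetical helper mirroring the expected escaping of a lone surrogate.
function escapeLoneSurrogate(ch) {
  const code = ch.charCodeAt(0);
  if (code >= 0xD800 && code <= 0xDFFF) {
    // Lone surrogate: escape as lowercase \uXXXX.
    return '\\u' + code.toString(16).padStart(4, '0');
  }
  return ch;
}
// escapeLoneSurrogate('\uDE61') === '\\ude61', matching the assertions above.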
+// A random selection of code points from U+E000 to U+FFFF.
+assertEquals('"\uE000"', JSON.stringify('\uE000'));
+assertEquals('"\uE00B"', JSON.stringify('\uE00B'));
+assertEquals('"\uE0CC"', JSON.stringify('\uE0CC'));
+assertEquals('"\uE0FD"', JSON.stringify('\uE0FD'));
+assertEquals('"\uE19E"', JSON.stringify('\uE19E'));
+assertEquals('"\uE1B1"', JSON.stringify('\uE1B1'));
+assertEquals('"\uE24F"', JSON.stringify('\uE24F'));
+assertEquals('"\uE262"', JSON.stringify('\uE262'));
+assertEquals('"\uE2C9"', JSON.stringify('\uE2C9'));
+assertEquals('"\uE2DF"', JSON.stringify('\uE2DF'));
+assertEquals('"\uE389"', JSON.stringify('\uE389'));
+assertEquals('"\uE413"', JSON.stringify('\uE413'));
+assertEquals('"\uE546"', JSON.stringify('\uE546'));
+assertEquals('"\uE5E4"', JSON.stringify('\uE5E4'));
+assertEquals('"\uE66B"', JSON.stringify('\uE66B'));
+assertEquals('"\uE73D"', JSON.stringify('\uE73D'));
+assertEquals('"\uE74F"', JSON.stringify('\uE74F'));
+assertEquals('"\uE759"', JSON.stringify('\uE759'));
+assertEquals('"\uE795"', JSON.stringify('\uE795'));
+assertEquals('"\uE836"', JSON.stringify('\uE836'));
+assertEquals('"\uE85D"', JSON.stringify('\uE85D'));
+assertEquals('"\uE909"', JSON.stringify('\uE909'));
+assertEquals('"\uE990"', JSON.stringify('\uE990'));
+assertEquals('"\uE99F"', JSON.stringify('\uE99F'));
+assertEquals('"\uE9AC"', JSON.stringify('\uE9AC'));
+assertEquals('"\uE9C2"', JSON.stringify('\uE9C2'));
+assertEquals('"\uEB11"', JSON.stringify('\uEB11'));
+assertEquals('"\uED33"', JSON.stringify('\uED33'));
+assertEquals('"\uED7D"', JSON.stringify('\uED7D'));
+assertEquals('"\uEDA9"', JSON.stringify('\uEDA9'));
+assertEquals('"\uEDFB"', JSON.stringify('\uEDFB'));
+assertEquals('"\uEE09"', JSON.stringify('\uEE09'));
+assertEquals('"\uEE0D"', JSON.stringify('\uEE0D'));
+assertEquals('"\uEE34"', JSON.stringify('\uEE34'));
+assertEquals('"\uEE37"', JSON.stringify('\uEE37'));
+assertEquals('"\uEE38"', JSON.stringify('\uEE38'));
+assertEquals('"\uEF80"', JSON.stringify('\uEF80'));
+assertEquals('"\uEFE2"', JSON.stringify('\uEFE2'));
+assertEquals('"\uF02C"', JSON.stringify('\uF02C'));
+assertEquals('"\uF09A"', JSON.stringify('\uF09A'));
+assertEquals('"\uF0C1"', JSON.stringify('\uF0C1'));
+assertEquals('"\uF12C"', JSON.stringify('\uF12C'));
+assertEquals('"\uF250"', JSON.stringify('\uF250'));
+assertEquals('"\uF2A3"', JSON.stringify('\uF2A3'));
+assertEquals('"\uF340"', JSON.stringify('\uF340'));
+assertEquals('"\uF3C9"', JSON.stringify('\uF3C9'));
+assertEquals('"\uF3F5"', JSON.stringify('\uF3F5'));
+assertEquals('"\uF41B"', JSON.stringify('\uF41B'));
+assertEquals('"\uF420"', JSON.stringify('\uF420'));
+assertEquals('"\uF440"', JSON.stringify('\uF440'));
+assertEquals('"\uF4AE"', JSON.stringify('\uF4AE'));
+assertEquals('"\uF4B0"', JSON.stringify('\uF4B0'));
+assertEquals('"\uF50D"', JSON.stringify('\uF50D'));
+assertEquals('"\uF55D"', JSON.stringify('\uF55D'));
+assertEquals('"\uF55E"', JSON.stringify('\uF55E'));
+assertEquals('"\uF5CD"', JSON.stringify('\uF5CD'));
+assertEquals('"\uF657"', JSON.stringify('\uF657'));
+assertEquals('"\uF66D"', JSON.stringify('\uF66D'));
+assertEquals('"\uF68F"', JSON.stringify('\uF68F'));
+assertEquals('"\uF6A6"', JSON.stringify('\uF6A6'));
+assertEquals('"\uF6AA"', JSON.stringify('\uF6AA'));
+assertEquals('"\uF6EB"', JSON.stringify('\uF6EB'));
+assertEquals('"\uF79A"', JSON.stringify('\uF79A'));
+assertEquals('"\uF7E7"', JSON.stringify('\uF7E7'));
+assertEquals('"\uF7E8"', JSON.stringify('\uF7E8'));
+assertEquals('"\uF834"', JSON.stringify('\uF834'));
+assertEquals('"\uF88B"', JSON.stringify('\uF88B'));
+assertEquals('"\uF8D5"', JSON.stringify('\uF8D5'));
+assertEquals('"\uF8F1"', JSON.stringify('\uF8F1'));
+assertEquals('"\uF905"', JSON.stringify('\uF905'));
+assertEquals('"\uF927"', JSON.stringify('\uF927'));
+assertEquals('"\uF943"', JSON.stringify('\uF943'));
+assertEquals('"\uF949"', JSON.stringify('\uF949'));
+assertEquals('"\uF9A1"', JSON.stringify('\uF9A1'));
+assertEquals('"\uF9C7"', JSON.stringify('\uF9C7'));
+assertEquals('"\uFA0F"', JSON.stringify('\uFA0F'));
+assertEquals('"\uFA20"', JSON.stringify('\uFA20'));
+assertEquals('"\uFAA7"', JSON.stringify('\uFAA7'));
+assertEquals('"\uFBCD"', JSON.stringify('\uFBCD'));
+assertEquals('"\uFBF7"', JSON.stringify('\uFBF7'));
+assertEquals('"\uFC40"', JSON.stringify('\uFC40'));
+assertEquals('"\uFC4B"', JSON.stringify('\uFC4B'));
+assertEquals('"\uFC51"', JSON.stringify('\uFC51'));
+assertEquals('"\uFC5E"', JSON.stringify('\uFC5E'));
+assertEquals('"\uFC67"', JSON.stringify('\uFC67'));
+assertEquals('"\uFC8B"', JSON.stringify('\uFC8B'));
+assertEquals('"\uFE32"', JSON.stringify('\uFE32'));
+assertEquals('"\uFFC4"', JSON.stringify('\uFFC4'));
+assertEquals('"\uFFFD"', JSON.stringify('\uFFFD'));
+assertEquals('"\uFFFE"', JSON.stringify('\uFFFE'));
+assertEquals('"\uFFFF"', JSON.stringify('\uFFFF'));
+
+// A random selection of astral symbols (encoded as surrogate pairs),
+// i.e. code points from U+010000 to U+10FFFF.
+assertEquals('"\u{10000}"', JSON.stringify('\u{10000}'));
+assertEquals('"\u{11DE7}"', JSON.stringify('\u{11DE7}'));
+assertEquals('"\u{15997}"', JSON.stringify('\u{15997}'));
+assertEquals('"\u{187B0}"', JSON.stringify('\u{187B0}'));
+assertEquals('"\u{190B2}"', JSON.stringify('\u{190B2}'));
+assertEquals('"\u{1BF79}"', JSON.stringify('\u{1BF79}'));
+assertEquals('"\u{1C624}"', JSON.stringify('\u{1C624}'));
+assertEquals('"\u{1D9F4}"', JSON.stringify('\u{1D9F4}'));
+assertEquals('"\u{24149}"', JSON.stringify('\u{24149}'));
+assertEquals('"\u{2521C}"', JSON.stringify('\u{2521C}'));
+assertEquals('"\u{2762D}"', JSON.stringify('\u{2762D}'));
+assertEquals('"\u{2930B}"', JSON.stringify('\u{2930B}'));
+assertEquals('"\u{29EC4}"', JSON.stringify('\u{29EC4}'));
+assertEquals('"\u{29F9A}"', JSON.stringify('\u{29F9A}'));
+assertEquals('"\u{2A27D}"', JSON.stringify('\u{2A27D}'));
+assertEquals('"\u{2B363}"', JSON.stringify('\u{2B363}'));
+assertEquals('"\u{2C037}"', JSON.stringify('\u{2C037}'));
+assertEquals('"\u{2FAE0}"', JSON.stringify('\u{2FAE0}'));
+assertEquals('"\u{2FFCF}"', JSON.stringify('\u{2FFCF}'));
+assertEquals('"\u{32C1C}"', JSON.stringify('\u{32C1C}'));
+assertEquals('"\u{33DA8}"', JSON.stringify('\u{33DA8}'));
+assertEquals('"\u{3DCA4}"', JSON.stringify('\u{3DCA4}'));
+assertEquals('"\u{44FA0}"', JSON.stringify('\u{44FA0}'));
+assertEquals('"\u{45618}"', JSON.stringify('\u{45618}'));
+assertEquals('"\u{47395}"', JSON.stringify('\u{47395}'));
+assertEquals('"\u{4752C}"', JSON.stringify('\u{4752C}'));
+assertEquals('"\u{483FE}"', JSON.stringify('\u{483FE}'));
+assertEquals('"\u{49D35}"', JSON.stringify('\u{49D35}'));
+assertEquals('"\u{4CE3B}"', JSON.stringify('\u{4CE3B}'));
+assertEquals('"\u{55196}"', JSON.stringify('\u{55196}'));
+assertEquals('"\u{58B3E}"', JSON.stringify('\u{58B3E}'));
+assertEquals('"\u{5AA47}"', JSON.stringify('\u{5AA47}'));
+assertEquals('"\u{5C4B8}"', JSON.stringify('\u{5C4B8}'));
+assertEquals('"\u{5DD1B}"', JSON.stringify('\u{5DD1B}'));
+assertEquals('"\u{5FDCB}"', JSON.stringify('\u{5FDCB}'));
+assertEquals('"\u{611BA}"', JSON.stringify('\u{611BA}'));
+assertEquals('"\u{66433}"', JSON.stringify('\u{66433}'));
+assertEquals('"\u{690D7}"', JSON.stringify('\u{690D7}'));
+assertEquals('"\u{6F617}"', JSON.stringify('\u{6F617}'));
+assertEquals('"\u{711E4}"', JSON.stringify('\u{711E4}'));
+assertEquals('"\u{758D2}"', JSON.stringify('\u{758D2}'));
+assertEquals('"\u{780AC}"', JSON.stringify('\u{780AC}'));
+assertEquals('"\u{7AE5F}"', JSON.stringify('\u{7AE5F}'));
+assertEquals('"\u{7C2FB}"', JSON.stringify('\u{7C2FB}'));
+assertEquals('"\u{7D25F}"', JSON.stringify('\u{7D25F}'));
+assertEquals('"\u{8027A}"', JSON.stringify('\u{8027A}'));
+assertEquals('"\u{84817}"', JSON.stringify('\u{84817}'));
+assertEquals('"\u{8B070}"', JSON.stringify('\u{8B070}'));
+assertEquals('"\u{8B390}"', JSON.stringify('\u{8B390}'));
+assertEquals('"\u{8BC03}"', JSON.stringify('\u{8BC03}'));
+assertEquals('"\u{8BE63}"', JSON.stringify('\u{8BE63}'));
+assertEquals('"\u{8F12A}"', JSON.stringify('\u{8F12A}'));
+assertEquals('"\u{9345D}"', JSON.stringify('\u{9345D}'));
+assertEquals('"\u{937A9}"', JSON.stringify('\u{937A9}'));
+assertEquals('"\u{94596}"', JSON.stringify('\u{94596}'));
+assertEquals('"\u{967BB}"', JSON.stringify('\u{967BB}'));
+assertEquals('"\u{A19D1}"', JSON.stringify('\u{A19D1}'));
+assertEquals('"\u{A4FC5}"', JSON.stringify('\u{A4FC5}'));
+assertEquals('"\u{AC9CF}"', JSON.stringify('\u{AC9CF}'));
+assertEquals('"\u{B1366}"', JSON.stringify('\u{B1366}'));
+assertEquals('"\u{B3D32}"', JSON.stringify('\u{B3D32}'));
+assertEquals('"\u{B74BA}"', JSON.stringify('\u{B74BA}'));
+assertEquals('"\u{B8FB0}"', JSON.stringify('\u{B8FB0}'));
+assertEquals('"\u{BA0A5}"', JSON.stringify('\u{BA0A5}'));
+assertEquals('"\u{BB48E}"', JSON.stringify('\u{BB48E}'));
+assertEquals('"\u{C0B60}"', JSON.stringify('\u{C0B60}'));
+assertEquals('"\u{C2D34}"', JSON.stringify('\u{C2D34}'));
+assertEquals('"\u{C6C75}"', JSON.stringify('\u{C6C75}'));
+assertEquals('"\u{C9F26}"', JSON.stringify('\u{C9F26}'));
+assertEquals('"\u{CDBD0}"', JSON.stringify('\u{CDBD0}'));
+assertEquals('"\u{D1E28}"', JSON.stringify('\u{D1E28}'));
+assertEquals('"\u{D4A80}"', JSON.stringify('\u{D4A80}'));
+assertEquals('"\u{D947F}"', JSON.stringify('\u{D947F}'));
+assertEquals('"\u{D9B8A}"', JSON.stringify('\u{D9B8A}'));
+assertEquals('"\u{DA203}"', JSON.stringify('\u{DA203}'));
+assertEquals('"\u{DEFD3}"', JSON.stringify('\u{DEFD3}'));
+assertEquals('"\u{E4F7C}"', JSON.stringify('\u{E4F7C}'));
+assertEquals('"\u{E6BB3}"', JSON.stringify('\u{E6BB3}'));
+assertEquals('"\u{E972D}"', JSON.stringify('\u{E972D}'));
+assertEquals('"\u{EB335}"', JSON.stringify('\u{EB335}'));
+assertEquals('"\u{ED3F8}"', JSON.stringify('\u{ED3F8}'));
+assertEquals('"\u{ED940}"', JSON.stringify('\u{ED940}'));
+assertEquals('"\u{EF6F8}"', JSON.stringify('\u{EF6F8}'));
+assertEquals('"\u{F1F57}"', JSON.stringify('\u{F1F57}'));
+assertEquals('"\u{F33B5}"', JSON.stringify('\u{F33B5}'));
+assertEquals('"\u{F4D2A}"', JSON.stringify('\u{F4D2A}'));
+assertEquals('"\u{F70BA}"', JSON.stringify('\u{F70BA}'));
+assertEquals('"\u{F899F}"', JSON.stringify('\u{F899F}'));
+assertEquals('"\u{1034BF}"', JSON.stringify('\u{1034BF}'));
+assertEquals('"\u{107ACF}"', JSON.stringify('\u{107ACF}'));
+assertEquals('"\u{10881F}"', JSON.stringify('\u{10881F}'));
+assertEquals('"\u{1098A5}"', JSON.stringify('\u{1098A5}'));
+assertEquals('"\u{10ABD1}"', JSON.stringify('\u{10ABD1}'));
+assertEquals('"\u{10B5C5}"', JSON.stringify('\u{10B5C5}'));
+assertEquals('"\u{10CC79}"', JSON.stringify('\u{10CC79}'));
+assertEquals('"\u{10CD19}"', JSON.stringify('\u{10CD19}'));
+assertEquals('"\u{10FFFF}"', JSON.stringify('\u{10FFFF}'));
diff --git a/deps/v8/test/mjsunit/ignition/regress-616064.js b/deps/v8/test/mjsunit/ignition/regress-616064.js
index 805de41ac5..f14679a422 100644
--- a/deps/v8/test/mjsunit/ignition/regress-616064.js
+++ b/deps/v8/test/mjsunit/ignition/regress-616064.js
@@ -13,7 +13,7 @@ function foo() {
return Worker.__f_0(-2147483648, __f_0);
};
- var __v_9 = new Worker('');
+ var __v_9 = new Worker('', {type: 'string'});
__f_1 = {s: Math.s, __f_1: true};
}
}
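For context on the {type: 'string'} additions that recur throughout this patch: d8's Worker constructor now takes an options argument, and passing type: 'string' appears to mark the first argument as inline script source rather than a file path, e.g.:

var worker = new Worker('postMessage(42)', {type: 'string'});  // inline source, not a filename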
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index 102f777c74..04754577ff 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -375,32 +375,30 @@ var o = { toString: function() { return "42"; } };
assertEquals(42, JSON.parse(o));
-for (var i = 0; i < 65536; i++) {
+for (var i = 0x0000; i <= 0xFFFF; i++) {
var string = String.fromCharCode(i);
var encoded = JSON.stringify(string);
- var expected = "uninitialized";
+ var expected = 'uninitialized';
   // Following the ES5 specification of the abstract operation Quote.
if (string == '"' || string == '\\') {
// Step 2.a
expected = '\\' + string;
- } else if ("\b\t\n\r\f".indexOf(string) >= 0) {
+ } else if ("\b\t\n\r\f".includes(string)) {
// Step 2.b
if (string == '\b') expected = '\\b';
else if (string == '\t') expected = '\\t';
else if (string == '\n') expected = '\\n';
else if (string == '\f') expected = '\\f';
else if (string == '\r') expected = '\\r';
- } else if (i < 32) {
+ } else if (i < 0x20) {
// Step 2.c
- if (i < 16) {
- expected = "\\u000" + i.toString(16);
- } else {
- expected = "\\u00" + i.toString(16);
- }
+ expected = '\\u' + i.toString(16).padStart(4, '0');
+ // TODO(mathias): Add i >= 0xD800 && i <= 0xDFFF case once
+ // --harmony-json-stringify is enabled by default.
} else {
expected = string;
}
- assertEquals('"' + expected + '"', encoded, "Codepoint " + i);
+ assertEquals('"' + expected + '"', encoded, "code point " + i);
}
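The rewritten Step 2.c collapses the old two-branch zero-padding (i < 16 vs. i >= 16) into a single expression; for any code unit below 0x20:

// Equivalent to the removed '\\u000' + hex / '\\u00' + hex branches:
const esc = i => '\\u' + i.toString(16).padStart(4, '0');
// esc(0x0B) === '\\u000b'; esc(0x1F) === '\\u001f'.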
diff --git a/deps/v8/test/mjsunit/lexicographic-compare.js b/deps/v8/test/mjsunit/lexicographic-compare.js
deleted file mode 100644
index b87dd1c1ef..0000000000
--- a/deps/v8/test/mjsunit/lexicographic-compare.js
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-(function () {
-
- assertFalse(%IsSmi(2147483648), 'Update test for >32 bit Smi');
-
- // Collect a list of interesting Smis.
- const seen = {};
- const smis = [];
- function add(x) {
- if (x | 0 == x) {
- x = x | 0; // Canonicalizes to Smi if 32-bit signed and fits in Smi.
- }
- if (%_IsSmi(x) && !seen[x]) {
- seen[x] = 1;
- smis.push(x);
- }
- }
- function addSigned(x) {
- add(x);
- add(-x);
- }
-
- var BIGGER_THAN_ANY_SMI = 10 * 1000 * 1000 * 1000;
- for (var xb = 1; xb <= BIGGER_THAN_ANY_SMI; xb *= 10) {
- for (var xf = 0; xf <= 9; xf++) {
- for (var xo = -1; xo <= 1; xo++) {
- addSigned(xb * xf + xo);
- }
- }
- }
-
- console.log("A")
-
- for (var yb = 1; yb <= BIGGER_THAN_ANY_SMI; yb *= 2) {
- for (var yo = -2; yo <= 2; yo++) {
- addSigned(yb + yo);
- }
- }
-
- function test(x,y) {
- const lex = %SmiLexicographicCompare(x, y);
- const expected = (x == y) ? 0 : (("" + x) < ("" + y) ? -1 : 1);
- return lex == expected;
- }
-
- console.log(smis.length);
-
- for (var i = 0; i < smis.length; i++) {
- for (var j = 0; j < smis.length; j++) {
- const x = smis[i];
- const y = smis[j];
- assertTrue(test(x, y), x + " < " + y);;
- }
- }
-
- console.log("C")
-})();
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index a458e0cd10..59923a4247 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -64,6 +64,9 @@ var assertNotSame;
// and the properties of non-Array objects).
var assertEquals;
+// Deep equality predicate used by assertEquals.
+var deepEquals;
+
// Expected and found values are not identical primitive values or functions
// or similarly structured objects (checking internal properties
// of, e.g., Number and Date objects, the elements of arrays
@@ -183,6 +186,9 @@ var isTurboFanned;
// Monkey-patchable all-purpose failure handler.
var failWithMessage;
+// Returns the formatted failure text. Used by test-async.js.
+var formatFailureText;
+
// Returns a pretty-printed string representation of the passed value.
var prettyPrinted;
@@ -297,7 +303,7 @@ var prettyPrinted;
throw new MjsUnitAssertionError(message);
}
- function formatFailureText(expectedText, found, name_opt) {
+ formatFailureText = function(expectedText, found, name_opt) {
var message = "Fail" + "ure";
if (name_opt) {
// Fix this when we ditch the old test runner.
@@ -335,7 +341,7 @@ var prettyPrinted;
}
- function deepEquals(a, b) {
+ deepEquals = function deepEquals(a, b) {
if (a === b) {
// Check for -0.
if (a === 0) return (1 / a) === (1 / b);
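A note on the -0 special case visible in deepEquals above: 0 === -0 holds in JavaScript, so strict equality alone cannot separate the two; dividing by them can, since 1 / 0 is Infinity while 1 / -0 is -Infinity:

// 0 === -0, but the reciprocals differ in sign:
console.log(1 / 0, 1 / -0);  // Infinity -Infinity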
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 4d54808b46..16a17189e0 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -55,8 +55,6 @@
# Issue 5495: enable the test when the constant field tracking in enabled.
'const-field-tracking': [SKIP],
- # Issue 7872: flaky OOM
- 'regress/wasm/regress-827806': [SKIP],
##############################################################################
# Too slow in debug mode with --stress-opt mode.
'regress/regress-create-exception': [PASS, ['mode == debug', SKIP]],
@@ -168,12 +166,15 @@
'regress/regress-605470': [PASS, SLOW],
'regress/regress-655573': [PASS, SLOW],
'regress/regress-1200351': [PASS, SLOW],
+ 'regress/wasm/regress-810973': [PASS, SLOW],
'string-replace-gc': [PASS, SLOW],
'wasm/asm-wasm-f32': [PASS, SLOW],
'wasm/asm-wasm-f64': [PASS, SLOW],
'wasm/embenchen/*': [PASS, SLOW],
'wasm/grow-memory': [PASS, SLOW],
'wasm/unreachable-validation': [PASS, SLOW],
+ 'wasm/compare-exchange-stress': [PASS, SLOW, NO_VARIANTS],
+ 'wasm/compare-exchange64-stress': [PASS, SLOW, NO_VARIANTS],
# case-insensitive unicode regexp relies on case mapping provided by ICU.
'es6/unicode-regexp-ignore-case': [PASS, ['no_i18n == True', FAIL]],
@@ -215,14 +216,14 @@
# Allocates a huge string and then flattens it, very slow in debug mode.
'regress/regress-752764': [PASS, ['mode == debug', SLOW]],
- # https://crbug.com/v8/7682
- 'regress/regress-v8-7682': [FAIL],
-
# https://crbug.com/v8/7697
'array-literal-feedback': [PASS, FAIL],
# https://crbug.com/v8/7775
'allocation-site-info': [SKIP],
+
+ # BUG(v8:8169)
+ 'external-backing-store-gc': [SKIP],
}], # ALWAYS
['novfp3 == True', {
@@ -277,7 +278,6 @@
# TODO(mstarzinger): Takes too long with TF.
'array-sort': [PASS, NO_VARIANTS],
- 'lexicographic-compare': [PASS, NO_VARIANTS],
'regress/regress-91008': [PASS, NO_VARIANTS],
'regress/regress-transcendental': [PASS, ['arch == arm64', NO_VARIANTS]],
'compiler/osr-regress-max-locals': [PASS, NO_VARIANTS],
@@ -320,9 +320,6 @@
'asm/sqlite3/*': [SKIP],
# TODO(mips-team): Fix Wasm for big-endian.
'wasm/*': [SKIP],
- # TODO(mips-team): Fix SEGV on regress-864509.js on big endian
- # (https://crbug.com/v8/7953).
- 'regress/wasm/regress-864509': [SKIP],
}], # 'byteorder == big'
##############################################################################
@@ -370,12 +367,10 @@
'compiler/osr-with-args': [PASS, SLOW],
'generated-transition-stub': [PASS, SLOW],
'json2': [PASS, SLOW],
- 'lexicographic-compare': [PASS, SLOW],
'math-floor-of-div-nosudiv': [PASS, SLOW],
'math-floor-of-div': [PASS, SLOW],
'messages': [PASS, SLOW],
'packed-elements': [PASS, SLOW],
- 'regress/regress-2185': [PASS, SLOW],
'regress/regress-2790': [PASS, SLOW],
'regress/regress-331444': [PASS, SLOW],
'regress/regress-490': [PASS, SLOW],
@@ -397,7 +392,6 @@
# Pass but take too long with the simulator in debug mode.
'array-sort': [PASS, SLOW],
- 'lexicographic-compare': [PASS, SLOW],
'packed-elements': [SKIP],
'regexp-global': [SKIP],
'math-floor-of-div': [PASS, SLOW],
@@ -466,6 +460,9 @@
# BUG(v8:7042). Uses a lot of memory.
'regress/regress-678917': [SKIP],
+ # BUG(v8:8103). Uses a lot of memory.
+ 'regress/regress-852258': [SKIP],
+
# BUG(v8:6924). The test uses a lot of memory.
'regress/wasm/regress-694433': [SKIP],
'es6/typedarray': [PASS, NO_VARIANTS],
@@ -506,7 +503,6 @@
# Slow tests.
'array-sort': [PASS, SLOW],
'compiler/osr-with-args': [PASS, SLOW],
- 'lexicographic-compare': [PASS, SLOW],
'packed-elements': [PASS, SLOW],
'regress/regress-2790': [PASS, SLOW],
'regress/regress-91008': [PASS, SLOW],
@@ -651,6 +647,8 @@
'regress/regress-748069': [FAIL],
'regress/regress-752764': [FAIL],
'regress/regress-779407': [FAIL],
+ # Flaky OOM:
+ 'regress/regress-852258': [SKIP],
}], # 'system == android'
##############################################################################
@@ -660,6 +658,13 @@
}], # 'system == macos'
##############################################################################
+['isolates', {
+ # Slow tests.
+ 'es6/typedarray-of': [PASS, SLOW],
+ 'regress/regress-crbug-854299': [PASS, SLOW],
+}], # 'isolates'
+
+##############################################################################
['deopt_fuzzer == True', {
# Skip tests that are not suitable for deoptimization fuzzing.
@@ -708,6 +713,7 @@
'keyed-load-with-symbol-key': [PASS, FAIL],
'object-seal': [PASS, FAIL],
'regress/regress-3709': [PASS, FAIL],
+ 'regress/regress-6948': [PASS, FAIL],
'regress/regress-7510': [PASS, FAIL],
'regress/regress-trap-allocation-memento': [PASS, FAIL],
'regress/regress-unlink-closures-on-deopt': [PASS, FAIL],
@@ -792,26 +798,32 @@
# Slow on arm64 simulator: https://crbug.com/v8/7783
'string-replace-gc': [PASS, ['arch == arm64 and simulator_run', SKIP]],
+
+ # Too memory hungry on Odroid devices.
+ 'regress/regress-678917': [PASS, ['arch == arm and not simulator_run', SKIP]],
+
+ # https://crbug.com/v8/8164
+ 'wasm/compare-exchange-stress': [SKIP],
}], # variant == stress
##############################################################################
+['variant == stress and (arch == arm or arch == arm64) and simulator_run', {
+ # Slow tests: https://crbug.com/v8/7783
+ 'generated-transition-stub': [SKIP],
+ 'wasm/grow-memory': [SKIP],
+}], # variant == stress and (arch == arm or arch == arm64) and simulator_run
+
+##############################################################################
['variant == nooptimization and (arch == arm or arch == arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
- 'lexicographic-compare': [SKIP],
'md5': [SKIP],
- 'regress/regress-2185': [SKIP],
+ 'packed-elements': [SKIP],
'wasm/asm-wasm-f32': [SKIP],
'wasm/asm-wasm-f64': [SKIP],
'wasm/grow-memory': [SKIP],
}], # variant == nooptimization and (arch == arm or arch == arm64) and simulator_run
##############################################################################
-['(arch == arm or arch == arm64) and simulator_run', {
- # Slow tests: https://crbug.com/v8/7783
- 'regress/regress-2185': [SKIP],
-}], # (arch == arm or arch == arm64) and simulator_run
-
-##############################################################################
['(arch == arm or arch == arm64)', {
# Flaky tests: https://crbug.com/v8/8090
'regress/regress-752764': [SKIP],
diff --git a/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js b/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js
index 9a24fc5c7c..4a48a61ab3 100644
--- a/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js
+++ b/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --nostress-opt --allow-natives-syntax --mock-arraybuffer-allocator
-var maxSize = %_MaxSmi() + 1;
+var maxSize = %MaxSmi() + 1;
var ab;
// Allocate the largest ArrayBuffer we can on this architecture.
diff --git a/deps/v8/test/mjsunit/regress/regress-319722-TypedArrays.js b/deps/v8/test/mjsunit/regress/regress-319722-TypedArrays.js
index e497aecbe0..0445e2d2cf 100644
--- a/deps/v8/test/mjsunit/regress/regress-319722-TypedArrays.js
+++ b/deps/v8/test/mjsunit/regress/regress-319722-TypedArrays.js
@@ -27,7 +27,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --nostress-opt --allow-natives-syntax
-var maxSize = %_MaxSmi() + 1;
+var maxSize = %MaxSmi() + 1;
function TestArray(constr) {
assertThrows(function() {
new constr(maxSize);
diff --git a/deps/v8/test/mjsunit/regress/regress-3255.js b/deps/v8/test/mjsunit/regress/regress-3255.js
index 0e77435374..0c5ee4ff00 100644
--- a/deps/v8/test/mjsunit/regress/regress-3255.js
+++ b/deps/v8/test/mjsunit/regress/regress-3255.js
@@ -16,4 +16,4 @@ f(str, 0);
f(str, 0);
// This is just to trigger elements validation, object already broken.
-%SetProperty(str, 1, 'y', 0);
+%SetKeyedProperty(str, 1, 'y', 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-4271.js b/deps/v8/test/mjsunit/regress/regress-4271.js
index bc18771e72..5b724b01b6 100644
--- a/deps/v8/test/mjsunit/regress/regress-4271.js
+++ b/deps/v8/test/mjsunit/regress/regress-4271.js
@@ -17,7 +17,7 @@ if (this.Worker) {
});
// Don't throw for real worker
- var worker = new Worker('');
+ var worker = new Worker('', {type: 'string'});
worker.getMessage();
worker.postMessage({});
worker.terminate();
diff --git a/deps/v8/test/mjsunit/regress/regress-4279.js b/deps/v8/test/mjsunit/regress/regress-4279.js
index 64ef967d89..ddc272793e 100644
--- a/deps/v8/test/mjsunit/regress/regress-4279.js
+++ b/deps/v8/test/mjsunit/regress/regress-4279.js
@@ -4,7 +4,7 @@
if (this.Worker && this.quit) {
try {
- new Function(new Worker("55"));
+ new Function(new Worker("55"), {type: 'string'});
} catch(err) {}
quit();
diff --git a/deps/v8/test/mjsunit/regress/regress-707066.js b/deps/v8/test/mjsunit/regress/regress-707066.js
index b33b585ebd..b5d70c2e6b 100644
--- a/deps/v8/test/mjsunit/regress/regress-707066.js
+++ b/deps/v8/test/mjsunit/regress/regress-707066.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-function-tostring
-
// There was a bug in CreateDynamicFunction where a stack overflow
// situation caused an assertion failure.
diff --git a/deps/v8/test/mjsunit/regress/regress-8133-1.js b/deps/v8/test/mjsunit/regress/regress-8133-1.js
new file mode 100644
index 0000000000..8f3cc734a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8133-1.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const arr = [1, , 3];
+
+function mapper(x) {
+ Array.prototype[1] = 2;
+ return x + 1;
+}
+
+// This iterates over arr using the iterator protocol, which turns the hole into
+// undefined. The mapper function then gets called in a separate iteration over
+// the acquired elements, where it increments undefined, which produces NaN and
+// gets converted to 0.
+assertArrayEquals([2, 0, 4], Uint16Array.from(arr, mapper));
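A rough sketch of the two-phase behavior the comment describes, with the phases spelled out:

// Phase 1: the iterator protocol reads the hole at index 1 as undefined.
const collected = [...[1, , 3]];        // [1, undefined, 3]
// Phase 2: mapping runs over the collected values afterwards;
// undefined + 1 is NaN, and Uint16Array conversion turns NaN into 0,
// which is why the expected result is [2, 0, 4].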
diff --git a/deps/v8/test/mjsunit/regress/regress-8133-2.js b/deps/v8/test/mjsunit/regress/regress-8133-2.js
new file mode 100644
index 0000000000..e163e3c784
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8133-2.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const arr = [1, , 3];
+
+function mapper(x) {
+ Array.prototype[1] = 2;
+ return x + 1;
+}
+
+// We force a direct iteration (using the array length, not the iterator
+// protocol). The mapper function gets called during this iteration, not in a
+// separate one. Hence when index 1 is read, 2 is retrieved from the prototype
+// and incremented to 3.
+Array.prototype[Symbol.iterator] = undefined;
+assertArrayEquals([2, 3, 4], Uint16Array.from(arr, mapper));
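And a sketch of the direct path: once elements are read via indexed access instead of the iterator, a hole resolves through the prototype chain:

const src = [1, , 3];
Array.prototype[1] = 2;
console.log(src[1]);  // 2 -- the hole resolves through Array.prototype,
                      // so the mapper sees 2 and returns 3, giving [2, 3, 4].
delete Array.prototype[1];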
diff --git a/deps/v8/test/mjsunit/regress/regress-821368.js b/deps/v8/test/mjsunit/regress/regress-821368.js
index b41261d7c9..8f8a01b3f7 100644
--- a/deps/v8/test/mjsunit/regress/regress-821368.js
+++ b/deps/v8/test/mjsunit/regress/regress-821368.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-const worker = new Worker("onmessage = function(){}");
+const worker = new Worker("onmessage = function(){}", {type: 'string'});
const buffer = new ArrayBuffer();
worker.postMessage(buffer, [buffer]);
try {
diff --git a/deps/v8/test/mjsunit/regress/regress-8237.js b/deps/v8/test/mjsunit/regress/regress-8237.js
new file mode 100644
index 0000000000..c3abd17e8a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8237.js
@@ -0,0 +1,57 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-always-opt
+// Files: test/mjsunit/code-coverage-utils.js
+
+%DebugToggleBlockCoverage(true);
+
+TestCoverage(
+"Repro for the bug",
+`
+function lib (n) { // 0000
+ if (n >= 0) { // 0050
+ if (n < 0) { // 0100
+ return; // 0150
+ } // 0200
+ } else if (foo()) { // 0250
+ } // 0300
+} // 0350
+function foo () { // 0400
+ console.log('foo') // 0450
+ return false // 0500
+} // 0550
+lib(1) // 0600
+`,
+[{"start":0,"end":649,"count":1},
+{"start":0,"end":351,"count":1},
+{"start":115,"end":205,"count":0},
+{"start":253,"end":303,"count":0},
+{"start":400,"end":551,"count":0}]
+);
+
+TestCoverage(
+"Variant with omitted brackets",
+`
+function lib (n) { // 0000
+ if (n >= 0) { // 0050
+ if (n < 0) // 0100
+ return; // 0150
+ } // 0200
+ else if (foo()); // 0250
+} // 0300
+function foo () { // 0350
+ console.log('foo') // 0400
+ return false // 0450
+} // 0500
+lib(1) // 0550
+`,
+[{"start":0,"end":599,"count":1},
+{"start":0,"end":301,"count":1},
+{"start":156,"end":163,"count":0},
+{"start":203,"end":268,"count":0},
+{"start":350,"end":501,"count":0}]
+);
+
+%DebugToggleBlockCoverage(false);
diff --git a/deps/v8/test/mjsunit/regress/regress-8265.js b/deps/v8/test/mjsunit/regress/regress-8265.js
new file mode 100644
index 0000000000..ffe6191c9f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8265.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --random-seed=1
+
+for (let i = 0; i < 54; ++i) Math.random();
+let sum = 0;
+for (let i = 0; i < 10; ++i)
+ sum += Math.floor(Math.random() * 50);
+
+assertNotEquals(0, sum);
diff --git a/deps/v8/test/mjsunit/regress/regress-8449.js b/deps/v8/test/mjsunit/regress/regress-8449.js
new file mode 100644
index 0000000000..32fa82aa6e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8449.js
@@ -0,0 +1,25 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+{
+ const x = [, 1];
+ x.__proto__ = [42];
+ const y = [...x];
+ assertEquals([42, 1], y);
+ assertTrue(y.hasOwnProperty(0));
+}
+
+{
+ const x = [, 1];
+ x.__proto__ = [42];
+ assertEquals(42, x[Symbol.iterator]().next().value);
+}
+
+{
+ const array_prototype = [].__proto__;
+ array_prototype[0] = 42;
+ const x = [, 1];
+ assertEquals(42, x[Symbol.iterator]().next().value);
+ delete array_prototype[0];
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-883059.js b/deps/v8/test/mjsunit/regress/regress-883059.js
new file mode 100644
index 0000000000..ed70feddb4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-883059.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --random-seed=-1595876594 --disable-in-process-stack-traces --no-lazy
+
+var __v_47 = ({[__v_46]: __f_52}) => { var __v_46 = 'b'; return __f_52; };
diff --git a/deps/v8/test/mjsunit/regress/regress-889722.js b/deps/v8/test/mjsunit/regress/regress-889722.js
new file mode 100644
index 0000000000..c883dbe489
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-889722.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function getRandomProperty(v, rand) {
+ var properties = Object.getOwnPropertyNames(v);
+ return properties[rand % properties.length];
+}
+r = Realm.create();
+o = Realm.eval(r, "() => { return Realm.global(-10) instanceof Object }");
+o.__p_211203344 = o[getRandomProperty(o, 211203344)];
diff --git a/deps/v8/test/mjsunit/regress/regress-890553.js b/deps/v8/test/mjsunit/regress/regress-890553.js
new file mode 100644
index 0000000000..33f13e8f26
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-890553.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+var s = "function __f_9(func, testName) {" +
+ "var __v_0 = function __f_10(__v_14, __v_14) {" +
+ " return __v_16;" +
+ "}; " +
+"}"
+assertThrows(function() { eval(s); });
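(The eval'd source declares __f_10(__v_14, __v_14) with a duplicate parameter name; since the surrounding script is strict, the eval code is strict as well, and duplicate formal parameters are a SyntaxError in strict mode, which is exactly what assertThrows checks.)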
diff --git a/deps/v8/test/mjsunit/regress/regress-2185.js b/deps/v8/test/mjsunit/regress/regress-892858.js
index 9b91066f33..ec921e3d43 100644
--- a/deps/v8/test/mjsunit/regress/regress-2185.js
+++ b/deps/v8/test/mjsunit/regress/regress-892858.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2018 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,14 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --noenable-slow-asserts
-
-var a = [];
-
-for (var i = 0; i < 2; i++) {
- for (var j = 0; j < 30000; j++) {
- a.push(j);
- }
+async function foo() {
+ await Promise.resolve(42);
}
-a.sort(function(a, b) { return a - b; } );
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-arrow-single-expression-eval.js b/deps/v8/test/mjsunit/regress/regress-arrow-single-expression-eval.js
new file mode 100644
index 0000000000..83a9ca1a56
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-arrow-single-expression-eval.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+((x=1) => eval("var x = 10"))();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-380671.js b/deps/v8/test/mjsunit/regress/regress-crbug-380671.js
deleted file mode 100644
index e2909e0a43..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-380671.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --mock-arraybuffer-allocator --expose-gc
-
-var buffer = new ArrayBuffer(0xc0000000);
-assertEquals(0xc0000000, buffer.byteLength);
-// We call the GC here to free up the large array buffer. Otherwise, the
-// mock allocator would allow us to allocate more than the physical memory
-// available on 32bit platforms, leaving the internal counters in an invalid
-// state.
-buffer = null;
-gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-503578.js b/deps/v8/test/mjsunit/regress/regress-crbug-503578.js
index 1274d91ffe..59c33da20e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-503578.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-503578.js
@@ -8,7 +8,7 @@ if (this.Worker) {
var __v_5 = new Uint32Array(__v_1);
return __v_5;
}
- var __v_6 = new Worker('onmessage = function() {}');
+ var __v_6 = new Worker('onmessage = function() {}', {type: 'string'});
var __v_3 = __f_0(16);
__v_6.postMessage(__v_3);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-503698.js b/deps/v8/test/mjsunit/regress/regress-crbug-503698.js
index 415d1bc81b..c817bede29 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-503698.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-503698.js
@@ -5,5 +5,5 @@
// Flags: --invoke-weak-callbacks
if (this.Worker) {
- var __v_6 = new Worker('');
+ var __v_6 = new Worker('', {type: 'string'});
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-503968.js b/deps/v8/test/mjsunit/regress/regress-crbug-503968.js
index 78d1c7b98a..0552163144 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-503968.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-503968.js
@@ -8,6 +8,6 @@ if (this.Worker) {
this.l = [new __f_0, new __f_0];
}
__v_6 = new __f_1;
- var __v_9 = new Worker('');
+ var __v_9 = new Worker('', {type: 'string'});
__v_9.postMessage(__v_6);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-503991.js b/deps/v8/test/mjsunit/regress/regress-crbug-503991.js
index 6a3b0de759..7b96e92677 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-503991.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-503991.js
@@ -4,6 +4,6 @@
if (this.Worker) {
__v_3 = "";
- var __v_6 = new Worker('');
+ var __v_6 = new Worker('', {type: 'string'});
__v_6.postMessage(__v_3);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-504136.js b/deps/v8/test/mjsunit/regress/regress-crbug-504136.js
index 4ed6843544..25b11e0fec 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-504136.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-504136.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
if (this.Worker) {
- var __v_10 = new Worker('');
+ var __v_10 = new Worker('', {type: 'string'});
__v_10.terminate();
__v_10.getMessage();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-504727.js b/deps/v8/test/mjsunit/regress/regress-crbug-504727.js
index 16d8ff16cd..a295a1bd35 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-504727.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-504727.js
@@ -5,5 +5,5 @@
// Flags: --no-test
if (this.Worker) {
- var __v_2 = new Worker('');
+ var __v_2 = new Worker('', {type: 'string'});
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-504729.js b/deps/v8/test/mjsunit/regress/regress-crbug-504729.js
index 435cafee87..8b1f8b0899 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-504729.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-504729.js
@@ -5,5 +5,5 @@
if (this.Worker) {
Function.prototype.toString = "foo";
function __f_7() {}
- assertThrows(function() { var __v_5 = new Worker(__f_7.toString()); });
+ assertThrows(function() { var __v_5 = new Worker(__f_7.toString(), {type: 'string'}) });
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-505778.js b/deps/v8/test/mjsunit/regress/regress-crbug-505778.js
index 74d96ab094..3d87ba6982 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-505778.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-505778.js
@@ -3,6 +3,6 @@
// found in the LICENSE file.
if (this.Worker) {
- var __v_7 = new Worker('onmessage = function() {}');
+ var __v_7 = new Worker('onmessage = function() {}', {type: 'string'});
__v_7.postMessage("");
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-506549.js b/deps/v8/test/mjsunit/regress/regress-crbug-506549.js
index 40e162caf5..219fd10cb7 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-506549.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-506549.js
@@ -5,6 +5,6 @@
if (this.Worker) {
var __v_5 = {};
__v_5.__defineGetter__('byteLength', function() {foo();});
- var __v_8 = new Worker('onmessage = function() {};');
+ var __v_8 = new Worker('onmessage = function() {};', {type: 'string'});
assertThrows(function() { __v_8.postMessage(__v_5); });
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-511880.js b/deps/v8/test/mjsunit/regress/regress-crbug-511880.js
index f9b05ff7bc..5dceb2a733 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-511880.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-511880.js
@@ -4,10 +4,10 @@
if (this.Worker) {
var __v_8 =
- `var __v_9 = new Worker('postMessage(42)');
+ `var __v_9 = new Worker('postMessage(42)', {type: 'string'});
onmessage = function(parentMsg) {
__v_9.postMessage(parentMsg);
};`;
- var __v_9 = new Worker(__v_8);
+ var __v_9 = new Worker(__v_8, {type: 'string'});
__v_9.postMessage(9);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-514081.js b/deps/v8/test/mjsunit/regress/regress-crbug-514081.js
index ee3ed81300..3d99b4586b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-514081.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-514081.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
if (this.Worker) {
- var __v_7 = new Worker('onmessage = function() {};');
+ var __v_7 = new Worker('onmessage = function() {};', {type: 'string'});
var e;
var ab = new ArrayBuffer(2 * 1000 * 1000);
try {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-518747.js b/deps/v8/test/mjsunit/regress/regress-crbug-518747.js
index f1787c4c4b..2a4058d401 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-518747.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-518747.js
@@ -4,6 +4,6 @@
if (this.Worker) {
Worker.prototype = 12;
- var __v_6 = new Worker('');
+ var __v_6 = new Worker('', {type: 'string'});
__v_6.postMessage([]);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-522496.js b/deps/v8/test/mjsunit/regress/regress-crbug-522496.js
index e47e0a0677..4dea5a89ac 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-522496.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-522496.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
if (this.Worker) {
- var worker = new Worker("onmessage = function(){}");
+ var worker = new Worker("onmessage = function(){}", {type: 'string'});
var buf = new ArrayBuffer();
worker.postMessage(buf, [buf]);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-687063.js b/deps/v8/test/mjsunit/regress/regress-crbug-687063.js
new file mode 100644
index 0000000000..8c579331fb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-687063.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Collect the actual properties looked up on the Proxy.
+const actual = [];
+
+// Perform a relational comparison with a Proxy on the right hand
+// side and a Symbol which cannot be turned into a Number on the
+// left hand side.
+function foo() {
+ actual.length = 0;
+ const lhs = Symbol();
+ const rhs = new Proxy({}, {
+ get: function(target, property, receiver) {
+ actual.push(property);
+ return undefined;
+ }
+ });
+ return lhs < rhs;
+}
+
+assertThrows(foo, TypeError);
+assertEquals([Symbol.toPrimitive, 'valueOf', 'toString'], actual);
+assertThrows(foo, TypeError);
+assertEquals([Symbol.toPrimitive, 'valueOf', 'toString'], actual);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo, TypeError);
+assertEquals([Symbol.toPrimitive, 'valueOf', 'toString'], actual);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-722871.js b/deps/v8/test/mjsunit/regress/regress-crbug-722871.js
index c5b7958f49..e71f083be6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-722871.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-722871.js
@@ -7,7 +7,7 @@ let workers = [];
let runningWorkers = 0;
function startWorker(script) {
- let worker = new Worker(script);
+ let worker = new Worker(script, {type: 'string'});
worker.done = false;
worker.idx = workers.length;
workers.push(worker);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-876443.js b/deps/v8/test/mjsunit/regress/regress-crbug-876443.js
new file mode 100644
index 0000000000..33ffa020f3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-876443.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags:
+
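+// The first splice() empties the array; the second clamps the negative
+// start index to 0, deletes nothing, and inserts 10.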
+var a = [5.65];
+a.splice(0);
+var b = a.splice(-4, 9, 10);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-878845.js b/deps/v8/test/mjsunit/regress/regress-crbug-878845.js
new file mode 100644
index 0000000000..2913f9e849
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-878845.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let arr = [, 0.1];
+
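+// The fromIndex argument is an object whose valueOf() truncates the array,
+// so lastIndexOf must not read elements past the new length.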
+Array.prototype.lastIndexOf.call(arr, 100, {
+ valueOf() {
+ arr.length = 0;
+ }
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-879560.js b/deps/v8/test/mjsunit/regress/regress-crbug-879560.js
new file mode 100644
index 0000000000..a17deadfcd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-879560.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
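+// x-- coerces undefined to NaN, which is falsy, so the loop body never runs.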
+function foo() {
+ var x = 1;
+ x = undefined;
+ while (x--) ;
+}
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-879898.js b/deps/v8/test/mjsunit/regress/regress-crbug-879898.js
new file mode 100644
index 0000000000..c97001ae5c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-879898.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ return Symbol.toPrimitive++;
+}
+assertThrows(foo);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-880207.js b/deps/v8/test/mjsunit/regress/regress-crbug-880207.js
new file mode 100644
index 0000000000..09796a9ff4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-880207.js
@@ -0,0 +1,37 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestOptimizedFastExpm1MinusZero() {
+ function foo() {
+ return Object.is(Math.expm1(-0), -0);
+ }
+
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+(function TestOptimizedExpm1MinusZeroSlowPath() {
+ function f(x) {
+ return Object.is(Math.expm1(x), -0);
+ }
+
+ function g() {
+ return f(-0);
+ }
+
+ f(0);
+ // Compile function optimistically for numbers (with fast inlined
+ // path for Math.expm1).
+ %OptimizeFunctionOnNextCall(f);
+ // Invalidate the optimistic assumption, deopting and marking non-number
+ // input feedback in the call IC.
+ f("0");
+ // Optimize again, now with non-lowered call to Math.expm1.
+ assertTrue(g());
+ %OptimizeFunctionOnNextCall(g);
+ assertTrue(g());
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-882233-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-882233-1.js
new file mode 100644
index 0000000000..197660a683
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-882233-1.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Intended to test bug [882233] on CSA fast-path.
+
+let array = [];
+Object.defineProperty(array, 'length', {writable: false});
+
+assertEquals(array.length, 0);
+assertThrows(() => array.shift(), TypeError);
+
+let object = { length: 0 };
+Object.defineProperty(object, 'length', {writable: false});
+
+assertEquals(object.length, 0);
+assertThrows(() => Array.prototype.shift.call(object));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-882233-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-882233-2.js
new file mode 100644
index 0000000000..565e5fbc23
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-882233-2.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Intended to test bug [882233] on TF inlined (js-call-reducer) path.
+
+function shift_array() {
+ let array = [];
+ Object.defineProperty(array, 'length', {writable : false});
+ return array.shift();
+}
+
+assertThrows(shift_array);
+assertThrows(shift_array);
+%OptimizeFunctionOnNextCall(shift_array);
+assertThrows(shift_array);
+assertOptimized(shift_array);
+
+
+function shift_object() {
+ let object = { length: 0 };
+ Object.defineProperty(object, 'length', {writable : false});
+ return object.shift();
+}
+
+assertThrows(shift_object);
+assertThrows(shift_object);
+%OptimizeFunctionOnNextCall(shift_object);
+assertThrows(shift_object);
+assertOptimized(shift_object);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-884933.js b/deps/v8/test/mjsunit/regress/regress-crbug-884933.js
new file mode 100644
index 0000000000..447d303bbf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-884933.js
@@ -0,0 +1,85 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test Uint8 -> Word64 conversions.
+(function() {
+ function bar(x, y) {
+ return x + y;
+ }
+
+ bar(0.1, 0.2);
+ bar(0.1, 0.2);
+
+ function foo(dv) {
+ return bar(dv.getUint8(0, true), 0xFFFFFFFF);
+ }
+
+ const dv = new DataView(new ArrayBuffer(8));
+ assertEquals(0xFFFFFFFF, foo(dv));
+ assertEquals(0xFFFFFFFF, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0xFFFFFFFF, foo(dv));
+})();
+
+// Test Int8 -> Word64 conversions.
+(function() {
+ function bar(x, y) {
+ return x + y;
+ }
+
+ bar(0.1, 0.2);
+ bar(0.1, 0.2);
+
+ function foo(dv) {
+ return bar(dv.getInt8(0, true), 0xFFFFFFFF);
+ }
+
+ const dv = new DataView(new ArrayBuffer(8));
+ assertEquals(0xFFFFFFFF, foo(dv));
+ assertEquals(0xFFFFFFFF, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0xFFFFFFFF, foo(dv));
+})();
+
+// Test Uint16 -> Word64 conversions.
+(function() {
+ function bar(x, y) {
+ return x + y;
+ }
+
+ bar(0.1, 0.2);
+ bar(0.1, 0.2);
+
+ function foo(dv) {
+ return bar(dv.getUint16(0, true), 0xFFFFFFFF);
+ }
+
+ const dv = new DataView(new ArrayBuffer(8));
+ assertEquals(0xFFFFFFFF, foo(dv));
+ assertEquals(0xFFFFFFFF, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0xFFFFFFFF, foo(dv));
+})();
+
+// Test Int16 -> Word64 conversions.
+(function() {
+ function bar(x, y) {
+ return x + y;
+ }
+
+ bar(0.1, 0.2);
+ bar(0.1, 0.2);
+
+ function foo(dv) {
+ return bar(dv.getInt16(0, true), 0xFFFFFFFF);
+ }
+
+ const dv = new DataView(new ArrayBuffer(8));
+ assertEquals(0xFFFFFFFF, foo(dv));
+ assertEquals(0xFFFFFFFF, foo(dv));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0xFFFFFFFF, foo(dv));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-885404.js b/deps/v8/test/mjsunit/regress/regress-crbug-885404.js
new file mode 100644
index 0000000000..534b883a01
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-885404.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap --expose-gc
+
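+// A 2-byte buffer is too small for an Int32Array, so the constructor throws;
+// the buffer's length must survive the failed construction and a GC.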
+var ab = new ArrayBuffer(2);
+try { new Int32Array(ab); } catch (e) { }
+assertEquals(2, ab.byteLength);
+gc();
+assertEquals(2, ab.byteLength);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-887891.js b/deps/v8/test/mjsunit/regress/regress-crbug-887891.js
new file mode 100644
index 0000000000..0e72ab263a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-887891.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --verify-heap
+
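+// Allocating a billion-element Int32Array is expected to fail; --verify-heap
+// checks that the failed allocation leaves the heap in a consistent state.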
+const l = 1000000000;
+const a = [];
+function foo() { var x = new Int32Array(l); }
+try { foo(); } catch (e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-888825.js b/deps/v8/test/mjsunit/regress/regress-crbug-888825.js
new file mode 100644
index 0000000000..9aa52d62e1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-888825.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
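+// A default parameter whose named function expression `g` declares another
+// function `g` in its body exercises scope resolution for the shadowed name.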
+eval("((a=function g() { function g() {}}) => {})();");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-890243.js b/deps/v8/test/mjsunit/regress/regress-crbug-890243.js
new file mode 100644
index 0000000000..0d889b2787
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-890243.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// We need a SpeculativeNumberAdd with Number feedback.
+function bar(x) { return x + x; }
+bar(0.1);
+
+// We also need an indirection via an object field such
+// that only after escape analysis TurboFan can figure
+// out that the value `y` is actually a Number in the
+// safe integer range.
+function baz(y) { return {y}; }
+baz(null); baz(0);
+
+// Now we can put all of that together to get a kRepBit
+// use of a kWord64 value (on 64-bit architectures).
+function foo(o) {
+ return !baz(bar(o.x)).y;
+}
+
+assertFalse(foo({x:1}));
+assertFalse(foo({x:1}));
+%OptimizeFunctionOnNextCall(foo);
+assertFalse(foo({x:1}));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-891627.js b/deps/v8/test/mjsunit/regress/regress-crbug-891627.js
new file mode 100644
index 0000000000..afe4093c96
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-891627.js
@@ -0,0 +1,43 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// We need a NumberModulus, so we make sure that we have a
+// SpeculativeNumberModulus with Number feedback, and later
+// on use it with known Number inputs (via the bitwise or),
+// such that JSTypedLowering turns it into the NumberModulus.
+function bar(x) { return x % 2; }
+bar(0.1);
+
+// Check that the Word32->Float64 conversion works properly.
+(function() {
+ function foo(x) {
+ // The NumberEqual identifies 0 and -0.
+ return bar(x | -1) == 4294967295;
+ }
+
+ assertFalse(foo(1));
+ assertFalse(foo(0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(1));
+ assertFalse(foo(0));
+})();
+
+// Check that the Word32->Word32 conversion works properly.
+(function() {
+ function makeFoo(y) {
+ return function foo(x) {
+ return bar(x | -1) == y;
+ }
+ }
+ makeFoo(0); // Defeat the function context specialization.
+ const foo = makeFoo(1);
+
+ assertFalse(foo(1));
+ assertFalse(foo(0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(1));
+ assertFalse(foo(0));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-892472-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-892472-1.js
new file mode 100644
index 0000000000..d6332fd399
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-892472-1.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --async-stack-traces
+
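+// With exec replaced by test itself, the call recurses until the stack
+// overflows; this must surface as an exception, not a crash.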
+const a = /x/;
+a.exec = RegExp.prototype.test;
+assertThrows(() => RegExp.prototype.test.call(a));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-892472-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-892472-2.js
new file mode 100644
index 0000000000..0f9a2c1816
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-892472-2.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --async-stack-traces
+
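+// Object.prototype.toLocaleString calls this.toString(), which here is
+// toLocaleString again, recursing until the stack overflows.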
+assertThrows(_ => '' + {toString: Object.prototype.toLocaleString});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-897514.js b/deps/v8/test/mjsunit/regress/regress-crbug-897514.js
new file mode 100644
index 0000000000..822a6bcf5e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-897514.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Create transtion => 'get a'.
+let o = {};
+Object.defineProperty(o, 'a', {
+ enumerable: true,
+ configurable: true,
+ get: function() { return 7 }
+});
+
+function spread(o) {
+ let result = { ...o };
+ %HeapObjectVerify(result);
+ return result;
+}
+
+for (let i = 0; i<3; i++) {
+ spread([]);
+ // Use different transition => 'a'.
+ spread({ a:0 });
+ spread("abc");
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-899524.js b/deps/v8/test/mjsunit/regress/regress-crbug-899524.js
new file mode 100644
index 0000000000..32d28c9b09
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-899524.js
@@ -0,0 +1,33 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function empty() { }
+
+function baz(expected, found) {
+ var start = "";
+ found.length, start + 'x';
+ if (expected.length === found.length) {
+ for (var i = 0; i < expected.length; ++i) {
+ empty(found[i]);
+ }
+ }
+}
+
+baz([1], new (class A extends Array {}));
+
+(function () {
+ "use strict";
+ function bar() {
+ baz([1,2], arguments);
+ }
+ function foo() {
+ bar(2147483648,-[]);
+ }
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-90771.js b/deps/v8/test/mjsunit/regress/regress-crbug-90771.js
new file mode 100644
index 0000000000..b541ff8cc2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-90771.js
@@ -0,0 +1,15 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Flags: --allow-natives-syntax
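+// Try Reflect.construct with every global value as new.target; invalid
+// targets throw and are ignored.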
+function target() {};
+
+for (let key of Object.getOwnPropertyNames(this)) {
+ try {
+ let newTarget = this[key];
+ let arg = target;
+ Reflect.construct(target, arg, newTarget);
+ } catch {}
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-7682.js b/deps/v8/test/mjsunit/regress/regress-v8-7682.js
index 4f9b2e7d60..86f12f5b74 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-7682.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-7682.js
@@ -18,5 +18,9 @@ class MyArrayLike {
const xs = new MyArrayLike();
Array.prototype.sort.call(xs);
-assertEquals(1, xs[0]);
-assertEquals(2, xs[1]);
+// Sort-order is implementation-defined as we actually hit two conditions from
+// the spec:
+// - "xs" is sparse and IsExtensible(xs) is false (its frozen).
+// - "xs" is sparse and the prototype has properties in the sort range.
+assertEquals(2, xs[0]);
+assertEquals(1, xs[1]);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-801850.js b/deps/v8/test/mjsunit/regress/wasm/regress-801850.js
index ad6ff4c432..b56af694a9 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-801850.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-801850.js
@@ -7,5 +7,5 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
let module = new WebAssembly.Module(builder.toBuffer());
-var worker = new Worker('onmessage = function() {};');
+var worker = new Worker('onmessage = function() {};', {type: 'string'});
worker.postMessage(module)
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-803427.js b/deps/v8/test/mjsunit/regress/wasm/regress-803427.js
index 833b140fd4..d3ab31b4c9 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-803427.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-803427.js
@@ -9,5 +9,5 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
let module = new WebAssembly.Module(builder.toBuffer());
-var worker = new Worker('onmessage = function() {};');
+var worker = new Worker('onmessage = function() {};', {type: 'string'});
worker.postMessage(module)
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8059.js b/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
index 5c421c4ee4..c30ed152f8 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
@@ -31,7 +31,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
Realm.shared = { m:module, s:workerScript };
let realmScript = `
- let worker = new Worker(Realm.shared.s);
+ let worker = new Worker(Realm.shared.s, {type: 'string'});
worker.postMessage(Realm.shared.m);
let message = worker.getMessage();
worker.terminate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808012.js b/deps/v8/test/mjsunit/regress/wasm/regress-808012.js
index 1b91f226a8..ae613ceb54 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-808012.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808012.js
@@ -10,5 +10,5 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
builder.addFunction('test', kSig_i_i).addBody([kExprUnreachable]);
let module = new WebAssembly.Module(builder.toBuffer());
-var worker = new Worker('onmessage = function() {};');
+var worker = new Worker('onmessage = function() {};', {type: 'string'});
worker.postMessage(module);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808848.js b/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
index 69423b954d..bcf8469a14 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
@@ -60,7 +60,7 @@ let worker_onmessage = function(msg) {
}
let workerScript = "onmessage = " + worker_onmessage.toString();
-let worker = new Worker(workerScript);
+let worker = new Worker(workerScript, {type: 'string'});
worker.postMessage({serialized_m1, m1_bytes});
// Wait for worker to finish.
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8094.js b/deps/v8/test/mjsunit/regress/wasm/regress-8094.js
new file mode 100644
index 0000000000..a35d583a4a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8094.js
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-eh
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// Instantiate a throwing module.
+var builder = new WasmModuleBuilder();
+builder.addException(kSig_v_v);
+builder.addFunction("propel", kSig_v_v)
+ .addBody([kExprThrow, 0])
+ .exportFunc();
+var instance = builder.instantiate();
+
+// Catch the exception.
+var exception;
+try {
+ instance.exports.propel();
+} catch (e) {
+ exception = e;
+}
+
+// Check that the exception is an instance of the correct error function and
+// that no extraneous properties exist. Setting such properties could be
+// observable by JavaScript and could break compatibility.
+assertInstanceof(exception, WebAssembly.RuntimeError);
+assertArrayEquals(["stack", "message"], Object.getOwnPropertyNames(exception));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8095.js b/deps/v8/test/mjsunit/regress/wasm/regress-8095.js
new file mode 100644
index 0000000000..66ffc0d4b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8095.js
@@ -0,0 +1,25 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-eh
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// Prepare a special error object to throw.
+var error = new Error("my error");
+error.__proto__ = new Proxy(new Error(), {
+ has(target, property, receiver) {
+ assertUnreachable();
+ }
+});
+
+// Throw it through a WebAssembly module.
+var builder = new WasmModuleBuilder();
+builder.addImport('mod', 'fun', kSig_v_v);
+builder.addFunction("funnel", kSig_v_v)
+ .addBody([kExprCallFunction, 0])
+ .exportFunc();
+var instance = builder.instantiate({ mod: {fun: function() { throw error }}});
+assertThrows(instance.exports.funnel, Error);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-810973.js b/deps/v8/test/mjsunit/regress/wasm/regress-810973.js
index 5a776884ee..bd3d902772 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-810973.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-810973.js
@@ -12,7 +12,7 @@ function MjsUnitAssertionError() {
};
let __v_692 = `(function module() { "use asm";function foo(`;
const __v_693 =
-3695;
+1005;
for (let __v_695 = 0; __v_695 < __v_693; ++__v_695) {
__v_692 += `arg${__v_695},`;
}
@@ -28,5 +28,6 @@ for (let __v_697 = 0; __v_697 < __v_693; ++__v_697) {
}
__v_692 += "1.0)|0;}";
- __v_692 += "return bar})()()";
+__v_692 += "return bar})()()";
+
const __v_694 = eval(__v_692);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-810973b.js b/deps/v8/test/mjsunit/regress/wasm/regress-810973b.js
new file mode 100644
index 0000000000..227bf55fc0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-810973b.js
@@ -0,0 +1,1209 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function module() {
+ "use asm";
+ function foo(
+ a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11,
+ a12, a13, a14, a15, a16, a17, a18, a19, a20, a21,
+ a22, a23, a24, a25, a26, a27, a28, a29, a30, a31,
+ a32, a33, a34, a35, a36, a37, a38, a39, a40, a41,
+ a42, a43, a44, a45, a46, a47, a48, a49, a50, a51,
+ a52, a53, a54, a55, a56, a57, a58, a59, a60, a61,
+ a62, a63, a64, a65, a66, a67, a68, a69, a70, a71,
+ a72, a73, a74, a75, a76, a77, a78, a79, a80, a81,
+ a82, a83, a84, a85, a86, a87, a88, a89, a90, a91,
+ a92, a93, a94, a95, a96, a97, a98, a99, a100, a101,
+ a102, a103, a104, a105, a106, a107, a108, a109, a110,
+ a111, a112, a113, a114, a115, a116, a117, a118, a119,
+ a120, a121, a122, a123, a124, a125, a126, a127, a128,
+ a129, a130, a131, a132, a133, a134, a135, a136, a137,
+ a138, a139, a140, a141, a142, a143, a144, a145, a146,
+ a147, a148, a149, a150, a151, a152, a153, a154, a155,
+ a156, a157, a158, a159, a160, a161, a162, a163, a164,
+ a165, a166, a167, a168, a169, a170, a171, a172, a173,
+ a174, a175, a176, a177, a178, a179, a180, a181, a182,
+ a183, a184, a185, a186, a187, a188, a189, a190, a191,
+ a192, a193, a194, a195, a196, a197, a198, a199, a200,
+ a201, a202, a203, a204, a205, a206, a207, a208, a209,
+ a210, a211, a212, a213, a214, a215, a216, a217, a218,
+ a219, a220, a221, a222, a223, a224, a225, a226, a227,
+ a228, a229, a230, a231, a232, a233, a234, a235, a236,
+ a237, a238, a239, a240, a241, a242, a243, a244, a245,
+ a246, a247, a248, a249, a250, a251, a252, a253, a254,
+ a255, a256, a257, a258, a259, a260, a261, a262, a263,
+ a264, a265, a266, a267, a268, a269, a270, a271, a272,
+ a273, a274, a275, a276, a277, a278, a279, a280, a281,
+ a282, a283, a284, a285, a286, a287, a288, a289, a290,
+ a291, a292, a293, a294, a295, a296, a297, a298, a299,
+ a300, a301, a302, a303, a304, a305, a306, a307, a308,
+ a309, a310, a311, a312, a313, a314, a315, a316, a317,
+ a318, a319, a320, a321, a322, a323, a324, a325, a326,
+ a327, a328, a329, a330, a331, a332, a333, a334, a335,
+ a336, a337, a338, a339, a340, a341, a342, a343, a344,
+ a345, a346, a347, a348, a349, a350, a351, a352, a353,
+ a354, a355, a356, a357, a358, a359, a360, a361, a362,
+ a363, a364, a365, a366, a367, a368, a369, a370, a371,
+ a372, a373, a374, a375, a376, a377, a378, a379, a380,
+ a381, a382, a383, a384, a385, a386, a387, a388, a389,
+ a390, a391, a392, a393, a394, a395, a396, a397, a398,
+ a399, a400, a401, a402, a403, a404, a405, a406, a407,
+ a408, a409, a410, a411, a412, a413, a414, a415, a416,
+ a417, a418, a419, a420, a421, a422, a423, a424, a425,
+ a426, a427, a428, a429, a430, a431, a432, a433, a434,
+ a435, a436, a437, a438, a439, a440, a441, a442, a443,
+ a444, a445, a446, a447, a448, a449, a450, a451, a452,
+ a453, a454, a455, a456, a457, a458, a459, a460, a461,
+ a462, a463, a464, a465, a466, a467, a468, a469, a470,
+ a471, a472, a473, a474, a475, a476, a477, a478, a479,
+ a480, a481, a482, a483, a484, a485, a486, a487, a488,
+ a489, a490, a491, a492, a493, a494, a495, a496, a497,
+ a498, a499, a500, a501, a502, a503, a504, a505, a506,
+ a507, a508, a509, a510, a511, a512, a513, a514, a515,
+ a516, a517, a518, a519, a520, a521, a522, a523, a524,
+ a525, a526, a527, a528, a529, a530, a531, a532, a533,
+ a534, a535, a536, a537, a538, a539, a540, a541, a542,
+ a543, a544, a545, a546, a547, a548, a549, a550, a551,
+ a552, a553, a554, a555, a556, a557, a558, a559, a560,
+ a561, a562, a563, a564, a565, a566, a567, a568, a569,
+ a570, a571, a572, a573, a574, a575, a576, a577, a578,
+ a579, a580, a581, a582, a583, a584, a585, a586, a587,
+ a588, a589, a590, a591, a592, a593, a594, a595, a596,
+ a597, a598, a599, a600, a601, a602, a603, a604, a605,
+ a606, a607, a608, a609, a610, a611, a612, a613, a614,
+ a615, a616, a617, a618, a619, a620, a621, a622, a623,
+ a624, a625, a626, a627, a628, a629, a630, a631, a632,
+ a633, a634, a635, a636, a637, a638, a639, a640, a641,
+ a642, a643, a644, a645, a646, a647, a648, a649, a650,
+ a651, a652, a653, a654, a655, a656, a657, a658, a659,
+ a660, a661, a662, a663, a664, a665, a666, a667, a668,
+ a669, a670, a671, a672, a673, a674, a675, a676, a677,
+ a678, a679, a680, a681, a682, a683, a684, a685, a686,
+ a687, a688, a689, a690, a691, a692, a693, a694, a695,
+ a696, a697, a698, a699, a700, a701, a702, a703, a704,
+ a705, a706, a707, a708, a709, a710, a711, a712, a713,
+ a714, a715, a716, a717, a718, a719, a720, a721, a722,
+ a723, a724, a725, a726, a727, a728, a729, a730, a731,
+ a732, a733, a734, a735, a736, a737, a738, a739, a740,
+ a741, a742, a743, a744, a745, a746, a747, a748, a749,
+ a750, a751, a752, a753, a754, a755, a756, a757, a758,
+ a759, a760, a761, a762, a763, a764, a765, a766, a767,
+ a768, a769, a770, a771, a772, a773, a774, a775, a776,
+ a777, a778, a779, a780, a781, a782, a783, a784, a785,
+ a786, a787, a788, a789, a790, a791, a792, a793, a794,
+ a795, a796, a797, a798, a799, a800, a801, a802, a803,
+ a804, a805, a806, a807, a808, a809, a810, a811, a812,
+ a813, a814, a815, a816, a817, a818, a819, a820, a821,
+ a822, a823, a824, a825, a826, a827, a828, a829, a830,
+ a831, a832, a833, a834, a835, a836, a837, a838, a839,
+ a840, a841, a842, a843, a844, a845, a846, a847, a848,
+ a849, a850, a851, a852, a853, a854, a855, a856, a857,
+ a858, a859, a860, a861, a862, a863, a864, a865, a866,
+ a867, a868, a869, a870, a871, a872, a873, a874, a875,
+ a876, a877, a878, a879, a880, a881, a882, a883, a884,
+ a885, a886, a887, a888, a889, a890, a891, a892, a893,
+ a894, a895, a896, a897, a898, a899, a900, a901, a902,
+ a903, a904, a905, a906, a907, a908, a909, a910, a911,
+ a912, a913, a914, a915, a916, a917, a918, a919, a920,
+ a921, a922, a923, a924, a925, a926, a927, a928, a929,
+ a930, a931, a932, a933, a934, a935, a936, a937, a938,
+ a939, a940, a941, a942, a943, a944, a945, a946, a947,
+ a948, a949, a950, a951, a952, a953, a954, a955, a956,
+ a957, a958, a959, a960, a961, a962, a963, a964, a965,
+ a966, a967, a968, a969, a970, a971, a972, a973, a974,
+ a975, a976, a977, a978, a979, a980, a981, a982, a983,
+ a984, a985, a986, a987, a988, a989, a990, a991, a992,
+ a993, a994, a995, a996, a997, a998, a999, a1000, a1001,
+ a1002, a1003, a1004, a1005) {
+ a0 = +a0;
+ a1 = +a1;
+ a2 = +a2;
+ a3 = +a3;
+ a4 = +a4;
+ a5 = +a5;
+ a6 = +a6;
+ a7 = +a7;
+ a8 = +a8;
+ a9 = +a9;
+ a10 = +a10;
+ a11 = +a11;
+ a12 = +a12;
+ a13 = +a13;
+ a14 = +a14;
+ a15 = +a15;
+ a16 = +a16;
+ a17 = +a17;
+ a18 = +a18;
+ a19 = +a19;
+ a20 = +a20;
+ a21 = +a21;
+ a22 = +a22;
+ a23 = +a23;
+ a24 = +a24;
+ a25 = +a25;
+ a26 = +a26;
+ a27 = +a27;
+ a28 = +a28;
+ a29 = +a29;
+ a30 = +a30;
+ a31 = +a31;
+ a32 = +a32;
+ a33 = +a33;
+ a34 = +a34;
+ a35 = +a35;
+ a36 = +a36;
+ a37 = +a37;
+ a38 = +a38;
+ a39 = +a39;
+ a40 = +a40;
+ a41 = +a41;
+ a42 = +a42;
+ a43 = +a43;
+ a44 = +a44;
+ a45 = +a45;
+ a46 = +a46;
+ a47 = +a47;
+ a48 = +a48;
+ a49 = +a49;
+ a50 = +a50;
+ a51 = +a51;
+ a52 = +a52;
+ a53 = +a53;
+ a54 = +a54;
+ a55 = +a55;
+ a56 = +a56;
+ a57 = +a57;
+ a58 = +a58;
+ a59 = +a59;
+ a60 = +a60;
+ a61 = +a61;
+ a62 = +a62;
+ a63 = +a63;
+ a64 = +a64;
+ a65 = +a65;
+ a66 = +a66;
+ a67 = +a67;
+ a68 = +a68;
+ a69 = +a69;
+ a70 = +a70;
+ a71 = +a71;
+ a72 = +a72;
+ a73 = +a73;
+ a74 = +a74;
+ a75 = +a75;
+ a76 = +a76;
+ a77 = +a77;
+ a78 = +a78;
+ a79 = +a79;
+ a80 = +a80;
+ a81 = +a81;
+ a82 = +a82;
+ a83 = +a83;
+ a84 = +a84;
+ a85 = +a85;
+ a86 = +a86;
+ a87 = +a87;
+ a88 = +a88;
+ a89 = +a89;
+ a90 = +a90;
+ a91 = +a91;
+ a92 = +a92;
+ a93 = +a93;
+ a94 = +a94;
+ a95 = +a95;
+ a96 = +a96;
+ a97 = +a97;
+ a98 = +a98;
+ a99 = +a99;
+ a100 = +a100;
+ a101 = +a101;
+ a102 = +a102;
+ a103 = +a103;
+ a104 = +a104;
+ a105 = +a105;
+ a106 = +a106;
+ a107 = +a107;
+ a108 = +a108;
+ a109 = +a109;
+ a110 = +a110;
+ a111 = +a111;
+ a112 = +a112;
+ a113 = +a113;
+ a114 = +a114;
+ a115 = +a115;
+ a116 = +a116;
+ a117 = +a117;
+ a118 = +a118;
+ a119 = +a119;
+ a120 = +a120;
+ a121 = +a121;
+ a122 = +a122;
+ a123 = +a123;
+ a124 = +a124;
+ a125 = +a125;
+ a126 = +a126;
+ a127 = +a127;
+ a128 = +a128;
+ a129 = +a129;
+ a130 = +a130;
+ a131 = +a131;
+ a132 = +a132;
+ a133 = +a133;
+ a134 = +a134;
+ a135 = +a135;
+ a136 = +a136;
+ a137 = +a137;
+ a138 = +a138;
+ a139 = +a139;
+ a140 = +a140;
+ a141 = +a141;
+ a142 = +a142;
+ a143 = +a143;
+ a144 = +a144;
+ a145 = +a145;
+ a146 = +a146;
+ a147 = +a147;
+ a148 = +a148;
+ a149 = +a149;
+ a150 = +a150;
+ a151 = +a151;
+ a152 = +a152;
+ a153 = +a153;
+ a154 = +a154;
+ a155 = +a155;
+ a156 = +a156;
+ a157 = +a157;
+ a158 = +a158;
+ a159 = +a159;
+ a160 = +a160;
+ a161 = +a161;
+ a162 = +a162;
+ a163 = +a163;
+ a164 = +a164;
+ a165 = +a165;
+ a166 = +a166;
+ a167 = +a167;
+ a168 = +a168;
+ a169 = +a169;
+ a170 = +a170;
+ a171 = +a171;
+ a172 = +a172;
+ a173 = +a173;
+ a174 = +a174;
+ a175 = +a175;
+ a176 = +a176;
+ a177 = +a177;
+ a178 = +a178;
+ a179 = +a179;
+ a180 = +a180;
+ a181 = +a181;
+ a182 = +a182;
+ a183 = +a183;
+ a184 = +a184;
+ a185 = +a185;
+ a186 = +a186;
+ a187 = +a187;
+ a188 = +a188;
+ a189 = +a189;
+ a190 = +a190;
+ a191 = +a191;
+ a192 = +a192;
+ a193 = +a193;
+ a194 = +a194;
+ a195 = +a195;
+ a196 = +a196;
+ a197 = +a197;
+ a198 = +a198;
+ a199 = +a199;
+ a200 = +a200;
+ a201 = +a201;
+ a202 = +a202;
+ a203 = +a203;
+ a204 = +a204;
+ a205 = +a205;
+ a206 = +a206;
+ a207 = +a207;
+ a208 = +a208;
+ a209 = +a209;
+ a210 = +a210;
+ a211 = +a211;
+ a212 = +a212;
+ a213 = +a213;
+ a214 = +a214;
+ a215 = +a215;
+ a216 = +a216;
+ a217 = +a217;
+ a218 = +a218;
+ a219 = +a219;
+ a220 = +a220;
+ a221 = +a221;
+ a222 = +a222;
+ a223 = +a223;
+ a224 = +a224;
+ a225 = +a225;
+ a226 = +a226;
+ a227 = +a227;
+ a228 = +a228;
+ a229 = +a229;
+ a230 = +a230;
+ a231 = +a231;
+ a232 = +a232;
+ a233 = +a233;
+ a234 = +a234;
+ a235 = +a235;
+ a236 = +a236;
+ a237 = +a237;
+ a238 = +a238;
+ a239 = +a239;
+ a240 = +a240;
+ a241 = +a241;
+ a242 = +a242;
+ a243 = +a243;
+ a244 = +a244;
+ a245 = +a245;
+ a246 = +a246;
+ a247 = +a247;
+ a248 = +a248;
+ a249 = +a249;
+ a250 = +a250;
+ a251 = +a251;
+ a252 = +a252;
+ a253 = +a253;
+ a254 = +a254;
+ a255 = +a255;
+ a256 = +a256;
+ a257 = +a257;
+ a258 = +a258;
+ a259 = +a259;
+ a260 = +a260;
+ a261 = +a261;
+ a262 = +a262;
+ a263 = +a263;
+ a264 = +a264;
+ a265 = +a265;
+ a266 = +a266;
+ a267 = +a267;
+ a268 = +a268;
+ a269 = +a269;
+ a270 = +a270;
+ a271 = +a271;
+ a272 = +a272;
+ a273 = +a273;
+ a274 = +a274;
+ a275 = +a275;
+ a276 = +a276;
+ a277 = +a277;
+ a278 = +a278;
+ a279 = +a279;
+ a280 = +a280;
+ a281 = +a281;
+ a282 = +a282;
+ a283 = +a283;
+ a284 = +a284;
+ a285 = +a285;
+ a286 = +a286;
+ a287 = +a287;
+ a288 = +a288;
+ a289 = +a289;
+ a290 = +a290;
+ a291 = +a291;
+ a292 = +a292;
+ a293 = +a293;
+ a294 = +a294;
+ a295 = +a295;
+ a296 = +a296;
+ a297 = +a297;
+ a298 = +a298;
+ a299 = +a299;
+ a300 = +a300;
+ a301 = +a301;
+ a302 = +a302;
+ a303 = +a303;
+ a304 = +a304;
+ a305 = +a305;
+ a306 = +a306;
+ a307 = +a307;
+ a308 = +a308;
+ a309 = +a309;
+ a310 = +a310;
+ a311 = +a311;
+ a312 = +a312;
+ a313 = +a313;
+ a314 = +a314;
+ a315 = +a315;
+ a316 = +a316;
+ a317 = +a317;
+ a318 = +a318;
+ a319 = +a319;
+ a320 = +a320;
+ a321 = +a321;
+ a322 = +a322;
+ a323 = +a323;
+ a324 = +a324;
+ a325 = +a325;
+ a326 = +a326;
+ a327 = +a327;
+ a328 = +a328;
+ a329 = +a329;
+ a330 = +a330;
+ a331 = +a331;
+ a332 = +a332;
+ a333 = +a333;
+ a334 = +a334;
+ a335 = +a335;
+ a336 = +a336;
+ a337 = +a337;
+ a338 = +a338;
+ a339 = +a339;
+ a340 = +a340;
+ a341 = +a341;
+ a342 = +a342;
+ a343 = +a343;
+ a344 = +a344;
+ a345 = +a345;
+ a346 = +a346;
+ a347 = +a347;
+ a348 = +a348;
+ a349 = +a349;
+ a350 = +a350;
+ a351 = +a351;
+ a352 = +a352;
+ a353 = +a353;
+ a354 = +a354;
+ a355 = +a355;
+ a356 = +a356;
+ a357 = +a357;
+ a358 = +a358;
+ a359 = +a359;
+ a360 = +a360;
+ a361 = +a361;
+ a362 = +a362;
+ a363 = +a363;
+ a364 = +a364;
+ a365 = +a365;
+ a366 = +a366;
+ a367 = +a367;
+ a368 = +a368;
+ a369 = +a369;
+ a370 = +a370;
+ a371 = +a371;
+ a372 = +a372;
+ a373 = +a373;
+ a374 = +a374;
+ a375 = +a375;
+ a376 = +a376;
+ a377 = +a377;
+ a378 = +a378;
+ a379 = +a379;
+ a380 = +a380;
+ a381 = +a381;
+ a382 = +a382;
+ a383 = +a383;
+ a384 = +a384;
+ a385 = +a385;
+ a386 = +a386;
+ a387 = +a387;
+ a388 = +a388;
+ a389 = +a389;
+ a390 = +a390;
+ a391 = +a391;
+ a392 = +a392;
+ a393 = +a393;
+ a394 = +a394;
+ a395 = +a395;
+ a396 = +a396;
+ a397 = +a397;
+ a398 = +a398;
+ a399 = +a399;
+ a400 = +a400;
+ a401 = +a401;
+ a402 = +a402;
+ a403 = +a403;
+ a404 = +a404;
+ a405 = +a405;
+ a406 = +a406;
+ a407 = +a407;
+ a408 = +a408;
+ a409 = +a409;
+ a410 = +a410;
+ a411 = +a411;
+ a412 = +a412;
+ a413 = +a413;
+ a414 = +a414;
+ a415 = +a415;
+ a416 = +a416;
+ a417 = +a417;
+ a418 = +a418;
+ a419 = +a419;
+ a420 = +a420;
+ a421 = +a421;
+ a422 = +a422;
+ a423 = +a423;
+ a424 = +a424;
+ a425 = +a425;
+ a426 = +a426;
+ a427 = +a427;
+ a428 = +a428;
+ a429 = +a429;
+ a430 = +a430;
+ a431 = +a431;
+ a432 = +a432;
+ a433 = +a433;
+ a434 = +a434;
+ a435 = +a435;
+ a436 = +a436;
+ a437 = +a437;
+ a438 = +a438;
+ a439 = +a439;
+ a440 = +a440;
+ a441 = +a441;
+ a442 = +a442;
+ a443 = +a443;
+ a444 = +a444;
+ a445 = +a445;
+ a446 = +a446;
+ a447 = +a447;
+ a448 = +a448;
+ a449 = +a449;
+ a450 = +a450;
+ a451 = +a451;
+ a452 = +a452;
+ a453 = +a453;
+ a454 = +a454;
+ a455 = +a455;
+ a456 = +a456;
+ a457 = +a457;
+ a458 = +a458;
+ a459 = +a459;
+ a460 = +a460;
+ a461 = +a461;
+ a462 = +a462;
+ a463 = +a463;
+ a464 = +a464;
+ a465 = +a465;
+ a466 = +a466;
+ a467 = +a467;
+ a468 = +a468;
+ a469 = +a469;
+ a470 = +a470;
+ a471 = +a471;
+ a472 = +a472;
+ a473 = +a473;
+ a474 = +a474;
+ a475 = +a475;
+ a476 = +a476;
+ a477 = +a477;
+ a478 = +a478;
+ a479 = +a479;
+ a480 = +a480;
+ a481 = +a481;
+ a482 = +a482;
+ a483 = +a483;
+ a484 = +a484;
+ a485 = +a485;
+ a486 = +a486;
+ a487 = +a487;
+ a488 = +a488;
+ a489 = +a489;
+ a490 = +a490;
+ a491 = +a491;
+ a492 = +a492;
+ a493 = +a493;
+ a494 = +a494;
+ a495 = +a495;
+ a496 = +a496;
+ a497 = +a497;
+ a498 = +a498;
+ a499 = +a499;
+ a500 = +a500;
+ a501 = +a501;
+ a502 = +a502;
+ a503 = +a503;
+ a504 = +a504;
+ a505 = +a505;
+ a506 = +a506;
+ a507 = +a507;
+ a508 = +a508;
+ a509 = +a509;
+ a510 = +a510;
+ a511 = +a511;
+ a512 = +a512;
+ a513 = +a513;
+ a514 = +a514;
+ a515 = +a515;
+ a516 = +a516;
+ a517 = +a517;
+ a518 = +a518;
+ a519 = +a519;
+ a520 = +a520;
+ a521 = +a521;
+ a522 = +a522;
+ a523 = +a523;
+ a524 = +a524;
+ a525 = +a525;
+ a526 = +a526;
+ a527 = +a527;
+ a528 = +a528;
+ a529 = +a529;
+ a530 = +a530;
+ a531 = +a531;
+ a532 = +a532;
+ a533 = +a533;
+ a534 = +a534;
+ a535 = +a535;
+ a536 = +a536;
+ a537 = +a537;
+ a538 = +a538;
+ a539 = +a539;
+ a540 = +a540;
+ a541 = +a541;
+ a542 = +a542;
+ a543 = +a543;
+ a544 = +a544;
+ a545 = +a545;
+ a546 = +a546;
+ a547 = +a547;
+ a548 = +a548;
+ a549 = +a549;
+ a550 = +a550;
+ a551 = +a551;
+ a552 = +a552;
+ a553 = +a553;
+ a554 = +a554;
+ a555 = +a555;
+ a556 = +a556;
+ a557 = +a557;
+ a558 = +a558;
+ a559 = +a559;
+ a560 = +a560;
+ a561 = +a561;
+ a562 = +a562;
+ a563 = +a563;
+ a564 = +a564;
+ a565 = +a565;
+ a566 = +a566;
+ a567 = +a567;
+ a568 = +a568;
+ a569 = +a569;
+ a570 = +a570;
+ a571 = +a571;
+ a572 = +a572;
+ a573 = +a573;
+ a574 = +a574;
+ a575 = +a575;
+ a576 = +a576;
+ a577 = +a577;
+ a578 = +a578;
+ a579 = +a579;
+ a580 = +a580;
+ a581 = +a581;
+ a582 = +a582;
+ a583 = +a583;
+ a584 = +a584;
+ a585 = +a585;
+ a586 = +a586;
+ a587 = +a587;
+ a588 = +a588;
+ a589 = +a589;
+ a590 = +a590;
+ a591 = +a591;
+ a592 = +a592;
+ a593 = +a593;
+ a594 = +a594;
+ a595 = +a595;
+ a596 = +a596;
+ a597 = +a597;
+ a598 = +a598;
+ a599 = +a599;
+ a600 = +a600;
+ a601 = +a601;
+ a602 = +a602;
+ a603 = +a603;
+ a604 = +a604;
+ a605 = +a605;
+ a606 = +a606;
+ a607 = +a607;
+ a608 = +a608;
+ a609 = +a609;
+ a610 = +a610;
+ a611 = +a611;
+ a612 = +a612;
+ a613 = +a613;
+ a614 = +a614;
+ a615 = +a615;
+ a616 = +a616;
+ a617 = +a617;
+ a618 = +a618;
+ a619 = +a619;
+ a620 = +a620;
+ a621 = +a621;
+ a622 = +a622;
+ a623 = +a623;
+ a624 = +a624;
+ a625 = +a625;
+ a626 = +a626;
+ a627 = +a627;
+ a628 = +a628;
+ a629 = +a629;
+ a630 = +a630;
+ a631 = +a631;
+ a632 = +a632;
+ a633 = +a633;
+ a634 = +a634;
+ a635 = +a635;
+ a636 = +a636;
+ a637 = +a637;
+ a638 = +a638;
+ a639 = +a639;
+ a640 = +a640;
+ a641 = +a641;
+ a642 = +a642;
+ a643 = +a643;
+ a644 = +a644;
+ a645 = +a645;
+ a646 = +a646;
+ a647 = +a647;
+ a648 = +a648;
+ a649 = +a649;
+ a650 = +a650;
+ a651 = +a651;
+ a652 = +a652;
+ a653 = +a653;
+ a654 = +a654;
+ a655 = +a655;
+ a656 = +a656;
+ a657 = +a657;
+ a658 = +a658;
+ a659 = +a659;
+ a660 = +a660;
+ a661 = +a661;
+ a662 = +a662;
+ a663 = +a663;
+ a664 = +a664;
+ a665 = +a665;
+ a666 = +a666;
+ a667 = +a667;
+ a668 = +a668;
+ a669 = +a669;
+ a670 = +a670;
+ a671 = +a671;
+ a672 = +a672;
+ a673 = +a673;
+ a674 = +a674;
+ a675 = +a675;
+ a676 = +a676;
+ a677 = +a677;
+ a678 = +a678;
+ a679 = +a679;
+ a680 = +a680;
+ a681 = +a681;
+ a682 = +a682;
+ a683 = +a683;
+ a684 = +a684;
+ a685 = +a685;
+ a686 = +a686;
+ a687 = +a687;
+ a688 = +a688;
+ a689 = +a689;
+ a690 = +a690;
+ a691 = +a691;
+ a692 = +a692;
+ a693 = +a693;
+ a694 = +a694;
+ a695 = +a695;
+ a696 = +a696;
+ a697 = +a697;
+ a698 = +a698;
+ a699 = +a699;
+ a700 = +a700;
+ a701 = +a701;
+ a702 = +a702;
+ a703 = +a703;
+ a704 = +a704;
+ a705 = +a705;
+ a706 = +a706;
+ a707 = +a707;
+ a708 = +a708;
+ a709 = +a709;
+ a710 = +a710;
+ a711 = +a711;
+ a712 = +a712;
+ a713 = +a713;
+ a714 = +a714;
+ a715 = +a715;
+ a716 = +a716;
+ a717 = +a717;
+ a718 = +a718;
+ a719 = +a719;
+ a720 = +a720;
+ a721 = +a721;
+ a722 = +a722;
+ a723 = +a723;
+ a724 = +a724;
+ a725 = +a725;
+ a726 = +a726;
+ a727 = +a727;
+ a728 = +a728;
+ a729 = +a729;
+ a730 = +a730;
+ a731 = +a731;
+ a732 = +a732;
+ a733 = +a733;
+ a734 = +a734;
+ a735 = +a735;
+ a736 = +a736;
+ a737 = +a737;
+ a738 = +a738;
+ a739 = +a739;
+ a740 = +a740;
+ a741 = +a741;
+ a742 = +a742;
+ a743 = +a743;
+ a744 = +a744;
+ a745 = +a745;
+ a746 = +a746;
+ a747 = +a747;
+ a748 = +a748;
+ a749 = +a749;
+ a750 = +a750;
+ a751 = +a751;
+ a752 = +a752;
+ a753 = +a753;
+ a754 = +a754;
+ a755 = +a755;
+ a756 = +a756;
+ a757 = +a757;
+ a758 = +a758;
+ a759 = +a759;
+ a760 = +a760;
+ a761 = +a761;
+ a762 = +a762;
+ a763 = +a763;
+ a764 = +a764;
+ a765 = +a765;
+ a766 = +a766;
+ a767 = +a767;
+ a768 = +a768;
+ a769 = +a769;
+ a770 = +a770;
+ a771 = +a771;
+ a772 = +a772;
+ a773 = +a773;
+ a774 = +a774;
+ a775 = +a775;
+ a776 = +a776;
+ a777 = +a777;
+ a778 = +a778;
+ a779 = +a779;
+ a780 = +a780;
+ a781 = +a781;
+ a782 = +a782;
+ a783 = +a783;
+ a784 = +a784;
+ a785 = +a785;
+ a786 = +a786;
+ a787 = +a787;
+ a788 = +a788;
+ a789 = +a789;
+ a790 = +a790;
+ a791 = +a791;
+ a792 = +a792;
+ a793 = +a793;
+ a794 = +a794;
+ a795 = +a795;
+ a796 = +a796;
+ a797 = +a797;
+ a798 = +a798;
+ a799 = +a799;
+ a800 = +a800;
+ a801 = +a801;
+ a802 = +a802;
+ a803 = +a803;
+ a804 = +a804;
+ a805 = +a805;
+ a806 = +a806;
+ a807 = +a807;
+ a808 = +a808;
+ a809 = +a809;
+ a810 = +a810;
+ a811 = +a811;
+ a812 = +a812;
+ a813 = +a813;
+ a814 = +a814;
+ a815 = +a815;
+ a816 = +a816;
+ a817 = +a817;
+ a818 = +a818;
+ a819 = +a819;
+ a820 = +a820;
+ a821 = +a821;
+ a822 = +a822;
+ a823 = +a823;
+ a824 = +a824;
+ a825 = +a825;
+ a826 = +a826;
+ a827 = +a827;
+ a828 = +a828;
+ a829 = +a829;
+ a830 = +a830;
+ a831 = +a831;
+ a832 = +a832;
+ a833 = +a833;
+ a834 = +a834;
+ a835 = +a835;
+ a836 = +a836;
+ a837 = +a837;
+ a838 = +a838;
+ a839 = +a839;
+ a840 = +a840;
+ a841 = +a841;
+ a842 = +a842;
+ a843 = +a843;
+ a844 = +a844;
+ a845 = +a845;
+ a846 = +a846;
+ a847 = +a847;
+ a848 = +a848;
+ a849 = +a849;
+ a850 = +a850;
+ a851 = +a851;
+ a852 = +a852;
+ a853 = +a853;
+ a854 = +a854;
+ a855 = +a855;
+ a856 = +a856;
+ a857 = +a857;
+ a858 = +a858;
+ a859 = +a859;
+ a860 = +a860;
+ a861 = +a861;
+ a862 = +a862;
+ a863 = +a863;
+ a864 = +a864;
+ a865 = +a865;
+ a866 = +a866;
+ a867 = +a867;
+ a868 = +a868;
+ a869 = +a869;
+ a870 = +a870;
+ a871 = +a871;
+ a872 = +a872;
+ a873 = +a873;
+ a874 = +a874;
+ a875 = +a875;
+ a876 = +a876;
+ a877 = +a877;
+ a878 = +a878;
+ a879 = +a879;
+ a880 = +a880;
+ a881 = +a881;
+ a882 = +a882;
+ a883 = +a883;
+ a884 = +a884;
+ a885 = +a885;
+ a886 = +a886;
+ a887 = +a887;
+ a888 = +a888;
+ a889 = +a889;
+ a890 = +a890;
+ a891 = +a891;
+ a892 = +a892;
+ a893 = +a893;
+ a894 = +a894;
+ a895 = +a895;
+ a896 = +a896;
+ a897 = +a897;
+ a898 = +a898;
+ a899 = +a899;
+ a900 = +a900;
+ a901 = +a901;
+ a902 = +a902;
+ a903 = +a903;
+ a904 = +a904;
+ a905 = +a905;
+ a906 = +a906;
+ a907 = +a907;
+ a908 = +a908;
+ a909 = +a909;
+ a910 = +a910;
+ a911 = +a911;
+ a912 = +a912;
+ a913 = +a913;
+ a914 = +a914;
+ a915 = +a915;
+ a916 = +a916;
+ a917 = +a917;
+ a918 = +a918;
+ a919 = +a919;
+ a920 = +a920;
+ a921 = +a921;
+ a922 = +a922;
+ a923 = +a923;
+ a924 = +a924;
+ a925 = +a925;
+ a926 = +a926;
+ a927 = +a927;
+ a928 = +a928;
+ a929 = +a929;
+ a930 = +a930;
+ a931 = +a931;
+ a932 = +a932;
+ a933 = +a933;
+ a934 = +a934;
+ a935 = +a935;
+ a936 = +a936;
+ a937 = +a937;
+ a938 = +a938;
+ a939 = +a939;
+ a940 = +a940;
+ a941 = +a941;
+ a942 = +a942;
+ a943 = +a943;
+ a944 = +a944;
+ a945 = +a945;
+ a946 = +a946;
+ a947 = +a947;
+ a948 = +a948;
+ a949 = +a949;
+ a950 = +a950;
+ a951 = +a951;
+ a952 = +a952;
+ a953 = +a953;
+ a954 = +a954;
+ a955 = +a955;
+ a956 = +a956;
+ a957 = +a957;
+ a958 = +a958;
+ a959 = +a959;
+ a960 = +a960;
+ a961 = +a961;
+ a962 = +a962;
+ a963 = +a963;
+ a964 = +a964;
+ a965 = +a965;
+ a966 = +a966;
+ a967 = +a967;
+ a968 = +a968;
+ a969 = +a969;
+ a970 = +a970;
+ a971 = +a971;
+ a972 = +a972;
+ a973 = +a973;
+ a974 = +a974;
+ a975 = +a975;
+ a976 = +a976;
+ a977 = +a977;
+ a978 = +a978;
+ a979 = +a979;
+ a980 = +a980;
+ a981 = +a981;
+ a982 = +a982;
+ a983 = +a983;
+ a984 = +a984;
+ a985 = +a985;
+ a986 = +a986;
+ a987 = +a987;
+ a988 = +a988;
+ a989 = +a989;
+ a990 = +a990;
+ a991 = +a991;
+ a992 = +a992;
+ a993 = +a993;
+ a994 = +a994;
+ a995 = +a995;
+ a996 = +a996;
+ a997 = +a997;
+ a998 = +a998;
+ a999 = +a999;
+ a1000 = +a1000;
+ a1001 = +a1001;
+ a1002 = +a1002;
+ a1003 = +a1003;
+ a1004 = +a1004;
+ a1005 = +a1005;
+ return 10;
+ }
+ function bar() {
+ return foo(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 1.0) |
+ 0;
+ }
+ return bar
+})()();
diff --git a/deps/v8/test/mjsunit/samevalue.js b/deps/v8/test/mjsunit/samevalue.js
index 356e888016..1e5384d73d 100644
--- a/deps/v8/test/mjsunit/samevalue.js
+++ b/deps/v8/test/mjsunit/samevalue.js
@@ -32,8 +32,11 @@
var obj1 = {x: 10, y: 11, z: "test"};
var obj2 = {x: 10, y: 11, z: "test"};
+// Object.is() uses the SameValue algorithm.
var sameValue = Object.is;
-var sameValueZero = function(x, y) { return %SameValueZero(x, y); }
+
+// Set#has() uses the SameValueZero algorithm.
+var sameValueZero = (x, y) => new Set([x]).has(y);
// Calls SameValue and SameValueZero and checks that their results match.
function sameValueBoth(a, b) {
diff --git a/deps/v8/test/mjsunit/stack-traces-class-fields.js b/deps/v8/test/mjsunit/stack-traces-class-fields.js
new file mode 100644
index 0000000000..84d7e8a843
--- /dev/null
+++ b/deps/v8/test/mjsunit/stack-traces-class-fields.js
@@ -0,0 +1,246 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-class-fields
+
+// Utility function for testing that the expected strings occur
+// in the stack trace produced when running the given function.
+function testTrace(name, fun, expected, unexpected) {
+ var threw = false;
+ try {
+ fun();
+ } catch (e) {
+ for (var i = 0; i < expected.length; i++) {
+ assertTrue(
+ e.stack.indexOf(expected[i]) != -1,
+ name + " doesn't contain expected[" + i + "] stack = " + e.stack
+ );
+ }
+ if (unexpected) {
+ for (var i = 0; i < unexpected.length; i++) {
+ assertEquals(
+ e.stack.indexOf(unexpected[i]),
+ -1,
+ name + " contains unexpected[" + i + "]"
+ );
+ }
+ }
+ threw = true;
+ }
+ assertTrue(threw, name + " didn't throw");
+}
+
+function thrower() {
+ FAIL;
+}
+
+function testClassConstruction() {
+ class X {
+ static x = thrower();
+ }
+}
+
+// ReferenceError: FAIL is not defined
+// at thrower
+// at <static_fields_initializer>
+// at testClassConstruction
+// at testTrace
+testTrace(
+ "during class construction",
+ testClassConstruction,
+ ["thrower", "<static_fields_initializer>"],
+ ["anonymous"]
+);
+
+function testClassConstruction2() {
+ class X {
+ [thrower()];
+ }
+}
+
+// ReferenceError: FAIL is not defined
+// at thrower
+// at testClassConstruction2
+// at testTrace
+testTrace("during class construction2", testClassConstruction2, ["thrower"]);
+
+function testClassInstantiation() {
+ class X {
+ x = thrower();
+ }
+
+ new X();
+}
+
+// ReferenceError: FAIL is not defined
+// at thrower
+// at X.<instance_fields_initializer>
+// at new X
+// at testClassInstantiation
+// at testTrace
+testTrace(
+ "during class instantiation",
+ testClassInstantiation,
+ ["thrower", "X.<instance_fields_initializer>", "new X"],
+ ["anonymous"]
+);
+
+function testClassInstantiationWithSuper() {
+ class Base {}
+
+ class X extends Base {
+ x = thrower();
+ }
+
+ new X();
+}
+
+// ReferenceError: FAIL is not defined
+// at thrower
+// at X.<instance_fields_initializer>
+// at new X
+// at testClassInstantiationWithSuper
+// at testTrace
+testTrace(
+ "during class instantiation with super",
+ testClassInstantiationWithSuper,
+ ["thrower", "X.<instance_fields_initializer>", "new X"],
+ ["Base", "anonymous"]
+);
+
+function testClassInstantiationWithSuper2() {
+ class Base {}
+
+ class X extends Base {
+ constructor() {
+ super();
+ }
+ x = thrower();
+ }
+
+ new X();
+}
+
+// ReferenceError: FAIL is not defined
+// at thrower
+// at X.<instance_fields_initializer>
+// at new X
+// at testClassInstantiationWithSuper2
+// at testTrace
+testTrace(
+ "during class instantiation with super2",
+ testClassInstantiationWithSuper2,
+ ["thrower", "X.<instance_fields_initializer>", "new X"],
+ ["Base", "anonymous"]
+);
+
+function testClassInstantiationWithSuper3() {
+ class Base {
+ x = thrower();
+ }
+
+ class X extends Base {
+ constructor() {
+ super();
+ }
+ }
+
+ new X();
+}
+
+// ReferenceError: FAIL is not defined
+// at thrower
+// at X.<instance_fields_initializer>
+// at new Base
+// at new X
+// at testClassInstantiationWithSuper3
+// at testTrace
+testTrace(
+ "during class instantiation with super3",
+ testClassInstantiationWithSuper3,
+ ["thrower", "X.<instance_fields_initializer>", "new Base", "new X"],
+ ["anonymous"]
+);
+
+function testClassFieldCall() {
+ class X {
+ x = thrower;
+ }
+
+ let x = new X();
+ x.x();
+}
+
+// ReferenceError: FAIL is not defined
+// at X.thrower [as x]
+// at testClassFieldCall
+// at testTrace
+testTrace(
+ "during class field call",
+ testClassFieldCall,
+ ["X.thrower"],
+ ["anonymous"]
+);
+
+function testStaticClassFieldCall() {
+ class X {
+ static x = thrower;
+ }
+
+ X.x();
+}
+
+// ReferenceError: FAIL is not defined
+// at Function.thrower [as x]
+// at testStaticClassFieldCall
+// at testTrace
+testTrace(
+ "during static class field call",
+ testStaticClassFieldCall,
+ ["Function.thrower"],
+ ["anonymous"]
+);
+
+function testClassFieldCallWithFNI() {
+ class X {
+ x = function() {
+ FAIL;
+ };
+ }
+
+ let x = new X();
+ x.x();
+}
+
+// ReferenceError: FAIL is not defined
+// at X.x
+// at testClassFieldCallWithFNI
+// at testTrace
+testTrace(
+ "during class field call with FNI",
+ testClassFieldCallWithFNI,
+ ["X.x"],
+ ["anonymous"]
+);
+
+function testStaticClassFieldCallWithFNI() {
+ class X {
+ static x = function() {
+ FAIL;
+ };
+ }
+
+ X.x();
+}
+
+// ReferenceError: FAIL is not defined
+// at Function.x
+// at testStaticClassFieldCallWithFNI
+// at testTrace
+testTrace(
+ "during static class field call with FNI",
+ testStaticClassFieldCallWithFNI,
+ ["Function.x"],
+ ["anonymous"]
+);
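As a minimal sketch of what these tests exercise (hypothetical, assuming d8
with --harmony-class-fields), a throwing static field initializer surfaces in
the trace as the synthesized <static_fields_initializer> frame checked above:

  try {
    class C { static f = (() => { throw new Error('boom'); })(); }
  } catch (e) {
    print(e.stack);  // contains "at <static_fields_initializer>"
  }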
diff --git a/deps/v8/test/mjsunit/string-trim.js b/deps/v8/test/mjsunit/string-trim.js
index 201a34f1c9..587e7db5db 100644
--- a/deps/v8/test/mjsunit/string-trim.js
+++ b/deps/v8/test/mjsunit/string-trim.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-string-trimming
-
assertEquals('trim', String.prototype.trim.name);
assertEquals('trimStart', String.prototype.trimStart.name);
assertEquals('trimStart', String.prototype.trimLeft.name);
diff --git a/deps/v8/test/mjsunit/test-async.js b/deps/v8/test/mjsunit/test-async.js
index f8a11c5238..d4fee9bfb9 100644
--- a/deps/v8/test/mjsunit/test-async.js
+++ b/deps/v8/test/mjsunit/test-async.js
@@ -53,7 +53,7 @@ var testAsync;
equals(expected, found, name_opt) {
this.actualAsserts_++;
- if (expected !== found) {
+ if (!deepEquals(expected, found)) {
this.fail(prettyPrinted(expected), found, name_opt);
}
}
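The switch from !== to deepEquals (the structural comparison helper the
harness already uses for assertEquals) lets async tests assert on arrays and
objects, not only primitives. For illustration:

  [1, 2] === [1, 2];  // false: distinct objects are never strictly equal
  // whereas deepEquals([1, 2], [1, 2]) compares element-wise and yields true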
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index d843cfaf02..422210365e 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -41,21 +41,6 @@ SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
NO_HARNESS_PATTERN = re.compile(r"^// NO HARNESS$", flags=re.MULTILINE)
-# Patterns for additional resource files on Android. Files that are not covered
-# by one of the other patterns below will be specified in the resources section.
-RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
-# Pattern to auto-detect files to push on Android for statements like:
-# load("path/to/file.js")
-LOAD_PATTERN = re.compile(
- r"(?:load|readbuffer|read)\((?:'|\")([^'\"]*)(?:'|\")\)")
-# Pattern to auto-detect files to push on Android for statements like:
-# import "path/to/file.js"
-MODULE_RESOURCES_PATTERN_1 = re.compile(
- r"(?:import|export)(?:\(| )(?:'|\")([^'\"]*)(?:'|\")")
-# Pattern to auto-detect files to push on Android for statements like:
-# import foobar from "path/to/file.js"
-MODULE_RESOURCES_PATTERN_2 = re.compile(
- r"(?:import|export).*from (?:'|\")([^'\"]*)(?:'|\")")
# Flags known to misbehave when combining arbitrary mjsunit tests. Can also
# be compiled regular expressions.
@@ -100,7 +85,7 @@ class TestSuite(testsuite.TestSuite):
return SuppressedTestCase
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
@@ -140,47 +125,6 @@ class TestCase(testcase.TestCase):
self._files_suffix = files_suffix
self._env = self._parse_source_env(source)
- def _get_resources_for_file(self, file):
- """Returns for a given file a list of absolute paths of files needed by the
- given file.
- """
- with open(file) as f:
- source = f.read()
- result = []
- def add_path(path):
- result.append(os.path.abspath(path.replace('/', os.path.sep)))
- for match in RESOURCES_PATTERN.finditer(source):
- # There are several resources per line. Relative to base dir.
- for path in match.group(1).strip().split():
- add_path(path)
- for match in LOAD_PATTERN.finditer(source):
- # Files in load statements are relative to base dir.
- add_path(match.group(1))
- for match in MODULE_RESOURCES_PATTERN_1.finditer(source):
- # Imported files are side by side with the test case.
- add_path(os.path.join(
- self.suite.root, os.path.dirname(self.path), match.group(1)))
- for match in MODULE_RESOURCES_PATTERN_2.finditer(source):
- # Imported files are side by side with the test case.
- add_path(os.path.join(
- self.suite.root, os.path.dirname(self.path), match.group(1)))
- return result
-
- def _get_resources(self):
- """Returns the list of files needed by a test case."""
- result = set()
- to_check = [self._get_source_path()]
- # Recurse over all files until reaching a fixpoint.
- while to_check:
- next_resource = to_check.pop()
- result.add(next_resource)
- for resource in self._get_resources_for_file(next_resource):
- # Only add files that exist on disc. The pattens we check for give some
- # false positives otherwise.
- if resource not in result and os.path.exists(resource):
- to_check.append(resource)
- return sorted(list(result))
-
def _parse_source_env(self, source):
env_match = ENV_PATTERN.search(source)
env = {}
@@ -244,7 +188,7 @@ class TestCombiner(testsuite.TestCombiner):
return CombinedTest
-class CombinedTest(testcase.TestCase):
+class CombinedTest(testcase.D8TestCase):
"""Behaves like normal mjsunit tests except:
1. Expected outcome is always PASS
2. Instead of one file there is a try-catch wrapper with all combined tests
@@ -259,7 +203,7 @@ class CombinedTest(testcase.TestCase):
self._statusfile_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
self.expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
- def _get_shell_with_flags(self):
+ def _get_shell_flags(self):
"""In addition to standard set of shell flags it appends:
--disable-abortjs: %AbortJS can abort the test even inside
trycatch-wrapper, so we disable it.
@@ -269,15 +213,13 @@ class CombinedTest(testcase.TestCase):
--quiet-load: suppress any stdout from load() function used by
trycatch-wrapper.
"""
- shell = 'd8'
- shell_flags = [
+ return [
'--test',
'--disable-abortjs',
'--es-staging',
'--omit-quit',
'--quiet-load',
]
- return shell, shell_flags
def _get_cmd_params(self):
return (
diff --git a/deps/v8/test/mjsunit/typeof.js b/deps/v8/test/mjsunit/typeof.js
index 864f1cfdb6..f6c47f8e2e 100644
--- a/deps/v8/test/mjsunit/typeof.js
+++ b/deps/v8/test/mjsunit/typeof.js
@@ -1,39 +1,42 @@
// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
// The type of a regular expression should be 'object', including in
// the context of string equality comparisons.
+{
+ const r = new RegExp;
-var r = new RegExp;
-assertEquals('object', typeof r);
-assertTrue(typeof r == 'object');
-assertFalse(typeof r == 'function');
+ assertEquals('object', typeof r);
+ assertTrue(typeof r == 'object');
+ assertFalse(typeof r == 'function');
-function test(x, y) { return x == y; }
-assertTrue(test('object', typeof r));
+ function equals(x, y) { return x == y; }
+ assertTrue(equals('object', typeof r));
+}
assertFalse(typeof null == "undefined");
+
+assertEquals('undefined', typeof undefined);
+assertEquals('object', typeof null);
+assertEquals('boolean', typeof true);
+assertEquals('boolean', typeof false);
+assertEquals('number', typeof 42.42);
+assertEquals('number', typeof 42);
+assertEquals('bigint', typeof 42n);
+assertEquals('string', typeof '42');
+assertEquals('symbol', typeof Symbol(42));
+assertEquals('object', typeof {});
+assertEquals('object', typeof []);
+assertEquals('object', typeof new Proxy({}, {}));
+assertEquals('object', typeof new Proxy([], {}));
+assertEquals('function', typeof (_ => 42));
+assertEquals('function', typeof function() {});
+assertEquals('function', typeof function*() {});
+assertEquals('function', typeof async function() {});
+assertEquals('function', typeof async function*() {});
+assertEquals('function', typeof new Proxy(_ => 42, {}));
+assertEquals('function', typeof class {});
+assertEquals('function', typeof Object);
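The Proxy assertions above follow from typeof not being trappable: a proxy
reports 'function' exactly when its target is callable, regardless of the
handler. For example:

  typeof new Proxy(function() {}, {});   // 'function' (callable target)
  typeof new Proxy({}, { apply() {} });  // 'object' (handler cannot change it)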
diff --git a/deps/v8/test/mjsunit/wasm/anyref.js b/deps/v8/test/mjsunit/wasm/anyref.js
index 6282b588ac..e6306b6a9e 100644
--- a/deps/v8/test/mjsunit/wasm/anyref.js
+++ b/deps/v8/test/mjsunit/wasm/anyref.js
@@ -76,7 +76,6 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([kExprRefNull])
.exportFunc();
-
const instance = builder.instantiate();
assertEquals(null, instance.exports.main());
@@ -98,5 +97,17 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(0, instance.exports.main(undefined));
assertEquals(1, instance.exports.main(null));
assertEquals(0, instance.exports.main(print));
+})();
+(function testAnyRefLocalDefaultValue() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('main', kSig_r_v)
+ .addBody([kExprGetLocal, 0])
+ .addLocals({anyref_count: 1})
+ .exportFunc();
+
+ const instance = builder.instantiate();
+
+ assertEquals(null, instance.exports.main());
})();
diff --git a/deps/v8/test/mjsunit/wasm/atomics.js b/deps/v8/test/mjsunit/wasm/atomics.js
index 58d3d950d5..371839ae24 100644
--- a/deps/v8/test/mjsunit/wasm/atomics.js
+++ b/deps/v8/test/mjsunit/wasm/atomics.js
@@ -432,3 +432,33 @@ function TestStore(func, buffer, value, size) {
assertThrows(() => GetAtomicBinOpFunction(kExprI32AtomicSub16U, 3, 0),
WebAssembly.CompileError);
})();
+
+function CmpExchgLoop(opcode, alignment) {
+ print("TestI64AtomicCompareExchangeLoop" + alignment);
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "imported_mem", 0, 2, "shared");
+ builder.addFunction("main", makeSig([kWasmI32], []))
+ .addLocals({i64_count: 2})
+ .addBody([
+ kExprLoop, kWasmStmt,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kAtomicPrefix, opcode, alignment, 0,
+ kExprGetLocal, 1,
+ kExprI64Ne,
+ kExprBrIf, 0,
+ kExprEnd
+ ])
+ .exportFunc();
+ let mem = new WebAssembly.Memory({initial: 2, maximum: 2, shared: true});
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module, {m: {imported_mem: mem}});
+}
+
+(function TestAtomicCompareExchgLoop() {
+ CmpExchgLoop(kExprI64AtomicCompareExchange, 3);
+ CmpExchgLoop(kExprI64AtomicCompareExchange32U, 2);
+ CmpExchgLoop(kExprI64AtomicCompareExchange16U, 1);
+ CmpExchgLoop(kExprI64AtomicCompareExchange8U, 0);
+})();
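The wasm body above is the classic compare-and-swap retry loop. A rough
JavaScript analogue using Atomics, as a sketch for intuition rather than part
of the test:

  function casLoop(i32, index, expected, replacement) {
    // Atomics.compareExchange returns the previous value of the cell;
    // retry until it matches what we expected to find there.
    while (Atomics.compareExchange(i32, index, expected, replacement) !== expected) {}
  }
  casLoop(new Int32Array(new SharedArrayBuffer(4)), 0, 0, 42);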
diff --git a/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js b/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
index 9308393da4..d909bcc542 100644
--- a/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
+++ b/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
@@ -6,7 +6,7 @@ load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
-builder.addMemory(1, kV8MaxPages, false);
+builder.addMemory(1, undefined, false);
builder.addFunction('load', kSig_i_ii)
.addBody([
kExprGetLocal, 0,
diff --git a/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js b/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
new file mode 100644
index 0000000000..d308919088
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
@@ -0,0 +1,207 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+const kSequenceLength = 8192;
+const kNumberOfWorkers = 4;
+const kBitMask = kNumberOfWorkers - 1;
+const kSequenceStartAddress = 32;
+
+function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
+ builder) {
+ let loadMemOpcode = kTrapUnreachable;
+ switch (size) {
+ case 32:
+ loadMemOpcode = kExprI32LoadMem;
+ break;
+ case 16:
+ loadMemOpcode = kExprI32LoadMem16U;
+ break;
+ case 8:
+ loadMemOpcode = kExprI32LoadMem8U;
+ break;
+ default:
+ throw "!";
+ }
+ const kArgMemoryCell = 0; // target for atomic ops
+ const kArgSequencePtr = 1; // address of sequence
+  const kArgSequenceLength = 2; // length of sequence
+ const kArgWorkerId = 3; // id of this worker
+ const kArgBitMask = 4; // mask to extract worker id from value
+ const kLocalCurrentOffset = 5; // current position in sequence in bytes
+ const kLocalExpectedValue = 6; // the value we are waiting for
+ const kLocalNextValue = 7; // the value to write in the update
+ let body = [
+ // Turn sequence length to equivalent in bytes.
+    kExprGetLocal, kArgSequenceLength,
+    kExprI32Const, size / 8,
+    kExprI32Mul,
+    kExprSetLocal, kArgSequenceLength,
+    // Outer block so we have something to jump to for returning.
+ ...[kExprBlock, kWasmStmt,
+ // Set counter to 0.
+ kExprI32Const, 0,
+ kExprSetLocal, kLocalCurrentOffset,
+ // Outer loop until maxcount.
+ ...[kExprLoop, kWasmStmt,
+ // Find the next value to wait for.
+ ...[kExprLoop, kWasmStmt,
+ // Check end of sequence.
+ kExprGetLocal, kLocalCurrentOffset,
+          kExprGetLocal, kArgSequenceLength,
+ kExprI32Eq,
+ kExprBrIf, 2, // return
+ ...[kExprBlock, kWasmStmt,
+ // Load next value.
+ kExprGetLocal, kArgSequencePtr,
+ kExprGetLocal, kLocalCurrentOffset,
+ kExprI32Add,
+ loadMemOpcode, 0, 0,
+ // Mask off bits.
+ kExprGetLocal, kArgBitMask,
+ kExprI32And,
+ // Compare with worker id.
+ kExprGetLocal, kArgWorkerId,
+ kExprI32Eq,
+ kExprBrIf, 0,
+ // Not found, increment position.
+ kExprGetLocal, kLocalCurrentOffset,
+ kExprI32Const, size / 8,
+ kExprI32Add,
+ kExprSetLocal, kLocalCurrentOffset,
+ kExprBr, 1,
+ kExprEnd
+ ],
+ // Found, end loop.
+ kExprEnd
+ ],
+ // Load expected value to local.
+ kExprGetLocal, kArgSequencePtr,
+ kExprGetLocal, kLocalCurrentOffset,
+ kExprI32Add,
+ loadMemOpcode, 0, 0,
+ kExprSetLocal, kLocalExpectedValue,
+ // Load value after expected one.
+ kExprGetLocal, kArgSequencePtr,
+ kExprGetLocal, kLocalCurrentOffset,
+ kExprI32Add,
+ kExprI32Const, size / 8,
+ kExprI32Add,
+ loadMemOpcode, 0, 0,
+ kExprSetLocal, kLocalNextValue,
+ // Hammer on memory until value found.
+ ...[kExprLoop, kWasmStmt,
+ // Load address.
+ kExprGetLocal, kArgMemoryCell,
+ // Load expected value.
+ kExprGetLocal, kLocalExpectedValue,
+ // Load updated value.
+ kExprGetLocal, kLocalNextValue,
+ // Try update.
+ kAtomicPrefix, compareExchangeOpcode, 0, 0,
+ // Load expected value.
+ kExprGetLocal, kLocalExpectedValue,
+ // Spin if not what expected.
+ kExprI32Ne,
+ kExprBrIf, 0,
+ kExprEnd
+ ],
+ // Next iteration of loop.
+ kExprGetLocal, kLocalCurrentOffset,
+ kExprI32Const, size / 8,
+ kExprI32Add,
+ kExprSetLocal, kLocalCurrentOffset,
+ kExprBr, 0,
+ kExprEnd
+ ], // outer loop
+ kExprEnd
+ ], // the block
+ kExprReturn
+ ];
+ builder.addFunction(functionName, makeSig([kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32
+ ], []))
+ .addLocals({
+ i32_count: 3
+ })
+ .addBody(body)
+ .exportAs(functionName);
+}
+
+function generateSequence(typedarray, start, count) {
+ let end = count + start;
+ for (let i = start; i < end; i++) {
+ typedarray[i] = Math.floor(Math.random() * 256);
+ }
+}
+
+function spawnWorker(module, memory, address, sequence) {
+ let workers = [];
+ for (let i = 0; i < kNumberOfWorkers; i++) {
+ let worker = new Worker(
+ `onmessage = function(msg) {
+ this.instance = new WebAssembly.Instance(msg.module,
+ {m: {imported_mem: msg.memory}});
+ instance.exports.worker(msg.address, msg.sequence, msg.sequenceLength, msg.workerId,
+ msg.bitMask);
+ postMessage({workerId: msg.workerId});
+ }`,
+ {type: 'string'}
+ );
+ workers.push(worker);
+ worker.postMessage({
+ module: module,
+ memory: memory,
+ address: address,
+ sequence: sequence,
+ sequenceLength: kSequenceLength,
+ workerId: i,
+ bitMask: kBitMask
+ });
+ }
+ return workers;
+}
+
+function waitForWorkers(workers) {
+ for (let worker of workers) {
+ worker.getMessage();
+ worker.terminate();
+ }
+}
+
+function testOpcode(opcode, opcodeSize) {
+ print("Testing I32AtomicCompareExchange" + opcodeSize);
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "imported_mem", 0, 1, "shared");
+
+ makeWorkerCodeForOpcode(opcode, opcodeSize, "worker", builder);
+
+ let memory = new WebAssembly.Memory({
+ initial: 1,
+ maximum: 1,
+ shared: true
+ });
+ let memoryView = new Uint8Array(memory.buffer);
+ generateSequence(memoryView, kSequenceStartAddress, kSequenceLength * (opcodeSize / 8));
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let workers = spawnWorker(module, memory, 0, kSequenceStartAddress);
+
+  // Release the workers by copying the first sequence value into the cell.
+ for (let i = opcodeSize / 8 - 1; i >= 0; i--) {
+ memoryView[i] = memoryView[kSequenceStartAddress + i];
+ }
+
+ waitForWorkers(workers);
+
+ print("DONE");
+}
+
+testOpcode(kExprI32AtomicCompareExchange, 32);
+testOpcode(kExprI32AtomicCompareExchange16U, 16);
+testOpcode(kExprI32AtomicCompareExchange8U, 8);
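The test's design: the cell at address 0 carries a baton between workers. Each
entry of the random sequence is owned by the worker whose id equals
(value & kBitMask); a worker scans for its next owned entry, spins with
compare-exchange until that value appears in the cell, and installs the entry
that follows it. A hypothetical one-step sketch in JavaScript (assuming cell
is a typed-array view of the shared cell and seq holds the sequence values):

  function workerStep(cell, seq, pos, workerId, bitMask) {
    // Skip sequence entries owned by other workers.
    while (pos < seq.length && (seq[pos] & bitMask) !== workerId) pos++;
    if (pos + 1 >= seq.length) return -1;  // sequence exhausted
    // Wait for our value to arrive, then pass the baton to the next entry.
    while (Atomics.compareExchange(cell, 0, seq[pos], seq[pos + 1]) !== seq[pos]) {}
    return pos + 1;
  }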
diff --git a/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js b/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
new file mode 100644
index 0000000000..89cc7ecb34
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
@@ -0,0 +1,213 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+const kSequenceLength = 8192;
+const kNumberOfWorkers = 4;
+const kBitMask = kNumberOfWorkers - 1;
+const kSequenceStartAddress = 32;
+
+function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
+ builder) {
+ let loadMemOpcode = kTrapUnreachable;
+ switch (size) {
+ case 64:
+ loadMemOpcode = kExprI64LoadMem;
+ break;
+ case 32:
+ loadMemOpcode = kExprI64LoadMem32U;
+ break;
+ case 16:
+ loadMemOpcode = kExprI64LoadMem16U;
+ break;
+ case 8:
+ loadMemOpcode = kExprI64LoadMem8U;
+ break;
+ default:
+ throw "!";
+ }
+ const kArgMemoryCell = 0; // target for atomic ops
+ const kArgSequencePtr = 1; // address of sequence
+  const kArgSequenceLength = 2; // length of sequence
+ const kArgWorkerId = 3; // id of this worker
+ const kArgBitMask = 4; // mask to extract worker id from value
+ const kLocalCurrentOffset = 5; // current position in sequence in bytes
+ const kLocalExpectedValue = 6; // the value we are waiting for
+ const kLocalNextValue = 7; // the value to write in the update
+ let body = [
+ // Turn sequence length to equivalent in bytes.
+    kExprGetLocal, kArgSequenceLength,
+    kExprI32Const, size / 8,
+    kExprI32Mul,
+    kExprSetLocal, kArgSequenceLength,
+    // Outer block so we have something to jump to for returning.
+ ...[kExprBlock, kWasmStmt,
+ // Set counter to 0.
+ kExprI32Const, 0,
+ kExprSetLocal, kLocalCurrentOffset,
+ // Outer loop until maxcount.
+ ...[kExprLoop, kWasmStmt,
+ // Find the next value to wait for.
+ ...[kExprLoop, kWasmStmt,
+ // Check end of sequence.
+ kExprGetLocal, kLocalCurrentOffset,
+          kExprGetLocal, kArgSequenceLength,
+ kExprI32Eq,
+ kExprBrIf, 2, // return
+ ...[kExprBlock, kWasmStmt,
+ // Load next value.
+ kExprGetLocal, kArgSequencePtr,
+ kExprGetLocal, kLocalCurrentOffset,
+ kExprI32Add,
+ loadMemOpcode, 0, 0,
+ // Mask off bits.
+ kExprGetLocal, kArgBitMask,
+ kExprI64UConvertI32,
+ kExprI64And,
+ // Compare with worker id.
+ kExprGetLocal, kArgWorkerId,
+ kExprI64UConvertI32,
+ kExprI64Eq,
+ kExprBrIf, 0,
+ // Not found, increment position.
+ kExprGetLocal, kLocalCurrentOffset,
+ kExprI32Const, size / 8,
+ kExprI32Add,
+ kExprSetLocal, kLocalCurrentOffset,
+ kExprBr, 1,
+ kExprEnd
+ ],
+ // Found, end loop.
+ kExprEnd
+ ],
+ // Load expected value to local.
+ kExprGetLocal, kArgSequencePtr,
+ kExprGetLocal, kLocalCurrentOffset,
+ kExprI32Add,
+ loadMemOpcode, 0, 0,
+ kExprSetLocal, kLocalExpectedValue,
+ // Load value after expected one.
+ kExprGetLocal, kArgSequencePtr,
+ kExprGetLocal, kLocalCurrentOffset,
+ kExprI32Add,
+ kExprI32Const, size / 8,
+ kExprI32Add,
+ loadMemOpcode, 0, 0,
+ kExprSetLocal, kLocalNextValue,
+ // Hammer on memory until value found.
+ ...[kExprLoop, kWasmStmt,
+ // Load address.
+ kExprGetLocal, kArgMemoryCell,
+ // Load expected value.
+ kExprGetLocal, kLocalExpectedValue,
+ // Load updated value.
+ kExprGetLocal, kLocalNextValue,
+ // Try update.
+ kAtomicPrefix, compareExchangeOpcode, 0, 0,
+ // Load expected value.
+ kExprGetLocal, kLocalExpectedValue,
+ // Spin if not what expected.
+ kExprI64Ne,
+ kExprBrIf, 0,
+ kExprEnd
+ ],
+ // Next iteration of loop.
+ kExprGetLocal, kLocalCurrentOffset,
+ kExprI32Const, size / 8,
+ kExprI32Add,
+ kExprSetLocal, kLocalCurrentOffset,
+ kExprBr, 0,
+ kExprEnd
+ ], // outer loop
+ kExprEnd
+ ], // the block
+ kExprReturn
+ ];
+ builder.addFunction(functionName, makeSig([kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32
+ ], []))
+ .addLocals({
+ i32_count: 1, i64_count: 2
+ })
+ .addBody(body)
+ .exportAs(functionName);
+}
+
+function generateSequence(typedarray, start, count) {
+ let end = count + start;
+ for (let i = start; i < end; i++) {
+ typedarray[i] = Math.floor(Math.random() * 256);
+ }
+}
+
+function spawnWorker(module, memory, address, sequence) {
+ let workers = [];
+ for (let i = 0; i < kNumberOfWorkers; i++) {
+ let worker = new Worker(
+ `onmessage = function(msg) {
+ this.instance = new WebAssembly.Instance(msg.module,
+ {m: {imported_mem: msg.memory}});
+ instance.exports.worker(msg.address, msg.sequence, msg.sequenceLength, msg.workerId,
+ msg.bitMask);
+ postMessage({workerId: msg.workerId});
+ }`,
+ {type: 'string'}
+ );
+ workers.push(worker);
+ worker.postMessage({
+ module: module,
+ memory: memory,
+ address: address,
+ sequence: sequence,
+ sequenceLength: kSequenceLength,
+ workerId: i,
+ bitMask: kBitMask
+ });
+ }
+ return workers;
+}
+
+function waitForWorkers(workers) {
+ for (let worker of workers) {
+ worker.getMessage();
+ worker.terminate();
+ }
+}
+
+function testOpcode(opcode, opcodeSize) {
+ print("Testing I64AtomicCompareExchange" + opcodeSize);
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "imported_mem", 0, 2, "shared");
+
+ makeWorkerCodeForOpcode(opcode, opcodeSize, "worker", builder);
+
+ let memory = new WebAssembly.Memory({
+ initial: 2,
+ maximum: 2,
+ shared: true
+ });
+ let memoryView = new Uint8Array(memory.buffer);
+ generateSequence(memoryView, kSequenceStartAddress, kSequenceLength * (opcodeSize / 8));
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let workers = spawnWorker(module, memory, 0, kSequenceStartAddress);
+
+  // Release the workers by copying the first sequence value into the cell.
+ for (let i = opcodeSize / 8 - 1; i >= 0; i--) {
+ memoryView[i] = memoryView[kSequenceStartAddress + i];
+ }
+
+ waitForWorkers(workers);
+
+ print("DONE");
+}
+
+testOpcode(kExprI64AtomicCompareExchange, 64);
+testOpcode(kExprI64AtomicCompareExchange32U, 32);
+testOpcode(kExprI64AtomicCompareExchange16U, 16);
+testOpcode(kExprI64AtomicCompareExchange8U, 8);
diff --git a/deps/v8/test/mjsunit/wasm/data-segments.js b/deps/v8/test/mjsunit/wasm/data-segments.js
index 571995931a..e73e3fb3a7 100644
--- a/deps/v8/test/mjsunit/wasm/data-segments.js
+++ b/deps/v8/test/mjsunit/wasm/data-segments.js
@@ -37,8 +37,6 @@ function GlobalImportedInitTest(pad) {
var builder = new WasmModuleBuilder();
builder.addMemory(1, 1, false);
- while (pad-- > 0) builder.addGlobal(kWasmI32); // pad
-
var g = builder.addImportedGlobal("mod", "offset", kWasmI32);
while (pad-- > 0) builder.addGlobal(kWasmI32); // pad
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-export.js b/deps/v8/test/mjsunit/wasm/exceptions-export.js
new file mode 100644
index 0000000000..72ec02dec3
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/exceptions-export.js
@@ -0,0 +1,81 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-eh
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestExportSimple() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addExportOfKind("ex", kExternalException, except);
+ let instance = builder.instantiate();
+
+ assertTrue(Object.prototype.hasOwnProperty.call(instance.exports, 'ex'));
+ assertEquals("object", typeof instance.exports.ex);
+ assertInstanceof(instance.exports.ex, WebAssembly.Exception);
+ assertSame(instance.exports.ex.constructor, WebAssembly.Exception);
+})();
+
+(function TestExportMultiple() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except1 = builder.addException(kSig_v_v);
+ let except2 = builder.addException(kSig_v_i);
+ builder.addExportOfKind("ex1a", kExternalException, except1);
+ builder.addExportOfKind("ex1b", kExternalException, except1);
+ builder.addExportOfKind("ex2", kExternalException, except2);
+ let instance = builder.instantiate();
+
+ assertTrue(Object.prototype.hasOwnProperty.call(instance.exports, 'ex1a'));
+ assertTrue(Object.prototype.hasOwnProperty.call(instance.exports, 'ex1b'));
+ assertTrue(Object.prototype.hasOwnProperty.call(instance.exports, 'ex2'));
+ assertSame(instance.exports.ex1a, instance.exports.ex1b);
+ assertNotSame(instance.exports.ex1a, instance.exports.ex2);
+})();
+
+(function TestExportOutOfBounds() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addExportOfKind("ex_oob", kExternalException, except + 1);
+ assertThrows(
+ () => builder.instantiate(), WebAssembly.CompileError,
+ /Wasm decoding failed: exception index 1 out of bounds/);
+})();
+
+(function TestExportSameNameTwice() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addExportOfKind("ex", kExternalException, except);
+ builder.addExportOfKind("ex", kExternalException, except);
+ assertThrows(
+ () => builder.instantiate(), WebAssembly.CompileError,
+ /Duplicate export name 'ex' for exception 0 and exception 0/);
+})();
+
+(function TestExportModuleGetExports() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addExportOfKind("ex", kExternalException, except);
+ let module = new WebAssembly.Module(builder.toBuffer());
+
+ let exports = WebAssembly.Module.exports(module);
+ assertArrayEquals([{ name: "ex", kind: "exception" }], exports);
+})();
+
+(function TestConstructorNonCallable() {
+ print(arguments.callee.name);
+ // TODO(wasm): Currently the constructor function of an exported exception is
+ // not callable. This can/will change once the proposal matures, at which
+ // point we should add a full exceptions-api.js test suite for the API and
+ // remove this test case from this file.
+ assertThrows(
+ () => WebAssembly.Exception(), TypeError,
+ /WebAssembly.Exception cannot be called/);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-import.js b/deps/v8/test/mjsunit/wasm/exceptions-import.js
new file mode 100644
index 0000000000..b5276727ba
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/exceptions-import.js
@@ -0,0 +1,96 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-eh
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// Helper function to return a new exported exception with the {kSig_v_v} type
+// signature from an anonymous module. The underlying module is thrown away.
+// This allows tests to reason solely about importing exceptions.
+function NewExportedException() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addExportOfKind("ex", kExternalException, except);
+ let instance = builder.instantiate();
+ return instance.exports.ex;
+}
+
+(function TestImportSimple() {
+ print(arguments.callee.name);
+ let exported = NewExportedException();
+ let builder = new WasmModuleBuilder();
+ let except = builder.addImportedException("m", "ex", kSig_v_v);
+
+ assertDoesNotThrow(() => builder.instantiate({ m: { ex: exported }}));
+})();
+
+(function TestImportMultiple() {
+ print(arguments.callee.name);
+ let exported = NewExportedException();
+ let builder = new WasmModuleBuilder();
+ let except1 = builder.addImportedException("m", "ex1", kSig_v_v);
+ let except2 = builder.addImportedException("m", "ex2", kSig_v_v);
+ let except3 = builder.addException(kSig_v_v);
+ builder.addExportOfKind("ex2", kExternalException, except2);
+ builder.addExportOfKind("ex3", kExternalException, except3);
+ let instance = builder.instantiate({ m: { ex1: exported, ex2: exported }});
+
+ assertTrue(except1 < except3 && except2 < except3);
+ assertEquals(undefined, instance.exports.ex1);
+ assertSame(exported, instance.exports.ex2);
+ assertNotSame(exported, instance.exports.ex3);
+})();
+
+(function TestImportMissing() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addImportedException("m", "ex", kSig_v_v);
+
+ assertThrows(
+ () => builder.instantiate({}), TypeError,
+ /module is not an object or function/);
+ assertThrows(
+ () => builder.instantiate({ m: {}}), WebAssembly.LinkError,
+ /exception import requires a WebAssembly.Exception/);
+})();
+
+(function TestImportValueMismatch() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addImportedException("m", "ex", kSig_v_v);
+
+ assertThrows(
+ () => builder.instantiate({ m: { ex: 23 }}), WebAssembly.LinkError,
+ /exception import requires a WebAssembly.Exception/);
+ assertThrows(
+ () => builder.instantiate({ m: { ex: {} }}), WebAssembly.LinkError,
+ /exception import requires a WebAssembly.Exception/);
+ var monkey = Object.create(NewExportedException());
+ assertThrows(
+ () => builder.instantiate({ m: { ex: monkey }}), WebAssembly.LinkError,
+ /exception import requires a WebAssembly.Exception/);
+})();
+
+(function TestImportSignatureMismatch() {
+ print(arguments.callee.name);
+ let exported = NewExportedException();
+ let builder = new WasmModuleBuilder();
+ let except = builder.addImportedException("m", "ex", kSig_v_i);
+
+ assertThrows(
+ () => builder.instantiate({ m: { ex: exported }}), WebAssembly.LinkError,
+ /imported exception does not match the expected type/);
+})();
+
+(function TestImportModuleGetImports() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addImportedException("m", "ex", kSig_v_v);
+ let module = new WebAssembly.Module(builder.toBuffer());
+
+ let imports = WebAssembly.Module.imports(module);
+ assertArrayEquals([{ module: "m", name: "ex", kind: "exception" }], imports);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-shared.js b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
new file mode 100644
index 0000000000..f2a5b56e9a
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
@@ -0,0 +1,158 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-eh
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// Helper function to return a new exported exception with the {kSig_v_v} type
+// signature from an anonymous module. The underlying module is thrown away.
+function NewExportedException() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addExportOfKind("ex", kExternalException, except);
+ let instance = builder.instantiate();
+ return instance.exports.ex;
+}
+
+// Check that an instance matches an exception thrown by itself, even when the
+// exception is re-thrown by a regular JavaScript function.
+(function TestSingleInstance() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sig_index = builder.addType(kSig_v_v);
+ let fun = builder.addImport("m", "f", sig_index);
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction("throw", kSig_v_v)
+ .addBody([
+ kExprThrow, except
+ ]).exportFunc();
+ builder.addFunction("catch", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprCallFunction, fun,
+ kExprCatch, except,
+ kExprEnd,
+ ]).exportFunc();
+ let ex_obj = new Error("my exception");
+ let instance = builder.instantiate({ m: { f: function() { throw ex_obj }}});
+
+ assertThrows(() => instance.exports.throw(), WebAssembly.RuntimeError);
+ assertThrowsEquals(() => instance.exports.catch(), ex_obj);
+ try {
+ instance.exports.throw();
+ } catch (e) {
+ ex_obj = e;
+ }
+ assertDoesNotThrow(() => instance.exports.catch());
+})();
+
+// Check that two instances distinguish their individual exceptions if they are
+// not shared, even when declared by the same underlying module.
+(function TestMultiInstanceNonShared() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sig_index = builder.addType(kSig_v_v);
+ let fun = builder.addImport("m", "f", sig_index);
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction("throw", kSig_v_v)
+ .addBody([
+ kExprThrow, except
+ ]).exportFunc();
+ builder.addFunction("catch", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprCallFunction, fun,
+ kExprCatch, except,
+ kExprEnd,
+ ]).exportFunc();
+ let ex_obj = new Error("my exception");
+ let instance1 = builder.instantiate({ m: { f: assertUnreachable }});
+ let instance2 = builder.instantiate({ m: { f: function() { throw ex_obj }}});
+
+ assertThrows(() => instance1.exports.throw(), WebAssembly.RuntimeError);
+ assertThrowsEquals(() => instance2.exports.catch(), ex_obj);
+ try {
+ instance1.exports.throw();
+ } catch (e) {
+ ex_obj = e;
+ }
+ assertThrowsEquals(() => instance2.exports.catch(), ex_obj);
+})();
+
+// Check that two instances match their exceptions if they are shared properly,
+// even if the local exception index of export and import is different.
+(function TestMultiInstanceShared() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sig_index = builder.addType(kSig_v_v);
+ let fun = builder.addImport("m", "f", sig_index);
+ let except1 = builder.addImportedException("m", "ex1", kSig_v_v);
+ let except2 = builder.addException(kSig_v_v);
+ builder.addExportOfKind("ex2", kExternalException, except2);
+ builder.addFunction("throw", kSig_v_v)
+ .addBody([
+ kExprThrow, except2
+ ]).exportFunc();
+ builder.addFunction("catch", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprCallFunction, fun,
+ kExprCatch, except1,
+ kExprEnd,
+ ]).exportFunc();
+ let ex_obj = new Error("my exception");
+ let instance1 = builder.instantiate({ m: { f: assertUnreachable,
+ ex1: NewExportedException() }});
+ let instance2 = builder.instantiate({ m: { f: function() { throw ex_obj },
+ ex1: instance1.exports.ex2 }});
+
+ assertThrows(() => instance1.exports.throw(), WebAssembly.RuntimeError);
+ assertThrowsEquals(() => instance2.exports.catch(), ex_obj);
+ try {
+ instance1.exports.throw();
+ } catch (e) {
+ ex_obj = e;
+ }
+ assertDoesNotThrow(() => instance2.exports.catch());
+})();
+
+// Check that two instances based on different modules match their exceptions if
+// they are shared properly, even if the local exception index is different.
+(function TestMultiModuleShared() {
+ print(arguments.callee.name);
+ let builder1 = new WasmModuleBuilder();
+ let except1 = builder1.addException(kSig_v_v);
+ let except2 = builder1.addException(kSig_v_v);
+ builder1.addExportOfKind("ex", kExternalException, except2);
+ builder1.addFunction("throw", kSig_v_v)
+ .addBody([
+ kExprThrow, except2
+ ]).exportFunc();
+ let builder2 = new WasmModuleBuilder();
+ let sig_index = builder2.addType(kSig_v_v);
+ let fun = builder2.addImport("m", "f", sig_index);
+ let except = builder2.addImportedException("m", "ex", kSig_v_v);
+ builder2.addFunction("catch", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprCallFunction, fun,
+ kExprCatch, except,
+ kExprEnd,
+ ]).exportFunc();
+ let ex_obj = new Error("my exception");
+ let instance1 = builder1.instantiate();
+ let instance2 = builder2.instantiate({ m: { f: function() { throw ex_obj },
+ ex: instance1.exports.ex }});
+
+ assertThrows(() => instance1.exports.throw(), WebAssembly.RuntimeError);
+ assertThrowsEquals(() => instance2.exports.catch(), ex_obj);
+ try {
+ instance1.exports.throw();
+ } catch (e) {
+ ex_obj = e;
+ }
+ assertDoesNotThrow(() => instance2.exports.catch());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index 74d8e7dfb5..d165c8742d 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -1,84 +1,110 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-eh
+// Flags: --expose-wasm --experimental-wasm-eh --allow-natives-syntax
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-// The following method doesn't attempt to catch an raised exception.
-var test_throw = (function () {
- var builder = new WasmModuleBuilder();
+function assertWasmThrows(instance, runtime_id, values, code) {
+ try {
+ if (typeof code === 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ } catch (e) {
+ assertInstanceof(e, WebAssembly.RuntimeError);
+ var e_runtime_id = %GetWasmExceptionId(e, instance);
+ assertTrue(Number.isInteger(e_runtime_id));
+ assertEquals(e_runtime_id, runtime_id);
+ var e_values = %GetWasmExceptionValues(e);
+ assertArrayEquals(values, e_values);
+ return; // Success.
+ }
+ throw new MjsUnitAssertionError('Did not throw expected <' + runtime_id +
+ '> with values: ' + values);
+}
+
+// First we just test that "except_ref" local variables are allowed.
+(function TestLocalExceptRef() {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("push_and_drop_except_ref", kSig_v_v)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprDrop,
+ ]).addLocals({except_count: 1}).exportFunc();
+ let instance = builder.instantiate();
- builder.addException(kSig_v_v);
+ assertDoesNotThrow(instance.exports.push_and_drop_except_ref);
+})();
// The following method doesn't attempt to catch a raised exception.
+(function TestThrowSimple() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
builder.addFunction("throw_if_param_not_zero", kSig_i_i)
.addBody([
kExprGetLocal, 0,
kExprI32Const, 0,
kExprI32Ne,
kExprIf, kWasmStmt,
- kExprThrow, 0,
+ kExprThrow, except,
kExprEnd,
kExprI32Const, 1
]).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+ assertEquals(1, instance.exports.throw_if_param_not_zero(0));
+ assertWasmThrows(instance, except, [], () => instance.exports.throw_if_param_not_zero(10));
+ assertWasmThrows(instance, except, [], () => instance.exports.throw_if_param_not_zero(-1));
})();
-// Check the test_throw exists.
-assertFalse(test_throw === undefined);
-assertFalse(test_throw === null);
-assertFalse(test_throw === 0);
-assertEquals("object", typeof test_throw.exports);
-assertEquals("function", typeof test_throw.exports.throw_if_param_not_zero);
+// Test that empty try/catch blocks work.
+(function TestCatchEmptyBlocks() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction("catch_empty_try", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprCatch, except,
+ kExprEnd,
+ ]).exportFunc();
+ let instance = builder.instantiate();
-// Test expected behavior of throws
-assertEquals(1, test_throw.exports.throw_if_param_not_zero(0));
-assertWasmThrows(0, [], function() { test_throw.exports.throw_if_param_not_zero(10) });
-assertWasmThrows(0, [], function() { test_throw.exports.throw_if_param_not_zero(-1) });
+ assertDoesNotThrow(instance.exports.catch_empty_try);
+})();
// Now that we know throwing works, we test catching the exceptions we raise.
-var test_catch = (function () {
- var builder = new WasmModuleBuilder();
-
- builder.addException(kSig_v_v);
+(function TestCatchSimple() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
builder.addFunction("simple_throw_catch_to_0_1", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
kExprGetLocal, 0,
kExprI32Eqz,
kExprIf, kWasmStmt,
- kExprThrow, 0,
+ kExprThrow, except,
kExprEnd,
- kExprI32Const, 1,
- kExprCatch, 0,
- kExprI32Const, 0,
+ kExprI32Const, 42,
+ kExprCatch, except,
+ kExprI32Const, 23,
kExprEnd
]).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+ assertEquals(23, instance.exports.simple_throw_catch_to_0_1(0));
+ assertEquals(42, instance.exports.simple_throw_catch_to_0_1(1));
})();
-// Check the test_catch exists.
-assertFalse(test_catch === undefined);
-assertFalse(test_catch === null);
-assertFalse(test_catch === 0);
-assertEquals("object", typeof test_catch.exports);
-assertEquals("function", typeof test_catch.exports.simple_throw_catch_to_0_1);
-
-// Test expected behavior of simple catch.
-assertEquals(0, test_catch.exports.simple_throw_catch_to_0_1(0));
-assertEquals(1, test_catch.exports.simple_throw_catch_to_0_1(1));
-
// Test that we can distinguish which exception was thrown.
-var test_catch_2 = (function () {
- var builder = new WasmModuleBuilder();
-
- builder.addException(kSig_v_v);
- builder.addException(kSig_v_v);
- builder.addException(kSig_v_v);
+(function TestCatchComplex() {
+ let builder = new WasmModuleBuilder();
+ let except1 = builder.addException(kSig_v_v);
+ let except2 = builder.addException(kSig_v_v);
+ let except3 = builder.addException(kSig_v_v);
builder.addFunction("catch_different_exceptions", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
@@ -86,172 +112,122 @@ var test_catch_2 = (function () {
kExprGetLocal, 0,
kExprI32Eqz,
kExprIf, kWasmStmt,
- kExprThrow, 0,
+ kExprThrow, except1,
kExprElse,
kExprGetLocal, 0,
kExprI32Const, 1,
kExprI32Eq,
kExprIf, kWasmStmt,
- kExprThrow, 1,
+ kExprThrow, except2,
kExprElse,
- kExprThrow, 2,
+ kExprThrow, except3,
kExprEnd,
kExprEnd,
kExprI32Const, 2,
- kExprCatch, 0,
+ kExprCatch, except1,
kExprI32Const, 3,
kExprEnd,
- kExprCatch, 1,
+ kExprCatch, except2,
kExprI32Const, 4,
kExprEnd
]).exportFunc();
- return builder.instantiate();
-})();
-
-assertFalse(test_catch_2 === undefined);
-assertFalse(test_catch_2 === null);
-assertFalse(test_catch_2 === 0);
-assertEquals("object", typeof test_catch_2.exports);
-assertEquals("function", typeof test_catch_2.exports.catch_different_exceptions);
+ let instance = builder.instantiate();
-assertEquals(3, test_catch_2.exports.catch_different_exceptions(0));
-assertEquals(4, test_catch_2.exports.catch_different_exceptions(1));
-assertWasmThrows(2, [], function() { test_catch_2.exports.catch_different_exceptions(2) });
+ assertEquals(3, instance.exports.catch_different_exceptions(0));
+ assertEquals(4, instance.exports.catch_different_exceptions(1));
+ assertWasmThrows(instance, except3, [], () => instance.exports.catch_different_exceptions(2));
+})();
// Test throwing an exception with multiple values.
-var test_throw_1_2 = (function() {
- var builder = new WasmModuleBuilder();
- builder.addException(kSig_v_ii);
+(function TestThrowMultipleValues() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_ii);
builder.addFunction("throw_1_2", kSig_v_v)
.addBody([
kExprI32Const, 1,
kExprI32Const, 2,
- kExprThrow, 0,
+ kExprThrow, except,
]).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+ assertWasmThrows(instance, except, [0, 1, 0, 2], () => instance.exports.throw_1_2());
})();
-assertFalse(test_throw_1_2 === undefined);
-assertFalse(test_throw_1_2 === null);
-assertFalse(test_throw_1_2 === 0);
-assertEquals("object", typeof test_throw_1_2.exports);
-assertEquals("function", typeof test_throw_1_2.exports.throw_1_2);
-
-assertWasmThrows(0, [0, 1, 0, 2], function() { test_throw_1_2.exports.throw_1_2(); });
-
// Test throwing/catching the i32 parameter value.
-var test_throw_catch_param_i = (function () {
- var builder = new WasmModuleBuilder();
- builder.addException(kSig_v_i);
+(function TestThrowCatchParamI() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_i);
builder.addFunction("throw_catch_param", kSig_i_i)
.addBody([
kExprTry, kWasmI32,
kExprGetLocal, 0,
- kExprThrow, 0,
+ kExprThrow, except,
kExprI32Const, 2,
- kExprCatch, 0,
+ kExprCatch, except,
kExprReturn,
kExprEnd,
]).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+ assertEquals(0, instance.exports.throw_catch_param(0));
+ assertEquals(1, instance.exports.throw_catch_param(1));
+ assertEquals(10, instance.exports.throw_catch_param(10));
})();
-assertFalse(test_throw_catch_param_i === undefined);
-assertFalse(test_throw_catch_param_i === null);
-assertFalse(test_throw_catch_param_i === 0);
-assertEquals("object", typeof test_throw_catch_param_i.exports);
-assertEquals("function",
- typeof test_throw_catch_param_i.exports.throw_catch_param);
-
-assertEquals(0, test_throw_catch_param_i.exports.throw_catch_param(0));
-assertEquals(1, test_throw_catch_param_i.exports.throw_catch_param(1));
-assertEquals(10, test_throw_catch_param_i.exports.throw_catch_param(10));
-
// Test the encoding of a thrown exception with an integer exception.
-
-var test_throw_param_i = (function () {
- var builder = new WasmModuleBuilder();
- builder.addException(kSig_v_i);
+(function TestThrowParamI() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_i);
builder.addFunction("throw_param", kSig_v_i)
.addBody([
kExprGetLocal, 0,
- kExprThrow, 0,
+ kExprThrow, except,
]).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+ assertWasmThrows(instance, except, [0, 5], () => instance.exports.throw_param(5));
+ assertWasmThrows(instance, except, [6, 31026], () => instance.exports.throw_param(424242));
})();
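The expected-values arrays encode each thrown i32 as two 16-bit halves, most
significant half first, which is easy to check:

  const v = 424242;
  [(v >>> 16) & 0xFFFF, v & 0xFFFF];  // [6, 31026], matching the assertion above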
-assertFalse(test_throw_param_i === undefined);
-assertFalse(test_throw_param_i === null);
-assertFalse(test_throw_param_i === 0);
-assertEquals("object", typeof test_throw_param_i.exports);
-assertEquals("function",
- typeof test_throw_param_i.exports.throw_param);
-
-assertWasmThrows(0, [0, 5], function() { test_throw_param_i.exports.throw_param(5); });
-assertWasmThrows(0, [6, 31026],
- function() { test_throw_param_i.exports.throw_param(424242); });
-
// Test throwing/catching the f32 parameter value.
-var test_throw_catch_param_f = (function () {
- var builder = new WasmModuleBuilder();
- builder.addException(kSig_v_f);
+(function TestThrowCatchParamF() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_f);
builder.addFunction("throw_catch_param", kSig_f_f)
.addBody([
kExprTry, kWasmF32,
kExprGetLocal, 0,
- kExprThrow, 0,
+ kExprThrow, except,
kExprF32Const, 0, 0, 0, 0,
- kExprCatch, 0,
+ kExprCatch, except,
kExprReturn,
kExprEnd,
]).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+ assertEquals(5.0, instance.exports.throw_catch_param(5.0));
+ assertEquals(10.5, instance.exports.throw_catch_param(10.5));
})();
-assertFalse(test_throw_catch_param_f === undefined);
-assertFalse(test_throw_catch_param_f === null);
-assertFalse(test_throw_catch_param_f === 0);
-assertEquals("object", typeof test_throw_catch_param_f.exports);
-assertEquals("function",
- typeof test_throw_catch_param_f.exports.throw_catch_param);
-
-assertEquals(5.0, test_throw_catch_param_f.exports.throw_catch_param(5.0));
-assertEquals(10.5, test_throw_catch_param_f.exports.throw_catch_param(10.5));
-
// Test the encoding of a thrown exception with a float value.
-
-var test_throw_param_f = (function () {
- var builder = new WasmModuleBuilder();
- builder.addException(kSig_v_f);
+(function TestThrowParamF() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_f);
builder.addFunction("throw_param", kSig_v_f)
.addBody([
kExprGetLocal, 0,
- kExprThrow, 0,
+ kExprThrow, except,
]).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+ assertWasmThrows(instance, except, [16544, 0], () => instance.exports.throw_param(5.0));
+ assertWasmThrows(instance, except, [16680, 0], () => instance.exports.throw_param(10.5));
})();
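The f32 payload is the value's IEEE-754 bit pattern split into the same 16-bit
halves (f64 payloads work alike, with four halves). For example, 5.0 is
0x40A00000:

  const dv = new DataView(new ArrayBuffer(4));
  dv.setFloat32(0, 5.0);               // big-endian by default
  [dv.getUint16(0), dv.getUint16(2)];  // [16544, 0], matching the assertion above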
-assertFalse(test_throw_param_f === undefined);
-assertFalse(test_throw_param_f === null);
-assertFalse(test_throw_param_f === 0);
-assertEquals("object", typeof test_throw_param_f.exports);
-assertEquals("function",
- typeof test_throw_param_f.exports.throw_param);
-
-assertWasmThrows(0, [16544, 0],
- function() { test_throw_param_f.exports.throw_param(5.0); });
-assertWasmThrows(0, [16680, 0],
- function() { test_throw_param_f.exports.throw_param(10.5); });
-
// Test throwing/catching an I64 value
-var test_throw_catch_param_l = (function () {
- var builder = new WasmModuleBuilder();
- builder.addException(kSig_v_l);
+(function TestThrowCatchParamL() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_l);
builder.addFunction("throw_catch_param", kSig_i_i)
.addBody([
kExprGetLocal, 0,
@@ -259,9 +235,9 @@ var test_throw_catch_param_l = (function () {
kExprSetLocal, 1,
kExprTry, kWasmI32,
kExprGetLocal, 1,
- kExprThrow, 0,
+ kExprThrow, except,
kExprI32Const, 2,
- kExprCatch, 0,
+ kExprCatch, except,
kExprGetLocal, 1,
kExprI64Eq,
kExprIf, kWasmI32,
@@ -273,26 +249,17 @@ var test_throw_catch_param_l = (function () {
kExprReturn,
kExprEnd,
]).addLocals({i64_count: 1}).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+ assertEquals(1, instance.exports.throw_catch_param(5));
+ assertEquals(1, instance.exports.throw_catch_param(0));
+ assertEquals(1, instance.exports.throw_catch_param(-1));
})();
-assertFalse(test_throw_catch_param_l === undefined);
-assertFalse(test_throw_catch_param_l === null);
-assertFalse(test_throw_catch_param_l === 0);
-assertEquals("object", typeof test_throw_catch_param_l.exports);
-assertEquals("function",
- typeof test_throw_catch_param_l.exports.throw_catch_param);
-
-assertEquals(1, test_throw_catch_param_l.exports.throw_catch_param(5));
-assertEquals(1, test_throw_catch_param_l.exports.throw_catch_param(0));
-assertEquals(1, test_throw_catch_param_l.exports.throw_catch_param(-1));
-
// Test the encoding of a thrown exception with an I64 value.
-
-var test_throw_param_l = (function () {
- var builder = new WasmModuleBuilder();
- builder.addException(kSig_v_l);
+(function TestThrowParamL() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_l);
builder.addFunction("throw_param", kSig_v_ii)
.addBody([
kExprGetLocal, 0,
@@ -302,428 +269,357 @@ var test_throw_param_l = (function () {
kExprGetLocal, 1,
kExprI64UConvertI32,
kExprI64Ior,
- kExprThrow, 0
+ kExprThrow, except,
]).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+ assertWasmThrows(instance, except, [0, 10, 0, 5], () => instance.exports.throw_param(10, 5));
+ assertWasmThrows(instance, except, [65535, 65535, 0, 13], () => instance.exports.throw_param(-1, 13));
})();
-assertFalse(test_throw_param_l === undefined);
-assertFalse(test_throw_param_l === null);
-assertFalse(test_throw_param_l === 0);
-assertEquals("object", typeof test_throw_param_l.exports);
-assertEquals("function",
- typeof test_throw_param_l.exports.throw_param);
-
-assertWasmThrows(0, [0, 10, 0, 5],
- function() { test_throw_param_l.exports.throw_param(10, 5); });
-assertWasmThrows(0, [65535, 65535, 0, 13],
- function() { test_throw_param_l.exports.throw_param(-1, 13); });
-
// Test throwing/catching the F64 parameter value
-var test_throw_catch_param_d = (function () {
- var builder = new WasmModuleBuilder();
- builder.addException(kSig_v_d);
+(function TestThrowCatchParamD() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_d);
builder.addFunction("throw_catch_param", kSig_d_d)
.addBody([
kExprTry, kWasmF64,
kExprGetLocal, 0,
- kExprThrow, 0,
+ kExprThrow, except,
kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0,
- kExprCatch, 0,
+ kExprCatch, except,
kExprReturn,
kExprEnd,
]).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+ assertEquals(5.0, instance.exports.throw_catch_param(5.0));
+ assertEquals(10.5, instance.exports.throw_catch_param(10.5));
})();
-assertFalse(test_throw_catch_param_d === undefined);
-assertFalse(test_throw_catch_param_d === null);
-assertFalse(test_throw_catch_param_d === 0);
-assertEquals("object", typeof test_throw_catch_param_d.exports);
-assertEquals("function",
- typeof test_throw_catch_param_d.exports.throw_catch_param);
-
-assertEquals(5.0, test_throw_catch_param_d.exports.throw_catch_param(5.0));
-assertEquals(10.5, test_throw_catch_param_d.exports.throw_catch_param(10.5));
-
// Test the encoding of a thrown exception with an f64 value.
-
-var test_throw_param_d = (function () {
- var builder = new WasmModuleBuilder();
- builder.addException(kSig_v_d);
+(function TestThrowParamD() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_d);
builder.addFunction("throw_param", kSig_v_f)
.addBody([
kExprGetLocal, 0,
kExprF64ConvertF32,
- kExprThrow, 0
+ kExprThrow, except,
]).exportFunc();
+ let instance = builder.instantiate();
- return builder.instantiate();
+  assertWasmThrows(instance, except, [16404, 0, 0, 0],
+                   () => instance.exports.throw_param(5.0));
+  assertWasmThrows(instance, except, [16739, 4816, 0, 0],
+                   () => instance.exports.throw_param(10000000.5));
})();
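The same decomposition applies to floats: the payload is the IEEE-754 bit pattern of the thrown f64 as four 16-bit words, high word first. Note the exported signature is kSig_v_f, so 10000000.5 is first rounded to f32 (dropping the .5 at that magnitude) before the F64ConvertF32 widening. A sketch reproducing the constants:

  // Return an f64's bit pattern as four big-endian 16-bit words.
  function encodeF64(value) {
    const view = new DataView(new ArrayBuffer(8));
    view.setFloat64(0, value);  // big-endian by default
    return [view.getUint16(0), view.getUint16(2),
            view.getUint16(4), view.getUint16(6)];
  }
  encodeF64(5.0);                      // [16404, 0, 0, 0]
  encodeF64(Math.fround(10000000.5));  // [16739, 4816, 0, 0]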
-assertFalse(test_throw_param_d === undefined);
-assertFalse(test_throw_param_d === null);
-assertFalse(test_throw_param_d === 0);
-assertEquals("object", typeof test_throw_param_d.exports);
-assertEquals("function",
- typeof test_throw_param_d.exports.throw_param);
-
-assertWasmThrows(0, [16404, 0, 0, 0],
- function() { test_throw_param_d.exports.throw_param(5.0); });
-assertWasmThrows(0, [16739, 4816, 0, 0],
- function() { test_throw_param_d.exports.throw_param(10000000.5); });
-
-/* TODO(kschimpf) Convert these tests to work for the proposed exceptions.
-
-// The following methods do not attempt to catch the exception they raise.
-var test_throw = (function () {
- var builder = new WasmModuleBuilder();
-
+// Test the encoding of a computed parameter value.
+(function TestThrowParamComputed() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_i);
builder.addFunction("throw_expr_with_params", kSig_v_ddi)
- .addBody([
- // p2 * (p0 + min(p0, p1))|0 - 20
- kExprGetLocal, 2,
- kExprGetLocal, 0,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprF64Min,
- kExprF64Add,
- kExprI32SConvertF64,
- kExprI32Mul,
- kExprI32Const, 20,
- kExprI32Sub,
- kExprThrow,
- ])
- .exportFunc()
-
- return builder.instantiate();
+ .addBody([
+ // p2 * (p0 + min(p0, p1))|0 - 20
+ kExprGetLocal, 2,
+ kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprF64Min,
+ kExprF64Add,
+ kExprI32SConvertF64,
+ kExprI32Mul,
+ kExprI32Const, 20,
+ kExprI32Sub,
+ kExprThrow, except,
+    ]).exportFunc();
+ let instance = builder.instantiate();
+
+  assertWasmThrows(instance, except, [65535, 65536-8],
+                   () => instance.exports.throw_expr_with_params(1.5, 2.5, 4));
+  assertWasmThrows(instance, except, [0, 12],
+                   () => instance.exports.throw_expr_with_params(5.7, 2.5, 4));
})();
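Both expected payloads follow from the commented expression, with the thrown i32 reported as two 16-bit words, high first. A quick cross-check in plain JS (here |0 stands in for the Wasm truncation, which is applied to the sum only; both results are in range, so no trap semantics are lost):

  function expectedWords(p0, p1, p2) {
    const v = (p2 * ((p0 + Math.min(p0, p1)) | 0) - 20) | 0;
    const u = v >>> 0;  // reinterpret the i32 as unsigned
    return [u >>> 16, u & 0xFFFF];
  }
  expectedWords(1.5, 2.5, 4);  // [65535, 65528], i.e. -8 split into u16 words
  expectedWords(5.7, 2.5, 4);  // [0, 12]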
-// Check the test_throw exists.
-assertFalse(test_throw === undefined);
-assertFalse(test_throw === null);
-assertFalse(test_throw === 0);
-assertEquals("object", typeof test_throw.exports);
-assertEquals("function", typeof test_throw.exports.throw_expr_with_params);
-
-assertEquals(1, test_throw.exports.throw_param_if_not_zero(0));
-assertWasmThrows(
- -8, function() { test_throw.exports.throw_expr_with_params(1.5, 2.5, 4); });
-assertWasmThrows(
- 12, function() { test_throw.exports.throw_expr_with_params(5.7, 2.5, 4); });
-
-// Now that we know throwing works, we test catching the exceptions we raise.
-var test_catch = (function () {
- var builder = new WasmModuleBuilder();
-
- // Helper function for throwing from js. It is imported by the Wasm module
- // as throw_i.
- function throw_value(value) {
- throw value;
- }
- var sig_index = builder.addType(kSig_v_i);
- var kJSThrowI = builder.addImport("", "throw_i", sig_index);
-
- // Helper function that throws a string. Wasm should not catch it.
- function throw_string() {
- throw "use wasm;";
- }
- sig_index = builder.addType(kSig_v_v);
- var kJSThrowString = builder.addImport("", "throw_string", sig_index);
-
- // Helper function that throws undefined. Wasm should not catch it.
- function throw_undefined() {
- throw undefined;
- }
- var kJSThrowUndefined = builder.addImport("", "throw_undefined", sig_index);
-
- // Helper function that throws an fp. Wasm should not catch it.
- function throw_fp() {
- throw 10.5;
- }
- var kJSThrowFP = builder.addImport("", "throw_fp", sig_index);
-
- // Helper function that throws a large number. Wasm should not catch it.
- function throw_large() {
- throw 1e+28;
- }
- var kJSThrowLarge = builder.addImport("", "throw_large", sig_index);
+// Now that we know catching works locally, we test catching exceptions that
+// cross function boundaries and/or are raised by JavaScript.
+(function TestCatchCrossFunctions() {
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_i);
+
+ // Helper function for throwing from JS. It is imported by the Wasm module
+ // as throw_i.
+ function throw_value(value) {
+ throw value;
+ }
+ let sig_index = builder.addType(kSig_v_i);
+ let kJSThrowI = builder.addImport("", "throw_i", sig_index);
+
+ // Helper function that throws a string. Wasm should not catch it.
+ function throw_string() {
+ throw "use wasm";
+ }
+ sig_index = builder.addType(kSig_v_v);
+ let kJSThrowString = builder.addImport("", "throw_string", sig_index);
+
+ // Helper function that throws undefined. Wasm should not catch it.
+ function throw_undefined() {
+ throw undefined;
+ }
+ let kJSThrowUndefined = builder.addImport("", "throw_undefined", sig_index);
+
+ // Helper function that throws an fp. Wasm should not catch it.
+ function throw_fp() {
+ throw 10.5;
+ }
+ let kJSThrowFP = builder.addImport("", "throw_fp", sig_index);
+
+ // Helper function that throws a large number. Wasm should not catch it.
+ function throw_large() {
+ throw 1e+28;
+ }
+ let kJSThrowLarge = builder.addImport("", "throw_large", sig_index);
+
+ // Helper function for throwing from WebAssembly.
+ let kWasmThrowFunction =
+ builder.addFunction("throw", kSig_v_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprThrow, except,
+ ])
+ .index;
- // Helper function for throwing from WebAssembly.
- var kWasmThrowFunction =
- builder.addFunction("throw", kSig_v_i)
- .addBody([
+  // Scenario 1: Throw and catch appear in the same function. This should
+ // happen in case of inlining, for example.
+ builder.addFunction("same_scope", kSig_i_i)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprGetLocal, 0,
+ kExprI32Const, 0,
+ kExprI32Ne,
+ kExprIf, kWasmStmt,
kExprGetLocal, 0,
- kExprThrow
- ])
- .index;
+ kExprThrow, except,
+ kExprUnreachable,
+ kExprEnd,
+ kExprI32Const, 63,
+ kExprCatch, except,
+ kExprEnd
+ ])
+ .exportFunc();
- // Scenario 1: Throw and catch appear on the same function. This should
- // happen in case of inlining, for example.
- builder.addFunction("same_scope", kSig_i_i)
- .addBody([
+ builder.addFunction("same_scope_ignore", kSig_i_i)
+ .addBody([
kExprTry, kWasmI32,
kExprGetLocal, 0,
- kExprI32Const, 0,
- kExprI32Ne,
- kExprIf, kWasmStmt,
- kExprGetLocal, 0,
- kExprThrow,
- kExprUnreachable,
- kExprEnd,
- kExprI32Const, 63,
- kExprCatch, 1,
- kExprGetLocal, 1,
- kExprEnd
- ])
- .addLocals({i32_count: 1})
- .exportFunc()
- .index;
-
- builder.addFunction("same_scope_ignore", kSig_i_i)
- .addBody([
- kExprTry, kWasmI32,
- kExprGetLocal, 0,
- kExprThrow,
- kExprUnreachable,
- kExprCatch, 1,
- kExprGetLocal, 0,
- kExprEnd,
- ])
- .addLocals({i32_count: 1})
- .exportFunc();
-
- builder.addFunction("same_scope_multiple", kSig_i_i)
- // path = 0;
- //
- // try {
- // try {
- // try {
- // if (p == 1)
- // throw 1;
- // path |= 2
- // } catch (v) {
- // path |= v | 4;
- // throw path;
- // }
- // if (p == 2)
- // throw path|8;
- // path |= 16;
- // } catch (v) {
- // path |= v | 32;
- // throw path;
- // }
- // if (p == 3)
- // throw path|64;
- // path |= 128
- // } catch (v) {
- // path |= v | 256;
- // }
- //
- // return path;
- //
- // p == 1 -> path == 293
- // p == 2 -> path == 298
- // p == 3 -> path == 338
- // else -> path == 146
- .addBody([
+ kExprThrow, except,
+ kExprUnreachable,
+ kExprCatch, except,
+ kExprEnd,
+ ])
+ .exportFunc();
+
+ builder.addFunction("same_scope_multiple", kSig_i_i)
+ // path = 0;
+ //
+ // try {
+ // try {
+ // try {
+ // if (p == 1)
+ // throw 1;
+ // path |= 2
+ // } catch (v) {
+ // path |= v | 4;
+ // throw path;
+ // }
+ // if (p == 2)
+ // throw path|8;
+ // path |= 16;
+ // } catch (v) {
+ // path |= v | 32;
+ // throw path;
+ // }
+ // if (p == 3)
+ // throw path|64;
+ // path |= 128
+ // } catch (v) {
+ // path |= v | 256;
+ // }
+ //
+ // return path;
+ //
+ // p == 1 -> path == 293
+ // p == 2 -> path == 298
+ // p == 3 -> path == 338
+ // else -> path == 146
+ .addBody([
+ kExprTry, kWasmI32,
kExprTry, kWasmI32,
kExprTry, kWasmI32,
- kExprTry, kWasmI32,
- kExprGetLocal, 0,
- kExprI32Const, 1,
- kExprI32Eq,
- kExprIf, kWasmStmt,
- kExprI32Const, 1,
- kExprThrow,
- kExprUnreachable,
- kExprEnd,
- kExprI32Const, 2,
- kExprCatch, 1,
- kExprGetLocal, 1,
- kExprI32Const, 4,
- kExprI32Ior,
- kExprThrow,
- kExprUnreachable,
- kExprEnd,
- kExprTeeLocal, 2,
kExprGetLocal, 0,
- kExprI32Const, 2,
+ kExprI32Const, 1,
kExprI32Eq,
kExprIf, kWasmStmt,
- kExprGetLocal, 2,
- kExprI32Const, 8,
- kExprI32Ior,
- kExprThrow,
+ kExprI32Const, 1,
+ kExprThrow, except,
kExprUnreachable,
kExprEnd,
- kExprI32Const, 16,
- kExprI32Ior,
- kExprCatch, 1,
- kExprGetLocal, 1,
- kExprI32Const, 32,
+ kExprI32Const, 2,
+ kExprCatch, except,
+ kExprI32Const, 4,
kExprI32Ior,
- kExprThrow,
+ kExprThrow, except,
kExprUnreachable,
kExprEnd,
- kExprTeeLocal, 2,
+ kExprTeeLocal, 1,
kExprGetLocal, 0,
- kExprI32Const, 3,
+ kExprI32Const, 2,
kExprI32Eq,
kExprIf, kWasmStmt,
- kExprGetLocal, 2,
- kExprI32Const, / *64=* / 192, 0,
+ kExprGetLocal, 1,
+ kExprI32Const, 8,
kExprI32Ior,
- kExprThrow,
+ kExprThrow, except,
kExprUnreachable,
kExprEnd,
- kExprI32Const, / *128=* / 128, 1,
+ kExprI32Const, 16,
kExprI32Ior,
- kExprCatch, 1,
+ kExprCatch, except,
+ kExprI32Const, 32,
+ kExprI32Ior,
+ kExprThrow, except,
+ kExprUnreachable,
+ kExprEnd,
+ kExprTeeLocal, 1,
+ kExprGetLocal, 0,
+ kExprI32Const, 3,
+ kExprI32Eq,
+ kExprIf, kWasmStmt,
kExprGetLocal, 1,
- kExprI32Const, / *256=* / 128, 2,
+ kExprI32Const, /*64=*/ 192, 0,
kExprI32Ior,
+ kExprThrow, except,
+ kExprUnreachable,
kExprEnd,
- ])
- .addLocals({i32_count: 2})
- .exportFunc();
+ kExprI32Const, /*128=*/ 128, 1,
+ kExprI32Ior,
+ kExprCatch, except,
+ kExprI32Const, /*256=*/ 128, 2,
+ kExprI32Ior,
+ kExprEnd,
+ ])
+ .addLocals({i32_count: 1})
+ .exportFunc();
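As a sanity check on the expected constants listed in the pseudo-code comment, the p == 1 case threads the value through all three catch blocks:

  // p == 1: inner throw 1  -> path = 0 | 1 | 4     = 5    (rethrown)
  //         middle catch   -> path = 5 | 5 | 32    = 37   (rethrown)
  //         outer catch    -> path = 37 | 37 | 256 = 293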
- // Scenario 2: Catches an exception raised from the direct callee.
- var kFromDirectCallee =
- builder.addFunction("from_direct_callee", kSig_i_i)
- .addBody([
- kExprTry, kWasmI32,
- kExprGetLocal, 0,
- kExprCallFunction, kWasmThrowFunction,
- kExprI32Const, / *-1=* / 127,
- kExprCatch, 1,
- kExprGetLocal, 1,
- kExprEnd
- ])
- .addLocals({i32_count: 1})
- .exportFunc()
- .index;
-
- // Scenario 3: Catches an exception raised from an indirect callee.
- var kFromIndirectCalleeHelper = kFromDirectCallee + 1;
- builder.addFunction("from_indirect_callee_helper", kSig_v_ii)
- .addBody([
+ // Scenario 2: Catches an exception raised from the direct callee.
+ builder.addFunction("from_direct_callee", kSig_i_i)
+ .addBody([
+ kExprTry, kWasmI32,
kExprGetLocal, 0,
- kExprI32Const, 0,
- kExprI32GtS,
- kExprIf, kWasmStmt,
- kExprGetLocal, 0,
- kExprI32Const, 1,
- kExprI32Sub,
- kExprGetLocal, 1,
- kExprI32Const, 1,
- kExprI32Sub,
- kExprCallFunction, kFromIndirectCalleeHelper,
- kExprEnd,
- kExprGetLocal, 1,
kExprCallFunction, kWasmThrowFunction,
- ]);
+ kExprUnreachable,
+ kExprCatch, except,
+ kExprEnd,
+ ])
+ .exportFunc();
- builder.addFunction("from_indirect_callee", kSig_i_i)
- .addBody([
- kExprTry, kWasmI32,
- kExprGetLocal, 0,
- kExprI32Const, 0,
- kExprCallFunction, kFromIndirectCalleeHelper,
- kExprI32Const, / *-1=* / 127,
- kExprCatch, 1,
- kExprGetLocal, 1,
- kExprEnd
- ])
- .addLocals({i32_count: 1})
- .exportFunc();
+ // Scenario 3: Catches an exception raised from an indirect callee.
+ let sig_v_i = builder.addType(kSig_v_i);
+ builder.appendToTable([kWasmThrowFunction, kWasmThrowFunction]);
+ builder.addFunction("from_indirect_callee", kSig_i_ii)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprCallIndirect, sig_v_i, kTableZero,
+ kExprUnreachable,
+ kExprCatch, except,
+ kExprEnd
+ ])
+ .exportFunc();
- // Scenario 4: Catches an exception raised in JS.
- builder.addFunction("from_js", kSig_i_i)
- .addBody([
- kExprTry, kWasmI32,
- kExprGetLocal, 0,
- kExprCallFunction, kJSThrowI,
- kExprI32Const, / *-1=* / 127,
- kExprCatch, 1,
- kExprGetLocal, 1,
- kExprEnd,
- ])
- .addLocals({i32_count: 1})
- .exportFunc();
+ // Scenario 4: Does not catch an exception raised in JS, even if primitive
+ // values are being used as exceptions.
+ builder.addFunction("i_from_js", kSig_i_i)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprGetLocal, 0,
+ kExprCallFunction, kJSThrowI,
+ kExprUnreachable,
+ kExprCatch, except,
+ kExprUnreachable,
+ kExprEnd,
+ ])
+ .exportFunc();
- // Scenario 5: Does not catch an exception raised in JS if it is not a
- // number.
- builder.addFunction("string_from_js", kSig_v_v)
- .addBody([
- kExprCallFunction, kJSThrowString
- ])
- .exportFunc();
+ builder.addFunction("string_from_js", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprCallFunction, kJSThrowString,
+ kExprCatch, except,
+ kExprUnreachable,
+ kExprEnd,
+ ])
+ .exportFunc();
- builder.addFunction("fp_from_js", kSig_v_v)
- .addBody([
- kExprCallFunction, kJSThrowFP
- ])
- .exportFunc();
+ builder.addFunction("fp_from_js", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprCallFunction, kJSThrowFP,
+ kExprCatch, except,
+ kExprUnreachable,
+ kExprEnd,
+ ])
+ .exportFunc();
- builder.addFunction("large_from_js", kSig_v_v)
- .addBody([
- kExprCallFunction, kJSThrowLarge
- ])
- .exportFunc();
+ builder.addFunction("large_from_js", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprCallFunction, kJSThrowLarge,
+ kExprCatch, except,
+ kExprUnreachable,
+ kExprEnd,
+ ])
+ .exportFunc();
- builder.addFunction("undefined_from_js", kSig_v_v)
- .addBody([
- kExprCallFunction, kJSThrowUndefined
- ])
- .exportFunc();
+ builder.addFunction("undefined_from_js", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprCallFunction, kJSThrowUndefined,
+ kExprCatch, except,
+ kExprUnreachable,
+ kExprEnd,
+ ])
+ .exportFunc();
- return builder.instantiate({"": {
+ let instance = builder.instantiate({"": {
throw_i: throw_value,
throw_string: throw_string,
throw_fp: throw_fp,
    throw_large: throw_large,
throw_undefined: throw_undefined
}});
-})();
-// Check the test_catch exists.
-assertFalse(test_catch === undefined);
-assertFalse(test_catch === null);
-assertFalse(test_catch === 0);
-assertEquals("object", typeof test_catch.exports);
-assertEquals("function", typeof test_catch.exports.same_scope);
-assertEquals("function", typeof test_catch.exports.same_scope_ignore);
-assertEquals("function", typeof test_catch.exports.same_scope_multiple);
-assertEquals("function", typeof test_catch.exports.from_direct_callee);
-assertEquals("function", typeof test_catch.exports.from_indirect_callee);
-assertEquals("function", typeof test_catch.exports.from_js);
-assertEquals("function", typeof test_catch.exports.string_from_js);
-
-assertEquals(63, test_catch.exports.same_scope(0));
-assertEquals(1024, test_catch.exports.same_scope(1024));
-assertEquals(-3, test_catch.exports.same_scope(-3));
-assertEquals(-1, test_catch.exports.same_scope_ignore(-1));
-assertEquals(1, test_catch.exports.same_scope_ignore(1));
-assertEquals(0x7FFFFFFF, test_catch.exports.same_scope_ignore(0x7FFFFFFF));
-assertEquals(1024, test_catch.exports.same_scope_ignore(1024));
-assertEquals(-1, test_catch.exports.same_scope_ignore(-1));
-assertEquals(293, test_catch.exports.same_scope_multiple(1));
-assertEquals(298, test_catch.exports.same_scope_multiple(2));
-assertEquals(338, test_catch.exports.same_scope_multiple(3));
-assertEquals(146, test_catch.exports.same_scope_multiple(0));
-assertEquals(-10024, test_catch.exports.from_direct_callee(-10024));
-assertEquals(3334333, test_catch.exports.from_direct_callee(3334333));
-assertEquals(-1, test_catch.exports.from_direct_callee(0xFFFFFFFF));
-assertEquals(0x7FFFFFFF, test_catch.exports.from_direct_callee(0x7FFFFFFF));
-assertEquals(-10, test_catch.exports.from_indirect_callee(10));
-assertEquals(-77, test_catch.exports.from_indirect_callee(77));
-assertEquals(10, test_catch.exports.from_js(10));
-assertEquals(-10, test_catch.exports.from_js(-10));
-
-assertThrowsEquals(test_catch.exports.string_from_js, "use wasm;");
-assertThrowsEquals(test_catch.exports.large_from_js, 1e+28);
-assertThrowsEquals(test_catch.exports.undefined_from_js, undefined);
-*/
+ assertEquals(63, instance.exports.same_scope(0));
+ assertEquals(1024, instance.exports.same_scope(1024));
+ assertEquals(-3, instance.exports.same_scope(-3));
+ assertEquals(-1, instance.exports.same_scope_ignore(-1));
+ assertEquals(1, instance.exports.same_scope_ignore(1));
+ assertEquals(0x7FFFFFFF, instance.exports.same_scope_ignore(0x7FFFFFFF));
+ assertEquals(1024, instance.exports.same_scope_ignore(1024));
+ assertEquals(-1, instance.exports.same_scope_ignore(-1));
+ assertEquals(293, instance.exports.same_scope_multiple(1));
+ assertEquals(298, instance.exports.same_scope_multiple(2));
+ assertEquals(338, instance.exports.same_scope_multiple(3));
+ assertEquals(146, instance.exports.same_scope_multiple(0));
+ assertEquals(-10024, instance.exports.from_direct_callee(-10024));
+ assertEquals(3334333, instance.exports.from_direct_callee(3334333));
+ assertEquals(-1, instance.exports.from_direct_callee(0xFFFFFFFF));
+ assertEquals(0x7FFFFFFF, instance.exports.from_direct_callee(0x7FFFFFFF));
+ assertEquals(10, instance.exports.from_indirect_callee(10, 0));
+ assertEquals(77, instance.exports.from_indirect_callee(77, 1));
+
+ assertThrowsEquals(() => instance.exports.i_from_js(10), 10);
+ assertThrowsEquals(() => instance.exports.i_from_js(-10), -10);
+ assertThrowsEquals(instance.exports.string_from_js, "use wasm");
+ assertThrowsEquals(instance.exports.fp_from_js, 10.5);
+ assertThrowsEquals(instance.exports.large_from_js, 1e+28);
+ assertThrowsEquals(instance.exports.undefined_from_js, undefined);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js b/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
index da6516afd7..69273d9184 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
@@ -9,7 +9,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let module = (() => {
let builder = new WasmModuleBuilder();
- builder.addMemory(1, kV8MaxPages, false);
+ builder.addMemory(1, undefined, false);
builder.addFunction("grow_memory", kSig_i_i)
.addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory.js b/deps/v8/test/mjsunit/wasm/grow-memory.js
index fc82dc6004..48e3252d08 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory.js
@@ -37,11 +37,15 @@ function genGrowMemoryBuilder() {
return builder;
}
+// V8 internal memory size limit.
+var kV8MaxPages = 32767;
+
+
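A quick check on the constant that replaces the shared kV8MaxPages: 32767 pages × 64 KiB per Wasm page = 0x7FFF0000 bytes, 64 KiB short of 2 GiB, which lines up with the testGrowMemory2Gb case near the end of this file.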
// TODO(gdeepti): Generate tests programmatically for all the sizes instead of
// current implementation.
function testGrowMemoryReadWrite32() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, kV8MaxPages, false);
+ builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset;
function peek() { return module.exports.load(offset); }
@@ -88,7 +92,7 @@ testGrowMemoryReadWrite32();
function testGrowMemoryReadWrite16() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, kV8MaxPages, false);
+ builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset;
function peek() { return module.exports.load16(offset); }
@@ -135,7 +139,7 @@ testGrowMemoryReadWrite16();
function testGrowMemoryReadWrite8() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, kV8MaxPages, false);
+ builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset;
function peek() { return module.exports.load8(offset); }
@@ -182,7 +186,7 @@ testGrowMemoryReadWrite8();
function testGrowMemoryZeroInitialSize() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(0, kV8MaxPages, false);
+ builder.addMemory(0, undefined, false);
var module = builder.instantiate();
var offset;
function peek() { return module.exports.load(offset); }
@@ -216,7 +220,7 @@ testGrowMemoryZeroInitialSize();
function testGrowMemoryZeroInitialSize32() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(0, kV8MaxPages, false);
+ builder.addMemory(0, undefined, false);
var module = builder.instantiate();
var offset;
function peek() { return module.exports.load(offset); }
@@ -242,7 +246,7 @@ testGrowMemoryZeroInitialSize32();
function testGrowMemoryZeroInitialSize16() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(0, kV8MaxPages, false);
+ builder.addMemory(0, undefined, false);
var module = builder.instantiate();
var offset;
function peek() { return module.exports.load16(offset); }
@@ -268,7 +272,7 @@ testGrowMemoryZeroInitialSize16();
function testGrowMemoryZeroInitialSize8() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(0, kV8MaxPages, false);
+ builder.addMemory(0, undefined, false);
var module = builder.instantiate();
var offset;
function peek() { return module.exports.load8(offset); }
@@ -294,7 +298,7 @@ testGrowMemoryZeroInitialSize8();
function testGrowMemoryTrapMaxPagesZeroInitialMemory() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(0, kV8MaxPages, false);
+ builder.addMemory(0, undefined, false);
var module = builder.instantiate();
function growMem(pages) { return module.exports.grow_memory(pages); }
assertEquals(-1, growMem(kV8MaxPages + 1));
@@ -314,7 +318,7 @@ testGrowMemoryTrapMaxPages();
function testGrowMemoryTrapsWithNonSmiInput() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(0, kV8MaxPages, false);
+ builder.addMemory(0, undefined, false);
var module = builder.instantiate();
function growMem(pages) { return module.exports.grow_memory(pages); }
// The parameter of grow_memory is unsigned. Therefore -1 stands for
@@ -326,7 +330,7 @@ testGrowMemoryTrapsWithNonSmiInput();
function testGrowMemoryCurrentMemory() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, kV8MaxPages, false);
+ builder.addMemory(1, undefined, false);
builder.addFunction("memory_size", kSig_i_v)
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
@@ -342,7 +346,7 @@ testGrowMemoryCurrentMemory();
function testGrowMemoryPreservesDataMemOp32() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, kV8MaxPages, false);
+ builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset, val;
function peek() { return module.exports.load(offset); }
@@ -365,7 +369,7 @@ testGrowMemoryPreservesDataMemOp32();
function testGrowMemoryPreservesDataMemOp16() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, kV8MaxPages, false);
+ builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset, val;
function peek() { return module.exports.load16(offset); }
@@ -388,7 +392,7 @@ testGrowMemoryPreservesDataMemOp16();
function testGrowMemoryPreservesDataMemOp8() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, kV8MaxPages, false);
+ builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset, val = 0;
function peek() { return module.exports.load8(offset); }
@@ -415,7 +419,7 @@ testGrowMemoryPreservesDataMemOp8();
function testGrowMemoryOutOfBoundsOffset() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, kV8MaxPages, false);
+ builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset, val;
function peek() { return module.exports.load(offset); }
@@ -492,7 +496,7 @@ testGrowMemoryDeclaredSpecMaxTraps();
function testGrowMemory2Gb() {
print("testGrowMemory2Gb");
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, kV8MaxPages, false);
+ builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset, val;
function peek() { return module.exports.load(offset); }
diff --git a/deps/v8/test/mjsunit/wasm/import-memory.js b/deps/v8/test/mjsunit/wasm/import-memory.js
index df4f0d3fe8..ca91c698c8 100644
--- a/deps/v8/test/mjsunit/wasm/import-memory.js
+++ b/deps/v8/test/mjsunit/wasm/import-memory.js
@@ -7,6 +7,9 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
+// V8 internal memory size limit.
+var kV8MaxPages = 32767;
+
(function TestOne() {
print("TestOne");
let memory = new WebAssembly.Memory({initial: 1});
diff --git a/deps/v8/test/mjsunit/wasm/import-mutable-global.js b/deps/v8/test/mjsunit/wasm/import-mutable-global.js
index 4985d99211..46b9ca5f5a 100644
--- a/deps/v8/test/mjsunit/wasm/import-mutable-global.js
+++ b/deps/v8/test/mjsunit/wasm/import-mutable-global.js
@@ -195,7 +195,7 @@ function addGlobalGetterAndSetter(builder, index, name, type) {
(function TestImportedAndNonImportedMutableGlobal() {
let global = new WebAssembly.Global({value: 'i32', mutable: true}, 1);
let builder = new WasmModuleBuilder();
- builder.addGlobal(kWasmI32, true).exportAs('i32');
builder.addImportedGlobal("mod", "g", kWasmI32, true);
+ builder.addGlobal(kWasmI32, true).exportAs('i32');
builder.instantiate({mod: {g: global}});
})();
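This swap, like the identical ones in table.js below, is forced by the stricter wasm-module-builder checks later in this diff: imports must now be declared before locally-defined functions and globals, so the import call has to come first.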
diff --git a/deps/v8/test/mjsunit/wasm/table.js b/deps/v8/test/mjsunit/wasm/table.js
index 4724561670..32bdecad66 100644
--- a/deps/v8/test/mjsunit/wasm/table.js
+++ b/deps/v8/test/mjsunit/wasm/table.js
@@ -159,8 +159,8 @@ function assertTableIsValid(table, length) {
(function TestSet() {
let builder = new WasmModuleBuilder;
- builder.addExport("wasm", builder.addFunction("", kSig_v_v).addBody([]));
builder.addExport("host", builder.addImport("test", "f", kSig_v_v));
+ builder.addExport("wasm", builder.addFunction("", kSig_v_v).addBody([]));
let {wasm, host} = builder.instantiate({test: {f() {}}}).exports;
let table = new WebAssembly.Table({element: "anyfunc", initial: 10});
@@ -207,8 +207,8 @@ function assertTableIsValid(table, length) {
(function TestIndexing() {
let builder = new WasmModuleBuilder;
- builder.addExport("wasm", builder.addFunction("", kSig_v_v).addBody([]));
builder.addExport("host", builder.addImport("test", "f", kSig_v_v));
+ builder.addExport("wasm", builder.addFunction("", kSig_v_v).addBody([]));
let {wasm, host} = builder.instantiate({test: {f() {}}}).exports;
let table = new WebAssembly.Table({element: "anyfunc", initial: 10});
@@ -230,8 +230,8 @@ function assertTableIsValid(table, length) {
(function TestGrow() {
let builder = new WasmModuleBuilder;
- builder.addExport("wasm", builder.addFunction("", kSig_v_v).addBody([]));
builder.addExport("host", builder.addImport("test", "f", kSig_v_v));
+ builder.addExport("wasm", builder.addFunction("", kSig_v_v).addBody([]));
let {wasm, host} = builder.instantiate({test: {f() {}}}).exports;
function init(table) {
diff --git a/deps/v8/test/mjsunit/wasm/trap-handler-fallback.js b/deps/v8/test/mjsunit/wasm/trap-handler-fallback.js
index 26296696c5..465ca449cc 100644
--- a/deps/v8/test/mjsunit/wasm/trap-handler-fallback.js
+++ b/deps/v8/test/mjsunit/wasm/trap-handler-fallback.js
@@ -33,17 +33,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
// space per isolate (see kAddressSpaceLimit in wasm-memory.cc), which allows
// up to 128 fast memories. As long as we create more than that, we should
// trigger the fallback behavior.
- for (var i = 0; i < 135; i++) {
+ for (var i = 0; i < 135 && !fallback_occurred; i++) {
memory = new WebAssembly.Memory({initial: 1});
instance = builder.instantiate({mod: {imported_mem: memory}});
instances.push(instance);
assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
- fallback_occurred = fallback_occurred || !%WasmMemoryHasFullGuardRegion(memory);
- if (fallback_occurred) {
- break;
- }
+ fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
}
assertTrue(fallback_occurred);
})();
@@ -63,17 +60,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
// space per isolate (see kAddressSpaceLimit in wasm-memory.cc), which allows
// up to 128 fast memories. As long as we create more than that, we should
// trigger the fallback behavior.
- for (var i = 0; i < 135; i++) {
+ for (var i = 0; i < 135 && !fallback_occurred; i++) {
memory = new WebAssembly.Memory({initial: 1});
instance = builder.instantiate({mod: {imported_mem: memory}});
instances.push(instance);
assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
- fallback_occurred = fallback_occurred || !%WasmMemoryHasFullGuardRegion(memory);
- if (fallback_occurred) {
- break;
- }
+ fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
}
assertTrue(fallback_occurred);
})();
@@ -132,17 +126,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
// up to 128 fast memories. As long as we create more than that, we should
// trigger the fallback behavior.
const module = builder.toModule();
- for (var i = 0; i < 135; i++) {
+ for (var i = 0; i < 135 && !fallback_occurred; i++) {
memory = new WebAssembly.Memory({initial: 1});
instance = new WebAssembly.Instance(module, {mod: {imported_mem: memory}});
instances.push(instance);
assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
- fallback_occurred = fallback_occurred || !%WasmMemoryHasFullGuardRegion(memory);
- if (fallback_occurred) {
- break;
- }
+ fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
}
assertTrue(fallback_occurred);
})();
diff --git a/deps/v8/test/mjsunit/wasm/unicode.js b/deps/v8/test/mjsunit/wasm/unicode.js
index 49c95da030..7e29c00f33 100644
--- a/deps/v8/test/mjsunit/wasm/unicode.js
+++ b/deps/v8/test/mjsunit/wasm/unicode.js
@@ -52,7 +52,7 @@ checkExports('☺☺mul☺☺', '☺☺mul☺☺', '☺☺add☺☺', '☺☺add
builder.addFunction('three snowmen: ☃☃☃', kSig_i_v).addBody([]).exportFunc();
assertThrows(
() => builder.instantiate(), WebAssembly.CompileError,
- /Compiling wasm function #0:three snowmen: ☃☃☃ failed: /);
+ /Compiling wasm function "three snowmen: ☃☃☃" failed: /);
})();
(function errorMessageUnicodeInImportModuleName() {
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index f5aead9fb6..cc10e9953c 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -15,9 +15,6 @@ function bytes() {
return buffer;
}
-// V8 internal constants
-var kV8MaxPages = 32767;
-
// Header declaration constants
var kWasmH0 = 0;
var kWasmH1 = 0x61;
@@ -94,11 +91,13 @@ let kWasmF32 = 0x7d;
let kWasmF64 = 0x7c;
let kWasmS128 = 0x7b;
let kWasmAnyRef = 0x6f;
+let kWasmExceptRef = 0x68;
let kExternalFunction = 0;
let kExternalTable = 1;
let kExternalMemory = 2;
let kExternalGlobal = 3;
+let kExternalException = 4;
let kTableZero = 0;
let kMemoryZero = 0;
let kExprI32AtomicCompareExchange = 0x48;
let kExprI32AtomicCompareExchange8U = 0x4a
let kExprI32AtomicCompareExchange16U = 0x4b
+let kExprI64AtomicLoad = 0x11;
+let kExprI64AtomicLoad8U = 0x14;
+let kExprI64AtomicLoad16U = 0x15;
+let kExprI64AtomicLoad32U = 0x16;
+let kExprI64AtomicStore = 0x18;
+let kExprI64AtomicStore8U = 0x1b;
+let kExprI64AtomicStore16U = 0x1c;
+let kExprI64AtomicStore32U = 0x1d;
+let kExprI64AtomicAdd = 0x1f;
+let kExprI64AtomicAdd8U = 0x22;
+let kExprI64AtomicAdd16U = 0x23;
+let kExprI64AtomicAdd32U = 0x24;
+let kExprI64AtomicSub = 0x26;
+let kExprI64AtomicSub8U = 0x29;
+let kExprI64AtomicSub16U = 0x2a;
+let kExprI64AtomicSub32U = 0x2b;
+let kExprI64AtomicAnd = 0x2d;
+let kExprI64AtomicAnd8U = 0x30;
+let kExprI64AtomicAnd16U = 0x31;
+let kExprI64AtomicAnd32U = 0x32;
+let kExprI64AtomicOr = 0x34;
+let kExprI64AtomicOr8U = 0x37;
+let kExprI64AtomicOr16U = 0x38;
+let kExprI64AtomicOr32U = 0x39;
+let kExprI64AtomicXor = 0x3b;
+let kExprI64AtomicXor8U = 0x3e;
+let kExprI64AtomicXor16U = 0x3f;
+let kExprI64AtomicXor32U = 0x40;
+let kExprI64AtomicExchange = 0x42;
+let kExprI64AtomicExchange8U = 0x45;
+let kExprI64AtomicExchange16U = 0x46;
+let kExprI64AtomicExchange32U = 0x47;
+let kExprI64AtomicCompareExchange = 0x49;
+let kExprI64AtomicCompareExchange8U = 0x4c;
+let kExprI64AtomicCompareExchange16U = 0x4d;
+let kExprI64AtomicCompareExchange32U = 0x4e;
+
let kTrapUnreachable = 0;
let kTrapMemOutOfBounds = 1;
let kTrapDivByZero = 2;
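The I64 atomic constants added above are opcode bytes under the atomics prefix. A usage sketch, assuming wasm-constants.js and wasm-module-builder.js are loaded (kAtomicPrefix and makeSig are already defined there); the two trailing immediates are the memarg, with alignment given as log2 of the natural 8-byte alignment:

  let builder = new WasmModuleBuilder();
  builder.addMemory(1, 1, false);  // NOTE: actually running i64 atomics also
                                   // needs a *shared* memory and the
                                   // --experimental-wasm-threads flag.
  builder.addFunction("add64", makeSig([kWasmI32, kWasmI64], [kWasmI64]))
      .addBody([
        kExprGetLocal, 0,                        // address
        kExprGetLocal, 1,                        // operand
        kAtomicPrefix, kExprI64AtomicAdd, 3, 0,  // align=2^3, offset=0
      ])
      .exportFunc();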
@@ -413,29 +449,6 @@ function assertTraps(trap, code) {
throw new MjsUnitAssertionError('Did not trap, expected: ' + kTrapMsgs[trap]);
}
-function assertWasmThrows(runtime_id, values, code) {
- try {
- if (typeof code === 'function') {
- code();
- } else {
- eval(code);
- }
- } catch (e) {
- assertTrue(e instanceof WebAssembly.RuntimeError);
- var e_runtime_id = e['WasmExceptionRuntimeId'];
- assertEquals(e_runtime_id, runtime_id);
- assertTrue(Number.isInteger(e_runtime_id));
- var e_values = e['WasmExceptionValues'];
- assertEquals(values.length, e_values.length);
- for (i = 0; i < values.length; ++i) {
- assertEquals(values[i], e_values[i]);
- }
- // Success.
- return;
- }
- throw new MjsUnitAssertionError('Did not throw expected: ' + runtime_id + values);
-}
-
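The removed helper is superseded by the instance-aware assertWasmThrows(instance, except, values, code) used throughout the updated exception tests; the replacement lives in mjsunit.js rather than this file. A plausible reconstruction under that assumption; the %-natives named here are a guess, not something shown in this diff, and would require --allow-natives-syntax:

  function assertWasmThrows(instance, except, values, code) {
    try {
      if (typeof code === 'function') {
        code();
      } else {
        eval(code);
      }
    } catch (e) {
      assertInstanceof(e, WebAssembly.RuntimeError);
      // Hypothetical natives: resolve the thrown tag against the given
      // instance and exception index, then compare the 16-bit payload words.
      assertEquals(except, %GetWasmExceptionId(e, instance));
      assertArrayEquals(values, %GetWasmExceptionValues(e));
      return;  // Success.
    }
    throw new MjsUnitAssertionError('Did not throw expected exception');
  }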
function wasmI32Const(val) {
let bytes = [kExprI32Const];
for (let i = 0; i < 4; ++i) {
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 98a3c6d4e0..38b4a0e308 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -179,6 +179,7 @@ class WasmModuleBuilder {
this.explicit = [];
this.num_imported_funcs = 0;
this.num_imported_globals = 0;
+ this.num_imported_exceptions = 0;
return this;
}
@@ -228,10 +229,12 @@ class WasmModuleBuilder {
}
addException(type) {
- if (type.results.length != 0)
- throw new Error('Invalid exception signature: ' + type);
+ if (type.results.length != 0) {
+ throw new Error('Exception signature must have void result: ' + type);
+ }
+ let except_index = this.exceptions.length + this.num_imported_exceptions;
this.exceptions.push(type);
- return this.exceptions.length - 1;
+ return except_index;
}
addFunction(name, type) {
@@ -243,6 +246,9 @@ class WasmModuleBuilder {
}
addImport(module = "", name, type) {
+ if (this.functions.length != 0) {
+ throw new Error('Imported functions must be declared before local ones');
+ }
let type_index = (typeof type) == "number" ? type : this.addType(type);
this.imports.push({module: module, name: name, kind: kExternalFunction,
type: type_index});
@@ -250,6 +256,9 @@ class WasmModuleBuilder {
}
addImportedGlobal(module = "", name, type, mutable = false) {
+ if (this.globals.length != 0) {
+ throw new Error('Imported globals must be declared before local ones');
+ }
let o = {module: module, name: name, kind: kExternalGlobal, type: type,
mutable: mutable};
this.imports.push(o);
@@ -269,6 +278,18 @@ class WasmModuleBuilder {
this.imports.push(o);
}
+ addImportedException(module = "", name, type) {
+ if (type.results.length != 0) {
+ throw new Error('Exception signature must have void result: ' + type);
+ }
+ if (this.exceptions.length != 0) {
+ throw new Error('Imported exceptions must be declared before local ones');
+ }
+ let o = {module: module, name: name, kind: kExternalException, type: type};
+ this.imports.push(o);
+ return this.num_imported_exceptions++;
+ }
+
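A short sketch of the ordering contract the new checks enforce; exception indices count imports first:

  let builder = new WasmModuleBuilder();
  let imported = builder.addImportedException("mod", "ex", kSig_v_i);  // 0
  let local = builder.addException(kSig_v_i);                          // 1
  // Reversing the two calls above now throws:
  // 'Imported exceptions must be declared before local ones'.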
addExport(name, index) {
this.exports.push({name: name, kind: kExternalFunction, index: index});
return this;
@@ -378,6 +399,11 @@ class WasmModuleBuilder {
section.emit_u8(has_max ? 1 : 0); // flags
section.emit_u32v(imp.initial); // initial
if (has_max) section.emit_u32v(imp.maximum); // maximum
+ } else if (imp.kind == kExternalException) {
+ section.emit_u32v(imp.type.params.length);
+ for (let param of imp.type.params) {
+ section.emit_u8(param);
+ }
} else {
throw new Error("unknown/unsupported import kind " + imp.kind);
}
@@ -478,6 +504,20 @@ class WasmModuleBuilder {
});
}
+ // Add exceptions.
+ if (wasm.exceptions.length > 0) {
+ if (debug) print("emitting exceptions @ " + binary.length);
+ binary.emit_section(kExceptionSectionCode, section => {
+ section.emit_u32v(wasm.exceptions.length);
+ for (let type of wasm.exceptions) {
+ section.emit_u32v(type.params.length);
+ for (let param of type.params) {
+ section.emit_u8(param);
+ }
+ }
+ });
+ }
+
// Add export table.
var mem_export = (wasm.memory !== undefined && wasm.memory.exp);
var exports_count = wasm.exports.length + (mem_export ? 1 : 0);
@@ -530,20 +570,6 @@ class WasmModuleBuilder {
});
}
- // Add exceptions.
- if (wasm.exceptions.length > 0) {
- if (debug) print("emitting exceptions @ " + binary.length);
- binary.emit_section(kExceptionSectionCode, section => {
- section.emit_u32v(wasm.exceptions.length);
- for (let type of wasm.exceptions) {
- section.emit_u32v(type.params.length);
- for (let param of type.params) {
- section.emit_u8(param);
- }
- }
- });
- }
-
// Add function bodies.
if (wasm.functions.length > 0) {
// emit function bodies
@@ -569,6 +595,12 @@ class WasmModuleBuilder {
if (l.s128_count > 0) {
local_decls.push({count: l.s128_count, type: kWasmS128});
}
+ if (l.anyref_count > 0) {
+ local_decls.push({count: l.anyref_count, type: kWasmAnyRef});
+ }
+ if (l.except_count > 0) {
+ local_decls.push({count: l.except_count, type: kWasmExceptRef});
+ }
}
let header = new Binary;
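With the two new cases above, reference-typed locals are declared through the same addLocals map as the numeric kinds (anyref locals need --experimental-wasm-anyref, except_ref ones --experimental-wasm-eh):

  let builder = new WasmModuleBuilder();
  builder.addFunction("ref_locals", kSig_v_v)
      .addBody([])
      .addLocals({anyref_count: 1, except_count: 2});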
diff --git a/deps/v8/test/mjsunit/wasm/worker-interpreter.js b/deps/v8/test/mjsunit/wasm/worker-interpreter.js
index d730ed7a74..9bc1e1e11c 100644
--- a/deps/v8/test/mjsunit/wasm/worker-interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/worker-interpreter.js
@@ -34,7 +34,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
}
}
`;
- let worker = new Worker(workerScript);
+ let worker = new Worker(workerScript, {type: 'string'});
// Call method without using the interpreter.
var initial_interpreted = %WasmNumInterpretedCalls(instance);
diff --git a/deps/v8/test/mjsunit/wasm/worker-memory.js b/deps/v8/test/mjsunit/wasm/worker-memory.js
index e2a8cf8857..c5b99ede7e 100644
--- a/deps/v8/test/mjsunit/wasm/worker-memory.js
+++ b/deps/v8/test/mjsunit/wasm/worker-memory.js
@@ -5,7 +5,7 @@
// Flags: --experimental-wasm-threads
(function TestPostMessageUnsharedMemory() {
- let worker = new Worker('');
+ let worker = new Worker('', {type: 'string'});
let memory = new WebAssembly.Memory({initial: 1, maximum: 2});
assertThrows(() => worker.postMessage(memory), Error);
@@ -39,7 +39,7 @@ let workerHelpers =
postMessage("OK");
};`;
- let worker = new Worker(workerScript);
+ let worker = new Worker(workerScript, {type: 'string'});
let memory = new WebAssembly.Memory({initial: 1, maximum: 2, shared: true});
worker.postMessage(memory);
assertEquals("OK", worker.getMessage());
@@ -60,7 +60,7 @@ let workerHelpers =
postMessage("OK");
};`;
- let worker = new Worker(workerScript);
+ let worker = new Worker(workerScript, {type: 'string'});
let memory = new WebAssembly.Memory({initial: 1, maximum: 2, shared: true});
let obj = {memories: [memory, memory], buffer: memory.buffer, foo: 1};
worker.postMessage(obj);
@@ -75,7 +75,8 @@ let workerHelpers =
postMessage("OK");
};`;
- let workers = [new Worker(workerScript), new Worker(workerScript)];
+ let workers = [new Worker(workerScript, {type: 'string'}),
+ new Worker(workerScript, {type: 'string'})];
let memory = new WebAssembly.Memory({initial: 1, maximum: 2, shared: true});
for (let worker of workers) {
worker.postMessage(memory);
diff --git a/deps/v8/test/mjsunit/wasm/worker-module.js b/deps/v8/test/mjsunit/wasm/worker-module.js
index 72645f8dbf..b60b19571b 100644
--- a/deps/v8/test/mjsunit/wasm/worker-module.js
+++ b/deps/v8/test/mjsunit/wasm/worker-module.js
@@ -27,7 +27,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
}
`;
- let worker = new Worker(workerScript);
+ let worker = new Worker(workerScript, {type: 'string'});
worker.postMessage(module);
assertEquals(42, worker.getMessage());
worker.terminate();
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index 713e952378..387d064974 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -41,36 +41,28 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
void Free(void* p, size_t) override {}
};
-#define RO_ROOT_LIST_CASE(type, name, camel_name) \
- if (n == NULL && o == roots.name()) n = #camel_name;
-#define ROOT_LIST_CASE(type, name, camel_name) \
- if (n == NULL && o == space->heap()->name()) n = #camel_name;
-#define STRUCT_LIST_CASE(upper_name, camel_name, name) \
- if (n == NULL && o == roots.name##_map()) n = #camel_name "Map";
-#define ALLOCATION_SITE_LIST_CASE(upper_name, camel_name, size, name) \
- if (n == NULL && o == roots.name##_map()) n = #camel_name "Map";
+#define RO_ROOT_LIST_CASE(type, name, CamelName) \
+ if (n == NULL && o == roots.name()) n = #CamelName;
+#define MUTABLE_ROOT_LIST_CASE(type, name, CamelName) \
+ if (n == NULL && o == space->heap()->name()) n = #CamelName;
static void DumpMaps(i::PagedSpace* space) {
i::HeapObjectIterator it(space);
i::ReadOnlyRoots roots(space->heap());
- for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
+ for (i::Object* o = it.Next(); o != nullptr; o = it.Next()) {
if (!o->IsMap()) continue;
i::Map* m = i::Map::cast(o);
- const char* n = NULL;
+ const char* n = nullptr;
intptr_t p = reinterpret_cast<intptr_t>(m) & 0x7FFFF;
int t = m->instance_type();
- STRONG_READ_ONLY_ROOT_LIST(RO_ROOT_LIST_CASE)
- MUTABLE_ROOT_LIST(ROOT_LIST_CASE)
- STRUCT_LIST(STRUCT_LIST_CASE)
- ALLOCATION_SITE_LIST(ALLOCATION_SITE_LIST_CASE)
- if (n == NULL) continue;
+ READ_ONLY_ROOT_LIST(RO_ROOT_LIST_CASE)
+ MUTABLE_ROOT_LIST(MUTABLE_ROOT_LIST_CASE)
+ if (n == nullptr) continue;
const char* sname = space->name();
i::PrintF(" (\"%s\", 0x%05" V8PRIxPTR "): (%d, \"%s\"),\n", sname, p, t,
n);
}
}
-#undef ALLOCATION_SITE_LIST_CASE
-#undef STRUCT_LIST_CASE
-#undef ROOT_LIST_CASE
+#undef MUTABLE_ROOT_LIST_CASE
#undef RO_ROOT_LIST_CASE
static int DumpHeapConstants(const char* argv0) {
@@ -103,33 +95,33 @@ static int DumpHeapConstants(const char* argv0) {
// Dump the KNOWN_OBJECTS table to the console.
i::PrintF("\n# List of known V8 objects.\n");
-#define RO_ROOT_LIST_CASE(type, name, camel_name) \
- if (n == NULL && o == roots.name()) { \
- n = #camel_name; \
- i = i::Heap::k##camel_name##RootIndex; \
+#define RO_ROOT_LIST_CASE(type, name, CamelName) \
+ if (n == NULL && o == roots.name()) { \
+ n = #CamelName; \
+ i = i::RootIndex::k##CamelName; \
}
-#define ROOT_LIST_CASE(type, name, camel_name) \
- if (n == NULL && o == heap->name()) { \
- n = #camel_name; \
- i = i::Heap::k##camel_name##RootIndex; \
+#define ROOT_LIST_CASE(type, name, CamelName) \
+ if (n == NULL && o == heap->name()) { \
+ n = #CamelName; \
+ i = i::RootIndex::k##CamelName; \
}
i::PagedSpaces spit(heap, i::PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
i::PrintF("KNOWN_OBJECTS = {\n");
- for (i::PagedSpace* s = spit.next(); s != NULL; s = spit.next()) {
+ for (i::PagedSpace* s = spit.next(); s != nullptr; s = spit.next()) {
i::HeapObjectIterator it(s);
// Code objects are generally platform-dependent.
if (s->identity() == i::CODE_SPACE || s->identity() == i::MAP_SPACE)
continue;
const char* sname = s->name();
- for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
+ for (i::Object* o = it.Next(); o != nullptr; o = it.Next()) {
// Skip maps in RO_SPACE since they will be reported elsewhere.
if (o->IsMap()) continue;
- const char* n = NULL;
- i::Heap::RootListIndex i = i::Heap::kStrongRootListLength;
+ const char* n = nullptr;
+ i::RootIndex i = i::RootIndex::kFirstSmiRoot;
intptr_t p = reinterpret_cast<intptr_t>(o) & 0x7FFFF;
STRONG_READ_ONLY_ROOT_LIST(RO_ROOT_LIST_CASE)
MUTABLE_ROOT_LIST(ROOT_LIST_CASE)
- if (n == NULL) continue;
+ if (n == nullptr) continue;
if (!i::Heap::RootIsImmortalImmovable(i)) continue;
i::PrintF(" (\"%s\", 0x%05" V8PRIxPTR "): \"%s\",\n", sname, p, n);
}
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 8e7a4a6490..c7dd68db3b 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -158,6 +158,11 @@
'js1_5/Regress/regress-462292': [SKIP],
'js1_5/decompilation/regress-443071-01': [SKIP],
+  # This test checks that 'unshift' doesn't cause an OOM. Since the range error
+ # will get thrown at the end of the operation, this test runs for a long time.
+ # https://crbug.com/v8/8120
+ 'ecma_3/Array/regress-322135-04': [SKIP],
+
##################### SLOW TESTS #####################
# Compiles a long chain of && or || operations, can time out under slower
@@ -184,6 +189,11 @@
# characters. This takes a long time to run (~32 seconds).
'js1_5/GC/regress-348532': [SKIP],
+ # Takes a really long time to run, creating an Array of length
+ # 2^32 - 1. Related to removal of "sparse" array support for
+ # splice and friends:
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8131.
+ 'ecma_3/Array/regress-322135-03': [SKIP],
# Runs for too long: huge array with getters and setters. As it says
# in the test: "This test will probably run out of memory".
@@ -1013,6 +1023,11 @@
'js1_5/extensions/regress-355497': [FAIL_OK, '--sim-stack-size=512'],
}], # 'arch == arm64 and simulator_run'
+['system == android', {
+ # https://crbug.com/v8/8146
+ 'ecma/Array/15.4.4.5-3': [FAIL],
+}], # 'system == android'
+
['tsan', {
# https://crbug.com/v8/7632
'ecma_3/RegExp/regress-85721': [SKIP],
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index 52ba9dcbcb..3727fccef3 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -85,7 +85,7 @@ class TestSuite(testsuite.TestSuite):
return TestCase
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def _get_files_params(self):
files = [os.path.join(self.suite.root, "mozilla-shell-emulation.js")]
testfilename = self.path + ".js"
diff --git a/deps/v8/test/preparser/testcfg.py b/deps/v8/test/preparser/testcfg.py
index 0ffde419d4..11e6135444 100644
--- a/deps/v8/test/preparser/testcfg.py
+++ b/deps/v8/test/preparser/testcfg.py
@@ -81,7 +81,7 @@ class TestSuite(testsuite.TestSuite):
return VariantsGenerator
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def __init__(self, suite, path, name, test_config, source, template_flags):
super(TestCase, self).__init__(suite, path, name, test_config)
diff --git a/deps/v8/test/test262/harness-agent.js b/deps/v8/test/test262/harness-agent.js
index 83f688cf07..254df2469f 100644
--- a/deps/v8/test/test262/harness-agent.js
+++ b/deps/v8/test/test262/harness-agent.js
@@ -76,7 +76,7 @@ var agent = {
if (i32a === null) {
i32a = new Int32Array(new SharedArrayBuffer(256));
}
- var w = new Worker(workerScript(script));
+ var w = new Worker(workerScript(script), {type: 'string'});
w.index = workers.length;
w.postMessage({kind: 'start', i32a: i32a, index: w.index});
workers.push(w);
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index c581a9806c..4210263d40 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -178,15 +178,148 @@
'language/expressions/assignment/destructuring/keyed-destructuring-property-reference-target-evaluation-order': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=896
- 'built-ins/RegExp/property-escapes/binary-properties-with-value': [FAIL],
- 'built-ins/RegExp/property-escapes/character-class': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extensions': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-properties-without-value': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-properties': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-values': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-properties': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-properties': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_F': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_F-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Invalid': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Invalid-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_N': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_N-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_No': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_No-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_T': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_T-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Y': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Y-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Yes': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Yes-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/character-class-range-end': [FAIL],
+ 'built-ins/RegExp/property-escapes/character-class-range-no-dash-end': [FAIL],
+ 'built-ins/RegExp/property-escapes/character-class-range-no-dash-start': [FAIL],
+ 'built-ins/RegExp/property-escapes/character-class-range-start': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Block-implicit': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Block-implicit-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-implicit': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-implicit-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-Is-prefix-Script': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-Is-prefix-Script-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-circumflex-negation': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-circumflex-negation-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-empty': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-empty-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-invalid': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-invalid-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-no-braces': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-value': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-value-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator-and-value-only': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator-and-value-only-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator-only': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator-only-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-unclosed': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-unclosed-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-unopened': [FAIL],
+ 'built-ins/RegExp/property-escapes/grammar-extension-unopened-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-01': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-01-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-02': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-02-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-03': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-03-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-04': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-04-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-05': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-05-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-06': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-06-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-07': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-07-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-08': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-08-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-09': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-09-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-10': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-10-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-11': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-11-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-12': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-12-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-13': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-13-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-14': [FAIL],
+ 'built-ins/RegExp/property-escapes/loose-matching-14-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-equals': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-equals-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-equals': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-equals-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-equals': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-equals-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-binary-property': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-binary-property-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-property-and-value': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-property-and-value-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-property-existing-value': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-property-existing-value-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-General_Category-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-Script': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-Script-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-Script_Extensions': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-Script_Extensions-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-general-category': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Composition_Exclusion': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Composition_Exclusion-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFC': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFC-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFD': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFD-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKC': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKC-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKD': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKD-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-FC_NFKC_Closure': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-FC_NFKC_Closure-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Full_Composition_Exclusion': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Full_Composition_Exclusion-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Grapheme_Link': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Grapheme_Link-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Hyphen': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Hyphen-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Alphabetic': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Alphabetic-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Default_Ignorable_Code_Point': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Default_Ignorable_Code_Point-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Grapheme_Extend': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Grapheme_Extend-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Continue': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Continue-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Start': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Start-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Lowercase': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Lowercase-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Math': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Math-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Uppercase': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Uppercase-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Prepended_Concatenation_Mark': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Prepended_Concatenation_Mark-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Block-with-value': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Block-with-value-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-property-FC_NFKC_Closure': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-property-FC_NFKC_Closure-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-negated': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-with-value': [FAIL],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-with-value-negated': [FAIL],
'language/literals/regexp/early-err-pattern': [FAIL],
'language/literals/regexp/invalid-braced-quantifier-exact': [FAIL],
'language/literals/regexp/invalid-braced-quantifier-lower': [FAIL],
@@ -231,9 +364,6 @@
'language/global-code/script-decl-func-err-non-configurable': [FAIL],
'language/global-code/script-decl-var-collision': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4958
- 'built-ins/Function/prototype/toString/*': ['--harmony-function-tostring'],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=5116
'built-ins/TypedArray/prototype/fill/fill-values-conversion-operations-consistent-nan': [PASS, FAIL],
@@ -313,9 +443,6 @@
'annexB/language/eval-code/direct/func-switch-case-eval-func-no-skip-try': [FAIL],
'annexB/language/eval-code/direct/func-switch-dflt-eval-func-no-skip-try': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5537
- 'built-ins/global/*': [SKIP],
-
# PreParser doesn't produce early errors
# https://bugs.chromium.org/p/v8/issues/detail?id=2728
'language/expressions/async-arrow-function/early-errors-arrow-formals-body-duplicate': [FAIL],
@@ -371,18 +498,9 @@
'built-ins/TypedArray/prototype/set/typedarray-arg-set-values-diff-buffer-same-type-sab': ['--harmony-sharedarraybuffer'],
'built-ins/TypedArray/prototype/set/typedarray-arg-set-values-same-buffer-same-type-sab': ['--harmony-sharedarraybuffer'],
- # 64-bit Atomics are not implemented yet.
- 'built-ins/Atomics/wake/bad-range': ['--noharmony-bigint'],
- 'built-ins/Atomics/xor/bad-range': ['--noharmony-bigint'],
- 'built-ins/Atomics/load/bad-range': ['--noharmony-bigint'],
- 'built-ins/Atomics/compareExchange/bad-range': ['--noharmony-bigint'],
- 'built-ins/Atomics/wait/bad-range': ['--noharmony-bigint'],
- 'built-ins/Atomics/exchange/bad-range': ['--noharmony-bigint'],
- 'built-ins/Atomics/sub/bad-range': ['--noharmony-bigint'],
- 'built-ins/Atomics/store/bad-range': ['--noharmony-bigint'],
- 'built-ins/Atomics/or/bad-range': ['--noharmony-bigint'],
- 'built-ins/Atomics/and/bad-range': ['--noharmony-bigint'],
- 'built-ins/Atomics/add/bad-range': ['--noharmony-bigint'],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8100
+ 'built-ins/Atomics/notify/bigint/*': [SKIP],
+ 'built-ins/Atomics/wait/bigint/*': [SKIP],
# https://bugs.chromium.org/p/v8/issues/detail?id=6049
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-caller': [FAIL_SLOPPY],
@@ -432,6 +550,9 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7669
'intl402/Intl/getCanonicalLocales/canonicalized-tags': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8051
+ 'intl402/Collator/unicode-ext-seq-in-private-tag': [FAIL],
+
# Tests assume that the sort order of "same elements" (comparator returns 0)
# is deterministic.
# https://crbug.com/v8/7808
@@ -439,43 +560,43 @@
'intl402/Collator/prototype/compare/bound-to-collator-instance': [SKIP],
'intl402/Collator/ignore-invalid-unicode-ext-values': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7684
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8260
+ 'intl402/Locale/constructor-non-iana-canon': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8261
+ 'intl402/Locale/constructor-options-language-valid': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8262
+ 'intl402/Locale/constructor-parse-twice': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8246
+ 'intl402/Locale/constructor-tag': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8244
'intl402/Locale/constructor-getter-order': [FAIL],
'intl402/Locale/constructor-locale-object': [FAIL],
- 'intl402/Locale/constructor-non-iana-canon': [FAIL],
'intl402/Locale/constructor-options-language-grandfathered': [FAIL],
'intl402/Locale/constructor-options-language-invalid': [FAIL],
- 'intl402/Locale/constructor-options-language-valid': [FAIL],
'intl402/Locale/constructor-options-region-invalid': [FAIL],
'intl402/Locale/constructor-options-region-valid': [FAIL],
'intl402/Locale/constructor-options-script-invalid': [FAIL],
'intl402/Locale/constructor-options-script-valid': [FAIL],
- 'intl402/Locale/constructor-parse-twice': [FAIL],
- 'intl402/Locale/constructor-tag': [FAIL],
- 'intl402/Locale/constructor-unicode-ext-invalid': [FAIL],
- 'intl402/Locale/constructor-unicode-ext-valid': [FAIL],
- 'intl402/Locale/extensions-grandfathered': [FAIL],
- 'intl402/Locale/extensions-private': [FAIL],
'intl402/Locale/getters': [FAIL],
- 'intl402/Locale/getters-grandfathered': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8243
+ 'intl402/Locale/extensions-private': [FAIL],
'intl402/Locale/getters-privateuse': [FAIL],
- 'intl402/Locale/invalid-tag-throws': [FAIL],
- 'intl402/Locale/likely-subtags': [FAIL],
- 'intl402/Locale/likely-subtags-grandfathered': [FAIL],
- 'intl402/Locale/prototype/toStringTag/toStringTag': [FAIL],
- 'intl402/Locale/prototype/toStringTag/toString': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7869
- 'intl402/RelativeTimeFormat/constructor/supportedLocalesOf/branding': [FAIL],
- 'intl402/RelativeTimeFormat/constructor/supportedLocalesOf/length': [FAIL],
- 'intl402/RelativeTimeFormat/constructor/supportedLocalesOf/name': [FAIL],
- 'intl402/RelativeTimeFormat/constructor/supportedLocalesOf/prop-desc': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8236
+ 'intl402/Locale/likely-subtags': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7993
- 'intl402/RelativeTimeFormat/prototype/toStringTag/toStringTag': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8242
+ 'intl402/Locale/extensions-grandfathered': [FAIL],
+ 'intl402/Locale/getters-grandfathered': [FAIL],
+ 'intl402/Locale/likely-subtags-grandfathered': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7814
- 'built-ins/Array/prototype/splice/property-traps-order-with-species': [FAIL],
+ # Wrong test; see https://github.com/tc39/test262/pull/1835
+ 'intl402/Locale/constructor-options-numeric-valid': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=6705
'built-ins/Object/assign/strings-and-symbol-order': [FAIL],
@@ -489,6 +610,12 @@
'language/expressions/async-generator/generator-created-after-decl-inst': [FAIL],
'language/statements/async-generator/generator-created-after-decl-inst': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8099
+ 'intl402/NumberFormat/prototype/format/format-negative-numbers': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7871
+ 'intl402/ListFormat/prototype/formatToParts/en-us-disjunction': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
@@ -500,15 +627,21 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7833
'built-ins/Atomics/wait/cannot-suspend-throws': [SKIP],
'built-ins/Atomics/wait/undefined-index-defaults-to-zero': [SKIP],
- 'built-ins/Atomics/wait/waiterlist-order-of-operations-is-fifo': [SKIP],
- 'built-ins/Atomics/wake/count-defaults-to-infinity-missing': [SKIP],
- 'built-ins/Atomics/wake/count-defaults-to-infinity-undefined': [SKIP],
- 'built-ins/Atomics/wake/undefined-index-defaults-to-zero': [SKIP],
- # Flaky failure
- # https://bugs.chromium.org/p/v8/issues/detail?id=7876
- 'built-ins/Atomics/wait/waiterlist-block-indexedposition-wake': [SKIP],
- 'built-ins/Atomics/wake/wake-in-order': [SKIP],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=6890#c12
+ 'built-ins/RegExp/prototype/Symbol.matchAll/isregexp-called-once': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.matchAll/species-constructor': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.matchAll/species-regexp-get-global-throws': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.matchAll/species-regexp-get-unicode-throws': [FAIL],
+ 'built-ins/String/prototype/matchAll/regexp-prototype-has-no-matchAll': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8258
+ 'intl402/Locale/constructor-options-language-valid-undefined': [FAIL],
+ 'intl402/Locale/constructor-options-throwing-getters': [FAIL],
+ 'intl402/Locale/constructor-tag-tostring': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-fraction-digits-precision': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-significant-digits-precision': [FAIL],
+ 'intl402/NumberFormat/prototype/formatToParts/value-tonumber': [FAIL],
##################### DELIBERATE INCOMPATIBILITIES #####################
@@ -516,9 +649,6 @@
'annexB/language/function-code/block-decl-func-skip-arguments': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=6538
- 'built-ins/Array/prototype/unshift/throws-if-integer-limit-exceeded': [SKIP],
- 'built-ins/Array/prototype/splice/create-species-length-exceeding-integer-limit': [FAIL],
- 'built-ins/Array/prototype/splice/throws-if-integer-limit-exceeded': [SKIP],
# https://bugs.chromium.org/p/v8/issues/detail?id=6541
'language/export/escaped-as-export-specifier': [FAIL],
@@ -568,7 +698,6 @@
'built-ins/decodeURIComponent/S15.1.3.2_A1.11_T1': [SKIP],
'built-ins/decodeURIComponent/S15.1.3.2_A1.12_T1': [SKIP],
'built-ins/decodeURIComponent/S15.1.3.2_A2.5_T1': [SKIP],
- 'built-ins/RegExp/S15.10.2.12_A3_T1': [SKIP],
'language/literals/regexp/S7.8.5_A1.1_T2': [SKIP],
'language/literals/regexp/S7.8.5_A1.4_T2': [SKIP],
'language/literals/regexp/S7.8.5_A2.1_T2': [SKIP],
@@ -578,6 +707,18 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7187
'built-ins/Function/prototype/toString/line-terminator-normalisation-CR': [SKIP],
+ 'language/expressions/class/fields-private-derived-cls-direct-eval-err-contains-supercall': [FAIL],
+ 'language/expressions/class/fields-private-derived-cls-direct-eval-err-contains-supercall-1': [FAIL],
+ 'language/expressions/class/fields-private-derived-cls-direct-eval-err-contains-supercall-2': [FAIL],
+ 'language/expressions/class/fields-private-derived-cls-indirect-eval-err-contains-supercall': [FAIL],
+ 'language/expressions/class/fields-private-derived-cls-indirect-eval-err-contains-supercall-1': [FAIL],
+ 'language/expressions/class/fields-private-derived-cls-indirect-eval-err-contains-supercall-2': [FAIL],
+ 'language/statements/class/fields-private-derived-cls-direct-eval-err-contains-supercall': [FAIL],
+ 'language/statements/class/fields-private-derived-cls-direct-eval-err-contains-supercall-1': [FAIL],
+ 'language/statements/class/fields-private-derived-cls-direct-eval-err-contains-supercall-2': [FAIL],
+ 'language/statements/class/fields-private-derived-cls-indirect-eval-err-contains-supercall': [FAIL],
+ 'language/statements/class/fields-private-derived-cls-indirect-eval-err-contains-supercall-1': [FAIL],
+ 'language/statements/class/fields-private-derived-cls-indirect-eval-err-contains-supercall-2': [FAIL],
############################ SLOW TESTS #############################
@@ -586,8 +727,6 @@
'language/comments/S7.4_A5': [PASS, SLOW],
'language/comments/S7.4_A6': [PASS, SLOW],
- # https://crbug.com/v8/7841
- 'built-ins/Atomics/wake/wake-rewake-noop': [SKIP],
}], # ALWAYS
['no_i18n == True', {
@@ -652,6 +791,59 @@
# BUG(v8:4653): Test262 tests which rely on quit() are not compatible with
# asan's --omit-quit flag.
'built-ins/Promise/prototype/then/deferred-is-resolved-value': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-cls-anon': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-cls-named': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-cls-name-meth': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-expr-cls-anon': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-expr-cls-named': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-expr-cls-name-meth': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-expr-fn-anon': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-expr-fn-named': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-expr-gen-anon': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-expr-gen-named': [SKIP],
+ 'language/module-code/dynamic-import/eval-export-dflt-expr-in': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-arrow-assignment-expression-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-arrow-assignment-expression-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-arrow-assignment-expression-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-arrow-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-arrow-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-arrow-import-then-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-async-function-await-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-async-function-await-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-async-function-await-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-async-function-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-async-function-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-async-function-return-await-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-async-function-return-await-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-async-function-return-await-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-async-function-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-block-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-block-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-block-import-then-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-do-while-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-do-while-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-do-while-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-else-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-else-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-else-import-then-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-function-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-function-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-function-import-then-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-if-braceless-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-if-braceless-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-if-braceless-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-if-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-if-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-if-import-then-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-while-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-while-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/nested-while-import-then-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/syntax-nested-block-labeled-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/syntax-nested-block-labeled-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/syntax-nested-block-labeled-returns-promise': [SKIP],
+ 'language/module-code/dynamic-import/usage/top-level-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/module-code/dynamic-import/usage/top-level-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/module-code/dynamic-import/usage/top-level-import-then-returns-promise': [SKIP],
}], # asan == True
['asan == True or msan == True or tsan == True', {
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 7a1de38ce1..105f6713f2 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -42,9 +42,8 @@ from testrunner.outproc import test262
# TODO(littledan): move the flag mapping into the status file
FEATURE_FLAGS = {
- 'BigInt': '--harmony-bigint',
'class-fields-public': '--harmony-public-fields',
- 'class-fields-private': '--harmony-private-fields',
+ 'class-static-fields-public': '--harmony-class-fields',
'Array.prototype.flat': '--harmony-array-flat',
'Array.prototype.flatMap': '--harmony-array-flat',
'String.prototype.matchAll': '--harmony-string-matchall',
@@ -53,10 +52,18 @@ FEATURE_FLAGS = {
'Intl.ListFormat': '--harmony-intl-list-format',
'Intl.Locale': '--harmony-locale',
'Intl.RelativeTimeFormat': '--harmony-intl-relative-time-format',
+ 'Intl.Segmenter': '--harmony-intl-segmenter',
'Symbol.prototype.description': '--harmony-symbol-description',
+ 'globalThis': '--harmony-global',
+ 'well-formed-json-stringify': '--harmony-json-stringify',
}
-SKIPPED_FEATURES = set([])
+SKIPPED_FEATURES = set(['Object.fromEntries',
+ 'export-star-as-namespace-from-module',
+ 'class-fields-private',
+ 'class-static-fields-private',
+ 'class-methods-private',
+ 'class-static-methods-private'])
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
@@ -149,7 +156,7 @@ class TestSuite(testsuite.TestSuite):
return VariantsGenerator
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
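
For reference, the FEATURE_FLAGS/SKIPPED_FEATURES hunks above are the whole
feature-gating mechanism: a test262 test's `features:` metadata is either
translated into extra d8 flags or causes the test to be dropped. A C++ sketch
of the same lookup logic (the runner itself is Python; the function name and
the flag subset shown here are illustrative only):

    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    // Returns the d8 flags implied by a test's feature list; sets *skip if
    // any feature is in the unsupported set. Table entries are a small
    // excerpt of the mapping in the diff above.
    std::vector<std::string> FlagsForFeatures(
        const std::vector<std::string>& features, bool* skip) {
      static const std::map<std::string, std::string> kFeatureFlags = {
          {"class-fields-public", "--harmony-public-fields"},
          {"globalThis", "--harmony-global"},
          {"well-formed-json-stringify", "--harmony-json-stringify"},
      };
      static const std::set<std::string> kSkippedFeatures = {
          "Object.fromEntries", "class-fields-private"};
      std::vector<std::string> flags;
      *skip = false;
      for (const auto& feature : features) {
        if (kSkippedFeatures.count(feature)) {
          *skip = true;  // The runner drops such tests entirely.
          return {};
        }
        auto it = kFeatureFlags.find(feature);
        if (it != kFeatureFlags.end()) flags.push_back(it->second);
      }
      return flags;
    }
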
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index 3c258607fc..ee0cba9c5a 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -4,10 +4,9 @@
module test {
macro ElementsKindTestHelper1(kind: constexpr ElementsKind): bool {
- if constexpr((kind == UINT8_ELEMENTS) || (kind == UINT16_ELEMENTS)) {
- return true;
- }
- else {
+ if constexpr ((kind == UINT8_ELEMENTS) || (kind == UINT16_ELEMENTS)) {
+ return true;
+ } else {
return false;
}
}
@@ -21,22 +20,22 @@ module test {
}
macro LabelTestHelper1(): never
- labels Label1 {
+ labels Label1 {
goto Label1;
}
macro LabelTestHelper2(): never
- labels Label2(Smi) {
+ labels Label2(Smi) {
goto Label2(42);
}
macro LabelTestHelper3(): never
- labels Label3(String, Smi) {
+ labels Label3(String, Smi) {
goto Label3('foo', 7);
}
macro TestConstexpr1() {
- check(from_constexpr<bool>(IsFastElementsKind(PACKED_SMI_ELEMENTS)));
+ check(FromConstexpr<bool>(IsFastElementsKind(PACKED_SMI_ELEMENTS)));
}
macro TestConstexprIf() {
@@ -46,10 +45,10 @@ module test {
}
macro TestConstexprReturn() {
- check(from_constexpr<bool>(ElementsKindTestHelper3(UINT8_ELEMENTS)));
- check(from_constexpr<bool>(ElementsKindTestHelper3(UINT16_ELEMENTS)));
- check(!from_constexpr<bool>(ElementsKindTestHelper3(UINT32_ELEMENTS)));
- check(from_constexpr<bool>(!ElementsKindTestHelper3(UINT32_ELEMENTS)));
+ check(FromConstexpr<bool>(ElementsKindTestHelper3(UINT8_ELEMENTS)));
+ check(FromConstexpr<bool>(ElementsKindTestHelper3(UINT16_ELEMENTS)));
+ check(!FromConstexpr<bool>(ElementsKindTestHelper3(UINT32_ELEMENTS)));
+ check(FromConstexpr<bool>(!ElementsKindTestHelper3(UINT32_ELEMENTS)));
}
macro TestGotoLabel(): Boolean {
@@ -82,7 +81,7 @@ module test {
}
}
- builtin GenericBuiltinTest<T : type>(c: Context, param: T): Object {
+ builtin GenericBuiltinTest<T: type>(c: Context, param: T): Object {
return Null;
}
@@ -97,8 +96,9 @@ module test {
check(GenericBuiltinTest<Object>(c, Undefined) == Undefined);
}
- macro LabelTestHelper4(flag: constexpr bool): never labels Label4, Label5 {
- if constexpr(flag) {
+ macro LabelTestHelper4(flag: constexpr bool): never
+ labels Label4, Label5 {
+ if constexpr (flag) {
goto Label4;
} else {
goto Label5;
@@ -128,7 +128,7 @@ module test {
}
}
- macro GenericMacroTest<T : type>(param: T): Object {
+ macro GenericMacroTest<T: type>(param: T): Object {
return Undefined;
}
@@ -136,25 +136,31 @@ module test {
return param2;
}
- macro GenericMacroTestWithLabels<T : type>(param: T): Object labels X {
+ macro GenericMacroTestWithLabels<T: type>(param: T): Object
+ labels X {
return Undefined;
}
- GenericMacroTestWithLabels<Object>(param2: Object): Object labels Y {
- return param2;
+ GenericMacroTestWithLabels<Object>(param2: Object): Object
+ labels Y {
+ return Cast<Smi>(param2) otherwise Y;
}
macro TestMacroSpecialization() {
try {
+ const smi0: Smi = 0;
check(GenericMacroTest<Smi>(0) == Undefined);
check(GenericMacroTest<Smi>(1) == Undefined);
check(GenericMacroTest<Object>(Null) == Null);
check(GenericMacroTest<Object>(False) == False);
check(GenericMacroTest<Object>(True) == True);
- check(GenericMacroTestWithLabels<Smi>(0) otherwise Fail == Undefined);
- check(GenericMacroTestWithLabels<Smi>(0) otherwise Fail == Undefined);
- check(GenericMacroTestWithLabels<Object>(Null) otherwise Fail == Null);
- check(GenericMacroTestWithLabels<Object>(False) otherwise Fail == False);
+ check((GenericMacroTestWithLabels<Smi>(0) otherwise Fail) == Undefined);
+ check((GenericMacroTestWithLabels<Smi>(0) otherwise Fail) == Undefined);
+ check((GenericMacroTestWithLabels<Object>(smi0) otherwise Fail) == smi0);
+ try {
+ GenericMacroTestWithLabels<Object>(False) otherwise Expected;
+ }
+ label Expected {}
}
label Fail {
unreachable;
@@ -177,8 +183,8 @@ module test {
}
macro TestVariableRedeclaration(context: Context): Boolean {
- let var1: int31 = from_constexpr<bool>(42 == 0) ? 0 : 1;
- let var2: int31 = from_constexpr<bool>(42 == 0) ? 1 : 0;
+ let var1: int31 = FromConstexpr<bool>(42 == 0) ? 0 : 1;
+ let var2: int31 = FromConstexpr<bool>(42 == 0) ? 1 : 0;
return True;
}
@@ -204,7 +210,7 @@ module test {
macro TestUnsafeCast(c: Context, n: Number): Boolean {
if (TaggedIsSmi(n)) {
- let m: Smi = unsafe_cast<Smi>(n);
+ let m: Smi = UnsafeCast<Smi>(n);
check(TestHelperPlus1(c, m) == 11);
return True;
@@ -213,8 +219,8 @@ module test {
}
macro TestHexLiteral() {
- check(convert<intptr>(0xffff) + 1 == 0x10000);
- check(convert<intptr>(-0xffff) == -65535);
+ check(Convert<intptr>(0xffff) + 1 == 0x10000);
+ check(Convert<intptr>(-0xffff) == -65535);
}
macro TestLargeIntegerLiterals(c: Context) {
@@ -244,17 +250,17 @@ module test {
}
macro TestLocalConstBindings() {
- const x : constexpr int31 = 3;
- const x_smi : Smi = x;
+ const x: constexpr int31 = 3;
+ const xSmi: Smi = x;
{
- const x : Smi = x + from_constexpr<Smi>(1);
- check(x == x_smi + 1);
- const x_smi : Smi = x;
- check(x == x_smi);
+ const x: Smi = x + FromConstexpr<Smi>(1);
+ check(x == xSmi + 1);
+ const xSmi: Smi = x;
+ check(x == xSmi);
check(x == 4);
}
- check(x_smi == 3);
- check(x == x_smi);
+ check(xSmi == 3);
+ check(x == xSmi);
}
struct TestStructA {
@@ -273,12 +279,12 @@ module test {
}
macro TestStruct2(): TestStructA {
- return TestStructA{unsafe_cast<FixedArray>(kEmptyFixedArray), 27, 31};
+ return TestStructA{UnsafeCast<FixedArray>(kEmptyFixedArray), 27, 31};
}
macro TestStruct3(): TestStructA {
let a: TestStructA =
- TestStructA{unsafe_cast<FixedArray>(kEmptyFixedArray), 13, 5};
+ TestStructA{UnsafeCast<FixedArray>(kEmptyFixedArray), 13, 5};
let b: TestStructA = a;
let c: TestStructA = TestStruct2();
a.i = TestStruct1(c);
@@ -287,14 +293,15 @@ module test {
d.x = a;
d = TestStructB{a, 7};
let e: TestStructA = d.x;
- let f: Smi = TestStructA{unsafe_cast<FixedArray>(kEmptyFixedArray), 27, 31}.i;
+ let f: Smi =
+ TestStructA{UnsafeCast<FixedArray>(kEmptyFixedArray), 27, 31}.i;
f = TestStruct2().i;
return a;
}
struct TestStructC {
- x : TestStructA;
- y : TestStructA;
+ x: TestStructA;
+ y: TestStructA;
}
macro TestStruct4(): TestStructC {
@@ -387,38 +394,42 @@ module test {
check(sum == 7);
}
- macro TestSubtyping(x : Smi) {
- const foo : Object = x;
+ macro TestSubtyping(x: Smi) {
+ const foo: Object = x;
}
- macro IncrementIfSmi<A : type>(x : A) : A {
+ macro IncrementIfSmi<A: type>(x: A): A {
typeswitch (x) {
- case (x : Smi) {
+ case (x: Smi): {
return x + 1;
- } case (o : A) {
+ }
+ case (o: A): {
return o;
}
}
}
- macro TypeswitchExample(x : Number | FixedArray) : int32 {
- let result : int32 = 0;
- typeswitch (IncrementIfSmi<(Number|FixedArray)>(x)) {
- case (x : FixedArray) {
+ macro TypeswitchExample(x: Number | FixedArray): int32 {
+ let result: int32 = 0;
+ typeswitch (IncrementIfSmi<(Number | FixedArray)>(x)) {
+ case (x: FixedArray): {
result = result + 1;
- } case (Number) {
+ }
+ case (Number): {
result = result + 2;
}
}
result = result * 10;
- typeswitch (IncrementIfSmi<(Number|FixedArray)>(x)) {
- case (x : Smi) {
- result = result + convert<int32>(x);
- } case (a : FixedArray) {
- result = result + convert<int32>(a.length);
- } case (x : HeapNumber) {
+ typeswitch (IncrementIfSmi<(Number | FixedArray)>(x)) {
+ case (x: Smi): {
+ result = result + Convert<int32>(x);
+ }
+ case (a: FixedArray): {
+ result = result + Convert<int32>(a.length);
+ }
+ case (x: HeapNumber): {
result = result + 7;
}
}
@@ -427,23 +438,154 @@ module test {
}
macro TestTypeswitch() {
- check(TypeswitchExample(from_constexpr<Smi>(5)) == 26);
- const a : FixedArray = AllocateZeroedFixedArray(3);
+ check(TypeswitchExample(FromConstexpr<Smi>(5)) == 26);
+ const a: FixedArray = AllocateZeroedFixedArray(3);
check(TypeswitchExample(a) == 13);
- check(TypeswitchExample(from_constexpr<Number>(0.5)) == 27);
+ check(TypeswitchExample(FromConstexpr<Number>(0.5)) == 27);
}
- macro ExampleGenericOverload<A: type>(o : Object) : A {
+ macro ExampleGenericOverload<A: type>(o: Object): A {
return o;
}
- macro ExampleGenericOverload<A: type>(o : Smi) : A {
+ macro ExampleGenericOverload<A: type>(o: Smi): A {
return o + 1;
}
macro TestGenericOverload() {
- const x_smi : Smi = 5;
- const x_object : Object = x_smi;
- check(ExampleGenericOverload<Smi>(x_smi) == 6);
- check(unsafe_cast<Smi>(ExampleGenericOverload<Object>(x_object)) == 5);
+ const xSmi: Smi = 5;
+ const xObject: Object = xSmi;
+ check(ExampleGenericOverload<Smi>(xSmi) == 6);
+ check(UnsafeCast<Smi>(ExampleGenericOverload<Object>(xObject)) == 5);
+ }
+
+ macro BoolToBranch(x: bool): never
+ labels Taken, NotTaken {
+ if (x) {
+ goto Taken;
+ } else {
+ goto NotTaken;
+ }
+ }
+
+ macro TestOrAnd1(x: bool, y: bool, z: bool): bool {
+ return BoolToBranch(x) || y && z ? true : false;
+ }
+
+ macro TestOrAnd2(x: bool, y: bool, z: bool): bool {
+ return x || BoolToBranch(y) && z ? true : false;
+ }
+
+ macro TestOrAnd3(x: bool, y: bool, z: bool): bool {
+ return x || y && BoolToBranch(z) ? true : false;
+ }
+
+ macro TestAndOr1(x: bool, y: bool, z: bool): bool {
+ return BoolToBranch(x) && y || z ? true : false;
+ }
+
+ macro TestAndOr2(x: bool, y: bool, z: bool): bool {
+ return x && BoolToBranch(y) || z ? true : false;
+ }
+
+ macro TestAndOr3(x: bool, y: bool, z: bool): bool {
+ return x && y || BoolToBranch(z) ? true : false;
+ }
+
+ macro TestLogicalOperators() {
+ check(TestAndOr1(true, true, true));
+ check(TestAndOr2(true, true, true));
+ check(TestAndOr3(true, true, true));
+ check(TestAndOr1(true, true, false));
+ check(TestAndOr2(true, true, false));
+ check(TestAndOr3(true, true, false));
+ check(TestAndOr1(true, false, true));
+ check(TestAndOr2(true, false, true));
+ check(TestAndOr3(true, false, true));
+ check(!TestAndOr1(true, false, false));
+ check(!TestAndOr2(true, false, false));
+ check(!TestAndOr3(true, false, false));
+ check(TestAndOr1(false, true, true));
+ check(TestAndOr2(false, true, true));
+ check(TestAndOr3(false, true, true));
+ check(!TestAndOr1(false, true, false));
+ check(!TestAndOr2(false, true, false));
+ check(!TestAndOr3(false, true, false));
+ check(TestAndOr1(false, false, true));
+ check(TestAndOr2(false, false, true));
+ check(TestAndOr3(false, false, true));
+ check(!TestAndOr1(false, false, false));
+ check(!TestAndOr2(false, false, false));
+ check(!TestAndOr3(false, false, false));
+ check(TestOrAnd1(true, true, true));
+ check(TestOrAnd2(true, true, true));
+ check(TestOrAnd3(true, true, true));
+ check(TestOrAnd1(true, true, false));
+ check(TestOrAnd2(true, true, false));
+ check(TestOrAnd3(true, true, false));
+ check(TestOrAnd1(true, false, true));
+ check(TestOrAnd2(true, false, true));
+ check(TestOrAnd3(true, false, true));
+ check(TestOrAnd1(true, false, false));
+ check(TestOrAnd2(true, false, false));
+ check(TestOrAnd3(true, false, false));
+ check(TestOrAnd1(false, true, true));
+ check(TestOrAnd2(false, true, true));
+ check(TestOrAnd3(false, true, true));
+ check(!TestOrAnd1(false, true, false));
+ check(!TestOrAnd2(false, true, false));
+ check(!TestOrAnd3(false, true, false));
+ check(!TestOrAnd1(false, false, true));
+ check(!TestOrAnd2(false, false, true));
+ check(!TestOrAnd3(false, false, true));
+ check(!TestOrAnd1(false, false, false));
+ check(!TestOrAnd2(false, false, false));
+ check(!TestOrAnd3(false, false, false));
+ }
+
+ macro TestCall(i: Smi): Smi
+ labels A {
+ if (i < 5) return i;
+ goto A;
+ }
+
+ macro TestOtherwiseWithCode1() {
+ let v: Smi = 0;
+ let s: Smi = 1;
+ try {
+ TestCall(10) otherwise goto B(++s);
+ }
+ label B(v1: Smi) {
+ v = v1;
+ }
+ assert(v == 2);
+ }
+
+ macro TestOtherwiseWithCode2() {
+ let s: Smi = 0;
+ for (let i: Smi = 0; i < 10; ++i) {
+ TestCall(i) otherwise break;
+ ++s;
+ }
+ assert(s == 5);
+ }
+
+ macro TestOtherwiseWithCode3() {
+ let s: Smi = 0;
+ for (let i: Smi = 0; i < 10; ++i) {
+ s += TestCall(i) otherwise break;
+ }
+ assert(s == 10);
+ }
+
+ macro TestForwardLabel() {
+ try {
+ goto A;
+ }
+ label A {
+ goto B(5);
+ }
+ label B(b: Smi) {
+ assert(b == 5);
+ }
}
}
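
The TestOtherwiseWithCode* macros above pin down the new `otherwise
<statement>` forms: a macro that can exit through a label is called, and the
attached statement (a `goto`, a `break`, and so on) runs only on that exit
path. As a rough C++ analogy, not Torque semantics (helper names here are
hypothetical):

    #include <optional>

    // Stands in for TestCall: returns a value for i < 5, otherwise "exits
    // through the label", modeled as an empty optional.
    std::optional<int> TestCallLike(int i) {
      if (i < 5) return i;
      return std::nullopt;  // corresponds to `goto A`
    }

    int SumUntilLabel() {
      int s = 0;
      for (int i = 0; i < 10; ++i) {
        auto v = TestCallLike(i);
        if (!v) break;  // corresponds to `... otherwise break`
        s += *v;        // corresponds to `s += TestCall(i) ...`
      }
      return s;  // 0+1+2+3+4 == 10, matching TestOtherwiseWithCode3
    }
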
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 606fe9c343..f63e2af197 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -55,6 +55,7 @@ v8_source_set("unittests_sources") {
"asmjs/asm-scanner-unittest.cc",
"asmjs/asm-types-unittest.cc",
"asmjs/switch-logic-unittest.cc",
+ "base/address-region-unittest.cc",
"base/atomic-utils-unittest.cc",
"base/bits-unittest.cc",
"base/cpu-unittest.cc",
@@ -72,8 +73,10 @@ v8_source_set("unittests_sources") {
"base/platform/platform-unittest.cc",
"base/platform/semaphore-unittest.cc",
"base/platform/time-unittest.cc",
+ "base/region-allocator-unittest.cc",
"base/sys-info-unittest.cc",
"base/template-utils-unittest.cc",
+ "base/threaded-list-unittest.cc",
"base/utils/random-number-generator-unittest.cc",
"bigint-unittest.cc",
"cancelable-tasks-unittest.cc",
@@ -112,6 +115,7 @@ v8_source_set("unittests_sources") {
"compiler/js-call-reducer-unittest.cc",
"compiler/js-create-lowering-unittest.cc",
"compiler/js-intrinsic-lowering-unittest.cc",
+ "compiler/js-native-context-specialization-unittest.cc",
"compiler/js-operator-unittest.cc",
"compiler/js-typed-lowering-unittest.cc",
"compiler/linkage-tail-call-unittest.cc",
@@ -128,6 +132,7 @@ v8_source_set("unittests_sources") {
"compiler/node-unittest.cc",
"compiler/opcodes-unittest.cc",
"compiler/persistent-unittest.cc",
+ "compiler/redundancy-elimination-unittest.cc",
"compiler/regalloc/live-range-unittest.cc",
"compiler/regalloc/move-optimizer-unittest.cc",
"compiler/regalloc/register-allocator-unittest.cc",
@@ -182,6 +187,7 @@ v8_source_set("unittests_sources") {
"libplatform/worker-thread-unittest.cc",
"locked-queue-unittest.cc",
"object-unittest.cc",
+ "objects/microtask-queue-unittest.cc",
"parser/ast-value-unittest.cc",
"parser/preparser-unittest.cc",
"register-configuration-unittest.cc",
diff --git a/deps/v8/test/unittests/allocation-unittest.cc b/deps/v8/test/unittests/allocation-unittest.cc
index 3e43cdd4ea..7b543ece24 100644
--- a/deps/v8/test/unittests/allocation-unittest.cc
+++ b/deps/v8/test/unittests/allocation-unittest.cc
@@ -40,7 +40,7 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
#endif
protected:
- virtual void SetUp() {
+ void SetUp() override {
struct sigaction action;
action.sa_sigaction = SignalHandler;
sigemptyset(&action.sa_mask);
@@ -51,7 +51,7 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
#endif
}
- virtual void TearDown() {
+ void TearDown() override {
// Be a good citizen and restore the old signal handler.
sigaction(SIGSEGV, &old_action_, nullptr);
#if V8_OS_MACOSX
@@ -95,12 +95,14 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
void TestPermissions(PageAllocator::Permission permission, bool can_read,
bool can_write) {
- const size_t page_size = AllocatePageSize();
- int* buffer = static_cast<int*>(
- AllocatePages(nullptr, page_size, page_size, permission));
+ v8::PageAllocator* page_allocator =
+ v8::internal::GetPlatformPageAllocator();
+ const size_t page_size = page_allocator->AllocatePageSize();
+ int* buffer = static_cast<int*>(AllocatePages(
+ page_allocator, nullptr, page_size, page_size, permission));
ProbeMemory(buffer, MemoryAction::kRead, can_read);
ProbeMemory(buffer, MemoryAction::kWrite, can_write);
- CHECK(FreePages(buffer, page_size));
+ CHECK(FreePages(page_allocator, buffer, page_size));
}
};
@@ -125,41 +127,46 @@ TEST(AllocationTest, AllocateAndFree) {
size_t page_size = v8::internal::AllocatePageSize();
CHECK_NE(0, page_size);
+ v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
+
// A large allocation, aligned at native allocation granularity.
const size_t kAllocationSize = 1 * v8::internal::MB;
void* mem_addr = v8::internal::AllocatePages(
- v8::internal::GetRandomMmapAddr(), kAllocationSize, page_size,
- PageAllocator::Permission::kReadWrite);
+ page_allocator, page_allocator->GetRandomMmapAddr(), kAllocationSize,
+ page_size, PageAllocator::Permission::kReadWrite);
CHECK_NOT_NULL(mem_addr);
- CHECK(v8::internal::FreePages(mem_addr, kAllocationSize));
+ CHECK(v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize));
// A large allocation, aligned significantly beyond native granularity.
const size_t kBigAlignment = 64 * v8::internal::MB;
void* aligned_mem_addr = v8::internal::AllocatePages(
- AlignedAddress(v8::internal::GetRandomMmapAddr(), kBigAlignment),
+ page_allocator,
+ AlignedAddress(page_allocator->GetRandomMmapAddr(), kBigAlignment),
kAllocationSize, kBigAlignment, PageAllocator::Permission::kReadWrite);
CHECK_NOT_NULL(aligned_mem_addr);
CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
- CHECK(v8::internal::FreePages(aligned_mem_addr, kAllocationSize));
+ CHECK(v8::internal::FreePages(page_allocator, aligned_mem_addr,
+ kAllocationSize));
}
TEST(AllocationTest, ReserveMemory) {
+ v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
size_t page_size = v8::internal::AllocatePageSize();
const size_t kAllocationSize = 1 * v8::internal::MB;
void* mem_addr = v8::internal::AllocatePages(
- v8::internal::GetRandomMmapAddr(), kAllocationSize, page_size,
- PageAllocator::Permission::kReadWrite);
+ page_allocator, page_allocator->GetRandomMmapAddr(), kAllocationSize,
+ page_size, PageAllocator::Permission::kReadWrite);
CHECK_NE(0, page_size);
CHECK_NOT_NULL(mem_addr);
- size_t commit_size = v8::internal::CommitPageSize();
- CHECK(v8::internal::SetPermissions(mem_addr, commit_size,
+ size_t commit_size = page_allocator->CommitPageSize();
+ CHECK(v8::internal::SetPermissions(page_allocator, mem_addr, commit_size,
PageAllocator::Permission::kReadWrite));
// Check whether we can write to memory.
int* addr = static_cast<int*>(mem_addr);
addr[v8::internal::KB - 1] = 2;
- CHECK(v8::internal::SetPermissions(mem_addr, commit_size,
+ CHECK(v8::internal::SetPermissions(page_allocator, mem_addr, commit_size,
PageAllocator::Permission::kNoAccess));
- CHECK(v8::internal::FreePages(mem_addr, kAllocationSize));
+ CHECK(v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize));
}
} // namespace internal
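
The hunks above all track one API migration: page operations now take an
explicit v8::PageAllocator* instead of using a process-global default. A
minimal sketch of the updated call sequence, using exactly the entry points
and signatures exercised by the test (the wrapper function itself is
hypothetical, and assumes V8's internal headers are available):

    // Reserve a page, drop access, and release it, allocator-explicit style.
    bool PageRoundTrip() {
      v8::PageAllocator* page_allocator =
          v8::internal::GetPlatformPageAllocator();
      const size_t page_size = page_allocator->AllocatePageSize();
      void* mem = v8::internal::AllocatePages(
          page_allocator, page_allocator->GetRandomMmapAddr(), page_size,
          page_size, v8::PageAllocator::Permission::kReadWrite);
      if (mem == nullptr) return false;
      // Drop access before release, as the ReserveMemory test does above.
      v8::internal::SetPermissions(page_allocator, mem, page_size,
                                   v8::PageAllocator::Permission::kNoAccess);
      return v8::internal::FreePages(page_allocator, mem, page_size);
    }
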
diff --git a/deps/v8/test/unittests/api/interceptor-unittest.cc b/deps/v8/test/unittests/api/interceptor-unittest.cc
index b13384f18a..8a1db3f823 100644
--- a/deps/v8/test/unittests/api/interceptor-unittest.cc
+++ b/deps/v8/test/unittests/api/interceptor-unittest.cc
@@ -35,7 +35,7 @@ namespace {
class InterceptorLoggingTest : public TestWithNativeContext {
public:
- InterceptorLoggingTest() {}
+ InterceptorLoggingTest() = default;
static const int kTestIndex = 0;
diff --git a/deps/v8/test/unittests/api/isolate-unittest.cc b/deps/v8/test/unittests/api/isolate-unittest.cc
index 377ad83187..8ddf8a29c8 100644
--- a/deps/v8/test/unittests/api/isolate-unittest.cc
+++ b/deps/v8/test/unittests/api/isolate-unittest.cc
@@ -70,4 +70,73 @@ TEST_F(IsolateTest, MemoryPressureNotificationBackground) {
v8::platform::PumpMessageLoop(internal::V8::GetCurrentPlatform(), isolate());
}
+using IncumbentContextTest = TestWithIsolate;
+
+// Check that Isolate::GetIncumbentContext() returns the correct one in basic
+// scenarios.
+#if !defined(V8_USE_ADDRESS_SANITIZER)
+TEST_F(IncumbentContextTest, MAYBE_Basic) {
+ auto Str = [&](const char* s) {
+ return String::NewFromUtf8(isolate(), s, NewStringType::kNormal)
+ .ToLocalChecked();
+ };
+ auto Run = [&](Local<Context> context, const char* script) {
+ Context::Scope scope(context);
+ return Script::Compile(context, Str(script))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ };
+
+ // Set up the test environment; three contexts with getIncumbentGlobal()
+ // function.
+ Local<FunctionTemplate> get_incumbent_global = FunctionTemplate::New(
+ isolate(), [](const FunctionCallbackInfo<Value>& info) {
+ Local<Context> incumbent_context =
+ info.GetIsolate()->GetIncumbentContext();
+ info.GetReturnValue().Set(incumbent_context->Global());
+ });
+ Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate());
+ global_template->Set(Str("getIncumbentGlobal"), get_incumbent_global);
+
+ Local<Context> context_a = Context::New(isolate(), nullptr, global_template);
+ Local<Context> context_b = Context::New(isolate(), nullptr, global_template);
+ Local<Context> context_c = Context::New(isolate(), nullptr, global_template);
+ Local<Object> global_a = context_a->Global();
+ Local<Object> global_b = context_b->Global();
+ Local<Object> global_c = context_c->Global();
+
+ Local<String> security_token = Str("security_token");
+ context_a->SetSecurityToken(security_token);
+ context_b->SetSecurityToken(security_token);
+ context_c->SetSecurityToken(security_token);
+
+ global_a->Set(context_a, Str("b"), global_b).ToChecked();
+ global_b->Set(context_b, Str("c"), global_c).ToChecked();
+
+ // Test scenario 1: A -> B -> C, then the incumbent is B.
+ Run(context_a, "funcA = function() { return b.funcB(); }");
+ Run(context_b, "funcB = function() { return c.getIncumbentGlobal(); }");
+ // Without BackupIncumbentScope.
+ EXPECT_EQ(global_b, Run(context_a, "funcA()"));
+ {
+ // With BackupIncumbentScope.
+ Context::BackupIncumbentScope backup_incumbent(context_a);
+ EXPECT_EQ(global_b, Run(context_a, "funcA()"));
+ }
+
+ // Test scenario 2: A -> B -> C -> C, then the incumbent is C.
+ Run(context_a, "funcA = function() { return b.funcB(); }");
+ Run(context_b, "funcB = function() { return c.funcC(); }");
+ Run(context_c, "funcC = function() { return getIncumbentGlobal(); }");
+ // Without BackupIncumbentScope.
+ EXPECT_EQ(global_c, Run(context_a, "funcA()"));
+ {
+ // With BackupIncumbentScope.
+ Context::BackupIncumbentScope backup_incumbent(context_a);
+ EXPECT_EQ(global_c, Run(context_a, "funcA()"));
+ }
+}
+#endif // !defined(V8_USE_ADDRESS_SANITIZER)
+
} // namespace v8
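
The new test above documents the contract of GetIncumbentContext() together
with Context::BackupIncumbentScope: the incumbent is the context of the most
recent JavaScript caller, and the scope lets an embedder supply one
explicitly when re-entering V8 from native code. A minimal embedder-side
sketch (the function and its arguments are hypothetical; the scope class is
the one used in the test):

    #include <v8.h>

    void CallWithExplicitIncumbent(v8::Local<v8::Context> incumbent,
                                   v8::Local<v8::Context> target,
                                   v8::Local<v8::Function> fn) {
      // While no JavaScript frame provides a more recent incumbent,
      // GetIncumbentContext() now reports `incumbent`.
      v8::Context::BackupIncumbentScope backup_incumbent(incumbent);
      v8::Context::Scope scope(target);
      fn->Call(target, target->Global(), 0, nullptr).ToLocalChecked();
    }
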
diff --git a/deps/v8/test/unittests/asmjs/asm-types-unittest.cc b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
index f17528977c..db5ed2ba52 100644
--- a/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
@@ -203,7 +203,7 @@ TEST_F(AsmTypeTest, SaneParentsMap) {
<< Type::CamelName()->Name() << ", parents " \
<< reinterpret_cast<void*>(parents) << ", type " \
<< static_cast<void*>(Type::CamelName()); \
- } while (0);
+ } while (false);
FOR_EACH_ASM_VALUE_TYPE_LIST(V)
#undef V
}
@@ -212,7 +212,7 @@ TEST_F(AsmTypeTest, Names) {
#define V(CamelName, string_name, number, parent_types) \
do { \
EXPECT_THAT(Type::CamelName()->Name(), StrEq(string_name)); \
- } while (0);
+ } while (false);
FOR_EACH_ASM_VALUE_TYPE_LIST(V)
#undef V
diff --git a/deps/v8/test/unittests/base/address-region-unittest.cc b/deps/v8/test/unittests/base/address-region-unittest.cc
new file mode 100644
index 0000000000..8dffc10247
--- /dev/null
+++ b/deps/v8/test/unittests/base/address-region-unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/address-region.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+using Address = AddressRegion::Address;
+
+TEST(AddressRegionTest, Contains) {
+ struct {
+ Address start;
+ size_t size;
+ } test_cases[] = {{153, 771}, {0, 227}, {-447, 447}};
+
+ for (size_t i = 0; i < arraysize(test_cases); i++) {
+ Address start = test_cases[i].start;
+ size_t size = test_cases[i].size;
+ Address end = start + size; // exclusive
+
+ AddressRegion region(start, size);
+
+ // Test single-argument contains().
+ CHECK(!region.contains(start - 1041));
+ CHECK(!region.contains(start - 1));
+ CHECK(!region.contains(end));
+ CHECK(!region.contains(end + 1));
+ CHECK(!region.contains(end + 113));
+
+ CHECK(region.contains(start));
+ CHECK(region.contains(start + 1));
+ CHECK(region.contains(start + size / 2));
+ CHECK(region.contains(end - 1));
+
+ // Test two-arguments contains().
+ CHECK(!region.contains(start - 1, size));
+ CHECK(!region.contains(start, size + 1));
+ CHECK(!region.contains(start - 17, 17));
+ CHECK(!region.contains(start - 17, size * 2));
+ CHECK(!region.contains(end, 1));
+ CHECK(!region.contains(end, static_cast<size_t>(0 - end)));
+
+ CHECK(region.contains(start, size));
+ CHECK(region.contains(start, 10));
+ CHECK(region.contains(start + 11, 120));
+ CHECK(region.contains(end - 13, 13));
+ CHECK(!region.contains(end, 0));
+
+ // Zero-size queries.
+ CHECK(!region.contains(start - 10, 0));
+ CHECK(!region.contains(start - 1, 0));
+ CHECK(!region.contains(end, 0));
+ CHECK(!region.contains(end + 10, 0));
+
+ CHECK(region.contains(start, 0));
+ CHECK(region.contains(start + 10, 0));
+ CHECK(region.contains(end - 1, 0));
+ }
+}
+
+} // namespace base
+} // namespace v8
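
The checks in this new test pin down contains() as a half-open interval
[begin, begin + size), with zero-size queries succeeding anywhere inside the
region (including at begin) but failing at its end. A standalone sketch
consistent with those checks, not the V8 source: because Address is
unsigned, wrap-around below begin lets each bound test collapse into a
single comparison.

    #include <cstddef>
    #include <cstdint>

    struct Region {
      uintptr_t begin;
      size_t size;

      // One-argument form: begin <= address < begin + size.
      bool contains(uintptr_t address) const {
        return (address - begin) < size;  // wraps below begin, so one compare
      }

      // Two-argument form: the queried range must fit inside the region, and
      // zero-size queries at `end` fail because offset == size.
      bool contains(uintptr_t address, size_t query_size) const {
        uintptr_t offset = address - begin;
        return offset < size && query_size <= size - offset;
      }
    };
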
diff --git a/deps/v8/test/unittests/base/functional-unittest.cc b/deps/v8/test/unittests/base/functional-unittest.cc
index b9295d49a0..207d5cbdd7 100644
--- a/deps/v8/test/unittests/base/functional-unittest.cc
+++ b/deps/v8/test/unittests/base/functional-unittest.cc
@@ -44,7 +44,7 @@ class FunctionalTest : public ::testing::Test {
public:
FunctionalTest()
: rng_(GetRandomSeedFromFlag(::v8::internal::FLAG_random_seed)) {}
- virtual ~FunctionalTest() {}
+ ~FunctionalTest() override = default;
RandomNumberGenerator* rng() { return &rng_; }
diff --git a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
index 43fd335270..b32863f4b2 100644
--- a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
@@ -113,8 +113,8 @@ class ThreadWithSharedMutexAndConditionVariable final : public Thread {
: Thread(Options("ThreadWithSharedMutexAndConditionVariable")),
running_(false),
finished_(false),
- cv_(NULL),
- mutex_(NULL) {}
+ cv_(nullptr),
+ mutex_(nullptr) {}
void Run() override {
LockGuard<Mutex> lock_guard(mutex_);
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index f9fc26a2df..d31d85447c 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -30,7 +30,7 @@ class ThreadLocalStorageTest : public Thread, public ::testing::Test {
keys_[i] = Thread::CreateThreadLocalKey();
}
}
- ~ThreadLocalStorageTest() {
+ ~ThreadLocalStorageTest() override {
for (size_t i = 0; i < arraysize(keys_); ++i) {
Thread::DeleteThreadLocalKey(keys_[i]);
}
diff --git a/deps/v8/test/unittests/base/region-allocator-unittest.cc b/deps/v8/test/unittests/base/region-allocator-unittest.cc
new file mode 100644
index 0000000000..5024ac85eb
--- /dev/null
+++ b/deps/v8/test/unittests/base/region-allocator-unittest.cc
@@ -0,0 +1,356 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/region-allocator.h"
+#include "test/unittests/test-utils.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+using Address = RegionAllocator::Address;
+using v8::internal::KB;
+using v8::internal::MB;
+
+class RegionAllocatorTest : public ::testing::TestWithParam<int> {};
+
+TEST(RegionAllocatorTest, SimpleAllocateRegionAt) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 16;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+ const Address kEnd = kBegin + kSize;
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.free_size(), kEnd - address);
+ CHECK(ra.AllocateRegionAt(address, kPageSize));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free one region and then the allocation should succeed.
+ CHECK_EQ(ra.FreeRegion(kBegin), kPageSize);
+ CHECK_EQ(ra.free_size(), kPageSize);
+ CHECK(ra.AllocateRegionAt(kBegin, kPageSize));
+
+ // Free all the pages.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ }
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
+
+TEST(RegionAllocatorTest, SimpleAllocateRegion) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 16;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+ const Address kEnd = kBegin + kSize;
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCount; i++) {
+ CHECK_EQ(ra.free_size(), kSize - kPageSize * i);
+ Address address = ra.AllocateRegion(kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * i);
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Try to free one page and ensure that we are able to allocate it again.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), address);
+ }
+ CHECK_EQ(ra.free_size(), 0);
+}
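
Editorial aside, not part of the commit: the two tests so far already fix
the core RegionAllocator contract, condensed here into one sketch (constants
arbitrary; assumes this file's usings and CHECK macros):

    void UsageSketch() {
      const size_t kPageSize = 4 * KB;
      RegionAllocator ra(0x10000, 16 * kPageSize, kPageSize);
      Address a = ra.AllocateRegion(kPageSize);  // first-fit, page-granular
      CHECK_NE(a, RegionAllocator::kAllocationFailure);
      CHECK(ra.AllocateRegionAt(a + kPageSize, kPageSize));  // fixed placement
      CHECK_EQ(ra.FreeRegion(a), kPageSize);   // returns the freed size
      CHECK_EQ(ra.free_size(), 15 * kPageSize);  // bookkeeping stays exact
    }
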
+
+TEST_P(RegionAllocatorTest, AllocateRegionRandom) {
+ const size_t kPageSize = 8 * KB;
+ const size_t kPageCountLog = 16;
+ const size_t kPageCount = (size_t{1} << kPageCountLog);
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(153 * MB);
+ const Address kEnd = kBegin + kSize;
+
+ base::RandomNumberGenerator rng(GetParam());
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ std::set<Address> allocated_pages;
+ // The addresses must come out randomized for at least this many allocations.
+ const size_t kRandomizationLimit = ra.max_load_for_randomization_ / kPageSize;
+ CHECK_LT(kRandomizationLimit, kPageCount);
+
+ Address last_address = kBegin;
+ bool saw_randomized_pages = false;
+
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = ra.AllocateRegion(&rng, kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK(IsAligned(address, kPageSize));
+ CHECK_LE(kBegin, address);
+ CHECK_LT(address, kEnd);
+ CHECK_EQ(allocated_pages.find(address), allocated_pages.end());
+ allocated_pages.insert(address);
+
+ saw_randomized_pages |= (address < last_address);
+ last_address = address;
+
+ if (i == kRandomizationLimit) {
+ // We must have observed randomized allocations up to this point.
+ // The remaining allocations may still come out randomized, depending
+ // on the distribution of free ranges, but that is not guaranteed.
+ CHECK(saw_randomized_pages);
+ }
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+}
+
+TEST(RegionAllocatorTest, AllocateBigRegions) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCountLog = 10;
+ const size_t kPageCount = (size_t{1} << kPageCountLog) - 1;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ Address address = ra.AllocateRegion(kPageSize * (size_t{1} << i));
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * ((size_t{1} << i) - 1));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free each region in turn and ensure that it can be allocated again.
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ const size_t size = kPageSize * (size_t{1} << i);
+ Address address = kBegin + kPageSize * ((size_t{1} << i) - 1);
+ CHECK_EQ(ra.FreeRegion(address), size);
+ CHECK_EQ(ra.AllocateRegion(size), address);
+ }
+ CHECK_EQ(ra.free_size(), 0);
+}
+
+TEST(RegionAllocatorTest, MergeLeftToRightCoalescingRegions) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCountLog = 10;
+ const size_t kPageCount = (size_t{1} << kPageCountLog);
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region using the following page size pattern:
+ // |0|1|22|3333|...
+ CHECK_EQ(ra.AllocateRegion(kPageSize), kBegin);
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ Address address = ra.AllocateRegion(kPageSize * (size_t{1} << i));
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * (size_t{1} << i));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free two adjacent regions, let them coalesce, and ensure that a region
+ // of twice the size can be allocated in their place.
+ size_t current_size = kPageSize;
+ for (size_t i = 0; i < kPageCountLog; i++) {
+ CHECK_EQ(ra.FreeRegion(kBegin), current_size);
+ CHECK_EQ(ra.FreeRegion(kBegin + current_size), current_size);
+ current_size += current_size;
+ CHECK_EQ(ra.AllocateRegion(current_size), kBegin);
+ }
+ CHECK_EQ(ra.free_size(), 0);
+}
+
+TEST_P(RegionAllocatorTest, MergeRightToLeftCoalescingRegions) {
+ base::RandomNumberGenerator rng(GetParam());
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCountLog = 10;
+ const size_t kPageCount = (size_t{1} << kPageCountLog);
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = ra.AllocateRegion(kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * i);
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free pages with even indices left-to-right.
+ for (size_t i = 0; i < kPageCount; i += 2) {
+ Address address = kBegin + kPageSize * i;
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ }
+
+ // Free pages with odd indices right-to-left.
+ for (size_t i = 1; i < kPageCount; i += 2) {
+ Address address = kBegin + kPageSize * (kPageCount - i);
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ // Now we should be able to allocate a double-sized page.
+ CHECK_EQ(ra.AllocateRegion(kPageSize * 2), address - kPageSize);
+ // ... but there's a window for only one such page.
+ CHECK_EQ(ra.AllocateRegion(kPageSize * 2),
+ RegionAllocator::kAllocationFailure);
+ }
+
+ // Free all the double-sized pages.
+ for (size_t i = 0; i < kPageCount; i += 2) {
+ Address address = kBegin + kPageSize * i;
+ CHECK_EQ(ra.FreeRegion(address), kPageSize * 2);
+ }
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
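
Why exactly one double-sized window exists per iteration: the odd-indexed pages are freed right-to-left, and each freed page's left neighbour (even index) is already free from the previous loop, so FreeRegion coalesces them into a single two-page window at address - kPageSize. The allocator refills it immediately, leaving only isolated one-page holes, so the second two-page request must fail.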
+
+TEST(RegionAllocatorTest, Fragmentation) {
+ const size_t kPageSize = 64 * KB;
+ const size_t kPageCount = 9;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = ra.AllocateRegion(kPageSize);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK_EQ(address, kBegin + kPageSize * i);
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Free pages in the following order and check the freed size.
+ struct {
+ size_t page_index_to_free;
+ size_t expected_page_count;
+ } testcase[] = { // .........
+ {0, 9}, // x........
+ {2, 9}, // x.x......
+ {4, 9}, // x.x.x....
+ {6, 9}, // x.x.x.x..
+ {8, 9}, // x.x.x.x.x
+ {1, 7}, // xxx.x.x.x
+ {7, 5}, // xxx.x.xxx
+ {3, 3}, // xxxxx.xxx
+ {5, 1}}; // xxxxxxxxx
+ CHECK_EQ(kPageCount, arraysize(testcase));
+
+ CHECK_EQ(ra.all_regions_.size(), kPageCount);
+ for (size_t i = 0; i < kPageCount; i++) {
+ Address address = kBegin + kPageSize * testcase[i].page_index_to_free;
+ CHECK_EQ(ra.FreeRegion(address), kPageSize);
+ CHECK_EQ(ra.all_regions_.size(), testcase[i].expected_page_count);
+ }
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
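
How to read the expected_page_count column: ra.all_regions_ evidently tracks used and free regions alike. Freeing a page with no free neighbours merely flips its state, so the count stays at 9 for the first five steps. Freeing a page flanked by free regions on both sides coalesces three regions into one, dropping the count by two each time: 9, 7, 5, 3, 1.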
+
+TEST(RegionAllocatorTest, FindRegion) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 16;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+ const Address kEnd = kBegin + kSize;
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate the whole region.
+ for (Address address = kBegin; address < kEnd; address += kPageSize) {
+ CHECK_EQ(ra.free_size(), kEnd - address);
+ CHECK(ra.AllocateRegionAt(address, kPageSize));
+ }
+
+ // No free regions left, the allocation should fail.
+ CHECK_EQ(ra.free_size(), 0);
+ CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
+
+ // Out-of-range queries must return the end iterator.
+ CHECK_EQ(ra.FindRegion(kBegin - 1), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kBegin - kPageSize), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kBegin / 2), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kEnd), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kEnd + kPageSize), ra.all_regions_.end());
+ CHECK_EQ(ra.FindRegion(kEnd * 2), ra.all_regions_.end());
+
+ for (Address address = kBegin; address < kEnd; address += kPageSize / 4) {
+ RegionAllocator::AllRegionsSet::iterator region_iter =
+ ra.FindRegion(address);
+ CHECK_NE(region_iter, ra.all_regions_.end());
+ RegionAllocator::Region* region = *region_iter;
+ Address region_start = RoundDown(address, kPageSize);
+ CHECK_EQ(region->begin(), region_start);
+ CHECK_LE(region->begin(), address);
+ CHECK_LT(address, region->end());
+ }
+}
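
The checks above pin down FindRegion's contract: any address inside the managed range maps to the region containing it, everything else maps to all_regions_.end(). A plausible implementation over a begin-ordered set follows; the Region shape is inferred from the test, but the body is our sketch rather than V8's code.

#include <cstddef>
#include <cstdint>
#include <set>

using Address = uintptr_t;

struct Region {
  Address begin;
  size_t size;
  Address end() const { return begin + size; }
};

struct RegionAddressLess {
  bool operator()(const Region* a, const Region* b) const {
    return a->begin < b->begin;
  }
};

using AllRegionsSet = std::set<Region*, RegionAddressLess>;

AllRegionsSet::iterator FindRegion(AllRegionsSet& regions, Address address) {
  Region key{address, 0};
  auto it = regions.upper_bound(&key);  // First region starting after address.
  if (it == regions.begin()) return regions.end();
  --it;  // Only candidate: the last region starting at or before address.
  return address < (*it)->end() ? it : regions.end();
}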
+
+TEST(RegionAllocatorTest, TrimRegion) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 64;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ Address address = kBegin + 13 * kPageSize;
+ size_t size = 37 * kPageSize;
+ size_t free_size = kSize - size;
+ CHECK(ra.AllocateRegionAt(address, size));
+
+ size_t trim_size = kPageSize;
+ do {
+ CHECK_EQ(ra.CheckRegion(address), size);
+ CHECK_EQ(ra.free_size(), free_size);
+
+ trim_size = std::min(size, trim_size);
+ size -= trim_size;
+ free_size += trim_size;
+ CHECK_EQ(ra.TrimRegion(address, size), trim_size);
+ trim_size *= 2;
+ } while (size != 0);
+
+ // Check that the whole region is free and can be fully allocated.
+ CHECK_EQ(ra.free_size(), kSize);
+ CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
+}
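
A quick trace of the loop for the 37-page region: trim_size doubles each pass but is clamped by std::min, so the successive trims are 1, 2, 4, 8, 16, and finally the remaining 6 pages (1 + 2 + 4 + 8 + 16 + 6 = 37), at which point size reaches zero and the whole region is free again.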
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/threaded-list-unittest.cc b/deps/v8/test/unittests/base/threaded-list-unittest.cc
new file mode 100644
index 0000000000..96a730370b
--- /dev/null
+++ b/deps/v8/test/unittests/base/threaded-list-unittest.cc
@@ -0,0 +1,309 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iterator>
+
+#include "src/v8.h"
+
+#include "src/base/threaded-list.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+struct ThreadedListTestNode {
+ ThreadedListTestNode() : next_(nullptr), other_next_(nullptr) {}
+
+ ThreadedListTestNode** next() { return &next_; }
+
+ ThreadedListTestNode* next_;
+
+ struct OtherTraits {
+ static ThreadedListTestNode** next(ThreadedListTestNode* t) {
+ return t->other_next();
+ }
+ };
+
+ ThreadedListTestNode** other_next() { return &other_next_; }
+
+ ThreadedListTestNode* other_next_;
+};
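
The node above can sit on two intrusive lists simultaneously because each list follows a different pointer chain. A minimal usage sketch, using only the types declared here:

ThreadedListTestNode node;
ThreadedList<ThreadedListTestNode> via_next;  // follows next_
ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits>
    via_other;                                // follows other_next_
via_next.Add(&node);
via_other.Add(&node);  // Independent membership; neither list disturbs the other.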
+
+struct ThreadedListTest : public ::testing::Test {
+ static const size_t INIT_NODES = 5;
+ ThreadedListTest() {}
+
+ void SetUp() override {
+ for (size_t i = 0; i < INIT_NODES; i++) {
+ nodes[i] = ThreadedListTestNode();
+ }
+
+ for (size_t i = 0; i < INIT_NODES; i++) {
+ list.Add(&nodes[i]);
+ normal_next_list.Add(&nodes[i]);
+ }
+
+ // Verify that the setup worked.
+ CHECK(list.Verify());
+ CHECK_EQ(list.LengthForTest(), INIT_NODES);
+ CHECK(normal_next_list.Verify());
+ CHECK_EQ(normal_next_list.LengthForTest(), INIT_NODES);
+
+ extra_test_node_0 = ThreadedListTestNode();
+ extra_test_node_1 = ThreadedListTestNode();
+ extra_test_node_2 = ThreadedListTestNode();
+
+ extra_test_list.Add(&extra_test_node_0);
+ extra_test_list.Add(&extra_test_node_1);
+ extra_test_list.Add(&extra_test_node_2);
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ CHECK(extra_test_list.Verify());
+
+ normal_extra_test_list.Add(&extra_test_node_0);
+ normal_extra_test_list.Add(&extra_test_node_1);
+ normal_extra_test_list.Add(&extra_test_node_2);
+ CHECK_EQ(normal_extra_test_list.LengthForTest(), 3);
+ CHECK(normal_extra_test_list.Verify());
+ }
+
+ void TearDown() override {
+ // Check that the lists threaded through next_ are still untouched.
+ CHECK(normal_next_list.Verify());
+ CHECK_EQ(normal_next_list.LengthForTest(), INIT_NODES);
+ CHECK_EQ(normal_next_list.AtForTest(0), &nodes[0]);
+ CHECK_EQ(normal_next_list.AtForTest(4), &nodes[4]);
+ CHECK(normal_extra_test_list.Verify());
+ CHECK_EQ(normal_extra_test_list.LengthForTest(), 3);
+ CHECK_EQ(normal_extra_test_list.AtForTest(0), &extra_test_node_0);
+ CHECK_EQ(normal_extra_test_list.AtForTest(2), &extra_test_node_2);
+
+ list.Clear();
+ extra_test_list.Clear();
+ }
+
+ ThreadedListTestNode nodes[INIT_NODES];
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> list;
+ ThreadedList<ThreadedListTestNode> normal_next_list;
+
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits>
+ extra_test_list;
+ ThreadedList<ThreadedListTestNode> normal_extra_test_list;
+ ThreadedListTestNode extra_test_node_0;
+ ThreadedListTestNode extra_test_node_1;
+ ThreadedListTestNode extra_test_node_2;
+};
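
Note the fixture's design: the same five nodes are threaded onto two lists at once, list through OtherTraits' other_next_ chain and normal_next_list through the default next_ chain, and likewise the three extra nodes. TearDown then asserts that all the OtherTraits mutations performed by the tests left the next_-threaded lists untouched, which is precisely the property that makes intrusive multi-list membership safe.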
+
+TEST_F(ThreadedListTest, Add) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ ThreadedListTestNode new_node;
+ // Add to existing list
+ list.Add(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 6);
+ CHECK_EQ(list.AtForTest(5), &new_node);
+
+ list.Clear();
+ CHECK_EQ(list.LengthForTest(), 0);
+
+ new_node = ThreadedListTestNode();
+ // Add to empty list
+ list.Add(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 1);
+ CHECK_EQ(list.AtForTest(0), &new_node);
+}
+
+TEST_F(ThreadedListTest, AddFront) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ ThreadedListTestNode new_node;
+ // AddFront to existing list
+ list.AddFront(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 6);
+ CHECK_EQ(list.first(), &new_node);
+
+ list.Clear();
+ CHECK_EQ(list.LengthForTest(), 0);
+
+ new_node = ThreadedListTestNode();
+ // AddFront to empty list
+ list.AddFront(&new_node);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 1);
+ CHECK_EQ(list.first(), &new_node);
+}
+
+TEST_F(ThreadedListTest, ReinitializeHead) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ CHECK_NE(extra_test_list.first(), list.first());
+ list.ReinitializeHead(&extra_test_node_0);
+ list.Verify();
+ CHECK_EQ(extra_test_list.first(), list.first());
+ CHECK_EQ(extra_test_list.end(), list.end());
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+}
+
+TEST_F(ThreadedListTest, DropHead) {
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ CHECK_EQ(extra_test_list.first(), &extra_test_node_0);
+ extra_test_list.DropHead();
+ extra_test_list.Verify();
+ CHECK_EQ(extra_test_list.first(), &extra_test_node_1);
+ CHECK_EQ(extra_test_list.LengthForTest(), 2);
+}
+
+TEST_F(ThreadedListTest, Append) {
+ auto initial_extra_list_end = extra_test_list.end();
+ CHECK_EQ(list.LengthForTest(), 5);
+ list.Append(std::move(extra_test_list));
+ list.Verify();
+ extra_test_list.Verify();
+ CHECK(extra_test_list.is_empty());
+ CHECK_EQ(list.LengthForTest(), 8);
+ CHECK_EQ(list.AtForTest(4), &nodes[4]);
+ CHECK_EQ(list.AtForTest(5), &extra_test_node_0);
+ CHECK_EQ(list.end(), initial_extra_list_end);
+}
+
+TEST_F(ThreadedListTest, Prepend) {
+ CHECK_EQ(list.LengthForTest(), 5);
+ list.Prepend(std::move(extra_test_list));
+ list.Verify();
+ extra_test_list.Verify();
+ CHECK(extra_test_list.is_empty());
+ CHECK_EQ(list.LengthForTest(), 8);
+ CHECK_EQ(list.first(), &extra_test_node_0);
+ CHECK_EQ(list.AtForTest(2), &extra_test_node_2);
+ CHECK_EQ(list.AtForTest(3), &nodes[0]);
+}
+
+TEST_F(ThreadedListTest, Clear) {
+ CHECK_NE(list.LengthForTest(), 0);
+ list.Clear();
+ CHECK_EQ(list.LengthForTest(), 0);
+ CHECK_NULL(list.first());
+}
+
+TEST_F(ThreadedListTest, MoveAssign) {
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> m_list;
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ m_list = std::move(extra_test_list);
+
+ m_list.Verify();
+ CHECK_EQ(m_list.first(), &extra_test_node_0);
+ CHECK_EQ(m_list.LengthForTest(), 3);
+
+ // Move-assign from an empty list.
+ extra_test_list.Clear();
+ CHECK_EQ(extra_test_list.LengthForTest(), 0);
+ m_list = std::move(extra_test_list);
+ CHECK_EQ(m_list.LengthForTest(), 0);
+
+ m_list.Verify();
+ CHECK_NULL(m_list.first());
+}
+
+TEST_F(ThreadedListTest, MoveCtor) {
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> m_list(
+ std::move(extra_test_list));
+
+ m_list.Verify();
+ CHECK_EQ(m_list.LengthForTest(), 3);
+ CHECK_EQ(m_list.first(), &extra_test_node_0);
+
+ // Move-construct from an empty list.
+ extra_test_list.Clear();
+ CHECK_EQ(extra_test_list.LengthForTest(), 0);
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> m_list2(
+ std::move(extra_test_list));
+ CHECK_EQ(m_list2.LengthForTest(), 0);
+
+ m_list2.Verify();
+ CHECK_NULL(m_list2.first());
+}
+
+TEST_F(ThreadedListTest, Remove) {
+ CHECK_EQ(list.LengthForTest(), 5);
+
+ // Remove first
+ CHECK_EQ(list.first(), &nodes[0]);
+ list.Remove(&nodes[0]);
+ list.Verify();
+ CHECK_EQ(list.first(), &nodes[1]);
+ CHECK_EQ(list.LengthForTest(), 4);
+
+ // Remove middle
+ list.Remove(&nodes[2]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 3);
+ CHECK_EQ(list.first(), &nodes[1]);
+ CHECK_EQ(list.AtForTest(1), &nodes[3]);
+
+ // Remove last
+ list.Remove(&nodes[4]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 2);
+ CHECK_EQ(list.first(), &nodes[1]);
+ CHECK_EQ(list.AtForTest(1), &nodes[3]);
+
+ // Remove rest
+ list.Remove(&nodes[1]);
+ list.Remove(&nodes[3]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 0);
+
+ // Remove not found
+ list.Remove(&nodes[4]);
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 0);
+}
+
+TEST_F(ThreadedListTest, Rewind) {
+ CHECK_EQ(extra_test_list.LengthForTest(), 3);
+ for (auto iter = extra_test_list.begin(); iter != extra_test_list.end();
+ ++iter) {
+ if (*iter == &extra_test_node_2) {
+ extra_test_list.Rewind(iter);
+ break;
+ }
+ }
+ CHECK_EQ(extra_test_list.LengthForTest(), 2);
+ auto iter = extra_test_list.begin();
+ CHECK_EQ(*iter, &extra_test_node_0);
+ std::advance(iter, 1);
+ CHECK_EQ(*iter, &extra_test_node_1);
+
+ extra_test_list.Rewind(extra_test_list.begin());
+ CHECK_EQ(extra_test_list.LengthForTest(), 0);
+}
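
Rewind(iter) truncates the list so that iter becomes its new end: rewinding at extra_test_node_2 keeps the first two nodes, and rewinding at begin() empties the list entirely.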
+
+TEST_F(ThreadedListTest, IterComp) {
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits> c_list =
+ std::move(extra_test_list);
+ bool found_first = false;  // Must be initialized: the CHECK below reads it.
+ for (auto iter = c_list.begin(); iter != c_list.end(); ++iter) {
+ // This triggers the operator== on the iterator
+ if (iter == c_list.begin()) {
+ found_first = true;
+ }
+ }
+ CHECK(found_first);
+}
+
+TEST_F(ThreadedListTest, ConstIterComp) {
+ const ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits>
+ c_list = std::move(extra_test_list);
+ bool found_first = false;  // Must be initialized: the CHECK below reads it.
+ for (auto iter = c_list.begin(); iter != c_list.end(); ++iter) {
+ // This triggers the operator== on the iterator
+ if (iter == c_list.begin()) {
+ found_first = true;
+ }
+ }
+ CHECK(found_first);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/cancelable-tasks-unittest.cc b/deps/v8/test/unittests/cancelable-tasks-unittest.cc
index d0462877f5..97ac4d4b7d 100644
--- a/deps/v8/test/unittests/cancelable-tasks-unittest.cc
+++ b/deps/v8/test/unittests/cancelable-tasks-unittest.cc
@@ -71,7 +71,7 @@ class ThreadedRunner final : public base::Thread {
explicit ThreadedRunner(TestTask* task)
: Thread(Options("runner thread")), task_(task) {}
- virtual void Run() {
+ void Run() override {
task_->Run();
delete task_;
}
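
The one-line change above is worth a remark: marking Run() with override makes the compiler verify that it actually overrides a base-class virtual. A standalone illustration (ours, not from the patch):

struct Base {
  virtual ~Base() = default;
  virtual void Run(int) {}
};
struct Derived : Base {
  void Run(int) override {}  // OK: signature checked against Base::Run(int).
  // void Run() override {}  // Would be a compile error: overrides nothing.
};
// Without `override`, a `virtual void Run() {}` in Derived would compile
// silently and merely hide Base::Run(int) - the class of bug this guards against.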
diff --git a/deps/v8/test/unittests/code-stub-assembler-unittest.h b/deps/v8/test/unittests/code-stub-assembler-unittest.h
index 2c32e0f9b7..c48eb772c0 100644
--- a/deps/v8/test/unittests/code-stub-assembler-unittest.h
+++ b/deps/v8/test/unittests/code-stub-assembler-unittest.h
@@ -14,8 +14,8 @@ namespace internal {
class CodeStubAssemblerTest : public TestWithIsolateAndZone {
public:
- CodeStubAssemblerTest() {}
- ~CodeStubAssemblerTest() override {}
+ CodeStubAssemblerTest() = default;
+ ~CodeStubAssemblerTest() override = default;
};
class CodeStubAssemblerTestState : public compiler::CodeAssemblerState {
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 45121aedb3..bfc111aed5 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -9,6 +9,8 @@
#include "include/v8-platform.h"
#include "src/api-inl.h"
#include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
@@ -25,16 +27,6 @@
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
-// V8 is smart enough to know something was already compiled and return compiled
-// code straight away. We need a unique name for each test function so that V8
-// returns an empty SharedFunctionInfo.
-#define _STR(x) #x
-#define STR(x) _STR(x)
-#define _SCRIPT(fn, a, b, c) a fn b fn c
-#define SCRIPT(a, b, c) _SCRIPT("f" STR(__LINE__), a, b, c)
-#define TEST_SCRIPT() \
- "function f" STR(__LINE__) "(x, y) { return x * y }; f" STR(__LINE__) ";"
-
namespace v8 {
namespace internal {
@@ -77,6 +69,37 @@ class CompilerDispatcherTest : public TestWithNativeContext {
CompilerDispatcherTestFlags::RestoreFlags();
}
+ static base::Optional<CompilerDispatcher::JobId> EnqueueUnoptimizedCompileJob(
+ CompilerDispatcher* dispatcher, Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) {
+ std::unique_ptr<ParseInfo> outer_parse_info =
+ test::OuterParseInfoForShared(isolate, shared);
+ AstValueFactory* ast_value_factory =
+ outer_parse_info->GetOrCreateAstValueFactory();
+ AstNodeFactory ast_node_factory(ast_value_factory,
+ outer_parse_info->zone());
+
+ const AstRawString* function_name =
+ ast_value_factory->GetOneByteString("f");
+ DeclarationScope* script_scope = new (outer_parse_info->zone())
+ DeclarationScope(outer_parse_info->zone(), ast_value_factory);
+ DeclarationScope* function_scope =
+ new (outer_parse_info->zone()) DeclarationScope(
+ outer_parse_info->zone(), script_scope, FUNCTION_SCOPE);
+ function_scope->set_start_position(shared->StartPosition());
+ function_scope->set_end_position(shared->EndPosition());
+ const FunctionLiteral* function_literal =
+ ast_node_factory.NewFunctionLiteral(
+ function_name, function_scope, nullptr, -1, -1, -1,
+ FunctionLiteral::kNoDuplicateParameters,
+ FunctionLiteral::kAnonymousExpression,
+ FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
+ shared->FunctionLiteralId(isolate), nullptr);
+
+ return dispatcher->Enqueue(outer_parse_info.get(), function_name,
+ function_literal);
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherTest);
};
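
The helper above captures the dispatcher's new contract: jobs are created and tracked by JobId first, and become addressable by SharedFunctionInfo only after registration. A condensed sketch of the pattern the rewritten tests follow, with names exactly as in this file:

base::Optional<CompilerDispatcher::JobId> job_id =
    EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
if (job_id) {
  ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));  // Known by id first...
  dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
  ASSERT_TRUE(dispatcher.IsEnqueued(shared));   // ...and now by SFI as well.
}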
@@ -238,7 +261,7 @@ class MockPlatform : public v8::Platform {
TaskWrapper(MockPlatform* platform,
std::vector<std::unique_ptr<Task>> tasks, bool signal)
: platform_(platform), tasks_(std::move(tasks)), signal_(signal) {}
- ~TaskWrapper() = default;
+ ~TaskWrapper() override = default;
void Run() override {
for (auto& task : tasks_) {
@@ -313,17 +336,27 @@ TEST_F(CompilerDispatcherTest, IsEnqueued) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- ASSERT_TRUE(dispatcher.Enqueue(shared));
+ ASSERT_TRUE(job_id);
+ ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared)); // SFI not yet registered.
+
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+ ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
+
dispatcher.AbortAll(BlockingBehavior::kBlock);
+ ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(platform.IdleTaskPending());
+ platform.ClearWorkerTasks();
platform.ClearIdleTask();
}
@@ -331,79 +364,71 @@ TEST_F(CompilerDispatcherTest, FinishNow) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
ASSERT_FALSE(shared->is_compiled());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
+
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+
ASSERT_TRUE(dispatcher.FinishNow(shared));
// Finishing removes the SFI from the queue.
+ ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
ASSERT_TRUE(platform.IdleTaskPending());
- platform.ClearIdleTask();
-}
-
-TEST_F(CompilerDispatcherTest, FinishAllNow) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
-
- constexpr int num_funcs = 2;
- Handle<JSFunction> f[num_funcs];
- Handle<SharedFunctionInfo> shared[num_funcs];
-
- for (int i = 0; i < num_funcs; ++i) {
- std::stringstream ss;
- ss << 'f' << STR(__LINE__) << '_' << i;
- std::string func_name = ss.str();
- std::string script("function f" + func_name + "(x, y) { return x * y }; f" +
- func_name + ";");
- f[i] = RunJS<JSFunction>(script.c_str());
- shared[i] = Handle<SharedFunctionInfo>(f[i]->shared(), i_isolate());
- ASSERT_FALSE(shared[i]->is_compiled());
- ASSERT_TRUE(dispatcher.Enqueue(shared[i]));
- }
- dispatcher.FinishAllNow();
- for (int i = 0; i < num_funcs; ++i) {
- // Finishing removes the SFI from the queue.
- ASSERT_FALSE(dispatcher.IsEnqueued(shared[i]));
- ASSERT_TRUE(shared[i]->is_compiled());
- }
- platform.ClearIdleTask();
platform.ClearWorkerTasks();
+ platform.ClearIdleTask();
}
TEST_F(CompilerDispatcherTest, IdleTask) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
+
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
ASSERT_TRUE(platform.IdleTaskPending());
// Since time doesn't progress on the MockPlatform, this is enough idle time
// to finish compiling the function.
platform.RunIdleTask(1000.0, 0.0);
+ // Since we haven't yet registered the SFI for the job, it should still be
+ // enqueued and waiting.
+ ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));
+ ASSERT_FALSE(shared->is_compiled());
+ ASSERT_FALSE(platform.IdleTaskPending());
+
+ // Register SFI, which should schedule another idle task to complete the
+ // compilation.
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+ ASSERT_TRUE(platform.IdleTaskPending());
+ platform.RunIdleTask(1000.0, 0.0);
+
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
+ platform.ClearWorkerTasks();
}
TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
+
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+
ASSERT_TRUE(platform.IdleTaskPending());
// The job should be scheduled for the main thread.
@@ -419,9 +444,9 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
ASSERT_TRUE(platform.IdleTaskPending());
// The job should be still scheduled for the main thread, but ready for
- // parsing.
+ // finalization.
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
dispatcher.jobs_.begin()->second->status());
// Now grant a lot of idle time and freeze time.
@@ -430,25 +455,28 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
+ platform.ClearWorkerTasks();
}
TEST_F(CompilerDispatcherTest, IdleTaskException) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, 50);
- std::string func_name("f" STR(__LINE__));
- std::string script("function " + func_name + "(x) { var a = ");
- for (int i = 0; i < 500; i++) {
+ std::string raw_script("(x) { var a = ");
+ for (int i = 0; i < 1000; i++) {
// Alternate + and - to avoid n-ary operation nodes.
- script += "'x' + 'x' - ";
+ raw_script += "'x' + 'x' - ";
}
- script += " 'x'; }; " + func_name + ";";
- Handle<JSFunction> f = RunJS<JSFunction>(script.c_str());
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ raw_script += " 'x'; };";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script.c_str(), strlen(raw_script.c_str()));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), script);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
// Since time doesn't progress on the MockPlatform, this is enough idle time
// to finish compiling the function.
@@ -457,41 +485,33 @@ TEST_F(CompilerDispatcherTest, IdleTaskException) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(i_isolate()->has_pending_exception());
+ platform.ClearWorkerTasks();
}
TEST_F(CompilerDispatcherTest, CompileOnBackgroundThread) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared));
+ ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
dispatcher.jobs_.begin()->second->status());
-
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
- ASSERT_EQ(UnoptimizedCompileJob::Status::kCompiled,
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
dispatcher.jobs_.begin()->second->status());
// Now grant a lot of idle time and freeze time.
@@ -500,34 +520,30 @@ TEST_F(CompilerDispatcherTest, CompileOnBackgroundThread) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_FALSE(platform.WorkerTasksPending());
}
TEST_F(CompilerDispatcherTest, FinishNowWithWorkerTask) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
dispatcher.jobs_.begin()->second->status());
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_EQ(dispatcher.jobs_.size(), 1u);
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
+ dispatcher.jobs_.begin()->second->status());
ASSERT_TRUE(platform.WorkerTasksPending());
// This does not block, but races with the FinishNow() call below.
@@ -545,46 +561,54 @@ TEST_F(CompilerDispatcherTest, IdleTaskMultipleJobs) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script1[] = TEST_SCRIPT();
- Handle<JSFunction> f1 = RunJS<JSFunction>(script1);
- Handle<SharedFunctionInfo> shared1(f1->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared_1 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_1->is_compiled());
+ Handle<SharedFunctionInfo> shared_2 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_2->is_compiled());
- const char script2[] = TEST_SCRIPT();
- Handle<JSFunction> f2 = RunJS<JSFunction>(script2);
- Handle<SharedFunctionInfo> shared2(f2->shared(), i_isolate());
+ base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
+ base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared1));
- ASSERT_TRUE(dispatcher.Enqueue(shared2));
- ASSERT_TRUE(platform.IdleTaskPending());
+ dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
+ dispatcher.RegisterSharedFunctionInfo(*job_id_2, *shared_2);
+
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared_1));
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared_2));
// Since time doesn't progress on the MockPlatform, this is enough idle time
// to finish compiling the function.
platform.RunIdleTask(1000.0, 0.0);
- ASSERT_FALSE(dispatcher.IsEnqueued(shared1));
- ASSERT_FALSE(dispatcher.IsEnqueued(shared2));
- ASSERT_TRUE(shared1->is_compiled());
- ASSERT_TRUE(shared2->is_compiled());
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared_1));
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared_2));
+ ASSERT_TRUE(shared_1->is_compiled());
+ ASSERT_TRUE(shared_2->is_compiled());
+ platform.ClearWorkerTasks();
}
TEST_F(CompilerDispatcherTest, FinishNowException) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, 50);
- std::string func_name("f" STR(__LINE__));
- std::string script("function " + func_name + "(x) { var a = ");
- for (int i = 0; i < 500; i++) {
+ std::string raw_script("(x) { var a = ");
+ for (int i = 0; i < 1000; i++) {
// Alternate + and - to avoid n-ary operation nodes.
- script += "'x' + 'x' - ";
+ raw_script += "'x' + 'x' - ";
}
- script += " 'x'; }; " + func_name + ";";
- Handle<JSFunction> f = RunJS<JSFunction>(script.c_str());
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ raw_script += " 'x'; };";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script.c_str(), strlen(raw_script.c_str()));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), script);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
ASSERT_FALSE(dispatcher.FinishNow(shared));
@@ -594,34 +618,26 @@ TEST_F(CompilerDispatcherTest, FinishNowException) {
i_isolate()->clear_pending_exception();
platform.ClearIdleTask();
+ platform.ClearWorkerTasks();
}
TEST_F(CompilerDispatcherTest, AsyncAbortAllPendingWorkerTask) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
dispatcher.jobs_.begin()->second->status());
-
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
// The background task hasn't yet started, so we can just cancel it.
@@ -642,32 +658,23 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script1[] = TEST_SCRIPT();
- Handle<JSFunction> f1 = RunJS<JSFunction>(script1);
- Handle<SharedFunctionInfo> shared1(f1->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared_1 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_1->is_compiled());
+ Handle<SharedFunctionInfo> shared_2 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_2->is_compiled());
- const char script2[] = TEST_SCRIPT();
- Handle<JSFunction> f2 = RunJS<JSFunction>(script2);
- Handle<SharedFunctionInfo> shared2(f2->shared(), i_isolate());
-
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared1));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
+ dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
dispatcher.jobs_.begin()->second->status());
-
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- ASSERT_TRUE(dispatcher.IsEnqueued(shared1));
- ASSERT_FALSE(shared1->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared_1));
+ ASSERT_FALSE(shared_1->is_compiled());
+ ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
// Kick off background tasks and freeze them.
@@ -681,7 +688,9 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask) {
ASSERT_TRUE(platform.ForegroundTasksPending());
// We can't schedule new tasks while we're aborting.
- ASSERT_FALSE(dispatcher.Enqueue(shared2));
+ base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
+ ASSERT_FALSE(job_id_2);
// Run the first AbortTask. Since the background job is still pending, it
// can't do anything.
@@ -711,10 +720,14 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask) {
ASSERT_FALSE(platform.ForegroundTasksPending());
// Now it's possible to enqueue new functions again.
- ASSERT_TRUE(dispatcher.Enqueue(shared2));
+ job_id_2 = EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
+ ASSERT_TRUE(job_id_2);
ASSERT_TRUE(platform.IdleTaskPending());
- ASSERT_FALSE(platform.WorkerTasksPending());
+ ASSERT_TRUE(platform.WorkerTasksPending());
ASSERT_FALSE(platform.ForegroundTasksPending());
+
+ dispatcher.AbortAll(BlockingBehavior::kBlock);
+ platform.ClearWorkerTasks();
platform.ClearIdleTask();
}
@@ -722,28 +735,20 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared));
+ ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
dispatcher.jobs_.begin()->second->status());
-
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
// Kick off background tasks and freeze them.
@@ -764,7 +769,12 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
ASSERT_TRUE(dispatcher.abort_);
}
- // While the background thread holds on to a job, it is still enqueud.
+ // Run the idle task, which should have already been canceled and won't do
+ // anything.
+ ASSERT_TRUE(platform.IdleTaskPending());
+ platform.RunIdleTask(5.0, 1.0);
+
+ // While the background thread holds on to a job, it is still enqueued.
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
// Release background task.
@@ -783,7 +793,7 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
}
ASSERT_TRUE(platform.ForegroundTasksPending());
- ASSERT_TRUE(platform.IdleTaskPending());
+ ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
platform.RunForegroundTasks();
@@ -791,32 +801,34 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
base::LockGuard<base::Mutex> lock(&dispatcher.mutex_);
ASSERT_FALSE(dispatcher.abort_);
}
-
- platform.ClearForegroundTasks();
- platform.ClearIdleTask();
}
TEST_F(CompilerDispatcherTest, MemoryPressure) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
// Can't enqueue tasks under memory pressure.
dispatcher.MemoryPressureNotification(v8::MemoryPressureLevel::kCritical,
true);
- ASSERT_FALSE(dispatcher.Enqueue(shared));
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ ASSERT_FALSE(job_id);
dispatcher.MemoryPressureNotification(v8::MemoryPressureLevel::kNone, true);
- ASSERT_TRUE(dispatcher.Enqueue(shared));
+
+ job_id = EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ ASSERT_TRUE(job_id);
// Memory pressure cancels current jobs.
dispatcher.MemoryPressureNotification(v8::MemoryPressureLevel::kCritical,
true);
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
+ ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
platform.ClearIdleTask();
+ platform.ClearWorkerTasks();
}
namespace {
@@ -826,7 +838,7 @@ class PressureNotificationTask : public CancelableTask {
PressureNotificationTask(Isolate* isolate, CompilerDispatcher* dispatcher,
base::Semaphore* sem)
: CancelableTask(isolate), dispatcher_(dispatcher), sem_(sem) {}
- ~PressureNotificationTask() override {}
+ ~PressureNotificationTask() override = default;
void RunInternal() override {
dispatcher_->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical,
@@ -847,11 +859,14 @@ TEST_F(CompilerDispatcherTest, MemoryPressureFromBackground) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
+
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
+ dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
- ASSERT_TRUE(dispatcher.Enqueue(shared));
base::Semaphore sem(0);
V8::GetCurrentPlatform()->CallOnWorkerThread(
base::make_unique<PressureNotificationTask>(i_isolate(), &dispatcher,
@@ -873,44 +888,6 @@ TEST_F(CompilerDispatcherTest, MemoryPressureFromBackground) {
ASSERT_FALSE(platform.ForegroundTasksPending());
platform.ClearIdleTask();
-}
-
-TEST_F(CompilerDispatcherTest, EnqueueJob) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
- std::unique_ptr<CompilerDispatcherJob> job(
- new UnoptimizedCompileJob(i_isolate(), dispatcher.tracer_.get(), shared,
- dispatcher.max_stack_size_));
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- dispatcher.Enqueue(std::move(job));
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
-
- ASSERT_TRUE(platform.IdleTaskPending());
- platform.ClearIdleTask();
- ASSERT_FALSE(platform.WorkerTasksPending());
-}
-
-TEST_F(CompilerDispatcherTest, EnqueueAndStep) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
-
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- ASSERT_TRUE(dispatcher.EnqueueAndStep(shared));
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
-
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- ASSERT_TRUE(platform.IdleTaskPending());
- platform.ClearIdleTask();
- ASSERT_TRUE(platform.WorkerTasksPending());
platform.ClearWorkerTasks();
}
@@ -919,14 +896,16 @@ TEST_F(CompilerDispatcherTest, CompileLazyFinishesDispatcherJob) {
// enqueued functions.
CompilerDispatcher* dispatcher = i_isolate()->compiler_dispatcher();
- const char script[] = "function lazy() { return 42; }; lazy;";
+ const char raw_script[] = "function lazy() { return 42; }; lazy;";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script, strlen(raw_script));
Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
ASSERT_FALSE(shared->is_compiled());
- ASSERT_FALSE(dispatcher->IsEnqueued(shared));
- ASSERT_TRUE(dispatcher->Enqueue(shared));
- ASSERT_TRUE(dispatcher->IsEnqueued(shared));
+
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ EnqueueUnoptimizedCompileJob(dispatcher, i_isolate(), shared);
+ dispatcher->RegisterSharedFunctionInfo(*job_id, *shared);
// Now force the function to run and ensure CompileLazy finished and dequeues
// it from the dispatcher.
@@ -940,66 +919,57 @@ TEST_F(CompilerDispatcherTest, CompileLazy2FinishesDispatcherJob) {
// enqueued functions.
CompilerDispatcher* dispatcher = i_isolate()->compiler_dispatcher();
- const char source2[] = "function lazy2() { return 42; }; lazy2;";
- Handle<JSFunction> lazy2 = RunJS<JSFunction>(source2);
- Handle<SharedFunctionInfo> shared2(lazy2->shared(), i_isolate());
- ASSERT_FALSE(shared2->is_compiled());
+ const char raw_source_2[] = "function lazy2() { return 42; }; lazy2;";
+ test::ScriptResource* source_2 =
+ new test::ScriptResource(raw_source_2, strlen(raw_source_2));
+ Handle<JSFunction> lazy2 = RunJS<JSFunction>(source_2);
+ Handle<SharedFunctionInfo> shared_2(lazy2->shared(), i_isolate());
+ ASSERT_FALSE(shared_2->is_compiled());
- const char source1[] = "function lazy1() { return lazy2(); }; lazy1;";
- Handle<JSFunction> lazy1 = RunJS<JSFunction>(source1);
- Handle<SharedFunctionInfo> shared1(lazy1->shared(), i_isolate());
- ASSERT_FALSE(shared1->is_compiled());
+ const char raw_source_1[] = "function lazy1() { return lazy2(); }; lazy1;";
+ test::ScriptResource* source_1 =
+ new test::ScriptResource(raw_source_1, strlen(raw_source_1));
+ Handle<JSFunction> lazy1 = RunJS<JSFunction>(source_1);
+ Handle<SharedFunctionInfo> shared_1(lazy1->shared(), i_isolate());
+ ASSERT_FALSE(shared_1->is_compiled());
- ASSERT_TRUE(dispatcher->Enqueue(shared1));
- ASSERT_TRUE(dispatcher->Enqueue(shared2));
+ base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ EnqueueUnoptimizedCompileJob(dispatcher, i_isolate(), shared_1);
+ dispatcher->RegisterSharedFunctionInfo(*job_id_1, *shared_1);
- RunJS("lazy1();");
- ASSERT_TRUE(shared1->is_compiled());
- ASSERT_TRUE(shared2->is_compiled());
- ASSERT_FALSE(dispatcher->IsEnqueued(shared1));
- ASSERT_FALSE(dispatcher->IsEnqueued(shared2));
-}
+ base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ EnqueueUnoptimizedCompileJob(dispatcher, i_isolate(), shared_2);
+ dispatcher->RegisterSharedFunctionInfo(*job_id_2, *shared_2);
-TEST_F(CompilerDispatcherTest, EnqueueAndStepTwice) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ ASSERT_TRUE(dispatcher->IsEnqueued(shared_1));
+ ASSERT_TRUE(dispatcher->IsEnqueued(shared_2));
- const char script[] = TEST_SCRIPT();
- Handle<JSFunction> f = RunJS<JSFunction>(script);
- Handle<SharedFunctionInfo> shared(f->shared(), i_isolate());
-
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- ASSERT_TRUE(dispatcher.EnqueueAndStep(shared));
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- // EnqueueAndStep of the same function again (shouldn't step the job.
- ASSERT_TRUE(dispatcher.EnqueueAndStep(shared));
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
-
- ASSERT_TRUE(platform.IdleTaskPending());
- ASSERT_TRUE(platform.WorkerTasksPending());
- platform.ClearIdleTask();
- platform.ClearWorkerTasks();
+ RunJS("lazy1();");
+ ASSERT_TRUE(shared_1->is_compiled());
+ ASSERT_TRUE(shared_2->is_compiled());
+ ASSERT_FALSE(dispatcher->IsEnqueued(shared_1));
+ ASSERT_FALSE(dispatcher->IsEnqueued(shared_2));
}
TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
- const char script1[] = TEST_SCRIPT();
- Handle<JSFunction> f1 = RunJS<JSFunction>(script1);
- Handle<SharedFunctionInfo> shared1(f1->shared(), i_isolate());
- const char script2[] = TEST_SCRIPT();
- Handle<JSFunction> f2 = RunJS<JSFunction>(script2);
- Handle<SharedFunctionInfo> shared2(f2->shared(), i_isolate());
+ Handle<SharedFunctionInfo> shared_1 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_1->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_TRUE(dispatcher.Enqueue(shared1));
- ASSERT_TRUE(dispatcher.Enqueue(shared2));
- ASSERT_TRUE(platform.IdleTaskPending());
+ Handle<SharedFunctionInfo> shared_2 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_2->is_compiled());
+
+ base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
+ dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
+
+ base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
+ dispatcher.RegisterSharedFunctionInfo(*job_id_2, *shared_2);
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
@@ -1007,21 +977,11 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
(++dispatcher.jobs_.begin())->second->status());
- // Make compiling super expensive, and advance job as much as possible on the
- // foreground thread.
- dispatcher.tracer_->RecordCompile(50000.0, 1);
- platform.RunIdleTask(10.0, 0.0);
- ASSERT_EQ(dispatcher.jobs_.size(), 2u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- dispatcher.jobs_.begin()->second->status());
- ASSERT_EQ(UnoptimizedCompileJob::Status::kPrepared,
- (++dispatcher.jobs_.begin())->second->status());
-
- ASSERT_TRUE(dispatcher.IsEnqueued(shared1));
- ASSERT_TRUE(dispatcher.IsEnqueued(shared2));
- ASSERT_FALSE(shared1->is_compiled());
- ASSERT_FALSE(shared2->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared_1));
+ ASSERT_TRUE(dispatcher.IsEnqueued(shared_2));
+ ASSERT_FALSE(shared_1->is_compiled());
+ ASSERT_FALSE(shared_2->is_compiled());
+ ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
@@ -1029,26 +989,20 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kCompiled,
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
dispatcher.jobs_.begin()->second->status());
- ASSERT_EQ(UnoptimizedCompileJob::Status::kCompiled,
+ ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
(++dispatcher.jobs_.begin())->second->status());
// Now grant a lot of idle time and freeze time.
platform.RunIdleTask(1000.0, 0.0);
- ASSERT_FALSE(dispatcher.IsEnqueued(shared1));
- ASSERT_FALSE(dispatcher.IsEnqueued(shared2));
- ASSERT_TRUE(shared1->is_compiled());
- ASSERT_TRUE(shared2->is_compiled());
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared_1));
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared_2));
+ ASSERT_TRUE(shared_1->is_compiled());
+ ASSERT_TRUE(shared_2->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
}
-#undef _STR
-#undef STR
-#undef _SCRIPT
-#undef SCRIPT
-#undef TEST_SCRIPT
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
index 5a0e89326b..e3d4ae078b 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
@@ -16,6 +16,7 @@
#include "src/flags.h"
#include "src/isolate-inl.h"
#include "src/parsing/parse-info.h"
+#include "src/parsing/preparsed-scope-data.h"
#include "src/v8.h"
#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
@@ -26,9 +27,11 @@ namespace internal {
class UnoptimizedCompileJobTest : public TestWithNativeContext {
public:
- UnoptimizedCompileJobTest() : tracer_(isolate()) {}
- ~UnoptimizedCompileJobTest() override {}
+ UnoptimizedCompileJobTest()
+ : tracer_(isolate()), allocator_(isolate()->allocator()) {}
+ ~UnoptimizedCompileJobTest() override = default;
+ AccountingAllocator* allocator() { return allocator_; }
CompilerDispatcherTracer* tracer() { return &tracer_; }
static void SetUpTestCase() {
@@ -44,15 +47,43 @@ class UnoptimizedCompileJobTest : public TestWithNativeContext {
save_flags_ = nullptr;
}
- static Variable* LookupVariableByName(UnoptimizedCompileJob* job,
- const char* name) {
- const AstRawString* name_raw_string =
- job->parse_info_->ast_value_factory()->GetOneByteString(name);
- return job->parse_info_->literal()->scope()->Lookup(name_raw_string);
+ UnoptimizedCompileJob* NewUnoptimizedCompileJob(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared,
+ size_t stack_size = FLAG_stack_size) {
+ std::unique_ptr<ParseInfo> outer_parse_info =
+ test::OuterParseInfoForShared(isolate, shared);
+ AstValueFactory* ast_value_factory =
+ outer_parse_info->GetOrCreateAstValueFactory();
+ AstNodeFactory ast_node_factory(ast_value_factory,
+ outer_parse_info->zone());
+
+ const AstRawString* function_name =
+ ast_value_factory->GetOneByteString("f");
+ DeclarationScope* script_scope = new (outer_parse_info->zone())
+ DeclarationScope(outer_parse_info->zone(), ast_value_factory);
+ DeclarationScope* function_scope =
+ new (outer_parse_info->zone()) DeclarationScope(
+ outer_parse_info->zone(), script_scope, FUNCTION_SCOPE);
+ function_scope->set_start_position(shared->StartPosition());
+ function_scope->set_end_position(shared->EndPosition());
+ const FunctionLiteral* function_literal =
+ ast_node_factory.NewFunctionLiteral(
+ function_name, function_scope, nullptr, -1, -1, -1,
+ FunctionLiteral::kNoDuplicateParameters,
+ FunctionLiteral::kAnonymousExpression,
+ FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
+ shared->FunctionLiteralId(isolate), nullptr);
+
+ return new UnoptimizedCompileJob(
+ tracer(), allocator(), outer_parse_info.get(), function_name,
+ function_literal,
+ isolate->counters()->worker_thread_runtime_call_stats(),
+ isolate->counters()->compile_function_on_background(), FLAG_stack_size);
}
private:
CompilerDispatcherTracer tracer_;
+ AccountingAllocator* allocator_;
static SaveFlags* save_flags_;
DISALLOW_COPY_AND_ASSIGN(UnoptimizedCompileJobTest);
@@ -63,24 +94,25 @@ SaveFlags* UnoptimizedCompileJobTest::save_flags_ = nullptr;
#define ASSERT_JOB_STATUS(STATUS, JOB) ASSERT_EQ(STATUS, JOB->status())
TEST_F(UnoptimizedCompileJobTest, Construct) {
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), nullptr),
- FLAG_stack_size));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
}
TEST_F(UnoptimizedCompileJobTest, StateTransitions) {
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), nullptr),
- FLAG_stack_size));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(isolate(), nullptr);
+ ASSERT_FALSE(shared->is_compiled());
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kPrepared, job);
job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kCompiled, job);
- job->FinalizeOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kReadyToFinalize, job);
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_FALSE(job->IsFailed());
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
job->ResetOnMainThread(isolate());
@@ -89,15 +121,16 @@ TEST_F(UnoptimizedCompileJobTest, StateTransitions) {
TEST_F(UnoptimizedCompileJobTest, SyntaxError) {
test::ScriptResource* script = new test::ScriptResource("^^^", strlen("^^^"));
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), script),
- FLAG_stack_size));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(isolate(), script);
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->ReportErrorsOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kReadyToFinalize, job);
+
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_TRUE(job->IsFailed());
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
ASSERT_TRUE(isolate()->has_pending_exception());
@@ -109,7 +142,7 @@ TEST_F(UnoptimizedCompileJobTest, SyntaxError) {
}
TEST_F(UnoptimizedCompileJobTest, CompileAndRun) {
- const char script[] =
+ const char raw_script[] =
"function g() {\n"
" f = function(a) {\n"
" for (var i = 0; i < 3; i++) { a += 20; }\n"
@@ -118,29 +151,28 @@ TEST_F(UnoptimizedCompileJobTest, CompileAndRun) {
" return f;\n"
"}\n"
"g();";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script, strlen(raw_script));
Handle<JSFunction> f = RunJS<JSFunction>(script);
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), handle(f->shared(), f->GetIsolate()),
- FLAG_stack_size));
+ Handle<SharedFunctionInfo> shared = handle(f->shared(), isolate());
+ ASSERT_FALSE(shared->is_compiled());
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
job->Compile(false);
- ASSERT_FALSE(job->IsFailed());
- job->FinalizeOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
+ ASSERT_TRUE(shared->is_compiled());
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
Smi* value = Smi::cast(*RunJS("f(100);"));
ASSERT_TRUE(value == Smi::FromInt(160));
-
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
-TEST_F(UnoptimizedCompileJobTest, CompileFailureToAnalyse) {
+TEST_F(UnoptimizedCompileJobTest, CompileFailure) {
std::string raw_script("() { var a = ");
- for (int i = 0; i < 500000; i++) {
+ for (int i = 0; i < 10000; i++) {
// TODO(leszeks): Figure out a more "unit-test-y" way of forcing an analysis
// failure than a binop stack overflow.
@@ -150,42 +182,16 @@ TEST_F(UnoptimizedCompileJobTest, CompileFailureToAnalyse) {
raw_script += " 'x'; }";
test::ScriptResource* script =
new test::ScriptResource(raw_script.c_str(), strlen(raw_script.c_str()));
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), script),
- 100));
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(isolate(), script);
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared, 100));
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->ReportErrorsOnMainThread(isolate());
- ASSERT_TRUE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
- ASSERT_TRUE(isolate()->has_pending_exception());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kReadyToFinalize, job);
- isolate()->clear_pending_exception();
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
-}
-
-TEST_F(UnoptimizedCompileJobTest, CompileFailureToFinalize) {
- std::string raw_script("() { var a = ");
- for (int i = 0; i < 500; i++) {
- // Alternate + and - to avoid n-ary operation nodes.
- raw_script += "'x' + 'x' - ";
- }
- raw_script += " 'x'; }";
- test::ScriptResource* script =
- new test::ScriptResource(raw_script.c_str(), strlen(raw_script.c_str()));
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), script),
- 50));
-
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
- job->Compile(false);
- ASSERT_FALSE(job->IsFailed());
- job->ReportErrorsOnMainThread(isolate());
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_TRUE(job->IsFailed());
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
ASSERT_TRUE(isolate()->has_pending_exception());
@@ -199,7 +205,7 @@ class CompileTask : public Task {
public:
CompileTask(UnoptimizedCompileJob* job, base::Semaphore* semaphore)
: job_(job), semaphore_(semaphore) {}
- ~CompileTask() override {}
+ ~CompileTask() override = default;
void Run() override {
job_->Compile(true);
@@ -223,19 +229,18 @@ TEST_F(UnoptimizedCompileJobTest, CompileOnBackgroundThread) {
"}";
test::ScriptResource* script =
new test::ScriptResource(raw_script, strlen(raw_script));
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), test::CreateSharedFunctionInfo(isolate(), script),
- 100));
-
- job->PrepareOnMainThread(isolate());
- ASSERT_FALSE(job->IsFailed());
+ Handle<SharedFunctionInfo> shared =
+ test::CreateSharedFunctionInfo(isolate(), script);
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
base::Semaphore semaphore(0);
auto background_task = base::make_unique<CompileTask>(job.get(), &semaphore);
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kPrepared, job);
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
+
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(background_task));
semaphore.Wait();
- job->FinalizeOnMainThread(isolate());
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_FALSE(job->IsFailed());
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
@@ -243,26 +248,64 @@ TEST_F(UnoptimizedCompileJobTest, CompileOnBackgroundThread) {
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
-TEST_F(UnoptimizedCompileJobTest, LazyInnerFunctions) {
- const char script[] =
- "f = function() {\n"
- " e = (function() { return 42; });\n"
- " return e;\n"
- "};\n"
- "f;";
+TEST_F(UnoptimizedCompileJobTest, EagerInnerFunctions) {
+ const char raw_script[] =
+ "function g() {\n"
+ " f = function() {\n"
+ " // Simulate an eager IIFE with brackets.\n "
+ " var e = (function () { return 42; });\n"
+ " return e;\n"
+ " }\n"
+ " return f;\n"
+ "}\n"
+ "g();";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script, strlen(raw_script));
Handle<JSFunction> f = RunJS<JSFunction>(script);
+ Handle<SharedFunctionInfo> shared = handle(f->shared(), isolate());
+ ASSERT_FALSE(shared->is_compiled());
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
- std::unique_ptr<UnoptimizedCompileJob> job(new UnoptimizedCompileJob(
- isolate(), tracer(), handle(f->shared(), f->GetIsolate()),
- FLAG_stack_size));
-
- job->PrepareOnMainThread(isolate());
+ job->Compile(false);
+ ASSERT_FALSE(job->IsFailed());
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_FALSE(job->IsFailed());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
+ ASSERT_TRUE(shared->is_compiled());
+
+ Handle<JSFunction> e = RunJS<JSFunction>("f();");
+
+ ASSERT_TRUE(e->shared()->is_compiled());
+
+ job->ResetOnMainThread(isolate());
+ ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
+}
+
+TEST_F(UnoptimizedCompileJobTest, LazyInnerFunctions) {
+ const char raw_script[] =
+ "function g() {\n"
+ " f = function() {\n"
+ " function e() { return 42; };\n"
+ " return e;\n"
+ " }\n"
+ " return f;\n"
+ "}\n"
+ "g();";
+ test::ScriptResource* script =
+ new test::ScriptResource(raw_script, strlen(raw_script));
+ Handle<JSFunction> f = RunJS<JSFunction>(script);
+ Handle<SharedFunctionInfo> shared = handle(f->shared(), isolate());
+ ASSERT_FALSE(shared->is_compiled());
+ std::unique_ptr<UnoptimizedCompileJob> job(
+ NewUnoptimizedCompileJob(isolate(), shared));
+
job->Compile(false);
ASSERT_FALSE(job->IsFailed());
- job->FinalizeOnMainThread(isolate());
+ job->FinalizeOnMainThread(isolate(), shared);
ASSERT_FALSE(job->IsFailed());
ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
+ ASSERT_TRUE(shared->is_compiled());
Handle<JSFunction> e = RunJS<JSFunction>("f();");
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 48c15934df..53b9c6a241 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -22,8 +22,8 @@ using ToBooleanMode = interpreter::BytecodeArrayBuilder::ToBooleanMode;
class BytecodeAnalysisTest : public TestWithIsolateAndZone {
public:
- BytecodeAnalysisTest() {}
- ~BytecodeAnalysisTest() override {}
+ BytecodeAnalysisTest() = default;
+ ~BytecodeAnalysisTest() override = default;
static void SetUpTestCase() {
CHECK_NULL(save_flags_);
diff --git a/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc b/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc
index a201fc9a55..22ed2abf9b 100644
--- a/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc
@@ -18,7 +18,7 @@ namespace compiler {
class CheckpointEliminationTest : public GraphTest {
public:
CheckpointEliminationTest() : GraphTest() {}
- ~CheckpointEliminationTest() override {}
+ ~CheckpointEliminationTest() override = default;
protected:
Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
diff --git a/deps/v8/test/unittests/compiler/code-assembler-unittest.h b/deps/v8/test/unittests/compiler/code-assembler-unittest.h
index 21f3df5f4b..56f1959765 100644
--- a/deps/v8/test/unittests/compiler/code-assembler-unittest.h
+++ b/deps/v8/test/unittests/compiler/code-assembler-unittest.h
@@ -15,8 +15,8 @@ namespace compiler {
class CodeAssemblerTest : public TestWithIsolateAndZone {
public:
- CodeAssemblerTest() {}
- ~CodeAssemblerTest() override {}
+ CodeAssemblerTest() = default;
+ ~CodeAssemblerTest() override = default;
};
class CodeAssemblerTestState : public CodeAssemblerState {
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index cb5b5fd806..f40cab2758 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -23,7 +23,7 @@ class CommonOperatorReducerTest : public GraphTest {
public:
explicit CommonOperatorReducerTest(int num_parameters = 1)
: GraphTest(num_parameters), machine_(zone()), simplified_(zone()) {}
- ~CommonOperatorReducerTest() override {}
+ ~CommonOperatorReducerTest() override = default;
protected:
Reduction Reduce(
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 2ee0dbb382..4d66ded5f1 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -120,7 +120,7 @@ namespace {
class CommonOperatorTest : public TestWithZone {
public:
CommonOperatorTest() : common_(zone()) {}
- ~CommonOperatorTest() override {}
+ ~CommonOperatorTest() override = default;
CommonOperatorBuilder* common() { return &common_; }
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index 464ee3a971..fd0845159f 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -66,7 +66,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
js_heap_broker_(isolate(), zone()),
simplified_(zone()),
deps_(isolate(), zone()) {}
- ~ConstantFoldingReducerTest() override {}
+ ~ConstantFoldingReducerTest() override = default;
protected:
Reduction Reduce(Node* node) {
diff --git a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
index 607df1fafb..992ddcc55b 100644
--- a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
@@ -21,7 +21,7 @@ class ControlFlowOptimizerTest : public GraphTest {
public:
explicit ControlFlowOptimizerTest(int num_parameters = 3)
: GraphTest(num_parameters), machine_(zone()), javascript_(zone()) {}
- ~ControlFlowOptimizerTest() override {}
+ ~ControlFlowOptimizerTest() override = default;
protected:
void Optimize() {
diff --git a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
index 4444ed0ca5..72e02e1416 100644
--- a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
@@ -20,7 +20,7 @@ class DeadCodeEliminationTest : public GraphTest {
public:
explicit DeadCodeEliminationTest(int num_parameters = 4)
: GraphTest(num_parameters) {}
- ~DeadCodeEliminationTest() override {}
+ ~DeadCodeEliminationTest() override = default;
protected:
Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index dc2f2189d1..f506502610 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -751,7 +751,7 @@ TEST_F(GraphReducerTest, Sorter1) {
Node* n1 = graph()->NewNode(&kOpA0);
Node* n2 = graph()->NewNode(&kOpA1, n1);
Node* n3 = graph()->NewNode(&kOpA1, n1);
- Node* end = NULL; // Initialize to please the compiler.
+ Node* end = nullptr; // Initialize to please the compiler.
if (i == 0) end = graph()->NewNode(&kOpA2, n2, n3);
if (i == 1) end = graph()->NewNode(&kOpA2, n3, n2);
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index af2c382f5b..4736ddefa2 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -4,6 +4,7 @@
#include "test/unittests/compiler/graph-unittest.h"
+#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/node-properties.h"
#include "src/heap/factory.h"
#include "src/objects-inl.h" // TODO(everyone): Make typer.h IWYU compliant.
@@ -24,16 +25,22 @@ GraphTest::GraphTest(int num_parameters)
node_origins_(&graph_) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
+ js_heap_broker()->SetNativeContextRef();
}
-GraphTest::~GraphTest() {}
+GraphTest::~GraphTest() = default;
Node* GraphTest::Parameter(int32_t index) {
return graph()->NewNode(common()->Parameter(index), graph()->start());
}
+Node* GraphTest::Parameter(Type type, int32_t index) {
+ Node* node = GraphTest::Parameter(index);
+ NodeProperties::SetType(node, type);
+ return node;
+}
Node* GraphTest::Float32Constant(volatile float value) {
return graph()->NewNode(common()->Float32Constant(value));
@@ -113,15 +120,9 @@ Matcher<Node*> GraphTest::IsUndefinedConstant() {
TypedGraphTest::TypedGraphTest(int num_parameters)
: GraphTest(num_parameters),
- typer_(isolate(), js_heap_broker(), Typer::kNoFlags, graph()) {}
-
-TypedGraphTest::~TypedGraphTest() {}
+ typer_(js_heap_broker(), Typer::kNoFlags, graph()) {}
-Node* TypedGraphTest::Parameter(Type type, int32_t index) {
- Node* node = GraphTest::Parameter(index);
- NodeProperties::SetType(node, type);
- return node;
-}
+TypedGraphTest::~TypedGraphTest() = default;
namespace graph_unittest {
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index d9b9934770..8317ebf279 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -34,6 +34,7 @@ class GraphTest : public virtual TestWithNativeContext,
Node* end() { return graph()->end(); }
Node* Parameter(int32_t index = 0);
+ Node* Parameter(Type type, int32_t index = 0);
Node* Float32Constant(volatile float value);
Node* Float64Constant(volatile double value);
Node* Int32Constant(int32_t value);
@@ -79,9 +80,6 @@ class TypedGraphTest : public GraphTest {
~TypedGraphTest() override;
protected:
- Node* Parameter(int32_t index = 0) { return GraphTest::Parameter(index); }
- Node* Parameter(Type type, int32_t index = 0);
-
Typer* typer() { return &typer_; }
private:
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index ed4a1c648a..2d59393f9d 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -20,7 +20,7 @@ namespace compiler {
InstructionSelectorTest::InstructionSelectorTest() : rng_(FLAG_random_seed) {}
-InstructionSelectorTest::~InstructionSelectorTest() {}
+InstructionSelectorTest::~InstructionSelectorTest() = default;
InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
@@ -365,7 +365,8 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
ZoneVector<MachineType> empty_types(zone());
auto call_descriptor = Linkage::GetJSCallDescriptor(
- zone(), false, 1, CallDescriptor::kNeedsFrameState);
+ zone(), false, 1,
+ CallDescriptor::kNeedsFrameState | CallDescriptor::kCanUseRoots);
// Build frame state for the state before the call.
Node* parameters = m.AddNode(
diff --git a/deps/v8/test/unittests/compiler/instruction-unittest.cc b/deps/v8/test/unittests/compiler/instruction-unittest.cc
index 96add7fdd8..72deb12d02 100644
--- a/deps/v8/test/unittests/compiler/instruction-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-unittest.cc
@@ -38,8 +38,8 @@ bool Contains(const ZoneVector<MoveOperands*>* moves,
class InstructionTest : public TestWithZone {
public:
- InstructionTest() {}
- virtual ~InstructionTest() {}
+ InstructionTest() = default;
+ ~InstructionTest() override = default;
ParallelMove* CreateParallelMove(
const std::vector<InstructionOperand>& operand_pairs) {
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 53e3b48762..7660f5851e 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -21,11 +21,12 @@ namespace compiler {
class JSCallReducerTest : public TypedGraphTest {
public:
JSCallReducerTest()
- : TypedGraphTest(3),
- javascript_(zone()),
- deps_(isolate(), zone()),
- js_heap_broker(isolate(), zone()) {}
- ~JSCallReducerTest() override {}
+ : TypedGraphTest(3), javascript_(zone()), deps_(isolate(), zone()) {
+ if (FLAG_concurrent_compiler_frontend) {
+ js_heap_broker()->SerializeStandardObjects();
+ }
+ }
+ ~JSCallReducerTest() override = default;
protected:
Reduction Reduce(Node* node) {
@@ -36,7 +37,7 @@ class JSCallReducerTest : public TypedGraphTest {
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- JSCallReducer reducer(&graph_reducer, &jsgraph, &js_heap_broker,
+ JSCallReducer reducer(&graph_reducer, &jsgraph, js_heap_broker(),
JSCallReducer::kNoFlags, native_context(), &deps_);
return reducer.Reduce(node);
}
@@ -45,16 +46,13 @@ class JSCallReducerTest : public TypedGraphTest {
static void SetUpTestCase() {
old_flag_lazy_ = i::FLAG_lazy_deserialization;
- old_flag_lazy_handler_ = i::FLAG_lazy_handler_deserialization;
i::FLAG_lazy_deserialization = false;
- i::FLAG_lazy_handler_deserialization = false;
TypedGraphTest::SetUpTestCase();
}
static void TearDownTestCase() {
TypedGraphTest::TearDownTestCase();
i::FLAG_lazy_deserialization = old_flag_lazy_;
- i::FLAG_lazy_handler_deserialization = old_flag_lazy_handler_;
}
Node* GlobalFunction(const char* name) {
@@ -124,7 +122,7 @@ class JSCallReducerTest : public TypedGraphTest {
// overwriting existing metadata.
shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
Handle<FeedbackVector> vector = FeedbackVector::New(isolate(), shared);
- VectorSlotPair feedback(vector, FeedbackSlot(0));
+ VectorSlotPair feedback(vector, FeedbackSlot(0), UNINITIALIZED);
return javascript()->Call(arity, CallFrequency(), feedback,
ConvertReceiverMode::kAny,
SpeculationMode::kAllowSpeculation);
@@ -133,7 +131,6 @@ class JSCallReducerTest : public TypedGraphTest {
private:
JSOperatorBuilder javascript_;
CompilationDependencies deps_;
- JSHeapBroker js_heap_broker;
static bool old_flag_lazy_;
static bool old_flag_lazy_handler_;
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 2db241aaa9..eafd7fa35e 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -33,8 +33,9 @@ class JSCreateLoweringTest : public TypedGraphTest {
: TypedGraphTest(3),
javascript_(zone()),
deps_(isolate(), zone()),
- handle_scope_(isolate()) {}
- ~JSCreateLoweringTest() override {}
+ handle_scope_(isolate()) {
+ }
+ ~JSCreateLoweringTest() override = default;
protected:
Reduction Reduce(Node* node) {
@@ -45,7 +46,7 @@ class JSCreateLoweringTest : public TypedGraphTest {
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph, js_heap_broker(),
- native_context(), zone());
+ zone());
return reducer.Reduce(node);
}
@@ -172,7 +173,7 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
// JSCreateWithContext
TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
- Handle<ScopeInfo> scope_info(factory()->NewScopeInfo(1));
+ Handle<ScopeInfo> scope_info = ScopeInfo::CreateForEmptyFunction(isolate());
Node* const object = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
@@ -192,7 +193,7 @@ TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
// JSCreateCatchContext
TEST_F(JSCreateLoweringTest, JSCreateCatchContext) {
- Handle<ScopeInfo> scope_info(factory()->NewScopeInfo(1));
+ Handle<ScopeInfo> scope_info = ScopeInfo::CreateForEmptyFunction(isolate());
Node* const exception = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index 2b0ccaed24..234fe940eb 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -26,7 +26,7 @@ namespace compiler {
class JSIntrinsicLoweringTest : public GraphTest {
public:
JSIntrinsicLoweringTest() : GraphTest(3), javascript_(zone()) {}
- ~JSIntrinsicLoweringTest() override {}
+ ~JSIntrinsicLoweringTest() override = default;
protected:
Reduction Reduce(Node* node) {
diff --git a/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc b/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc
new file mode 100644
index 0000000000..fdc87904c4
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc
@@ -0,0 +1,50 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/graph-unittest.h"
+
+#include "src/compiler/js-native-context-specialization.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/dtoa.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+namespace js_native_context_specialization_unittest {
+
+class JSNativeContextSpecializationTest : public GraphTest {
+ public:
+ explicit JSNativeContextSpecializationTest(int num_parameters = 1)
+ : GraphTest(num_parameters), javascript_(zone()) {}
+ ~JSNativeContextSpecializationTest() override = default;
+
+ protected:
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+};
+
+TEST_F(JSNativeContextSpecializationTest, GetMaxStringLengthOfString) {
+ const size_t str_len = 3;
+ const size_t num_len = kBase10MaximalLength + 1;
+
+ Node* const str_node = graph()->NewNode(
+ common()->HeapConstant(factory()->InternalizeUtf8String("str")));
+ EXPECT_EQ(JSNativeContextSpecialization::GetMaxStringLength(js_heap_broker(),
+ str_node),
+ str_len);
+
+ Node* const num_node = graph()->NewNode(common()->NumberConstant(10.0 / 3));
+ EXPECT_EQ(JSNativeContextSpecialization::GetMaxStringLength(js_heap_broker(),
+ num_node),
+ num_len);
+}
+
+} // namespace js_native_context_specialization_unittest
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 9ce837cd8c..43998824d2 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -38,7 +38,7 @@ Type const kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
class JSTypedLoweringTest : public TypedGraphTest {
public:
JSTypedLoweringTest() : TypedGraphTest(3), javascript_(zone()) {}
- ~JSTypedLoweringTest() override {}
+ ~JSTypedLoweringTest() override = default;
protected:
Reduction Reduce(Node* node) {
@@ -401,12 +401,7 @@ TEST_F(JSTypedLoweringTest, JSAddWithString) {
Reduction r = Reduce(graph()->NewNode(javascript()->Add(hint), lhs, rhs,
context, frame_state, effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsCall(_, IsHeapConstant(
- CodeFactory::StringAdd(
- isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED)
- .code()),
- lhs, rhs, context, frame_state, effect, control));
+ EXPECT_THAT(r.replacement(), IsStringConcat(_, lhs, rhs));
}
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
index 5c49468991..042e7e6bbc 100644
--- a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
@@ -26,7 +26,7 @@ class LoadEliminationTest : public TypedGraphTest {
simplified_(zone()),
jsgraph_(isolate(), graph(), common(), nullptr, simplified(), nullptr) {
}
- ~LoadEliminationTest() override {}
+ ~LoadEliminationTest() override = default;
protected:
JSGraph* jsgraph() { return &jsgraph_; }
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index d39336dfa5..07013aa52c 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -54,7 +54,7 @@ struct Counter {
class LoopPeelingTest : public GraphTest {
public:
LoopPeelingTest() : GraphTest(1), machine_(zone()) {}
- ~LoopPeelingTest() override {}
+ ~LoopPeelingTest() override = default;
protected:
MachineOperatorBuilder machine_;
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index f47e780426..b8b0c9004f 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -71,7 +71,7 @@ class MachineOperatorReducerTestWithParam
public:
explicit MachineOperatorReducerTestWithParam(int num_parameters = 2)
: MachineOperatorReducerTest(num_parameters) {}
- ~MachineOperatorReducerTestWithParam() override {}
+ ~MachineOperatorReducerTestWithParam() override = default;
};
@@ -344,6 +344,27 @@ TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt32WithConstant) {
}
}
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToInt64
+
+TEST_F(MachineOperatorReducerTest,
+ ChangeFloat64ToInt64WithChangeInt64ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToInt64(),
+ graph()->NewNode(machine()->ChangeInt64ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt64WithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeFloat64ToInt64(), Float64Constant(FastI2D(x))));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt64Constant(x));
+ }
+}
// -----------------------------------------------------------------------------
// ChangeFloat64ToUint32
@@ -397,6 +418,27 @@ TEST_F(MachineOperatorReducerTest, ChangeInt32ToInt64WithConstant) {
}
}
+// -----------------------------------------------------------------------------
+// ChangeInt64ToFloat64
+
+TEST_F(MachineOperatorReducerTest,
+ ChangeInt64ToFloat64WithChangeFloat64ToInt64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->ChangeInt64ToFloat64(),
+ graph()->NewNode(machine()->ChangeFloat64ToInt64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+TEST_F(MachineOperatorReducerTest, ChangeInt64ToFloat64WithConstant) {
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(machine()->ChangeInt64ToFloat64(), Int64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(BitEq(FastI2D(x))));
+ }
+}
// -----------------------------------------------------------------------------
// ChangeUint32ToFloat64
@@ -2020,6 +2062,16 @@ TEST_F(MachineOperatorReducerTest, Float64InsertHighWord32WithConstant) {
// -----------------------------------------------------------------------------
// Float64Equal
+TEST_F(MachineOperatorReducerTest, Float64EqualWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(double, y, kFloat64Values) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64Equal(), Float64Constant(x), Float64Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(x == y));
+ }
+ }
+}
TEST_F(MachineOperatorReducerTest, Float64EqualWithFloat32Conversions) {
Node* const p0 = Parameter(0);
@@ -2049,6 +2101,17 @@ TEST_F(MachineOperatorReducerTest, Float64EqualWithFloat32Constant) {
// -----------------------------------------------------------------------------
// Float64LessThan
+TEST_F(MachineOperatorReducerTest, Float64LessThanWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(double, y, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64LessThan(),
+ Float64Constant(x), Float64Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(x < y));
+ }
+ }
+}
TEST_F(MachineOperatorReducerTest, Float64LessThanWithFloat32Conversions) {
Node* const p0 = Parameter(0);
@@ -2089,6 +2152,17 @@ TEST_F(MachineOperatorReducerTest, Float64LessThanWithFloat32Constant) {
// -----------------------------------------------------------------------------
// Float64LessThanOrEqual
+TEST_F(MachineOperatorReducerTest, Float64LessThanOrEqualWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(double, y, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64LessThanOrEqual(),
+ Float64Constant(x), Float64Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(x <= y));
+ }
+ }
+}
TEST_F(MachineOperatorReducerTest,
Float64LessThanOrEqualWithFloat32Conversions) {
diff --git a/deps/v8/test/unittests/compiler/node-cache-unittest.cc b/deps/v8/test/unittests/compiler/node-cache-unittest.cc
index f77377deda..b699fb38ca 100644
--- a/deps/v8/test/unittests/compiler/node-cache-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-cache-unittest.cc
@@ -63,7 +63,7 @@ TEST_F(NodeCacheTest, Int32Constant_hits) {
for (int i = 0; i < kSize; i++) {
int32_t v = i * -55;
Node** pos = cache.Find(zone(), v);
- if (*pos != NULL) {
+ if (*pos != nullptr) {
EXPECT_EQ(nodes[i], *pos);
hits++;
}
@@ -101,7 +101,7 @@ TEST_F(NodeCacheTest, Int64Constant_hits) {
for (int i = 0; i < kSize; i++) {
int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
Node** pos = cache.Find(zone(), v);
- if (*pos != NULL) {
+ if (*pos != nullptr) {
EXPECT_EQ(nodes[i], *pos);
hits++;
}
@@ -118,7 +118,7 @@ TEST_F(NodeCacheTest, GetCachedNodes_int32) {
for (size_t i = 0; i < arraysize(constants); i++) {
int32_t k = constants[i];
Node** pos = cache.Find(zone(), k);
- if (*pos != NULL) {
+ if (*pos != nullptr) {
ZoneVector<Node*> nodes(zone());
cache.GetCachedNodes(&nodes);
EXPECT_THAT(nodes, Contains(*pos));
@@ -141,7 +141,7 @@ TEST_F(NodeCacheTest, GetCachedNodes_int64) {
for (size_t i = 0; i < arraysize(constants); i++) {
int64_t k = constants[i];
Node** pos = cache.Find(zone(), k);
- if (*pos != NULL) {
+ if (*pos != nullptr) {
ZoneVector<Node*> nodes(zone());
cache.GetCachedNodes(&nodes);
EXPECT_THAT(nodes, Contains(*pos));
diff --git a/deps/v8/test/unittests/compiler/node-matchers-unittest.cc b/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
index 7f043049f0..2663e3abb8 100644
--- a/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
@@ -19,7 +19,7 @@ namespace compiler {
class NodeMatcherTest : public GraphTest {
public:
NodeMatcherTest() : machine_(zone()) {}
- ~NodeMatcherTest() override {}
+ ~NodeMatcherTest() override = default;
MachineOperatorBuilder* machine() { return &machine_; }
@@ -150,191 +150,191 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
// (B0 + B1) -> [B0, 0, B1, NULL]
BaseWithIndexAndDisplacement32Matcher match1(graph()->NewNode(a_op, b0, b1));
- CheckBaseWithIndexAndDisplacement(&match1, b1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match1, b1, 0, b0, nullptr);
// (B0 + D15) -> [NULL, 0, B0, D15]
BaseWithIndexAndDisplacement32Matcher match2(graph()->NewNode(a_op, b0, d15));
- CheckBaseWithIndexAndDisplacement(&match2, NULL, 0, b0, d15);
+ CheckBaseWithIndexAndDisplacement(&match2, nullptr, 0, b0, d15);
// (D15 + B0) -> [NULL, 0, B0, D15]
BaseWithIndexAndDisplacement32Matcher match3(graph()->NewNode(a_op, d15, b0));
- CheckBaseWithIndexAndDisplacement(&match3, NULL, 0, b0, d15);
+ CheckBaseWithIndexAndDisplacement(&match3, nullptr, 0, b0, d15);
// (B0 + M1) -> [p1, 0, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match4(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match4, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match4, p1, 0, b0, nullptr);
// (M1 + B0) -> [p1, 0, B0, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match5(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match5, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match5, p1, 0, b0, nullptr);
// (D15 + M1) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match6(graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match6, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match6, p1, 0, nullptr, d15);
// (M1 + D15) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match7(graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match7, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match7, p1, 0, nullptr, d15);
// (B0 + S0) -> [p1, 0, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match8(graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match8, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match8, p1, 0, b0, nullptr);
// (S0 + B0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement32Matcher match9(graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match9, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match9, p1, 0, b0, nullptr);
// (D15 + S0) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement32Matcher match10(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match10, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match10, p1, 0, nullptr, d15);
// (S0 + D15) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement32Matcher match11(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match11, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match11, p1, 0, nullptr, d15);
// (B0 + M2) -> [p1, 1, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match12(graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match12, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match12, p1, 1, b0, nullptr);
// (M2 + B0) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match13(graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match13, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match13, p1, 1, b0, nullptr);
// (D15 + M2) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match14(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match14, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match14, p1, 1, nullptr, d15);
// (M2 + D15) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match15(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match15, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match15, p1, 1, nullptr, d15);
// (B0 + S1) -> [p1, 1, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match16(graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match16, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match16, p1, 1, b0, nullptr);
// (S1 + B0) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match17(graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match17, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match17, p1, 1, b0, nullptr);
// (D15 + S1) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match18(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match18, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match18, p1, 1, nullptr, d15);
// (S1 + D15) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement32Matcher match19(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match19, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match19, p1, 1, nullptr, d15);
// (B0 + M4) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match20(graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match20, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match20, p1, 2, b0, nullptr);
// (M4 + B0) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement32Matcher match21(graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match21, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match21, p1, 2, b0, nullptr);
// (D15 + M4) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement32Matcher match22(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match22, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match22, p1, 2, nullptr, d15);
// (M4 + D15) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement32Matcher match23(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match23, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match23, p1, 2, nullptr, d15);
// (B0 + S2) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match24(graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match24, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match24, p1, 2, b0, nullptr);
// (S2 + B0) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match25(graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match25, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match25, p1, 2, b0, nullptr);
// (D15 + S2) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match26(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match26, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match26, p1, 2, nullptr, d15);
// (S2 + D15) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement32Matcher match27(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match27, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match27, p1, 2, nullptr, d15);
// (B0 + M8) -> [p1, 3, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match28(graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match28, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match28, p1, 3, b0, nullptr);
// (M8 + B0) -> [p1, 3, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement32Matcher match29(graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match29, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match29, p1, 3, b0, nullptr);
// (D15 + M8) -> [p1, 3, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement32Matcher match30(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match30, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match30, p1, 3, nullptr, d15);
// (M8 + D15) -> [p1, 3, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement32Matcher match31(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match31, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match31, p1, 3, nullptr, d15);
// (B0 + S3) -> [p1, 3, B0, NULL]
BaseWithIndexAndDisplacement32Matcher match32(graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match32, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match32, p1, 3, b0, nullptr);
// (S3 + B0) -> [p1, 3, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement32Matcher match33(graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match33, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match33, p1, 3, b0, nullptr);
// (D15 + S3) -> [p1, 3, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement32Matcher match34(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match34, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match34, p1, 3, nullptr, d15);
// (S3 + D15) -> [p1, 3, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement32Matcher match35(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match35, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match35, p1, 3, nullptr, d15);
// 2 INPUT - NEGATIVE CASES
// (B1 + M3) -> [M3, 0, B1, NULL]
BaseWithIndexAndDisplacement32Matcher match36(graph()->NewNode(a_op, b1, m3));
- CheckBaseWithIndexAndDisplacement(&match36, m3, 0, b1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match36, m3, 0, b1, nullptr);
// (B1 + S4) -> [S4, 0, B1, NULL]
BaseWithIndexAndDisplacement32Matcher match37(graph()->NewNode(a_op, b1, s4));
- CheckBaseWithIndexAndDisplacement(&match37, s4, 0, b1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match37, s4, 0, b1, nullptr);
// 3 INPUT
@@ -400,209 +400,209 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match47(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match47, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match47, p1, 0, b0, nullptr);
// (M1 + B0) -> [p1, 0, B0, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match48(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match48, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match48, p1, 0, b0, nullptr);
// (D15 + M1) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match49(
graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match49, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match49, p1, 0, nullptr, d15);
// (M1 + D15) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match50(
graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match50, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match50, p1, 0, nullptr, d15);
// (B0 + S0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match51(graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match51, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match51, p1, 0, b0, nullptr);
// (S0 + B0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match52(graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match52, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match52, p1, 0, b0, nullptr);
// (D15 + S0) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match53(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match53, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match53, p1, 0, nullptr, d15);
// (S0 + D15) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match54(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match54, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match54, p1, 0, nullptr, d15);
// (B0 + M2) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match55(graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match55, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match55, p1, 1, b0, nullptr);
// (M2 + B0) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match56(graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match56, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match56, p1, 1, b0, nullptr);
// (D15 + M2) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match57(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match57, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match57, p1, 1, nullptr, d15);
// (M2 + D15) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match58(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match58, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match58, p1, 1, nullptr, d15);
// (B0 + S1) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match59(graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match59, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match59, p1, 1, b0, nullptr);
// (S1 + B0) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match60(graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match60, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match60, p1, 1, b0, nullptr);
// (D15 + S1) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match61(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match61, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match61, p1, 1, nullptr, d15);
// (S1 + D15) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match62(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match62, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match62, p1, 1, nullptr, d15);
// (B0 + M4) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match63(graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match63, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match63, p1, 2, b0, nullptr);
// (M4 + B0) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match64(graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match64, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match64, p1, 2, b0, nullptr);
// (D15 + M4) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match65(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match65, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match65, p1, 2, nullptr, d15);
// (M4 + D15) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match66(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match66, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match66, p1, 2, nullptr, d15);
// (B0 + S2) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match67(graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match67, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match67, p1, 2, b0, nullptr);
// (S2 + B0) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match68(graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match68, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match68, p1, 2, b0, nullptr);
// (D15 + S2) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match69(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match69, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match69, p1, 2, nullptr, d15);
// (S2 + D15) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match70(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match70, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match70, p1, 2, nullptr, d15);
// (B0 + M8) -> [p1, 3, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match71(graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match71, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match71, p1, 3, b0, nullptr);
// (M8 + B0) -> [p1, 3, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match72(graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match72, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match72, p1, 3, b0, nullptr);
// (D15 + M8) -> [p1, 3, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match73(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match73, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match73, p1, 3, nullptr, d15);
// (M8 + D15) -> [p1, 3, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match74(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match74, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match74, p1, 3, nullptr, d15);
// (B0 + S3) -> [p1, 3, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match75(graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match75, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match75, p1, 3, b0, nullptr);
// (S3 + B0) -> [p1, 3, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match76(graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match76, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match76, p1, 3, b0, nullptr);
// (D15 + S3) -> [p1, 3, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match77(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match77, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match77, p1, 3, nullptr, d15);
// (S3 + D15) -> [p1, 3, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match78(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match78, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match78, p1, 3, nullptr, d15);
// (D15 + S3) + B0 -> [p1, 3, b0, d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -627,7 +627,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match81(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match81, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match81, nullptr, 0, temp, d15);
// D15 + (S3 + B0) -> [NULL, 0, (s3 + b0), d15]
// Avoid changing simple addressing to complex addressing
@@ -636,7 +636,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match82(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match82, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match82, nullptr, 0, temp, d15);
// B0 + (D15 + S3) -> [p1, 3, b0, d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -686,7 +686,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match88(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match88, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match88, nullptr, 0, temp, d15);
// D15 + (B0 + B1) -> [NULL, 0, (b0 + b1), d15]
// Avoid changing simple addressing to complex addressing
@@ -694,7 +694,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match89(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match89, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match89, nullptr, 0, temp, d15);
// 5 INPUT - with none-addressing operand uses
@@ -702,219 +702,219 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match90(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match90, b0, 0, m1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match90, b0, 0, m1, nullptr);
// (M1 + B0) -> [b0, 0, m1, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match91(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match91, b0, 0, m1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match91, b0, 0, m1, nullptr);
// (D15 + M1) -> [NULL, 0, m1, d15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match92(
graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match92, NULL, 0, m1, d15);
+ CheckBaseWithIndexAndDisplacement(&match92, nullptr, 0, m1, d15);
// (M1 + D15) -> [NULL, 0, m1, d15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement32Matcher match93(
graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match93, NULL, 0, m1, d15);
+ CheckBaseWithIndexAndDisplacement(&match93, nullptr, 0, m1, d15);
// (B0 + S0) -> [b0, 0, s0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match94(graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match94, b0, 0, s0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match94, b0, 0, s0, nullptr);
// (S0 + B0) -> [b0, 0, s0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match95(graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match95, b0, 0, s0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match95, b0, 0, s0, nullptr);
// (D15 + S0) -> [NULL, 0, s0, d15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match96(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match96, NULL, 0, s0, d15);
+ CheckBaseWithIndexAndDisplacement(&match96, nullptr, 0, s0, d15);
// (S0 + D15) -> [NULL, 0, s0, d15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement32Matcher match97(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match97, NULL, 0, s0, d15);
+ CheckBaseWithIndexAndDisplacement(&match97, nullptr, 0, s0, d15);
// (B0 + M2) -> [b0, 0, m2, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match98(graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match98, b0, 0, m2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match98, b0, 0, m2, nullptr);
// (M2 + B0) -> [b0, 0, m2, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match99(graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match99, b0, 0, m2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match99, b0, 0, m2, nullptr);
// (D15 + M2) -> [NULL, 0, m2, d15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match100(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match100, NULL, 0, m2, d15);
+ CheckBaseWithIndexAndDisplacement(&match100, nullptr, 0, m2, d15);
// (M2 + D15) -> [NULL, 0, m2, d15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement32Matcher match101(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match101, NULL, 0, m2, d15);
+ CheckBaseWithIndexAndDisplacement(&match101, nullptr, 0, m2, d15);
// (B0 + S1) -> [b0, 0, s1, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match102(
graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match102, b0, 0, s1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match102, b0, 0, s1, nullptr);
// (S1 + B0) -> [b0, 0, s1, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match103(
graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match103, b0, 0, s1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match103, b0, 0, s1, nullptr);
// (D15 + S1) -> [NULL, 0, s1, d15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match104(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match104, NULL, 0, s1, d15);
+ CheckBaseWithIndexAndDisplacement(&match104, nullptr, 0, s1, d15);
// (S1 + D15) -> [NULL, 0, s1, d15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement32Matcher match105(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match105, NULL, 0, s1, d15);
+ CheckBaseWithIndexAndDisplacement(&match105, nullptr, 0, s1, d15);
// (B0 + M4) -> [b0, 0, m4, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match106(
graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match106, b0, 0, m4, NULL);
+ CheckBaseWithIndexAndDisplacement(&match106, b0, 0, m4, nullptr);
// (M4 + B0) -> [b0, 0, m4, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match107(
graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match107, b0, 0, m4, NULL);
+ CheckBaseWithIndexAndDisplacement(&match107, b0, 0, m4, nullptr);
// (D15 + M4) -> [NULL, 0, m4, d15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match108(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match108, NULL, 0, m4, d15);
+ CheckBaseWithIndexAndDisplacement(&match108, nullptr, 0, m4, d15);
// (M4 + D15) -> [NULL, 0, m4, d15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement32Matcher match109(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match109, NULL, 0, m4, d15);
+ CheckBaseWithIndexAndDisplacement(&match109, nullptr, 0, m4, d15);
// (B0 + S2) -> [b0, 0, s2, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match110(
graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match110, b0, 0, s2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match110, b0, 0, s2, nullptr);
// (S2 + B0) -> [b0, 0, s2, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match111(
graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match111, b0, 0, s2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match111, b0, 0, s2, nullptr);
// (D15 + S2) -> [NULL, 0, s2, d15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match112(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match112, NULL, 0, s2, d15);
+ CheckBaseWithIndexAndDisplacement(&match112, nullptr, 0, s2, d15);
// (S2 + D15) -> [NULL, 0, s2, d15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement32Matcher match113(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match113, NULL, 0, s2, d15);
+ CheckBaseWithIndexAndDisplacement(&match113, nullptr, 0, s2, d15);
// (B0 + M8) -> [b0, 0, m8, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match114(
graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match114, b0, 0, m8, NULL);
+ CheckBaseWithIndexAndDisplacement(&match114, b0, 0, m8, nullptr);
// (M8 + B0) -> [b0, 0, m8, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match115(
graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match115, b0, 0, m8, NULL);
+ CheckBaseWithIndexAndDisplacement(&match115, b0, 0, m8, nullptr);
// (D15 + M8) -> [NULL, 0, m8, d15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match116(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match116, NULL, 0, m8, d15);
+ CheckBaseWithIndexAndDisplacement(&match116, nullptr, 0, m8, d15);
// (M8 + D15) -> [NULL, 0, m8, d15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement32Matcher match117(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match117, NULL, 0, m8, d15);
+ CheckBaseWithIndexAndDisplacement(&match117, nullptr, 0, m8, d15);
// (B0 + S3) -> [b0, 0, s3, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match118(
graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match118, b0, 0, s3, NULL);
+ CheckBaseWithIndexAndDisplacement(&match118, b0, 0, s3, nullptr);
// (S3 + B0) -> [b0, 0, s3, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match119(
graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match119, b0, 0, s3, NULL);
+ CheckBaseWithIndexAndDisplacement(&match119, b0, 0, s3, nullptr);
// (D15 + S3) -> [NULL, 0, s3, d15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match120(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match120, NULL, 0, s3, d15);
+ CheckBaseWithIndexAndDisplacement(&match120, nullptr, 0, s3, d15);
// (S3 + D15) -> [NULL, 0, s3, d15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement32Matcher match121(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match121, NULL, 0, s3, d15);
+ CheckBaseWithIndexAndDisplacement(&match121, nullptr, 0, s3, d15);
// (D15 + S3) + B0 -> [b0, 0, (D15 + S3), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -922,7 +922,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match122(
graph()->NewNode(a_op, temp, b0));
- CheckBaseWithIndexAndDisplacement(&match122, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match122, b0, 0, temp, nullptr);
// (B0 + D15) + S3 -> [p1, 3, (B0 + D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -930,7 +930,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match123(
graph()->NewNode(a_op, temp, s3));
- CheckBaseWithIndexAndDisplacement(&match123, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match123, p1, 3, temp, nullptr);
// (S3 + B0) + D15 -> [NULL, 0, (S3 + B0), d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -938,7 +938,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match124(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match124, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match124, nullptr, 0, temp, d15);
// D15 + (S3 + B0) -> [NULL, 0, (S3 + B0), d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -946,7 +946,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match125(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match125, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match125, nullptr, 0, temp, d15);
// B0 + (D15 + S3) -> [b0, 0, (D15 + S3), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -954,7 +954,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match126(
graph()->NewNode(a_op, b0, temp));
- CheckBaseWithIndexAndDisplacement(&match126, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match126, b0, 0, temp, nullptr);
// S3 + (B0 + D15) -> [p1, 3, (B0 + D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -962,7 +962,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match127(
graph()->NewNode(a_op, s3, temp));
- CheckBaseWithIndexAndDisplacement(&match127, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match127, p1, 3, temp, nullptr);
// S3 + (B0 - D15) -> [p1, 3, (B0 - D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -970,14 +970,14 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match128(
graph()->NewNode(a_op, s3, temp));
- CheckBaseWithIndexAndDisplacement(&match128, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match128, p1, 3, temp, nullptr);
// B0 + (B1 - D15) -> [b0, 0, (B1 - D15), NULL]
temp = graph()->NewNode(sub_op, b1, d15);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match129(
graph()->NewNode(a_op, b0, temp));
- CheckBaseWithIndexAndDisplacement(&match129, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match129, b0, 0, temp, nullptr);
// (B0 - D15) + S3 -> [p1, 3, temp, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -985,21 +985,21 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match130(
graph()->NewNode(a_op, temp, s3));
- CheckBaseWithIndexAndDisplacement(&match130, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match130, p1, 3, temp, nullptr);
// (B0 + B1) + D15 -> [NULL, 0, (B0 + B1), d15]
temp = graph()->NewNode(a_op, b0, b1);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match131(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match131, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match131, nullptr, 0, temp, d15);
// D15 + (B0 + B1) -> [NULL, 0, (B0 + B1), d15]
temp = graph()->NewNode(a_op, b0, b1);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement32Matcher match132(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match132, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match132, nullptr, 0, temp, d15);
}
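
For readers skimming these hunks: the tests drive BaseWithIndexAndDisplacement32Matcher, which decomposes an address-computation subgraph into the x86-style operand base + index * 2^scale + displacement, and each CheckBaseWithIndexAndDisplacement call asserts the (index, scale, base, displacement) tuple the matcher recovered (NULL/nullptr meaning "component absent"). A minimal standalone sketch of that decomposition, using illustrative names rather than V8's API:

    #include <cstdint>
    #include <iostream>

    // Illustrative stand-in for the tuple the matcher recovers; the names
    // here are assumptions for the sketch, not V8 identifiers.
    struct AddressParts {
      int64_t index;         // variable part that gets scaled
      int scale;             // power of two; effective multiplier is 1 << scale
      int64_t base;          // unscaled operand (0 when absent, i.e. "NULL")
      int64_t displacement;  // constant offset (0 when absent)
    };

    // The unit tests assert the inverse direction: given b0 + (p1 << 3) + d15
    // the matcher must report index=p1, scale=3, base=b0, displacement=d15.
    int64_t Recombine(const AddressParts& a) {
      return a.base + (a.index << a.scale) + a.displacement;
    }

    int main() {
      AddressParts parts{/*index=*/7, /*scale=*/3, /*base=*/1000, /*disp=*/15};
      std::cout << Recombine(parts) << "\n";  // prints 1071 = 1000 + 7*8 + 15
    }
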
@@ -1101,195 +1101,195 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
// (B0 + B1) -> [B1, 0, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match1(graph()->NewNode(a_op, b0, b1));
- CheckBaseWithIndexAndDisplacement(&match1, b1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match1, b1, 0, b0, nullptr);
// (B0 + D15) -> [NULL, 0, B0, D15]
BaseWithIndexAndDisplacement64Matcher match2(graph()->NewNode(a_op, b0, d15));
- CheckBaseWithIndexAndDisplacement(&match2, NULL, 0, b0, d15);
+ CheckBaseWithIndexAndDisplacement(&match2, nullptr, 0, b0, d15);
BaseWithIndexAndDisplacement64Matcher match2_32(
graph()->NewNode(a_op, b0, d15_32));
- CheckBaseWithIndexAndDisplacement(&match2_32, NULL, 0, b0, d15_32);
+ CheckBaseWithIndexAndDisplacement(&match2_32, nullptr, 0, b0, d15_32);
// (D15 + B0) -> [NULL, 0, B0, D15]
BaseWithIndexAndDisplacement64Matcher match3(graph()->NewNode(a_op, d15, b0));
- CheckBaseWithIndexAndDisplacement(&match3, NULL, 0, b0, d15);
+ CheckBaseWithIndexAndDisplacement(&match3, nullptr, 0, b0, d15);
// (B0 + M1) -> [p1, 0, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match4(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match4, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match4, p1, 0, b0, nullptr);
// (M1 + B0) -> [p1, 0, B0, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match5(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match5, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match5, p1, 0, b0, nullptr);
// (D15 + M1) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match6(graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match6, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match6, p1, 0, nullptr, d15);
// (M1 + D15) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match7(graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match7, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match7, p1, 0, nullptr, d15);
// (B0 + S0) -> [p1, 0, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match8(graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match8, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match8, p1, 0, b0, nullptr);
// (S0 + B0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement64Matcher match9(graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match9, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match9, p1, 0, b0, nullptr);
// (D15 + S0) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement64Matcher match10(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match10, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match10, p1, 0, nullptr, d15);
// (S0 + D15) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
BaseWithIndexAndDisplacement64Matcher match11(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match11, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match11, p1, 0, nullptr, d15);
// (B0 + M2) -> [p1, 1, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match12(graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match12, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match12, p1, 1, b0, nullptr);
// (M2 + B0) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match13(graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match13, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match13, p1, 1, b0, nullptr);
// (D15 + M2) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match14(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match14, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match14, p1, 1, nullptr, d15);
// (M2 + D15) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match15(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match15, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match15, p1, 1, nullptr, d15);
// (B0 + S1) -> [p1, 1, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match16(graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match16, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match16, p1, 1, b0, nullptr);
// (S1 + B0) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match17(graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match17, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match17, p1, 1, b0, nullptr);
// (D15 + S1) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match18(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match18, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match18, p1, 1, nullptr, d15);
// (S1 + D15) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
BaseWithIndexAndDisplacement64Matcher match19(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match19, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match19, p1, 1, nullptr, d15);
// (B0 + M4) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match20(graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match20, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match20, p1, 2, b0, nullptr);
// (M4 + B0) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement64Matcher match21(graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match21, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match21, p1, 2, b0, nullptr);
// (D15 + M4) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement64Matcher match22(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match22, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match22, p1, 2, nullptr, d15);
// (M4 + D15) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
BaseWithIndexAndDisplacement64Matcher match23(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match23, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match23, p1, 2, nullptr, d15);
// (B0 + S2) -> [p1, 2, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match24(graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match24, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match24, p1, 2, b0, nullptr);
// (S2 + B0) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match25(graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match25, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match25, p1, 2, b0, nullptr);
// (D15 + S2) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match26(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match26, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match26, p1, 2, nullptr, d15);
// (S2 + D15) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
BaseWithIndexAndDisplacement64Matcher match27(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match27, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match27, p1, 2, nullptr, d15);
// (B0 + M8) -> [p1, 3, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match28(graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match28, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match28, p1, 3, b0, nullptr);
// (M8 + B0) -> [p1, 3, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement64Matcher match29(graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match29, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match29, p1, 3, b0, nullptr);
// (D15 + M8) -> [p1, 3, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement64Matcher match30(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match30, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match30, p1, 3, nullptr, d15);
// (M8 + D15) -> [p1, 3, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
BaseWithIndexAndDisplacement64Matcher match31(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match31, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match31, p1, 3, nullptr, d15);
// (B0 + S3) -> [p1, 3, B0, NULL]
BaseWithIndexAndDisplacement64Matcher match32(graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match32, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match32, p1, 3, b0, nullptr);
// (S3 + B0) -> [p1, 3, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement64Matcher match33(graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match33, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match33, p1, 3, b0, nullptr);
// (D15 + S3) -> [p1, 3, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement64Matcher match34(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match34, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match34, p1, 3, nullptr, d15);
// (S3 + D15) -> [p1, 3, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
BaseWithIndexAndDisplacement64Matcher match35(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match35, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match35, p1, 3, nullptr, d15);
// 2 INPUT - NEGATIVE CASES
// (M3 + B1) -> [M3, 0, B1, NULL]
BaseWithIndexAndDisplacement64Matcher match36(graph()->NewNode(a_op, b1, m3));
- CheckBaseWithIndexAndDisplacement(&match36, m3, 0, b1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match36, m3, 0, b1, nullptr);
// (S4 + B1) -> [S4, 0, B1, NULL]
BaseWithIndexAndDisplacement64Matcher match37(graph()->NewNode(a_op, b1, s4));
- CheckBaseWithIndexAndDisplacement(&match37, s4, 0, b1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match37, s4, 0, b1, nullptr);
// 3 INPUT
@@ -1405,209 +1405,209 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match54(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match54, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match54, p1, 0, b0, nullptr);
// (M1 + B0) -> [p1, 0, B0, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match55(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match55, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match55, p1, 0, b0, nullptr);
// (D15 + M1) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match56(
graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match56, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match56, p1, 0, nullptr, d15);
// (M1 + D15) -> [P1, 0, NULL, D15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match57(
graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match57, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match57, p1, 0, nullptr, d15);
// (B0 + S0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match58(graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match58, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match58, p1, 0, b0, nullptr);
// (S0 + B0) -> [p1, 0, B0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match59(graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match59, p1, 0, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match59, p1, 0, b0, nullptr);
// (D15 + S0) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match60(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match60, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match60, p1, 0, nullptr, d15);
// (S0 + D15) -> [P1, 0, NULL, D15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match61(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match61, p1, 0, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match61, p1, 0, nullptr, d15);
// (B0 + M2) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match62(graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match62, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match62, p1, 1, b0, nullptr);
// (M2 + B0) -> [p1, 1, B0, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match63(graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match63, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match63, p1, 1, b0, nullptr);
// (D15 + M2) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match64(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match64, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match64, p1, 1, nullptr, d15);
// (M2 + D15) -> [P1, 1, NULL, D15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match65(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match65, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match65, p1, 1, nullptr, d15);
// (B0 + S1) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match66(graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match66, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match66, p1, 1, b0, nullptr);
// (S1 + B0) -> [p1, 1, B0, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match67(graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match67, p1, 1, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match67, p1, 1, b0, nullptr);
// (D15 + S1) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match68(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match68, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match68, p1, 1, nullptr, d15);
// (S1 + D15) -> [P1, 1, NULL, D15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match69(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match69, p1, 1, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match69, p1, 1, nullptr, d15);
// (B0 + M4) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match70(graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match70, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match70, p1, 2, b0, nullptr);
// (M4 + B0) -> [p1, 2, B0, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match71(graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match71, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match71, p1, 2, b0, nullptr);
// (D15 + M4) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match72(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match72, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match72, p1, 2, nullptr, d15);
// (M4 + D15) -> [p1, 2, NULL, D15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match73(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match73, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match73, p1, 2, nullptr, d15);
// (B0 + S2) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match74(graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match74, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match74, p1, 2, b0, nullptr);
// (S2 + B0) -> [p1, 2, B0, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match75(graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match75, p1, 2, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match75, p1, 2, b0, nullptr);
// (D15 + S2) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match76(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match76, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match76, p1, 2, nullptr, d15);
// (S2 + D15) -> [p1, 2, NULL, D15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match77(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match77, p1, 2, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match77, p1, 2, nullptr, d15);
// (B0 + M8) -> [p1, 3, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match78(graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match78, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match78, p1, 3, b0, nullptr);
// (M8 + B0) -> [p1, 3, B0, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match79(graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match79, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match79, p1, 3, b0, nullptr);
// (D15 + M8) -> [p1, 3, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match80(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match80, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match80, p1, 3, nullptr, d15);
// (M8 + D15) -> [p1, 3, NULL, D15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match81(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match81, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match81, p1, 3, nullptr, d15);
// (B0 + S3) -> [p1, 3, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match82(graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match82, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match82, p1, 3, b0, nullptr);
// (S3 + B0) -> [p1, 3, B0, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match83(graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match83, p1, 3, b0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match83, p1, 3, b0, nullptr);
// (D15 + S3) -> [p1, 3, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match84(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match84, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match84, p1, 3, nullptr, d15);
// (S3 + D15) -> [p1, 3, NULL, D15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match85(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match85, p1, 3, NULL, d15);
+ CheckBaseWithIndexAndDisplacement(&match85, p1, 3, nullptr, d15);
// (D15 + S3) + B0 -> [p1, 3, b0, d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1632,7 +1632,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match88(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match88, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match88, nullptr, 0, temp, d15);
// D15 + (S3 + B0) -> [NULL, 0, (s3 + b0), d15]
// Avoid changing simple addressing to complex addressing
@@ -1641,7 +1641,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match89(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match89, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match89, nullptr, 0, temp, d15);
// B0 + (D15 + S3) -> [p1, 3, b0, d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1691,7 +1691,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match95(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match95, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match95, nullptr, 0, temp, d15);
// D15 + (B0 + B1) -> [NULL, 0, (b0 + b1), d15]
// Avoid changing simple addressing to complex addressing
@@ -1699,7 +1699,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match96(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match96, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match96, nullptr, 0, temp, d15);
// 5 INPUT - with non-addressing operand uses
@@ -1707,223 +1707,223 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match97(graph()->NewNode(a_op, b0, m1));
- CheckBaseWithIndexAndDisplacement(&match97, b0, 0, m1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match97, b0, 0, m1, nullptr);
// (M1 + B0) -> [b0, 0, m1, NULL]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match98(graph()->NewNode(a_op, m1, b0));
- CheckBaseWithIndexAndDisplacement(&match98, b0, 0, m1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match98, b0, 0, m1, nullptr);
// (D15 + M1) -> [NULL, 0, m1, d15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match99(
graph()->NewNode(a_op, d15, m1));
- CheckBaseWithIndexAndDisplacement(&match99, NULL, 0, m1, d15);
+ CheckBaseWithIndexAndDisplacement(&match99, nullptr, 0, m1, d15);
// (M1 + D15) -> [NULL, 0, m1, d15]
m1 = graph()->NewNode(m_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(m1);
BaseWithIndexAndDisplacement64Matcher match100(
graph()->NewNode(a_op, m1, d15));
- CheckBaseWithIndexAndDisplacement(&match100, NULL, 0, m1, d15);
+ CheckBaseWithIndexAndDisplacement(&match100, nullptr, 0, m1, d15);
// (B0 + S0) -> [b0, 0, s0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match101(
graph()->NewNode(a_op, b0, s0));
- CheckBaseWithIndexAndDisplacement(&match101, b0, 0, s0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match101, b0, 0, s0, nullptr);
// (S0 + B0) -> [b0, 0, s0, NULL]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match102(
graph()->NewNode(a_op, s0, b0));
- CheckBaseWithIndexAndDisplacement(&match102, b0, 0, s0, NULL);
+ CheckBaseWithIndexAndDisplacement(&match102, b0, 0, s0, nullptr);
// (D15 + S0) -> [NULL, 0, s0, d15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match103(
graph()->NewNode(a_op, d15, s0));
- CheckBaseWithIndexAndDisplacement(&match103, NULL, 0, s0, d15);
+ CheckBaseWithIndexAndDisplacement(&match103, nullptr, 0, s0, d15);
// (S0 + D15) -> [NULL, 0, s0, d15]
s0 = graph()->NewNode(s_op, p1, d0);
ADD_NONE_ADDRESSING_OPERAND_USES(s0);
BaseWithIndexAndDisplacement64Matcher match104(
graph()->NewNode(a_op, s0, d15));
- CheckBaseWithIndexAndDisplacement(&match104, NULL, 0, s0, d15);
+ CheckBaseWithIndexAndDisplacement(&match104, nullptr, 0, s0, d15);
// (B0 + M2) -> [b0, 0, m2, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match105(
graph()->NewNode(a_op, b0, m2));
- CheckBaseWithIndexAndDisplacement(&match105, b0, 0, m2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match105, b0, 0, m2, nullptr);
// (M2 + B0) -> [b0, 0, m2, NULL]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match106(
graph()->NewNode(a_op, m2, b0));
- CheckBaseWithIndexAndDisplacement(&match106, b0, 0, m2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match106, b0, 0, m2, nullptr);
// (D15 + M2) -> [NULL, 0, m2, d15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match107(
graph()->NewNode(a_op, d15, m2));
- CheckBaseWithIndexAndDisplacement(&match107, NULL, 0, m2, d15);
+ CheckBaseWithIndexAndDisplacement(&match107, nullptr, 0, m2, d15);
// (M2 + D15) -> [NULL, 0, m2, d15]
m2 = graph()->NewNode(m_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(m2);
BaseWithIndexAndDisplacement64Matcher match108(
graph()->NewNode(a_op, m2, d15));
- CheckBaseWithIndexAndDisplacement(&match108, NULL, 0, m2, d15);
+ CheckBaseWithIndexAndDisplacement(&match108, nullptr, 0, m2, d15);
// (B0 + S1) -> [b0, 0, s1, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match109(
graph()->NewNode(a_op, b0, s1));
- CheckBaseWithIndexAndDisplacement(&match109, b0, 0, s1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match109, b0, 0, s1, nullptr);
// (S1 + B0) -> [b0, 0, s1, NULL]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match110(
graph()->NewNode(a_op, s1, b0));
- CheckBaseWithIndexAndDisplacement(&match110, b0, 0, s1, NULL);
+ CheckBaseWithIndexAndDisplacement(&match110, b0, 0, s1, nullptr);
// (D15 + S1) -> [NULL, 0, s1, d15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match111(
graph()->NewNode(a_op, d15, s1));
- CheckBaseWithIndexAndDisplacement(&match111, NULL, 0, s1, d15);
+ CheckBaseWithIndexAndDisplacement(&match111, nullptr, 0, s1, d15);
// (S1 + D15) -> [NULL, 0, s1, d15]
s1 = graph()->NewNode(s_op, p1, d1);
ADD_NONE_ADDRESSING_OPERAND_USES(s1);
BaseWithIndexAndDisplacement64Matcher match112(
graph()->NewNode(a_op, s1, d15));
- CheckBaseWithIndexAndDisplacement(&match112, NULL, 0, s1, d15);
+ CheckBaseWithIndexAndDisplacement(&match112, nullptr, 0, s1, d15);
// (B0 + M4) -> [b0, 0, m4, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match113(
graph()->NewNode(a_op, b0, m4));
- CheckBaseWithIndexAndDisplacement(&match113, b0, 0, m4, NULL);
+ CheckBaseWithIndexAndDisplacement(&match113, b0, 0, m4, nullptr);
// (M4 + B0) -> [b0, 0, m4, NULL]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match114(
graph()->NewNode(a_op, m4, b0));
- CheckBaseWithIndexAndDisplacement(&match114, b0, 0, m4, NULL);
+ CheckBaseWithIndexAndDisplacement(&match114, b0, 0, m4, nullptr);
// (D15 + M4) -> [NULL, 0, m4, d15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match115(
graph()->NewNode(a_op, d15, m4));
- CheckBaseWithIndexAndDisplacement(&match115, NULL, 0, m4, d15);
+ CheckBaseWithIndexAndDisplacement(&match115, nullptr, 0, m4, d15);
// (M4 + D15) -> [NULL, 0, m4, d15]
m4 = graph()->NewNode(m_op, p1, d4);
ADD_NONE_ADDRESSING_OPERAND_USES(m4);
BaseWithIndexAndDisplacement64Matcher match116(
graph()->NewNode(a_op, m4, d15));
- CheckBaseWithIndexAndDisplacement(&match116, NULL, 0, m4, d15);
+ CheckBaseWithIndexAndDisplacement(&match116, nullptr, 0, m4, d15);
// (B0 + S2) -> [b0, 0, s2, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match117(
graph()->NewNode(a_op, b0, s2));
- CheckBaseWithIndexAndDisplacement(&match117, b0, 0, s2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match117, b0, 0, s2, nullptr);
// (S2 + B0) -> [b0, 0, s2, NULL]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match118(
graph()->NewNode(a_op, s2, b0));
- CheckBaseWithIndexAndDisplacement(&match118, b0, 0, s2, NULL);
+ CheckBaseWithIndexAndDisplacement(&match118, b0, 0, s2, nullptr);
// (D15 + S2) -> [NULL, 0, s2, d15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match119(
graph()->NewNode(a_op, d15, s2));
- CheckBaseWithIndexAndDisplacement(&match119, NULL, 0, s2, d15);
+ CheckBaseWithIndexAndDisplacement(&match119, nullptr, 0, s2, d15);
// (S2 + D15) -> [NULL, 0, s2, d15]
s2 = graph()->NewNode(s_op, p1, d2);
ADD_NONE_ADDRESSING_OPERAND_USES(s2);
BaseWithIndexAndDisplacement64Matcher match120(
graph()->NewNode(a_op, s2, d15));
- CheckBaseWithIndexAndDisplacement(&match120, NULL, 0, s2, d15);
+ CheckBaseWithIndexAndDisplacement(&match120, nullptr, 0, s2, d15);
// (B0 + M8) -> [b0, 0, m8, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match121(
graph()->NewNode(a_op, b0, m8));
- CheckBaseWithIndexAndDisplacement(&match121, b0, 0, m8, NULL);
+ CheckBaseWithIndexAndDisplacement(&match121, b0, 0, m8, nullptr);
// (M8 + B0) -> [b0, 0, m8, NULL]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match122(
graph()->NewNode(a_op, m8, b0));
- CheckBaseWithIndexAndDisplacement(&match122, b0, 0, m8, NULL);
+ CheckBaseWithIndexAndDisplacement(&match122, b0, 0, m8, nullptr);
// (D15 + M8) -> [NULL, 0, m8, d15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match123(
graph()->NewNode(a_op, d15, m8));
- CheckBaseWithIndexAndDisplacement(&match123, NULL, 0, m8, d15);
+ CheckBaseWithIndexAndDisplacement(&match123, nullptr, 0, m8, d15);
// (M8 + D15) -> [NULL, 0, m8, d15]
m8 = graph()->NewNode(m_op, p1, d8);
ADD_NONE_ADDRESSING_OPERAND_USES(m8);
BaseWithIndexAndDisplacement64Matcher match124(
graph()->NewNode(a_op, m8, d15));
- CheckBaseWithIndexAndDisplacement(&match124, NULL, 0, m8, d15);
+ CheckBaseWithIndexAndDisplacement(&match124, nullptr, 0, m8, d15);
// (B0 + S3) -> [b0, 0, s3, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match125(
graph()->NewNode(a_op, b0, s3));
- CheckBaseWithIndexAndDisplacement(&match125, b0, 0, s3, NULL);
+ CheckBaseWithIndexAndDisplacement(&match125, b0, 0, s3, nullptr);
// (S3 + B0) -> [b0, 0, s3, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match126(
graph()->NewNode(a_op, s3, b0));
- CheckBaseWithIndexAndDisplacement(&match126, b0, 0, s3, NULL);
+ CheckBaseWithIndexAndDisplacement(&match126, b0, 0, s3, nullptr);
// (D15 + S3) -> [NULL, 0, s3, d15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match127(
graph()->NewNode(a_op, d15, s3));
- CheckBaseWithIndexAndDisplacement(&match127, NULL, 0, s3, d15);
+ CheckBaseWithIndexAndDisplacement(&match127, nullptr, 0, s3, d15);
// (S3 + D15) -> [NULL, 0, s3, d15]
s3 = graph()->NewNode(s_op, p1, d3);
ADD_NONE_ADDRESSING_OPERAND_USES(s3);
BaseWithIndexAndDisplacement64Matcher match128(
graph()->NewNode(a_op, s3, d15));
- CheckBaseWithIndexAndDisplacement(&match128, NULL, 0, s3, d15);
+ CheckBaseWithIndexAndDisplacement(&match128, nullptr, 0, s3, d15);
// (D15 + S3) + B0 -> [b0, 0, (D15 + S3), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1931,7 +1931,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match129(
graph()->NewNode(a_op, temp, b0));
- CheckBaseWithIndexAndDisplacement(&match129, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match129, b0, 0, temp, nullptr);
// (B0 + D15) + S3 -> [p1, 3, (B0 + D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1939,7 +1939,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match130(
graph()->NewNode(a_op, temp, s3));
- CheckBaseWithIndexAndDisplacement(&match130, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match130, p1, 3, temp, nullptr);
// (S3 + B0) + D15 -> [NULL, 0, (S3 + B0), d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1947,7 +1947,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match131(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match131, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match131, nullptr, 0, temp, d15);
// D15 + (S3 + B0) -> [NULL, 0, (S3 + B0), d15]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1955,7 +1955,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match132(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match132, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match132, nullptr, 0, temp, d15);
// B0 + (D15 + S3) -> [b0, 0, (D15 + S3), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1963,7 +1963,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match133(
graph()->NewNode(a_op, b0, temp));
- CheckBaseWithIndexAndDisplacement(&match133, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match133, b0, 0, temp, nullptr);
// S3 + (B0 + D15) -> [p1, 3, (B0 + D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1971,7 +1971,7 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match134(
graph()->NewNode(a_op, s3, temp));
- CheckBaseWithIndexAndDisplacement(&match134, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match134, p1, 3, temp, nullptr);
// S3 + (B0 - D15) -> [p1, 3, (B0 - D15), NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1979,14 +1979,14 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match135(
graph()->NewNode(a_op, s3, temp));
- CheckBaseWithIndexAndDisplacement(&match135, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match135, p1, 3, temp, nullptr);
// B0 + (B1 - D15) -> [b0, 0, (B1 - D15), NULL]
temp = graph()->NewNode(sub_op, b1, d15);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match136(
graph()->NewNode(a_op, b0, temp));
- CheckBaseWithIndexAndDisplacement(&match136, b0, 0, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match136, b0, 0, temp, nullptr);
// (B0 - D15) + S3 -> [p1, 3, temp, NULL]
s3 = graph()->NewNode(s_op, p1, d3);
@@ -1994,21 +1994,21 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match137(
graph()->NewNode(a_op, temp, s3));
- CheckBaseWithIndexAndDisplacement(&match137, p1, 3, temp, NULL);
+ CheckBaseWithIndexAndDisplacement(&match137, p1, 3, temp, nullptr);
// (B0 + B1) + D15 -> [NULL, 0, (B0 + B1), d15]
temp = graph()->NewNode(a_op, b0, b1);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match138(
graph()->NewNode(a_op, temp, d15));
- CheckBaseWithIndexAndDisplacement(&match138, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match138, nullptr, 0, temp, d15);
// D15 + (B0 + B1) -> [NULL, 0, (B0 + B1), d15]
temp = graph()->NewNode(a_op, b0, b1);
ADD_NONE_ADDRESSING_OPERAND_USES(temp);
BaseWithIndexAndDisplacement64Matcher match139(
graph()->NewNode(a_op, d15, temp));
- CheckBaseWithIndexAndDisplacement(&match139, NULL, 0, temp, d15);
+ CheckBaseWithIndexAndDisplacement(&match139, nullptr, 0, temp, d15);
}
TEST_F(NodeMatcherTest, BranchMatcher_match) {
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 56f18931b4..0b3d8786f8 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -57,7 +57,7 @@ class TestNodeMatcher : public MatcherInterface<Node*> {
bool MatchAndExplain(Node* node,
MatchResultListener* listener) const override {
- if (node == NULL) {
+ if (node == nullptr) {
*listener << "which is NULL";
return false;
}
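
The hunk above switches TestNodeMatcher's null guard to nullptr while keeping the human-readable "which is NULL" explanation. For context, this is the standard gMock MatcherInterface pattern used throughout node-test-utils.cc; a minimal self-contained sketch (the Node struct here is a stand-in for the sketch, not V8's type):

    #include <ostream>
    #include "gmock/gmock.h"

    struct Node { int opcode; };  // stand-in type, assumption for the sketch

    class HasOpcodeMatcher : public testing::MatcherInterface<Node*> {
     public:
      explicit HasOpcodeMatcher(int opcode) : opcode_(opcode) {}

      bool MatchAndExplain(Node* node,
                           testing::MatchResultListener* listener) const override {
        if (node == nullptr) {  // same guard the diff above modernizes
          *listener << "which is NULL";
          return false;
        }
        return node->opcode == opcode_;
      }

      void DescribeTo(std::ostream* os) const override {
        *os << "is a node with opcode " << opcode_;
      }

     private:
      const int opcode_;
    };

    // Factory mirroring the IsFoo() helpers in this file:
    testing::Matcher<Node*> HasOpcode(int opcode) {
      return testing::MakeMatcher(new HasOpcodeMatcher(opcode));
    }
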
@@ -1401,6 +1401,43 @@ class IsBinopMatcher final : public TestNodeMatcher {
const Matcher<Node*> rhs_matcher_;
};
+class IsStringConcatMatcher final : public TestNodeMatcher {
+ public:
+ IsStringConcatMatcher(const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher)
+ : TestNodeMatcher(IrOpcode::kStringConcat),
+ length_matcher_(length_matcher),
+ lhs_matcher_(lhs_matcher),
+ rhs_matcher_(rhs_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ TestNodeMatcher::DescribeTo(os);
+ *os << " whose length (";
+ length_matcher_.DescribeTo(os);
+ *os << ") and lhs (";
+ lhs_matcher_.DescribeTo(os);
+ *os << ") and rhs (";
+ rhs_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "length", length_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "lhs",
+ lhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2), "rhs",
+ rhs_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> length_matcher_;
+ const Matcher<Node*> lhs_matcher_;
+ const Matcher<Node*> rhs_matcher_;
+};
+
class IsUnopMatcher final : public TestNodeMatcher {
public:
IsUnopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& input_matcher)
@@ -1910,9 +1947,19 @@ Matcher<Node*> IsTailCall(
IrOpcode::k##opcode, hint_matcher, lhs_matcher, rhs_matcher, \
effect_matcher, control_matcher)); \
}
-SPECULATIVE_BINOPS(DEFINE_SPECULATIVE_BINOP_MATCHER);
+SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DEFINE_SPECULATIVE_BINOP_MATCHER);
+DEFINE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberEqual)
+DEFINE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThan)
+DEFINE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThanOrEqual)
#undef DEFINE_SPECULATIVE_BINOP_MATCHER
+Matcher<Node*> IsStringConcat(const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return MakeMatcher(
+ new IsStringConcatMatcher(length_matcher, lhs_matcher, rhs_matcher));
+}
+
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
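
The new IsStringConcat matcher checks value inputs 0 through 2 (length, lhs, rhs) of a kStringConcat node. A hypothetical use inside a GraphTest-style test body (everything except IsStringConcat itself is a placeholder for illustration):

    // Raw Node* arguments implicitly convert to Eq matchers; ::testing::_
    // accepts any length input.
    Node* lhs = Parameter(0);
    Node* rhs = Parameter(1);
    EXPECT_THAT(node, IsStringConcat(::testing::_, lhs, rhs));
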
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 30ac330f7f..4e9c32e6d6 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -7,6 +7,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/opcodes.h"
#include "src/compiler/simplified-operator.h"
#include "src/machine-type.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -35,16 +36,6 @@ class Node;
using ::testing::Matcher;
-#define SPECULATIVE_BINOPS(V) \
- V(SpeculativeNumberAdd) \
- V(SpeculativeNumberSubtract) \
- V(SpeculativeNumberShiftLeft) \
- V(SpeculativeNumberShiftRight) \
- V(SpeculativeNumberShiftRightLogical) \
- V(SpeculativeNumberBitwiseAnd) \
- V(SpeculativeNumberBitwiseOr) \
- V(SpeculativeNumberBitwiseXor)
-
Matcher<Node*> IsDead();
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher);
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher,
@@ -221,7 +212,10 @@ Matcher<Node*> IsNumberAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher, \
const Matcher<Node*>& effect_matcher, \
const Matcher<Node*>& control_matcher);
-SPECULATIVE_BINOPS(DECLARE_SPECULATIVE_BINOP_MATCHER);
+SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_SPECULATIVE_BINOP_MATCHER);
+DECLARE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberEqual)
+DECLARE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThan)
+DECLARE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThanOrEqual)
#undef DECLARE_SPECULATIVE_BINOP_MATCHER
Matcher<Node*> IsNumberSubtract(const Matcher<Node*>& lhs_matcher,
@@ -272,6 +266,9 @@ Matcher<Node*> IsNumberSqrt(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsNumberTan(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsNumberTanh(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsNumberTrunc(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsStringConcat(const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsStringFromSingleCharCode(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsStringLength(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
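
This hunk drops the file-local SPECULATIVE_BINOPS X-macro in favour of the central SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST (hence the new include of src/compiler/opcodes.h); the three speculative comparisons are kept in a separate opcode list upstream, so their matchers are declared one by one. The X-macro mechanics, as a toy sketch with made-up names:

    #define MY_OP_LIST(V) \
      V(Add)              \
      V(Subtract)

    // Each expansion stamps out one declaration per list entry:
    #define DECLARE_MATCHER(Name) Matcher<Node*> Is##Name();
    MY_OP_LIST(DECLARE_MATCHER)  // -> Matcher<Node*> IsAdd(); Matcher<Node*> IsSubtract();
    #undef DECLARE_MATCHER
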
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
new file mode 100644
index 0000000000..f3ecd228a5
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -0,0 +1,1170 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/redundancy-elimination.h"
+#include "src/compiler/common-operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::NiceMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+namespace redundancy_elimination_unittest {
+
+class RedundancyEliminationTest : public GraphTest {
+ public:
+ explicit RedundancyEliminationTest(int num_parameters = 4)
+ : GraphTest(num_parameters),
+ reducer_(&editor_, zone()),
+ simplified_(zone()) {
+ // Initialize the {reducer_} state for the Start node.
+ reducer_.Reduce(graph()->start());
+
+ // Create a feedback vector with two CALL_IC slots.
+ FeedbackVectorSpec spec(zone());
+ FeedbackSlot slot1 = spec.AddCallICSlot();
+ FeedbackSlot slot2 = spec.AddCallICSlot();
+ Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate(), &spec);
+ Handle<SharedFunctionInfo> shared =
+ isolate()->factory()->NewSharedFunctionInfoForBuiltin(
+ isolate()->factory()->empty_string(), Builtins::kIllegal);
+ shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
+ Handle<FeedbackVector> feedback_vector =
+ FeedbackVector::New(isolate(), shared);
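+    // Three feedback flavours: an empty VectorSlotPair plus two distinct
+    // CALL_IC slots, so the TRACED_FOREACH loops below cover the
+    // same-feedback, different-feedback and no-feedback combinations.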
+ vector_slot_pairs_.push_back(VectorSlotPair());
+ vector_slot_pairs_.push_back(
+ VectorSlotPair(feedback_vector, slot1, UNINITIALIZED));
+ vector_slot_pairs_.push_back(
+ VectorSlotPair(feedback_vector, slot2, UNINITIALIZED));
+ }
+ ~RedundancyEliminationTest() override = default;
+
+ protected:
+ Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
+
+ std::vector<VectorSlotPair> const& vector_slot_pairs() const {
+ return vector_slot_pairs_;
+ }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+ NiceMock<MockAdvancedReducerEditor> editor_;
+ std::vector<VectorSlotPair> vector_slot_pairs_;
+ RedundancyElimination reducer_;
+ SimplifiedOperatorBuilder simplified_;
+};
+
+namespace {
+
+const CheckForMinusZeroMode kCheckForMinusZeroModes[] = {
+ CheckForMinusZeroMode::kCheckForMinusZero,
+ CheckForMinusZeroMode::kDontCheckForMinusZero,
+};
+
+const CheckTaggedInputMode kCheckTaggedInputModes[] = {
+ CheckTaggedInputMode::kNumber, CheckTaggedInputMode::kNumberOrOddball};
+
+const NumberOperationHint kNumberOperationHints[] = {
+ NumberOperationHint::kSignedSmall,
+ NumberOperationHint::kSignedSmallInputs,
+ NumberOperationHint::kSigned32,
+ NumberOperationHint::kNumber,
+ NumberOperationHint::kNumberOrOddball,
+};
+
+} // namespace
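+
+// The tests below share one shape (an illustrative sketch, not a quote of
+// any single test):
+//
+//   Node* check1 = effect = graph()->NewNode(op1, value, effect, control);
+//   Node* check2 = effect = graph()->NewNode(op2, value, effect, control);
+//   Reduce(check1);  // kept as-is:  r1.replacement() == check1
+//   Reduce(check2);  // redundant:   r2.replacement() == check1
+//
+// i.e. once {op1} has verified {value} on the effect chain, an equal or
+// weaker check {op2} afterwards can simply reuse {check1}'s result.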
+
+// -----------------------------------------------------------------------------
+// CheckBounds
+
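+// Two CheckBounds applied to the same {index, length} pair: the second is
+// always redundant, independent of which feedback slots are attached.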
+TEST_F(RedundancyEliminationTest, CheckBounds) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* index = Parameter(0);
+ Node* length = Parameter(1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), index, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), index, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckNumber
+
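+// Every Smi is a Number, so a CheckSmi that already passed makes a later
+// CheckNumber on the same value redundant.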
+TEST_F(RedundancyEliminationTest, CheckNumberSubsumedByCheckSmi) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckSmi(feedback1), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckNumber(feedback2), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckString
+
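+// Internalized strings are strings, so CheckInternalizedString subsumes a
+// later CheckString on the same value.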
+TEST_F(RedundancyEliminationTest,
+ CheckStringSubsumedByCheckInternalizedString) {
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckInternalizedString(), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckString(feedback), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckSymbol
+
+TEST_F(RedundancyEliminationTest, CheckSymbol) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckSymbol(), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckSymbol(), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+}
+
+// -----------------------------------------------------------------------------
+// CheckedFloat64ToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedFloat64ToInt32(mode, feedback1), value, effect,
+ control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedFloat64ToInt32(mode, feedback2), value, effect,
+ control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedInt32ToTaggedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedInt32ToTaggedSigned) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedInt32ToTaggedSigned(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedInt32ToTaggedSigned(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedInt64ToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedInt64ToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedInt64ToInt32(feedback1), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedInt64ToInt32(feedback2), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedInt64ToTaggedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedInt64ToTaggedSigned) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedInt64ToTaggedSigned(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedInt64ToTaggedSigned(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTaggedSignedToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedSignedToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedTaggedSignedToInt32(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedTaggedSignedToInt32(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTaggedToFloat64
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedToFloat64) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckTaggedInputMode, mode, kCheckTaggedInputModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToFloat64(mode, feedback1), value,
+ effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToFloat64(mode, feedback2), value,
+ effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+       CheckedTaggedToFloat64SubsumedByCheckedTaggedToFloat64) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ // If the check passed for CheckTaggedInputMode::kNumber, it'll
+ // also pass later for CheckTaggedInputMode::kNumberOrOddball.
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedTaggedToFloat64(
+ CheckTaggedInputMode::kNumber, feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToFloat64(
+ CheckTaggedInputMode::kNumberOrOddball, feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTaggedToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToInt32(mode, feedback1), value, effect,
+ control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToInt32(mode, feedback2), value, effect,
+ control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ CheckedTaggedToInt32SubsumedByCheckedTaggedSignedToInt32) {
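+  // CheckedTaggedSignedToInt32 already proves the value is a Smi in int32
+  // range, so the more general CheckedTaggedToInt32 that follows can reuse
+  // its result, whatever the minus-zero mode.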
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedSignedToInt32(feedback1), value, effect,
+ control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToInt32(mode, feedback2), value, effect,
+ control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTaggedToTaggedPointer
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedPointer) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToTaggedPointer(feedback1), value, effect,
+ control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToTaggedPointer(feedback2), value, effect,
+ control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTaggedToTaggedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedSigned) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedTaggedToTaggedSigned(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedTaggedToTaggedSigned(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedTruncateTaggedToWord32
+
+TEST_F(RedundancyEliminationTest, CheckedTruncateTaggedToWord32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckTaggedInputMode, mode, kCheckTaggedInputModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTruncateTaggedToWord32(mode, feedback1), value,
+ effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTruncateTaggedToWord32(mode, feedback2), value,
+ effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ CheckedTruncateTaggedToWord32SubsumedByCheckedTruncateTaggedToWord32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ // If the check passed for CheckTaggedInputMode::kNumber, it'll
+ // also pass later for CheckTaggedInputMode::kNumberOrOddball.
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedTruncateTaggedToWord32(
+ CheckTaggedInputMode::kNumber, feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTruncateTaggedToWord32(
+ CheckTaggedInputMode::kNumberOrOddball, feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedUint32ToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedUint32ToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint32ToInt32(feedback1), value,
+ effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint32ToInt32(feedback2), value,
+ effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedUint32ToTaggedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedUint32ToTaggedSigned) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint32ToTaggedSigned(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint32ToTaggedSigned(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedUint64ToInt32
+
+TEST_F(RedundancyEliminationTest, CheckedUint64ToInt32) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint64ToInt32(feedback1), value,
+ effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint64ToInt32(feedback2), value,
+ effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckedUint64ToTaggedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedUint64ToTaggedSigned) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint64ToTaggedSigned(feedback1),
+ value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint64ToTaggedSigned(feedback2),
+ value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeNumberEqual
+
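+// Naming convention for the comparison tests below: "BetterType" means the
+// operands start out as Type::Any, so the CheckBounds nodes carry strictly
+// more precise types and the comparison is rewritten to consume them;
+// "SameType" means the operands are already UnsignedSmall, so the checks add
+// no type information and the original operands are kept.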
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberEqualWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberEqual(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberEqual(NumberOperationHint::kSignedSmall,
+ check1, check2, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberEqualWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::UnsignedSmall(), 0);
+ Node* rhs = Parameter(Type::UnsignedSmall(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberEqual(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberEqual(NumberOperationHint::kSignedSmall,
+ lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeNumberLessThan
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberLessThanWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberLessThan(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberLessThan(NumberOperationHint::kSignedSmall,
+ check1, check2, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberLessThanWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::UnsignedSmall(), 0);
+ Node* rhs = Parameter(Type::UnsignedSmall(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberLessThan(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberLessThan(NumberOperationHint::kSignedSmall,
+ lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeNumberLessThanOrEqual
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberLessThanOrEqualWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberLessThanOrEqual(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberLessThanOrEqual(
+ NumberOperationHint::kSignedSmall, check1, check2, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberLessThanOrEqualWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* lhs = Parameter(Type::UnsignedSmall(), 0);
+ Node* rhs = Parameter(Type::UnsignedSmall(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback1), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback2), rhs, length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check2);
+
+ Node* cmp3 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberLessThanOrEqual(
+ NumberOperationHint::kSignedSmall),
+ lhs, rhs, effect, control);
+ Reduction r3 = Reduce(cmp3);
+ ASSERT_TRUE(r3.Changed());
+ EXPECT_THAT(r3.replacement(),
+ IsSpeculativeNumberLessThanOrEqual(
+ NumberOperationHint::kSignedSmall, lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeNumberAdd
+
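+// In the speculative arithmetic tests below only {lhs} is bounds-checked,
+// so after reduction only the left operand is replaced by the CheckBounds
+// result; {rhs} is left untouched.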
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberAddWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* add2 = effect = graph()->NewNode(
+ simplified()->SpeculativeNumberAdd(hint), lhs, rhs, effect, control);
+ Reduction r2 = Reduce(add2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeNumberAdd(hint, check1, rhs, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
+ Node* rhs = Parameter(Type::Any(), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* add2 = effect = graph()->NewNode(
+ simplified()->SpeculativeNumberAdd(hint), lhs, rhs, effect, control);
+ Reduction r2 = Reduce(add2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeNumberAdd(hint, lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeNumberSubtract
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberSubtractWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* subtract2 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberSubtract(hint), lhs,
+ rhs, effect, control);
+ Reduction r2 = Reduce(subtract2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeNumberSubtract(hint, check1, rhs, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeNumberSubtractWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
+ Node* rhs = Parameter(Type::Any(), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* subtract2 = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberSubtract(hint), lhs,
+ rhs, effect, control);
+ Reduction r2 = Reduce(subtract2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeNumberSubtract(hint, lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeSafeIntegerAdd
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeSafeIntegerAddWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* add2 = effect =
+ graph()->NewNode(simplified()->SpeculativeSafeIntegerAdd(hint), lhs,
+ rhs, effect, control);
+ Reduction r2 = Reduce(add2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeSafeIntegerAdd(hint, check1, rhs, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeSafeIntegerAddWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
+ Node* rhs = Parameter(Type::Any(), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* add2 = effect =
+ graph()->NewNode(simplified()->SpeculativeSafeIntegerAdd(hint), lhs,
+ rhs, effect, control);
+ Reduction r2 = Reduce(add2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeSafeIntegerAdd(hint, lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeSafeIntegerSubtract
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeSafeIntegerSubtractWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Any(), 0);
+ Node* rhs = Parameter(Type::Any(), 1);
+ Node* length = Parameter(Type::Unsigned31(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* subtract2 = effect =
+ graph()->NewNode(simplified()->SpeculativeSafeIntegerSubtract(hint),
+ lhs, rhs, effect, control);
+ Reduction r2 = Reduce(subtract2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeSafeIntegerSubtract(hint, check1, rhs, _, _));
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest,
+ SpeculativeSafeIntegerSubtractWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
+ Node* rhs = Parameter(Type::Any(), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckBounds(feedback), lhs, length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* subtract2 = effect =
+ graph()->NewNode(simplified()->SpeculativeSafeIntegerSubtract(hint),
+ lhs, rhs, effect, control);
+ Reduction r2 = Reduce(subtract2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(),
+ IsSpeculativeSafeIntegerSubtract(hint, lhs, rhs, _, _));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// SpeculativeToNumber
+
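+// SpeculativeToNumber itself is not removed here; the reducer only rewires
+// its input to the CheckBounds result when that result has a more precise
+// type ("BetterType"), and keeps the original input otherwise ("SameType").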
+TEST_F(RedundancyEliminationTest,
+ SpeculativeToNumberWithCheckBoundsBetterType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* index = Parameter(Type::Any(), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckBounds(feedback1), index,
+ length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* to_number2 = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(hint, feedback2),
+ index, effect, control);
+ Reduction r2 = Reduce(to_number2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsSpeculativeToNumber(check1));
+ }
+ }
+ }
+}
+
+TEST_F(RedundancyEliminationTest, SpeculativeToNumberWithCheckBoundsSameType) {
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
+ Node* index = Parameter(Type::Range(42.0, 42.0, zone()), 0);
+ Node* length = Parameter(Type::Unsigned31(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckBounds(feedback1), index,
+ length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* to_number2 = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(hint, feedback2),
+ index, effect, control);
+ Reduction r2 = Reduce(to_number2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsSpeculativeToNumber(index));
+ }
+ }
+ }
+}
+
+} // namespace redundancy_elimination_unittest
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
index 97cafdb6e6..68a7ffea4a 100644
--- a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
@@ -18,7 +18,7 @@ namespace compiler {
class SchedulerRPOTest : public TestWithZone {
public:
- SchedulerRPOTest() {}
+ SchedulerRPOTest() = default;
void CheckRPONumbers(BasicBlockVector* order, size_t expected,
bool loops_allowed) {
diff --git a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
index 51e954f799..82bcda6e9f 100644
--- a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
@@ -25,7 +25,7 @@ class SimplifiedLoweringTest : public GraphTest {
simplified_(zone()),
jsgraph_(isolate(), graph(), common(), &javascript_, &simplified_,
&machine_) {}
- ~SimplifiedLoweringTest() override {}
+ ~SimplifiedLoweringTest() override = default;
void LowerGraph(Node* node) {
// Make sure we always start with an empty graph.
@@ -42,7 +42,7 @@ class SimplifiedLoweringTest : public GraphTest {
{
// Simplified lowering needs to run w/o the typer decorator so make sure
// the object is not live at the same time.
- Typer typer(isolate(), js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
typer.Run();
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index 7913d6398c..5e2f8f15cc 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -25,7 +25,7 @@ class SimplifiedOperatorReducerTest : public GraphTest {
public:
explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
: GraphTest(num_parameters), simplified_(zone()) {}
- ~SimplifiedOperatorReducerTest() override {}
+ ~SimplifiedOperatorReducerTest() override = default;
protected:
Reduction Reduce(Node* node) {
@@ -54,7 +54,7 @@ class SimplifiedOperatorReducerTestWithParam
public:
explicit SimplifiedOperatorReducerTestWithParam(int num_parameters = 1)
: SimplifiedOperatorReducerTest(num_parameters) {}
- ~SimplifiedOperatorReducerTestWithParam() override {}
+ ~SimplifiedOperatorReducerTestWithParam() override = default;
};
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
index 86600aeffe..51426a5f85 100644
--- a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -28,7 +28,7 @@ class TypedOptimizationTest : public TypedGraphTest {
public:
TypedOptimizationTest()
: TypedGraphTest(3), simplified_(zone()), deps_(isolate(), zone()) {}
- ~TypedOptimizationTest() override {}
+ ~TypedOptimizationTest() override = default;
protected:
Reduction Reduce(Node* node) {
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 53459c314a..b827088336 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -23,7 +23,7 @@ class TyperTest : public TypedGraphTest {
TyperTest()
: TypedGraphTest(3),
js_heap_broker_(isolate(), zone()),
- operation_typer_(isolate(), &js_heap_broker_, zone()),
+ operation_typer_(&js_heap_broker_, zone()),
types_(zone(), isolate(), random_number_generator()),
javascript_(zone()),
simplified_(zone()) {
@@ -434,7 +434,6 @@ TEST_F(TyperTest, TypeJSStrictEqual) {
TEST_F(TyperTest, Monotonicity_##name) { \
TestUnaryMonotonicity(javascript_.name()); \
}
-TEST_MONOTONICITY(ToInteger)
TEST_MONOTONICITY(ToLength)
TEST_MONOTONICITY(ToName)
TEST_MONOTONICITY(ToNumber)
diff --git a/deps/v8/test/unittests/counters-unittest.cc b/deps/v8/test/unittests/counters-unittest.cc
index d137d68ee9..c4d46b2e7a 100644
--- a/deps/v8/test/unittests/counters-unittest.cc
+++ b/deps/v8/test/unittests/counters-unittest.cc
@@ -34,7 +34,7 @@ class MockHistogram : public Histogram {
class AggregatedMemoryHistogramTest : public ::testing::Test {
public:
AggregatedMemoryHistogramTest() : aggregated_(&mock_) {}
- virtual ~AggregatedMemoryHistogramTest() {}
+ ~AggregatedMemoryHistogramTest() override = default;
void AddSample(double current_ms, double current_value) {
aggregated_.AddSample(current_ms, current_value);
@@ -66,7 +66,7 @@ class RuntimeCallStatsTest : public TestWithNativeContext {
stats()->Reset();
}
- ~RuntimeCallStatsTest() {
+ ~RuntimeCallStatsTest() override {
// Disable RuntimeCallStats before tearing down the isolate to prevent
// printing the tests table. Comment the following line for debugging
// purposes.
diff --git a/deps/v8/test/unittests/heap/bitmap-unittest.cc b/deps/v8/test/unittests/heap/bitmap-unittest.cc
index a84437d534..1ecab4dd72 100644
--- a/deps/v8/test/unittests/heap/bitmap-unittest.cc
+++ b/deps/v8/test/unittests/heap/bitmap-unittest.cc
@@ -20,7 +20,7 @@ class BitmapTest : public ::testing::Test {
memset(memory_, 0, Bitmap::kSize);
}
- virtual ~BitmapTest() { delete[] memory_; }
+ ~BitmapTest() override { delete[] memory_; }
Bitmap* bitmap() { return reinterpret_cast<Bitmap*>(memory_); }
uint8_t* raw_bitmap() { return memory_; }
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
index ac2cb3e2ee..33cc05e692 100644
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
@@ -33,7 +33,6 @@ class MockEmbedderHeapTracer : public EmbedderHeapTracer {
public:
MOCK_METHOD0(TracePrologue, void());
MOCK_METHOD0(TraceEpilogue, void());
- MOCK_METHOD0(AbortTracing, void());
MOCK_METHOD1(EnterFinalPause, void(EmbedderHeapTracer::EmbedderStackState));
MOCK_METHOD0(IsTracingDone, bool());
MOCK_METHOD1(RegisterV8References,
@@ -76,24 +75,6 @@ TEST(LocalEmbedderHeapTracer, TraceEpilogueForwards) {
local_tracer.TraceEpilogue();
}
-TEST(LocalEmbedderHeapTracer, AbortTracingForwards) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, AbortTracing());
- local_tracer.AbortTracing();
-}
-
-TEST(LocalEmbedderHeapTracer, AbortTracingClearsCachedWrappers) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- local_tracer.AddWrapperToTrace(CreateWrapperInfo());
- EXPECT_CALL(remote_tracer, AbortTracing());
- local_tracer.AbortTracing();
- EXPECT_EQ(0u, local_tracer.NumberOfCachedWrappersToTrace());
-}
-
TEST(LocalEmbedderHeapTracer, EnterFinalPauseForwards) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(nullptr);
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
index 573be833af..7063b2a280 100644
--- a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -14,8 +14,8 @@ namespace {
class GCIdleTimeHandlerTest : public ::testing::Test {
public:
- GCIdleTimeHandlerTest() {}
- virtual ~GCIdleTimeHandlerTest() {}
+ GCIdleTimeHandlerTest() = default;
+ ~GCIdleTimeHandlerTest() override = default;
GCIdleTimeHandler* handler() { return &handler_; }
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 4ac80ab6fe..ac18e1817b 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -499,7 +499,7 @@ TEST_F(GCTracerTest, RecordMarkCompactHistograms) {
tracer->current_.scopes[GCTracer::Scope::MC_MARK] = 5;
tracer->current_.scopes[GCTracer::Scope::MC_PROLOGUE] = 6;
tracer->current_.scopes[GCTracer::Scope::MC_SWEEP] = 7;
- tracer->RecordMarkCompactHistograms(i_isolate()->counters()->gc_finalize());
+ tracer->RecordGCPhasesHistograms(i_isolate()->counters()->gc_finalize());
EXPECT_EQ(1, GcHistogram::Get("V8.GCFinalizeMC.Clear")->Total());
EXPECT_EQ(2, GcHistogram::Get("V8.GCFinalizeMC.Epilogue")->Total());
EXPECT_EQ(3, GcHistogram::Get("V8.GCFinalizeMC.Evacuate")->Total());
@@ -510,5 +510,19 @@ TEST_F(GCTracerTest, RecordMarkCompactHistograms) {
GcHistogram::CleanUp();
}
+TEST_F(GCTracerTest, RecordScavengerHistograms) {
+ if (FLAG_stress_incremental_marking) return;
+ isolate()->SetCreateHistogramFunction(&GcHistogram::CreateHistogram);
+ isolate()->SetAddHistogramSampleFunction(&GcHistogram::AddHistogramSample);
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+ tracer->current_.scopes[GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS] = 1;
+ tracer->current_.scopes[GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL] = 2;
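+  // Note that SCAVENGER_SCAVENGE_PARALLEL is reported under the
+  // "V8.GCScavenger.ScavengeMain" histogram checked below.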
+ tracer->RecordGCPhasesHistograms(i_isolate()->counters()->gc_scavenger());
+ EXPECT_EQ(1, GcHistogram::Get("V8.GCScavenger.ScavengeRoots")->Total());
+ EXPECT_EQ(2, GcHistogram::Get("V8.GCScavenger.ScavengeMain")->Total());
+ GcHistogram::CleanUp();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/heap-controller-unittest.cc b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
index b2446afa84..42db9c4ba0 100644
--- a/deps/v8/test/unittests/heap/heap-controller-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
@@ -34,8 +34,8 @@ void CheckEqualRounded(double expected, double actual) {
TEST_F(HeapControllerTest, HeapGrowingFactor) {
HeapController heap_controller(i_isolate()->heap());
- double min_factor = heap_controller.kMinGrowingFactor;
- double max_factor = heap_controller.kMaxGrowingFactor;
+ double min_factor = heap_controller.min_growing_factor_;
+ double max_factor = heap_controller.max_growing_factor_;
CheckEqualRounded(max_factor, heap_controller.GrowingFactor(34, 1, 4.0));
CheckEqualRounded(3.553, heap_controller.GrowingFactor(45, 1, 4.0));
@@ -51,15 +51,15 @@ TEST_F(HeapControllerTest, HeapGrowingFactor) {
TEST_F(HeapControllerTest, MaxHeapGrowingFactor) {
HeapController heap_controller(i_isolate()->heap());
CheckEqualRounded(
- 1.3, heap_controller.MaxGrowingFactor(heap_controller.kMinSize * MB));
+ 1.3, heap_controller.MaxGrowingFactor(HeapController::kMinSize * MB));
CheckEqualRounded(1.600, heap_controller.MaxGrowingFactor(
- heap_controller.kMaxSize / 2 * MB));
+ HeapController::kMaxSize / 2 * MB));
CheckEqualRounded(
1.999, heap_controller.MaxGrowingFactor(
- (heap_controller.kMaxSize - Heap::kPointerMultiplier) * MB));
+ (HeapController::kMaxSize - Heap::kPointerMultiplier) * MB));
CheckEqualRounded(4.0,
heap_controller.MaxGrowingFactor(
- static_cast<size_t>(heap_controller.kMaxSize) * MB));
+ static_cast<size_t>(HeapController::kMaxSize) * MB));
}
TEST_F(HeapControllerTest, OldGenerationAllocationLimit) {
@@ -75,39 +75,43 @@ TEST_F(HeapControllerTest, OldGenerationAllocationLimit) {
double factor =
heap_controller.GrowingFactor(gc_speed, mutator_speed, max_factor);
- EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
- new_space_capacity, Heap::HeapGrowingMode::kDefault));
+ EXPECT_EQ(
+ static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ heap->heap_controller()->CalculateAllocationLimit(
+ old_gen_size, max_old_generation_size, max_factor, gc_speed,
+ mutator_speed, new_space_capacity, Heap::HeapGrowingMode::kDefault));
- factor = Min(factor, heap_controller.kConservativeGrowingFactor);
- EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
- new_space_capacity, Heap::HeapGrowingMode::kSlow));
-
- factor = Min(factor, heap_controller.kConservativeGrowingFactor);
- EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
- new_space_capacity, Heap::HeapGrowingMode::kConservative));
+ factor = Min(factor, heap_controller.conservative_growing_factor_);
+ EXPECT_EQ(
+ static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ heap->heap_controller()->CalculateAllocationLimit(
+ old_gen_size, max_old_generation_size, max_factor, gc_speed,
+ mutator_speed, new_space_capacity, Heap::HeapGrowingMode::kSlow));
- factor = heap_controller.kMinGrowingFactor;
+ factor = Min(factor, heap_controller.conservative_growing_factor_);
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
- new_space_capacity, Heap::HeapGrowingMode::kMinimal));
+ old_gen_size, max_old_generation_size, max_factor, gc_speed,
+ mutator_speed, new_space_capacity,
+ Heap::HeapGrowingMode::kConservative));
+
+ factor = heap_controller.min_growing_factor_;
+ EXPECT_EQ(
+ static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ heap->heap_controller()->CalculateAllocationLimit(
+ old_gen_size, max_old_generation_size, max_factor, gc_speed,
+ mutator_speed, new_space_capacity, Heap::HeapGrowingMode::kMinimal));
}
TEST_F(HeapControllerTest, MaxOldGenerationSize) {
HeapController heap_controller(i_isolate()->heap());
uint64_t configurations[][2] = {
- {0, heap_controller.kMinSize},
- {512, heap_controller.kMinSize},
+ {0, HeapController::kMinSize},
+ {512, HeapController::kMinSize},
{1 * GB, 256 * Heap::kPointerMultiplier},
{2 * static_cast<uint64_t>(GB), 512 * Heap::kPointerMultiplier},
- {4 * static_cast<uint64_t>(GB), heap_controller.kMaxSize},
- {8 * static_cast<uint64_t>(GB), heap_controller.kMaxSize}};
+ {4 * static_cast<uint64_t>(GB), HeapController::kMaxSize},
+ {8 * static_cast<uint64_t>(GB), HeapController::kMaxSize}};
for (auto configuration : configurations) {
ASSERT_EQ(configuration[1],
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 3f08278d13..dd14e22d54 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -59,5 +59,17 @@ TEST_F(HeapTest, ASLR) {
#endif // V8_TARGET_ARCH_X64
}
+TEST_F(HeapTest, ExternalLimitDefault) {
+ Heap* heap = i_isolate()->heap();
+ EXPECT_EQ(kExternalAllocationSoftLimit, heap->external_memory_limit_);
+}
+
+TEST_F(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling) {
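+  // Raise and then fully release external memory; the tracked limit must
+  // not drop below the default soft limit afterwards.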
+ v8_isolate()->AdjustAmountOfExternalAllocatedMemory(+10 * MB);
+ v8_isolate()->AdjustAmountOfExternalAllocatedMemory(-10 * MB);
+ Heap* heap = i_isolate()->heap();
+ EXPECT_GE(heap->external_memory_limit_, kExternalAllocationSoftLimit);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
index adeae2b593..36d99a31ba 100644
--- a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
@@ -151,7 +151,7 @@ class TaskForDifferentItems;
class BaseItem : public ItemParallelJob::Item {
public:
- virtual ~BaseItem() {}
+ ~BaseItem() override = default;
virtual void ProcessItem(TaskForDifferentItems* task) = 0;
};
@@ -162,7 +162,7 @@ class TaskForDifferentItems : public ItemParallelJob::Task {
: ItemParallelJob::Task(isolate),
processed_a_(processed_a),
processed_b_(processed_b) {}
- virtual ~TaskForDifferentItems() {}
+ ~TaskForDifferentItems() override = default;
void RunInParallel() override {
BaseItem* item = nullptr;
@@ -182,13 +182,13 @@ class TaskForDifferentItems : public ItemParallelJob::Task {
class ItemA : public BaseItem {
public:
- virtual ~ItemA() {}
+ ~ItemA() override = default;
void ProcessItem(TaskForDifferentItems* task) override { task->ProcessA(); }
};
class ItemB : public BaseItem {
public:
- virtual ~ItemB() {}
+ ~ItemB() override = default;
void ProcessItem(TaskForDifferentItems* task) override { task->ProcessB(); }
};
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index d81b7e1413..5266e54e09 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -16,11 +16,11 @@ typedef TestWithIsolate SpacesTest;
TEST_F(SpacesTest, CompactionSpaceMerge) {
Heap* heap = i_isolate()->heap();
OldSpace* old_space = heap->old_space();
- EXPECT_TRUE(old_space != NULL);
+ EXPECT_TRUE(old_space != nullptr);
CompactionSpace* compaction_space =
new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
- EXPECT_TRUE(compaction_space != NULL);
+ EXPECT_TRUE(compaction_space != nullptr);
for (Page* p : *old_space) {
// Unlink free lists from the main space to avoid reusing the memory for
@@ -118,9 +118,9 @@ TEST_F(SpacesTest, WriteBarrierInNewSpaceFromSpace) {
TEST_F(SpacesTest, CodeRangeAddressReuse) {
CodeRangeAddressHint hint;
// Create code ranges.
- void* code_range1 = hint.GetAddressHint(100);
- void* code_range2 = hint.GetAddressHint(200);
- void* code_range3 = hint.GetAddressHint(100);
+ Address code_range1 = hint.GetAddressHint(100);
+ Address code_range2 = hint.GetAddressHint(200);
+ Address code_range3 = hint.GetAddressHint(100);
// Since the addresses are random, we cannot check that they are different.
@@ -129,14 +129,14 @@ TEST_F(SpacesTest, CodeRangeAddressReuse) {
hint.NotifyFreedCodeRange(code_range2, 200);
// The next two code ranges should reuse the freed addresses.
- void* code_range4 = hint.GetAddressHint(100);
+ Address code_range4 = hint.GetAddressHint(100);
EXPECT_EQ(code_range4, code_range1);
- void* code_range5 = hint.GetAddressHint(200);
+ Address code_range5 = hint.GetAddressHint(200);
EXPECT_EQ(code_range5, code_range2);
// Free the third code range and check address reuse.
hint.NotifyFreedCodeRange(code_range3, 100);
- void* code_range6 = hint.GetAddressHint(100);
+ Address code_range6 = hint.GetAddressHint(100);
EXPECT_EQ(code_range6, code_range3);
}
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 5030d3897d..a2c8d94793 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -22,8 +22,8 @@ namespace interpreter {
class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
public:
- BytecodeArrayBuilderTest() {}
- ~BytecodeArrayBuilderTest() override {}
+ BytecodeArrayBuilderTest() = default;
+ ~BytecodeArrayBuilderTest() override = default;
};
using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode;
@@ -134,9 +134,12 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit load / store property operations.
builder.LoadNamedProperty(reg, name, load_slot.ToInt())
+ .LoadNamedPropertyNoFeedback(reg, name)
.LoadKeyedProperty(reg, keyed_load_slot.ToInt())
.StoreNamedProperty(reg, name, sloppy_store_slot.ToInt(),
LanguageMode::kSloppy)
+ .StoreNamedPropertyNoFeedback(reg, name, LanguageMode::kStrict)
+ .StoreNamedPropertyNoFeedback(reg, name, LanguageMode::kSloppy)
.StoreKeyedProperty(reg, reg, sloppy_keyed_store_slot.ToInt(),
LanguageMode::kSloppy)
.StoreNamedProperty(reg, name, strict_store_slot.ToInt(),
@@ -194,7 +197,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CallRuntime(Runtime::kIsArray, reg)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, reg_list, pair)
.CallJSRuntime(Context::OBJECT_CREATE, reg_list)
- .CallWithSpread(reg, reg_list, 1);
+ .CallWithSpread(reg, reg_list, 1)
+ .CallNoFeedback(reg, reg_list);
// Emit binary operator invocations.
builder.BinaryOperation(Token::Value::ADD, reg, 1)
@@ -375,6 +379,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateRegExpLiteral(ast_factory.GetOneByteString("wide_literal"), 0, 0)
.CreateArrayLiteral(0, 0, 0)
.CreateEmptyArrayLiteral(0)
+ .CreateArrayFromIterable()
.CreateObjectLiteral(0, 0, 0, reg)
.CreateEmptyObjectLiteral()
.CloneObject(reg, 0, 0);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index f7c89e2869..69d0e96507 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -16,8 +16,8 @@ namespace interpreter {
class BytecodeArrayIteratorTest : public TestWithIsolateAndZone {
public:
- BytecodeArrayIteratorTest() {}
- ~BytecodeArrayIteratorTest() override {}
+ BytecodeArrayIteratorTest() = default;
+ ~BytecodeArrayIteratorTest() override = default;
};
TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 8d2cd4c501..71c79300f3 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -16,8 +16,8 @@ namespace interpreter {
class BytecodeArrayRandomIteratorTest : public TestWithIsolateAndZone {
public:
- BytecodeArrayRandomIteratorTest() {}
- ~BytecodeArrayRandomIteratorTest() override {}
+ BytecodeArrayRandomIteratorTest() = default;
+ ~BytecodeArrayRandomIteratorTest() override = default;
};
TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 5eb4d3be9a..7c01228936 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -34,7 +34,7 @@ class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
bytecode_array_writer_(
zone(), &constant_array_builder_,
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS) {}
- ~BytecodeArrayWriterUnittest() override {}
+ ~BytecodeArrayWriterUnittest() override = default;
void Write(Bytecode bytecode, BytecodeSourceInfo info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0,
diff --git a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
index 018263f06b..eb4fdbb745 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
@@ -49,10 +49,10 @@ TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
3,
0,
" ForInPrepare r10-r12, [11]"},
- {{B(CallRuntime), U16(Runtime::FunctionId::kIsDate), R8(0), U8(0)},
+ {{B(CallRuntime), U16(Runtime::FunctionId::kIsSmi), R8(0), U8(0)},
5,
0,
- " CallRuntime [IsDate], r0-r0"},
+ " CallRuntime [IsSmi], r0-r0"},
{{B(Ldar),
static_cast<uint8_t>(Register::FromParameterIndex(2, 3).ToOperand())},
2,
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
index b2c8b47c79..2ba28b2306 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
@@ -16,7 +16,7 @@ namespace interpreter {
class BytecodeRegisterAllocatorTest : public TestWithIsolateAndZone {
public:
BytecodeRegisterAllocatorTest() : allocator_(0) {}
- ~BytecodeRegisterAllocatorTest() override {}
+ ~BytecodeRegisterAllocatorTest() override = default;
BytecodeRegisterAllocator* allocator() { return &allocator_; }
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
index 9e3ceb140f..9879b2a84a 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -23,7 +23,7 @@ class BytecodeRegisterOptimizerTest
Register output;
};
- BytecodeRegisterOptimizerTest() {}
+ BytecodeRegisterOptimizerTest() = default;
~BytecodeRegisterOptimizerTest() override { delete register_allocator_; }
void Initialize(int number_of_parameters, int number_of_locals) {
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index 59e228a29c..46bbb900c0 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -18,8 +18,8 @@ namespace interpreter {
class ConstantArrayBuilderTest : public TestWithIsolateAndZone {
public:
- ConstantArrayBuilderTest() {}
- ~ConstantArrayBuilderTest() override {}
+ ConstantArrayBuilderTest() = default;
+ ~ConstantArrayBuilderTest() override = default;
static const size_t k8BitCapacity = ConstantArrayBuilder::k8BitCapacity;
static const size_t k16BitCapacity = ConstantArrayBuilder::k16BitCapacity;
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index 669db93040..cec661b468 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -28,8 +28,8 @@ class InterpreterAssemblerTestState : public compiler::CodeAssemblerState {
class InterpreterAssemblerTest : public TestWithIsolateAndZone {
public:
- InterpreterAssemblerTest() {}
- ~InterpreterAssemblerTest() override {}
+ InterpreterAssemblerTest() = default;
+ ~InterpreterAssemblerTest() override = default;
class InterpreterAssemblerForTest final : public InterpreterAssembler {
public:
diff --git a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
index 031eb9efbd..cb219a4737 100644
--- a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
@@ -17,13 +17,15 @@ namespace default_platform_unittest {
namespace {
struct MockTask : public Task {
- virtual ~MockTask() { Die(); }
+ // See issue v8:8185
+ ~MockTask() /* override */ { Die(); }
MOCK_METHOD0(Run, void());
MOCK_METHOD0(Die, void());
};
struct MockIdleTask : public IdleTask {
- virtual ~MockIdleTask() { Die(); }
+ // See issue v8:8185
+ ~MockIdleTask() /* override */ { Die(); }
MOCK_METHOD1(Run, void(double deadline_in_seconds));
MOCK_METHOD0(Die, void());
};
@@ -242,10 +244,10 @@ class TestBackgroundTask : public Task {
explicit TestBackgroundTask(base::Semaphore* sem, bool* executed)
: sem_(sem), executed_(executed) {}
- virtual ~TestBackgroundTask() { Die(); }
+ ~TestBackgroundTask() override { Die(); }
MOCK_METHOD0(Die, void());
- void Run() {
+ void Run() override {
*executed_ = true;
sem_->Signal();
}
diff --git a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
index a42b37aa7c..0caad1ef22 100644
--- a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
@@ -17,7 +17,8 @@ namespace platform {
namespace {
struct MockTask : public Task {
- virtual ~MockTask() { Die(); }
+ // See issue v8:8185
+ ~MockTask() /* override */ { Die(); }
MOCK_METHOD0(Run, void());
MOCK_METHOD0(Die, void());
};
diff --git a/deps/v8/test/unittests/object-unittest.cc b/deps/v8/test/unittests/object-unittest.cc
index ad8d631961..505d76df8b 100644
--- a/deps/v8/test/unittests/object-unittest.cc
+++ b/deps/v8/test/unittests/object-unittest.cc
@@ -81,8 +81,8 @@ TEST(Object, StructListOrder) {
int last = current - 1;
ASSERT_LT(0, last);
InstanceType current_type = static_cast<InstanceType>(current);
-#define TEST_STRUCT(type, class, name) \
- current_type = InstanceType::type##_TYPE; \
+#define TEST_STRUCT(TYPE, class, name) \
+ current_type = InstanceType::TYPE; \
current = static_cast<int>(current_type); \
EXPECT_EQ(last + 1, current) \
<< " STRUCT_LIST is not ordered: " \
diff --git a/deps/v8/test/unittests/objects/microtask-queue-unittest.cc b/deps/v8/test/unittests/objects/microtask-queue-unittest.cc
new file mode 100644
index 0000000000..2b237ebc50
--- /dev/null
+++ b/deps/v8/test/unittests/objects/microtask-queue-unittest.cc
@@ -0,0 +1,55 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/microtask-queue-inl.h"
+
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+
+void NoopCallback(void*) {}
+
+class MicrotaskQueueTest : public TestWithIsolate {
+ public:
+ Handle<Microtask> NewMicrotask() {
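+    // Wrap the raw callback and data pointers in Foreign heap objects so the
+    // factory can embed them in an on-heap CallbackTask microtask.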
+ MicrotaskCallback callback = &NoopCallback;
+ void* data = nullptr;
+ return factory()->NewCallbackTask(
+ factory()->NewForeign(reinterpret_cast<Address>(callback)),
+ factory()->NewForeign(reinterpret_cast<Address>(data)));
+ }
+};
+
+TEST_F(MicrotaskQueueTest, EnqueueMicrotask) {
+ Handle<MicrotaskQueue> microtask_queue = factory()->NewMicrotaskQueue();
+ Handle<Microtask> microtask = NewMicrotask();
+
+ EXPECT_EQ(0, microtask_queue->pending_microtask_count());
+ MicrotaskQueue::EnqueueMicrotask(isolate(), microtask_queue, microtask);
+ EXPECT_EQ(1, microtask_queue->pending_microtask_count());
+ ASSERT_LE(1, microtask_queue->queue()->length());
+ EXPECT_EQ(*microtask, microtask_queue->queue()->get(0));
+
+ std::vector<Handle<Microtask>> microtasks;
+ microtasks.push_back(microtask);
+
+ // Queue microtasks until the reallocation happens.
+ int queue_capacity = microtask_queue->queue()->length();
+ for (int i = 0; i < queue_capacity; ++i) {
+ microtask = NewMicrotask();
+ MicrotaskQueue::EnqueueMicrotask(isolate(), microtask_queue, microtask);
+ microtasks.push_back(microtask);
+ }
+
+ int num_tasks = static_cast<int>(microtasks.size());
+ EXPECT_EQ(num_tasks, microtask_queue->pending_microtask_count());
+ ASSERT_LE(num_tasks, microtask_queue->queue()->length());
+ for (int i = 0; i < num_tasks; ++i) {
+ EXPECT_EQ(*microtasks[i], microtask_queue->queue()->get(i));
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/parser/preparser-unittest.cc b/deps/v8/test/unittests/parser/preparser-unittest.cc
index f20fbb2cee..ee5590e3f1 100644
--- a/deps/v8/test/unittests/parser/preparser-unittest.cc
+++ b/deps/v8/test/unittests/parser/preparser-unittest.cc
@@ -13,7 +13,7 @@ namespace internal {
class PreParserTest : public TestWithNativeContext {
public:
- PreParserTest() {}
+ PreParserTest() = default;
private:
DISALLOW_COPY_AND_ASSIGN(PreParserTest);
diff --git a/deps/v8/test/unittests/register-configuration-unittest.cc b/deps/v8/test/unittests/register-configuration-unittest.cc
index 0688a5e54e..f0da8a5b93 100644
--- a/deps/v8/test/unittests/register-configuration-unittest.cc
+++ b/deps/v8/test/unittests/register-configuration-unittest.cc
@@ -14,8 +14,8 @@ const MachineRepresentation kSimd128 = MachineRepresentation::kSimd128;
class RegisterConfigurationUnitTest : public ::testing::Test {
public:
- RegisterConfigurationUnitTest() {}
- virtual ~RegisterConfigurationUnitTest() {}
+ RegisterConfigurationUnitTest() = default;
+ ~RegisterConfigurationUnitTest() override = default;
};
TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
diff --git a/deps/v8/test/unittests/run-all-unittests.cc b/deps/v8/test/unittests/run-all-unittests.cc
index f353e83ecf..712770e9dc 100644
--- a/deps/v8/test/unittests/run-all-unittests.cc
+++ b/deps/v8/test/unittests/run-all-unittests.cc
@@ -11,18 +11,18 @@ namespace {
class DefaultPlatformEnvironment final : public ::testing::Environment {
public:
- DefaultPlatformEnvironment() {}
+ DefaultPlatformEnvironment() = default;
void SetUp() override {
platform_ = v8::platform::NewDefaultPlatform(
0, v8::platform::IdleTaskSupport::kEnabled);
- ASSERT_TRUE(platform_.get() != NULL);
+ ASSERT_TRUE(platform_.get() != nullptr);
v8::V8::InitializePlatform(platform_.get());
ASSERT_TRUE(v8::V8::Initialize());
}
void TearDown() override {
- ASSERT_TRUE(platform_.get() != NULL);
+ ASSERT_TRUE(platform_.get() != nullptr);
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
}
diff --git a/deps/v8/test/unittests/source-position-table-unittest.cc b/deps/v8/test/unittests/source-position-table-unittest.cc
index 1ad6dec006..23fd1a95d2 100644
--- a/deps/v8/test/unittests/source-position-table-unittest.cc
+++ b/deps/v8/test/unittests/source-position-table-unittest.cc
@@ -14,8 +14,8 @@ namespace interpreter {
class SourcePositionTableTest : public TestWithIsolate {
public:
- SourcePositionTableTest() {}
- ~SourcePositionTableTest() override {}
+ SourcePositionTableTest() = default;
+ ~SourcePositionTableTest() override = default;
SourcePosition toPos(int offset) {
return SourcePosition(offset, offset % 10 - 1);
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index c771906dc2..1ff25337e4 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -6,10 +6,13 @@
#include "include/v8.h"
#include "src/api.h"
+#include "src/base/template-utils.h"
#include "src/handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/scanner.h"
namespace v8 {
namespace internal {
@@ -17,13 +20,13 @@ namespace test {
Handle<String> CreateSource(Isolate* isolate,
ExternalOneByteString::Resource* maybe_resource) {
- static const char test_script[] = "(x) { x*x; }";
- if (maybe_resource) {
- return isolate->factory()
- ->NewExternalStringFromOneByte(maybe_resource)
- .ToHandleChecked();
+ if (!maybe_resource) {
+ static const char test_script[] = "(x) { x*x; }";
+ maybe_resource = new test::ScriptResource(test_script, strlen(test_script));
}
- return isolate->factory()->NewStringFromAsciiChecked(test_script);
+ return isolate->factory()
+ ->NewExternalStringFromOneByte(maybe_resource)
+ .ToHandleChecked();
}
Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
@@ -51,6 +54,23 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
return scope.CloseAndEscape(shared);
}
+std::unique_ptr<ParseInfo> OuterParseInfoForShared(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared) {
+ Handle<Script> script =
+ Handle<Script>::cast(handle(shared->script(), isolate));
+ std::unique_ptr<ParseInfo> result =
+ base::make_unique<ParseInfo>(isolate, script);
+
+ // Create a character stream to simulate the parser having done so for the
+  // top-level ParseProgram.
+ Handle<String> source(String::cast(script->source()), isolate);
+ std::unique_ptr<Utf16CharacterStream> stream(
+ ScannerStream::For(isolate, source));
+ result->set_character_stream(std::move(stream));
+
+ return result;
+}
+
} // namespace test
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/test-helpers.h b/deps/v8/test/unittests/test-helpers.h
index 223b22e38e..fadc0c3e2b 100644
--- a/deps/v8/test/unittests/test-helpers.h
+++ b/deps/v8/test/unittests/test-helpers.h
@@ -46,6 +46,8 @@ Handle<String> CreateSource(
Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
Isolate* isolate,
v8::String::ExternalOneByteStringResource* maybe_resource);
+std::unique_ptr<ParseInfo> OuterParseInfoForShared(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared);
} // namespace test
} // namespace internal
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 2b099e0ea5..32f405764d 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -24,32 +24,28 @@ Isolate* TestWithIsolate::isolate_ = nullptr;
TestWithIsolate::TestWithIsolate()
: isolate_scope_(isolate()), handle_scope_(isolate()) {}
-
-TestWithIsolate::~TestWithIsolate() {}
-
+TestWithIsolate::~TestWithIsolate() = default;
// static
void TestWithIsolate::SetUpTestCase() {
Test::SetUpTestCase();
- EXPECT_EQ(NULL, isolate_);
- // Make BigInt64Array / BigUint64Array available for testing.
- i::FLAG_harmony_bigint = true;
+ EXPECT_EQ(nullptr, isolate_);
v8::Isolate::CreateParams create_params;
array_buffer_allocator_ = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
create_params.array_buffer_allocator = array_buffer_allocator_;
isolate_ = v8::Isolate::New(create_params);
- EXPECT_TRUE(isolate_ != NULL);
+ EXPECT_TRUE(isolate_ != nullptr);
}
// static
void TestWithIsolate::TearDownTestCase() {
- ASSERT_TRUE(isolate_ != NULL);
+ ASSERT_TRUE(isolate_ != nullptr);
v8::Platform* platform = internal::V8::GetCurrentPlatform();
- ASSERT_TRUE(platform != NULL);
+ ASSERT_TRUE(platform != nullptr);
while (platform::PumpMessageLoop(platform, isolate_)) continue;
isolate_->Dispose();
- isolate_ = NULL;
+ isolate_ = nullptr;
delete array_buffer_allocator_;
Test::TearDownTestCase();
}
@@ -64,10 +60,20 @@ Local<Value> TestWithIsolate::RunJS(const char* source) {
return script->Run(isolate()->GetCurrentContext()).ToLocalChecked();
}
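+// Overload that compiles the script from an external one-byte string
+// resource, letting tests exercise V8's external-string code paths without
+// copying the source.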
+Local<Value> TestWithIsolate::RunJS(
+ String::ExternalOneByteStringResource* source) {
+ Local<Script> script =
+ v8::Script::Compile(
+ isolate()->GetCurrentContext(),
+ v8::String::NewExternalOneByte(isolate(), source).ToLocalChecked())
+ .ToLocalChecked();
+ return script->Run(isolate()->GetCurrentContext()).ToLocalChecked();
+}
+
TestWithContext::TestWithContext()
: context_(Context::New(isolate())), context_scope_(context_) {}
-TestWithContext::~TestWithContext() {}
+TestWithContext::~TestWithContext() = default;
v8::Local<v8::String> TestWithContext::NewString(const char* string) {
return v8::String::NewFromUtf8(v8_isolate(), string,
@@ -85,9 +91,9 @@ void TestWithContext::SetGlobalProperty(const char* name,
namespace internal {
-TestWithIsolate::~TestWithIsolate() {}
+TestWithIsolate::~TestWithIsolate() = default;
-TestWithIsolateAndZone::~TestWithIsolateAndZone() {}
+TestWithIsolateAndZone::~TestWithIsolateAndZone() = default;
Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
@@ -95,13 +101,18 @@ Handle<Object> TestWithIsolate::RunJSInternal(const char* source) {
return Utils::OpenHandle(*::v8::TestWithIsolate::RunJS(source));
}
+Handle<Object> TestWithIsolate::RunJSInternal(
+ ::v8::String::ExternalOneByteStringResource* source) {
+ return Utils::OpenHandle(*::v8::TestWithIsolate::RunJS(source));
+}
+
base::RandomNumberGenerator* TestWithIsolate::random_number_generator() const {
return isolate()->random_number_generator();
}
-TestWithZone::~TestWithZone() {}
+TestWithZone::~TestWithZone() = default;
-TestWithNativeContext::~TestWithNativeContext() {}
+TestWithNativeContext::~TestWithNativeContext() = default;
Handle<Context> TestWithNativeContext::native_context() const {
return isolate()->native_context();
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index c361810219..289ef5edf2 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -26,7 +26,7 @@ class ArrayBufferAllocator;
class TestWithIsolate : public virtual ::testing::Test {
public:
TestWithIsolate();
- virtual ~TestWithIsolate();
+ ~TestWithIsolate() override;
v8::Isolate* isolate() const { return v8_isolate(); }
@@ -37,6 +37,7 @@ class TestWithIsolate : public virtual ::testing::Test {
}
Local<Value> RunJS(const char* source);
+ Local<Value> RunJS(String::ExternalOneByteStringResource* source);
static void SetUpTestCase();
static void TearDownTestCase();
@@ -55,7 +56,7 @@ class TestWithIsolate : public virtual ::testing::Test {
class TestWithContext : public virtual v8::TestWithIsolate {
public:
TestWithContext();
- virtual ~TestWithContext();
+ ~TestWithContext() override;
const Local<Context>& context() const { return v8_context(); }
const Local<Context>& v8_context() const { return context_; }
@@ -78,8 +79,8 @@ class Factory;
class TestWithIsolate : public virtual ::v8::TestWithIsolate {
public:
- TestWithIsolate() {}
- virtual ~TestWithIsolate();
+ TestWithIsolate() = default;
+ ~TestWithIsolate() override;
Factory* factory() const;
Isolate* isolate() const { return i_isolate(); }
@@ -88,6 +89,13 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
return Handle<T>::cast(RunJSInternal(source));
}
Handle<Object> RunJSInternal(const char* source);
+ template <typename T = Object>
+ Handle<T> RunJS(::v8::String::ExternalOneByteStringResource* source) {
+ return Handle<T>::cast(RunJSInternal(source));
+ }
+ Handle<Object> RunJSInternal(
+ ::v8::String::ExternalOneByteStringResource* source);
+
base::RandomNumberGenerator* random_number_generator() const;
private:
@@ -97,7 +105,7 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
class TestWithZone : public virtual ::testing::Test {
public:
TestWithZone() : zone_(&allocator_, ZONE_NAME) {}
- virtual ~TestWithZone();
+ ~TestWithZone() override;
Zone* zone() { return &zone_; }
@@ -111,7 +119,7 @@ class TestWithZone : public virtual ::testing::Test {
class TestWithIsolateAndZone : public virtual TestWithIsolate {
public:
TestWithIsolateAndZone() : zone_(&allocator_, ZONE_NAME) {}
- virtual ~TestWithIsolateAndZone();
+ ~TestWithIsolateAndZone() override;
Zone* zone() { return &zone_; }
@@ -125,8 +133,8 @@ class TestWithIsolateAndZone : public virtual TestWithIsolate {
class TestWithNativeContext : public virtual ::v8::TestWithContext,
public virtual TestWithIsolate {
public:
- TestWithNativeContext() {}
- virtual ~TestWithNativeContext();
+ TestWithNativeContext() = default;
+ ~TestWithNativeContext() override;
Handle<Context> native_context() const;
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index d44d4b4e33..f0eef446d1 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -15,4 +15,9 @@
'RandomNumberGenerator.NextSampleSlowInvalidParam1': [SKIP],
'RandomNumberGenerator.NextSampleSlowInvalidParam2': [SKIP],
}], # 'system == macos and asan'
+
+['(arch == arm or arch == mips) and not simulator_run', {
+ # Uses too much memory.
+ 'Parameterized/WasmCodeManagerTest.GrowingVsFixedModule/Fixed': [SKIP]
+}], # '(arch == arm or arch == mips) and not simulator_run'
]
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
index 77f609052a..2cc0bdc8a6 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -58,7 +58,7 @@ class ValueSerializerTest : public TestWithIsolate {
isolate_ = reinterpret_cast<i::Isolate*>(isolate());
}
- ~ValueSerializerTest() {
+ ~ValueSerializerTest() override {
    // In some cases unhandled scheduled exceptions from the current test cause
    // Context::New(isolate()) in the next test's constructor to return NULL.
    // To prevent that, we added a destructor that will clear scheduled
@@ -228,7 +228,7 @@ class ValueSerializerTest : public TestWithIsolate {
Local<Script> script =
Script::Compile(deserialization_context_, source).ToLocalChecked();
Local<Value> value = script->Run(deserialization_context_).ToLocalChecked();
- EXPECT_TRUE(value->BooleanValue(deserialization_context_).FromJust());
+ EXPECT_TRUE(value->BooleanValue(isolate()));
}
Local<String> StringFromUtf8(const char* source) {
@@ -1870,6 +1870,22 @@ TEST_F(ValueSerializerTest, DecodeDataView) {
ExpectScriptTrue("Object.getPrototypeOf(result) === DataView.prototype");
}
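+// The byte strings below are hand-crafted ValueSerializer payloads for a
+// dense array that also carries a "length" property; decoding them is
+// expected to die on the LookupIterator check named in the death-test
+// pattern.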
+TEST_F(ValueSerializerTest, DecodeArrayWithLengthProperty1) {
+ ASSERT_DEATH_IF_SUPPORTED(
+ DecodeTest({0xff, 0x0d, 0x41, 0x03, 0x49, 0x02, 0x49, 0x04,
+ 0x49, 0x06, 0x22, 0x06, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x49, 0x02, 0x24, 0x01, 0x03}),
+ ".*LookupIterator::NOT_FOUND == it.state\\(\\).*");
+}
+
+TEST_F(ValueSerializerTest, DecodeArrayWithLengthProperty2) {
+ ASSERT_DEATH_IF_SUPPORTED(
+ DecodeTest({0xff, 0x0d, 0x41, 0x03, 0x49, 0x02, 0x49, 0x04,
+ 0x49, 0x06, 0x22, 0x06, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x6f, 0x7b, 0x00, 0x24, 0x01, 0x03}),
+ ".*LookupIterator::NOT_FOUND == it.state\\(\\).*");
+}
+
TEST_F(ValueSerializerTest, DecodeInvalidDataView) {
// Byte offset out of range.
InvalidDecodeTest(
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index 627a9da3ee..e2a7bcc388 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -674,7 +674,7 @@ TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
}
TEST_F(DecoderTest, FailOnNullData) {
- decoder.Reset(nullptr, 0);
+ decoder.Reset(nullptr, nullptr);
decoder.checkAvailable(1);
EXPECT_FALSE(decoder.ok());
EXPECT_FALSE(decoder.toResult(nullptr).ok());
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 771c61e237..31e4a12ae7 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -2403,34 +2403,29 @@ TEST_F(FunctionBodyDecoderTest, Throw) {
WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
-
- builder.AddException(sigs.v_v());
- builder.AddException(sigs.v_i());
- AddLocals(kWasmI32, 1);
-
- EXPECT_VERIFIES(v_v, kExprThrow, 0);
-
- // exception index out of range.
- EXPECT_FAILURE(v_v, kExprThrow, 2);
-
- EXPECT_VERIFIES(v_v, WASM_I32V(0), kExprThrow, 1);
-
- // TODO(kschimpf): Add more tests.
+ byte ex1 = builder.AddException(sigs.v_v());
+ byte ex2 = builder.AddException(sigs.v_i());
+ byte ex3 = builder.AddException(sigs.v_ii());
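+  // Throw pops arguments matching the exception signature; mismatched
+  // argument types and out-of-range exception indices must fail validation.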
+ EXPECT_VERIFIES(v_v, kExprThrow, ex1);
+ EXPECT_VERIFIES(v_v, WASM_I32V(0), kExprThrow, ex2);
+ EXPECT_FAILURE(v_v, WASM_F32(0.0), kExprThrow, ex2);
+ EXPECT_VERIFIES(v_v, WASM_I32V(0), WASM_I32V(0), kExprThrow, ex3);
+ EXPECT_FAILURE(v_v, WASM_F32(0.0), WASM_I32V(0), kExprThrow, ex3);
+ EXPECT_FAILURE(v_v, kExprThrow, 99);
}
TEST_F(FunctionBodyDecoderTest, ThrowUnreachable) {
- // TODO(titzer): unreachable code after throw should validate.
WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
-
- builder.AddException(sigs.v_v());
- builder.AddException(sigs.v_i());
- AddLocals(kWasmI32, 1);
- EXPECT_VERIFIES(i_i, kExprThrow, 0, WASM_GET_LOCAL(0));
-
- // TODO(kschimpf): Add more (block-level) tests of unreachable to see
- // if they validate.
+ byte ex1 = builder.AddException(sigs.v_v());
+ byte ex2 = builder.AddException(sigs.v_i());
+ EXPECT_VERIFIES(i_i, WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_NOP);
+ EXPECT_VERIFIES(v_i, WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_NOP);
+ EXPECT_VERIFIES(i_i, WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_ZERO);
+ EXPECT_FAILURE(v_i, WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_ZERO);
+ EXPECT_FAILURE(i_i, WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_F32(0.0));
+ EXPECT_FAILURE(v_i, WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_F32(0.0));
}
#define WASM_TRY_OP kExprTry, kLocalVoid
@@ -2438,24 +2433,30 @@ TEST_F(FunctionBodyDecoderTest, ThrowUnreachable) {
TEST_F(FunctionBodyDecoderTest, TryCatch) {
WASM_FEATURE_SCOPE(eh);
-
TestModuleBuilder builder;
module = builder.module();
- builder.AddException(sigs.v_v());
- builder.AddException(sigs.v_v());
-
- // TODO(kschimpf): Need to fix catch to use declared exception.
- EXPECT_VERIFIES(v_v, WASM_TRY_OP, WASM_CATCH(0), kExprEnd);
-
- // Missing catch.
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprEnd);
+ byte ex1 = builder.AddException(sigs.v_v());
+ byte ex2 = builder.AddException(sigs.v_v());
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, WASM_CATCH(ex1), kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprEnd); // Missing catch.
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, WASM_CATCH(ex1)); // Missing end.
+ EXPECT_FAILURE(v_v, WASM_CATCH(ex1), kExprEnd); // Missing try.
- // Missing end.
- EXPECT_FAILURE(v_i, WASM_TRY_OP, WASM_CATCH(0));
+ // TODO(mstarzinger): Double catch. Fix this to verify.
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, WASM_CATCH(ex1), WASM_CATCH(ex2), kExprEnd);
+}
- // Double catch.
- // TODO(kschimpf): Fix this to verify.
- EXPECT_FAILURE(v_i, WASM_TRY_OP, WASM_CATCH(0), WASM_CATCH(1), kExprEnd);
+TEST_F(FunctionBodyDecoderTest, TryCatchAll) {
+ WASM_FEATURE_SCOPE(eh);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte ex1 = builder.AddException(sigs.v_v());
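+  // A catch-all clause must follow any exception-specific catch clauses and
+  // may appear at most once per try block.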
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatchAll, kExprEnd);
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, WASM_CATCH(ex1), kExprCatchAll, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatchAll, kExprCatchAll, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatchAll, WASM_CATCH(ex1), kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatchAll); // Missing end.
+ EXPECT_FAILURE(v_v, kExprCatchAll, kExprEnd); // Missing try.
}
#undef WASM_TRY_OP
@@ -3132,6 +3133,20 @@ TEST_F(LocalDeclDecoderTest, UseEncoder) {
pos = ExpectRun(map, pos, kWasmI64, 212);
}
+TEST_F(LocalDeclDecoderTest, ExceptRef) {
+ WASM_FEATURE_SCOPE(eh);
+ ValueType type = kWasmExceptRef;
+ const byte data[] = {1, 1,
+ static_cast<byte>(ValueTypes::ValueTypeCodeFor(type))};
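+  // The body declares one run of a single except_ref local: entry count 1,
+  // run length 1, then the value-type code.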
+ BodyLocalDecls decls(zone());
+ bool result = DecodeLocalDecls(&decls, data, data + sizeof(data));
+ EXPECT_TRUE(result);
+ EXPECT_EQ(1u, decls.type_list.size());
+
+ TypesOfLocals map = decls.type_list;
+ EXPECT_EQ(type, map[0]);
+}
+
class BytecodeIteratorTest : public TestWithZone {};
TEST_F(BytecodeIteratorTest, SimpleForeach) {
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 3507f897f9..83876b3e0f 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -61,7 +61,7 @@ namespace module_decoder_unittest {
ModuleResult result = DecodeModule((data), (data) + sizeof((data))); \
EXPECT_FALSE(result.ok()); \
EXPECT_EQ(0u, result.val->exceptions.size()); \
- } while (0)
+ } while (false)
#define X1(...) __VA_ARGS__
#define X2(...) __VA_ARGS__, __VA_ARGS__
@@ -207,7 +207,7 @@ TEST_F(WasmModuleVerifyTest, WrongVersion) {
}
TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
- ModuleResult result = DecodeModule(nullptr, 0);
+ ModuleResult result = DecodeModule(nullptr, nullptr);
EXPECT_TRUE(result.ok());
}
@@ -476,11 +476,9 @@ TEST_F(WasmModuleVerifyTest, ZeroExceptions) {
}
TEST_F(WasmModuleVerifyTest, OneI32Exception) {
- static const byte data[] = {
- SECTION_EXCEPTIONS(3), 1,
- // except[0] (i32)
- 1, kLocalI32,
- };
+ static const byte data[] = {SECTION_EXCEPTIONS(3), 1,
+ // except[0] (i32)
+ 1, kLocalI32};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
@@ -525,6 +523,70 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_type) {
EXPECT_FALSE(result.ok());
}
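+// The exception section is only valid between the Import and Export
+// sections; the next three tests pin down that ordering requirement.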
+TEST_F(WasmModuleVerifyTest, ExceptionSectionCorrectPlacement) {
+ static const byte data[] = {SECTION(Import, 1), 0, SECTION_EXCEPTIONS(1), 0,
+ SECTION(Export, 1), 0};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+}
+
+TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterExport) {
+ static const byte data[] = {SECTION(Export, 1), 0, SECTION_EXCEPTIONS(1), 0};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
+}
+
+TEST_F(WasmModuleVerifyTest, ExceptionSectionBeforeImport) {
+ static const byte data[] = {SECTION_EXCEPTIONS(1), 0, SECTION(Import, 1), 0};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
+}
+
+TEST_F(WasmModuleVerifyTest, ExceptionImport) {
+ static const byte data[] = {SECTION(Import, 9), // section header
+ 1, // number of imports
+ NAME_LENGTH(1), // --
+ 'm', // module name
+ NAME_LENGTH(2), // --
+ 'e', 'x', // exception name
+ kExternalException, // import kind
+ // except[0] (i32)
+ 1, kLocalI32};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+ EXPECT_EQ(1u, result.val->exceptions.size());
+ EXPECT_EQ(1u, result.val->import_table.size());
+}
+
+TEST_F(WasmModuleVerifyTest, ExceptionExport) {
+ static const byte data[] = {SECTION_EXCEPTIONS(3), 1,
+ // except[0] (i32)
+ 1, kLocalI32, SECTION(Export, 4),
+ 1, // exports
+ NO_NAME, // --
+ kExternalException, // --
+ EXCEPTION_INDEX(0)};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+ EXPECT_EQ(1u, result.val->exceptions.size());
+ EXPECT_EQ(1u, result.val->export_table.size());
+}
+
TEST_F(WasmModuleVerifyTest, OneSignature) {
{
static const byte data[] = {SIGNATURES_SECTION_VOID_VOID};
@@ -2068,7 +2130,7 @@ TEST_F(WasmModuleVerifyTest, Regression684855) {
class WasmInitExprDecodeTest : public TestWithZone {
public:
- WasmInitExprDecodeTest() {}
+ WasmInitExprDecodeTest() = default;
WasmFeatures enabled_features_;
@@ -2223,6 +2285,81 @@ TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
CheckSections(data, data + sizeof(data), expected, arraysize(expected));
}
+#define SRC_MAP \
+ 16, 's', 'o', 'u', 'r', 'c', 'e', 'M', 'a', 'p', 'p', 'i', 'n', 'g', 'U', \
+ 'R', 'L'
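+// SRC_MAP expands to the length-prefixed custom-section name
+// "sourceMappingURL" (16 bytes).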
+TEST_F(WasmModuleVerifyTest, SourceMappingURLSection) {
+#define SRC 's', 'r', 'c', '/', 'x', 'y', 'z', '.', 'c'
+ static const byte data[] = {SECTION(Unknown, 27), SRC_MAP, 9, SRC};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(9u, result.val->source_map_url.size());
+ const char src[] = {SRC};
+ EXPECT_EQ(
+ 0,
+ strncmp(reinterpret_cast<const char*>(result.val->source_map_url.data()),
+ src, 9));
+#undef SRC
+}
+
+TEST_F(WasmModuleVerifyTest, BadSourceMappingURLSection) {
+#define BAD_SRC 's', 'r', 'c', '/', 'x', 0xff, 'z', '.', 'c'
+ static const byte data[] = {SECTION(Unknown, 27), SRC_MAP, 9, BAD_SRC};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(0u, result.val->source_map_url.size());
+#undef BAD_SRC
+}
+
+TEST_F(WasmModuleVerifyTest, MultipleSourceMappingURLSections) {
+#define SRC 'a', 'b', 'c'
+ static const byte data[] = {SECTION(Unknown, 21),
+ SRC_MAP,
+ 3,
+ SRC,
+ SECTION(Unknown, 21),
+ SRC_MAP,
+ 3,
+ 'p',
+ 'q',
+ 'r'};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_TRUE(result.ok());
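+  // Only the first sourceMappingURL section is used; the duplicate is
+  // ignored.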
+ EXPECT_EQ(3u, result.val->source_map_url.size());
+ const char src[] = {SRC};
+ EXPECT_EQ(
+ 0,
+ strncmp(reinterpret_cast<const char*>(result.val->source_map_url.data()),
+ src, 3));
+#undef SRC
+}
+#undef SRC_MAP
+
+TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
+#define NAME_SECTION 4, 'n', 'a', 'm', 'e'
+ static const byte data[] = {SECTION(Unknown, 11),
+ NAME_SECTION,
+ 0,
+ 4,
+ 3,
+ 'a',
+ 'b',
+ 'c',
+ SECTION(Unknown, 12),
+ NAME_SECTION,
+ 0,
+ 5,
+ 4,
+ 'p',
+ 'q',
+ 'r',
+ 's'};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_TRUE(result.ok());
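+  // Only the first name section is honored; the later duplicate is ignored.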
+ EXPECT_EQ(3u, result.val->name.length());
+#undef NAME_SECTION
+}
+
#undef WASM_FEATURE_SCOPE
#undef WASM_FEATURE_SCOPE_VAL
#undef EXPECT_INIT_EXPR
diff --git a/deps/v8/test/unittests/wasm/trap-handler-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-unittest.cc
index 1b4ddf5bb0..07e3ca888d 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-unittest.cc
@@ -23,7 +23,7 @@ void CrashOnPurpose() { *reinterpret_cast<volatile int*>(42); }
// on failures.
class SignalHandlerFallbackTest : public ::testing::Test {
protected:
- virtual void SetUp() {
+ void SetUp() override {
struct sigaction action;
action.sa_sigaction = SignalHandler;
sigemptyset(&action.sa_mask);
@@ -32,7 +32,7 @@ class SignalHandlerFallbackTest : public ::testing::Test {
sigaction(SIGBUS, &action, &old_bus_action_);
}
- virtual void TearDown() {
+ void TearDown() override {
// be a good citizen and restore the old signal handler.
sigaction(SIGSEGV, &old_segv_action_, nullptr);
sigaction(SIGBUS, &old_bus_action_, nullptr);
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index cc66f14d9c..5d695c8275 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -17,36 +17,34 @@ namespace wasm_heap_unittest {
class DisjointAllocationPoolTest : public ::testing::Test {
public:
- Address A(size_t n) { return static_cast<Address>(n); }
- void CheckLooksLike(const DisjointAllocationPool& mem,
- std::vector<std::pair<size_t, size_t>> expectation);
- void CheckLooksLike(AddressRange range,
- std::pair<size_t, size_t> expectation);
- DisjointAllocationPool Make(std::vector<std::pair<size_t, size_t>> model);
+ void CheckPool(const DisjointAllocationPool& mem,
+ std::initializer_list<base::AddressRegion> expected_regions);
+ void CheckRange(base::AddressRegion region1, base::AddressRegion region2);
+ DisjointAllocationPool Make(
+ std::initializer_list<base::AddressRegion> regions);
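+  // Note: the base::AddressRegion literals used throughout these tests are
+  // {begin, size} pairs, whereas the old AddressRange literals were
+  // {start, end} pairs.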
};
-void DisjointAllocationPoolTest::CheckLooksLike(
+void DisjointAllocationPoolTest::CheckPool(
const DisjointAllocationPool& mem,
- std::vector<std::pair<size_t, size_t>> expectation) {
- const auto& ranges = mem.ranges();
- CHECK_EQ(ranges.size(), expectation.size());
- auto iter = expectation.begin();
- for (auto it = ranges.begin(), e = ranges.end(); it != e; ++it, ++iter) {
- CheckLooksLike(*it, *iter);
+ std::initializer_list<base::AddressRegion> expected_regions) {
+ const auto& regions = mem.regions();
+ CHECK_EQ(regions.size(), expected_regions.size());
+ auto iter = expected_regions.begin();
+ for (auto it = regions.begin(), e = regions.end(); it != e; ++it, ++iter) {
+ CHECK_EQ(*it, *iter);
}
}
-void DisjointAllocationPoolTest::CheckLooksLike(
- AddressRange range, std::pair<size_t, size_t> expectation) {
- CHECK_EQ(range.start, A(expectation.first));
- CHECK_EQ(range.end, A(expectation.second));
+void DisjointAllocationPoolTest::CheckRange(base::AddressRegion region1,
+ base::AddressRegion region2) {
+ CHECK_EQ(region1, region2);
}
DisjointAllocationPool DisjointAllocationPoolTest::Make(
- std::vector<std::pair<size_t, size_t>> model) {
+ std::initializer_list<base::AddressRegion> regions) {
DisjointAllocationPool ret;
- for (auto& pair : model) {
- ret.Merge({A(pair.first), A(pair.second)});
+ for (auto& region : regions) {
+ ret.Merge(region);
}
return ret;
}
@@ -54,90 +52,90 @@ DisjointAllocationPool DisjointAllocationPoolTest::Make(
TEST_F(DisjointAllocationPoolTest, ConstructEmpty) {
DisjointAllocationPool a;
CHECK(a.IsEmpty());
- CheckLooksLike(a, {});
- a.Merge({1, 5});
- CheckLooksLike(a, {{1, 5}});
+ CheckPool(a, {});
+ a.Merge({1, 4});
+ CheckPool(a, {{1, 4}});
}
TEST_F(DisjointAllocationPoolTest, ConstructWithRange) {
- DisjointAllocationPool a({1, 5});
+ DisjointAllocationPool a({1, 4});
CHECK(!a.IsEmpty());
- CheckLooksLike(a, {{1, 5}});
+ CheckPool(a, {{1, 4}});
}
TEST_F(DisjointAllocationPoolTest, SimpleExtract) {
- DisjointAllocationPool a = Make({{1, 5}});
- AddressRange b = a.Allocate(2);
- CheckLooksLike(a, {{3, 5}});
- CheckLooksLike(b, {1, 3});
+ DisjointAllocationPool a = Make({{1, 4}});
+ base::AddressRegion b = a.Allocate(2);
+ CheckPool(a, {{3, 2}});
+ CheckRange(b, {1, 2});
a.Merge(b);
- CheckLooksLike(a, {{1, 5}});
- CHECK_EQ(a.ranges().size(), 1);
- CHECK_EQ(a.ranges().front().start, A(1));
- CHECK_EQ(a.ranges().front().end, A(5));
+ CheckPool(a, {{1, 4}});
+ CHECK_EQ(a.regions().size(), 1);
+ CHECK_EQ(a.regions().front().begin(), 1);
+ CHECK_EQ(a.regions().front().end(), 5);
}
TEST_F(DisjointAllocationPoolTest, ExtractAll) {
- DisjointAllocationPool a({A(1), A(5)});
- AddressRange b = a.Allocate(4);
- CheckLooksLike(b, {1, 5});
+ DisjointAllocationPool a({1, 4});
+ base::AddressRegion b = a.Allocate(4);
+ CheckRange(b, {1, 4});
CHECK(a.IsEmpty());
a.Merge(b);
- CheckLooksLike(a, {{1, 5}});
+ CheckPool(a, {{1, 4}});
}
TEST_F(DisjointAllocationPoolTest, FailToExtract) {
- DisjointAllocationPool a = Make({{1, 5}});
- AddressRange b = a.Allocate(5);
- CheckLooksLike(a, {{1, 5}});
+ DisjointAllocationPool a = Make({{1, 4}});
+ base::AddressRegion b = a.Allocate(5);
+ CheckPool(a, {{1, 4}});
CHECK(b.is_empty());
}
TEST_F(DisjointAllocationPoolTest, FailToExtractExact) {
- DisjointAllocationPool a = Make({{1, 5}, {10, 14}});
- AddressRange b = a.Allocate(5);
- CheckLooksLike(a, {{1, 5}, {10, 14}});
+ DisjointAllocationPool a = Make({{1, 4}, {10, 4}});
+ base::AddressRegion b = a.Allocate(5);
+ CheckPool(a, {{1, 4}, {10, 4}});
CHECK(b.is_empty());
}
TEST_F(DisjointAllocationPoolTest, ExtractExact) {
- DisjointAllocationPool a = Make({{1, 5}, {10, 15}});
- AddressRange b = a.Allocate(5);
- CheckLooksLike(a, {{1, 5}});
- CheckLooksLike(b, {10, 15});
+ DisjointAllocationPool a = Make({{1, 4}, {10, 5}});
+ base::AddressRegion b = a.Allocate(5);
+ CheckPool(a, {{1, 4}});
+ CheckRange(b, {10, 5});
}
TEST_F(DisjointAllocationPoolTest, Merging) {
- DisjointAllocationPool a = Make({{10, 15}, {20, 25}});
- a.Merge({15, 20});
- CheckLooksLike(a, {{10, 25}});
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}});
+ a.Merge({15, 5});
+ CheckPool(a, {{10, 15}});
}
TEST_F(DisjointAllocationPoolTest, MergingMore) {
- DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
- a.Merge({15, 20});
- a.Merge({25, 30});
- CheckLooksLike(a, {{10, 35}});
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
+ a.Merge({15, 5});
+ a.Merge({25, 5});
+ CheckPool(a, {{10, 25}});
}
TEST_F(DisjointAllocationPoolTest, MergingSkip) {
- DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
- a.Merge({25, 30});
- CheckLooksLike(a, {{10, 15}, {20, 35}});
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
+ a.Merge({25, 5});
+ CheckPool(a, {{10, 5}, {20, 15}});
}
TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrc) {
- DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
- a.Merge({25, 30});
- a.Merge({35, 40});
- CheckLooksLike(a, {{10, 15}, {20, 40}});
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
+ a.Merge({25, 5});
+ a.Merge({35, 5});
+ CheckPool(a, {{10, 5}, {20, 20}});
}
TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
- DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
- a.Merge({25, 30});
- a.Merge({36, 40});
- CheckLooksLike(a, {{10, 15}, {20, 35}, {36, 40}});
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
+ a.Merge({25, 5});
+ a.Merge({36, 4});
+ CheckPool(a, {{10, 5}, {20, 15}, {36, 4}});
}
enum ModuleStyle : int { Fixed = 0, Growable = 1 };
@@ -200,7 +198,7 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
CHECK_EQ(0, manager.remaining_uncommitted_code_space());
ASSERT_DEATH_IF_SUPPORTED(AllocModule(&manager, 1 * page(), GetParam()),
- "OOM in NativeModule::AddOwnedCode");
+ "OOM in NativeModule::AllocateForCode commit");
}
TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
@@ -223,9 +221,12 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
CHECK_NOT_NULL(code);
CHECK_EQ(0, manager.remaining_uncommitted_code_space());
+ // This fails in "reservation" if we cannot extend the code space, or in
+ // "commit" it we can (since we hit the allocation limit in the
+ // WasmCodeManager). Hence don't check for that part of the OOM message.
ASSERT_DEATH_IF_SUPPORTED(
AddCode(native_module.get(), index++, 1 * kCodeAlignment),
- "OOM in NativeModule::AddOwnedCode");
+ "OOM in NativeModule::AllocateForCode");
}
TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
@@ -237,7 +238,7 @@ TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
WasmCode* code = AddCode(nm1.get(), 0, 2 * page() - kJumpTableSize);
CHECK_NOT_NULL(code);
ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 2 * page() - kJumpTableSize),
- "OOM in NativeModule::AddOwnedCode");
+ "OOM in NativeModule::AllocateForCode commit");
}
TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
@@ -264,7 +265,7 @@ TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
// grow.
ASSERT_DEATH_IF_SUPPORTED(
AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment),
- "OOM in NativeModule::AddOwnedCode");
+ "OOM in NativeModule::AllocateForCode");
} else {
// The module grows by one page. One page remains uncommitted.
CHECK_NOT_NULL(
diff --git a/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc b/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
index 28b35793f7..807fc40959 100644
--- a/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
@@ -28,7 +28,7 @@ TEST_F(WasmModuleBuilderTest, Regression_647329) {
// Test crashed with asan.
ZoneBuffer buffer(zone());
const size_t kSize = ZoneBuffer::kInitialSize * 3 + 4096 + 100;
- byte data[kSize];
+ byte data[kSize] = {0};
buffer.write(data, kSize);
}
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index 3571b62807..b8d8ed8bd8 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -26,7 +26,7 @@ class TestSuite(testsuite.TestSuite):
return TestCase
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def _get_files_params(self):
return [os.path.join(self.suite.root, self.path + self._get_suffix())]
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 1dd1c7217b..6bf33e90fb 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-7e2cdbd60dd1d10db95fb61b491ebf7b9f2c69e6 \ No newline at end of file
+0df32ecb1ad4141cc082f2b5575d8f88f3ae8c53 \ No newline at end of file
diff --git a/deps/v8/test/webkit/array-splice.js b/deps/v8/test/webkit/array-splice.js
index 045e39e379..208847eaf9 100644
--- a/deps/v8/test/webkit/array-splice.js
+++ b/deps/v8/test/webkit/array-splice.js
@@ -54,8 +54,3 @@ shouldBe("arr.splice(2, -1)", "[]")
shouldBe("arr", "['a','b','c']");
shouldBe("arr.splice(2, 100)", "['c']")
shouldBe("arr", "['a','b']");
-
-// Check this doesn't crash.
-try {
- String(Array(0xFFFFFFFD).splice(0));
-} catch (e) { }
diff --git a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
index 44bd2e7c36..16706e43dd 100644
--- a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
+++ b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
@@ -28,10 +28,10 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS [1].toString() is '1'
PASS [1].toLocaleString() is 'toLocaleString'
-FAIL [1].toLocaleString() should be 1. Threw exception TypeError: string is not a function
+FAIL [1].toLocaleString() should be 1. Threw exception TypeError: string "invalid" is not a function
PASS [/r/].toString() is 'toString2'
PASS [/r/].toLocaleString() is 'toLocaleString2'
-FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: string is not a function
+FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: string "invalid" is not a function
PASS caught is true
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/string-trim.js b/deps/v8/test/webkit/string-trim.js
index fd9c1d1557..a6c08070cf 100644
--- a/deps/v8/test/webkit/string-trim.js
+++ b/deps/v8/test/webkit/string-trim.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-string-trimming
-
description("This test checks the `trim`, `trimStart`/`trimLeft`, and `trimEnd`/`trimRight` methods on `String.prototype`.");
// References to trim(), trimLeft() and trimRight() functions for testing Function's *.call() and *.apply() methods
diff --git a/deps/v8/test/webkit/testcfg.py b/deps/v8/test/webkit/testcfg.py
index d76527276a..5d564b69bc 100644
--- a/deps/v8/test/webkit/testcfg.py
+++ b/deps/v8/test/webkit/testcfg.py
@@ -61,7 +61,7 @@ class TestSuite(testsuite.TestSuite):
return TestCase
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index 412784d252..cc856007e2 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -116,10 +116,10 @@
}], # arch == arm64 and msan
##############################################################################
-['(variant == nooptimization or variant == stress or variant == no_liftoff) and (arch == arm or arch == arm64) and simulator_run', {
+['variant in [no_liftoff, nooptimization, stress, stress_background_compile] and (arch == arm or arch == arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
'dfg-double-vote-fuzz': [SKIP],
-}], # (variant == nooptimization or variant == stress or variant == no_liftoff) and (arch == arm or arch == arm64) and simulator_run
+}], # variant in [no_liftoff, nooptimization, stress, stress_background_compile] and (arch == arm or arch == arm64) and simulator_run
##############################################################################
['gcov_coverage', {
diff --git a/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1 b/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1
index 93942d8908..aaed4ddbad 100644
--- a/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1
+++ b/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1
@@ -1 +1 @@
-81fd042fef3e2ff2e807a8c1fb4ea621b665d6b3 \ No newline at end of file
+c956d54d404eb1d35b3a4d88b7bfd34f2f06f7af \ No newline at end of file
diff --git a/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1 b/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1
index 6bc9f8c8c1..dfa4fefbd5 100644
--- a/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1
+++ b/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1
@@ -1 +1 @@
-dbe488f8a5c2e11573a38e8b01e8c96bebed3365 \ No newline at end of file
+69bedb1192a03126687f75cb6cf1717758a1a59f \ No newline at end of file
diff --git a/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h b/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
index 71377f87d4..e651671ebd 100644
--- a/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
+++ b/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
@@ -26,8 +26,7 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
+
//
// Google C++ Testing and Mocking Framework definitions useful in production code.
// GOOGLETEST_CM0003 DO NOT DELETE
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index 65d0b8348b..46848e3f49 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -124,7 +124,7 @@ module array {
// copied values from the prototype chain to the receiver if they were visible
// through a hole.
- builtin Load<ElementsAccessor : type>(
+ builtin Load<ElementsAccessor: type>(
context: Context, sortState: FixedArray, elements: HeapObject,
index: Smi): Object {
return GetProperty(context, elements, index);
@@ -133,14 +133,14 @@ module array {
Load<FastPackedSmiElements>(
context: Context, sortState: FixedArray, elements: HeapObject,
index: Smi): Object {
- const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
return elems[index];
}
Load<FastSmiOrObjectElements>(
context: Context, sortState: FixedArray, elements: HeapObject,
index: Smi): Object {
- const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
const result: Object = elems[index];
if (IsTheHole(result)) {
// The pre-processing step removed all holes by compacting all elements
@@ -155,7 +155,7 @@ module array {
context: Context, sortState: FixedArray, elements: HeapObject,
index: Smi): Object {
try {
- const elems: FixedDoubleArray = unsafe_cast<FixedDoubleArray>(elements);
+ const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
const value: float64 =
LoadDoubleWithHoleCheck(elems, index) otherwise Bailout;
return AllocateHeapNumberWithValue(value);
@@ -173,11 +173,11 @@ module array {
index: Smi): Object {
try {
const dictionary: NumberDictionary =
- unsafe_cast<NumberDictionary>(elements);
- const intptr_index: intptr = convert<intptr>(index);
+ UnsafeCast<NumberDictionary>(elements);
+ const intptrIndex: intptr = Convert<intptr>(index);
const value: Object =
- BasicLoadNumberDictionaryElement(dictionary, intptr_index)
- otherwise Bailout, Bailout;
+ BasicLoadNumberDictionaryElement(dictionary, intptrIndex)
+ otherwise Bailout, Bailout;
return value;
}
label Bailout {
@@ -189,11 +189,11 @@ module array {
context: Context, sortState: FixedArray, elements: HeapObject,
index: Smi): Object {
assert(IsFixedArray(elements));
- const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
return elems[index];
}
- builtin Store<ElementsAccessor : type>(
+ builtin Store<ElementsAccessor: type>(
context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
value: Object): Smi {
SetProperty(context, elements, index, value);
@@ -203,7 +203,7 @@ module array {
Store<FastPackedSmiElements>(
context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
value: Object): Smi {
- const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
StoreFixedArrayElementSmi(elems, index, value, SKIP_WRITE_BARRIER);
return kSuccess;
}
@@ -211,7 +211,7 @@ module array {
Store<FastSmiOrObjectElements>(
context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
value: Object): Smi {
- const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
elems[index] = value;
return kSuccess;
}
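The Load and Store builtins above are specialized per elements kind; the chosen specializations are later cached in the sortState so the merge loops can call them indirectly without re-checking the elements kind on every access. A minimal Python sketch of that dispatch idea (function and key names are illustrative, not the Torque or V8 API):

# Sketch: select the accessor once, cache it in the sort state, and call
# it indirectly afterwards - mirroring sortState[kLoadFnIdx] above.
def load_fast(elements, index):
    return elements[index]            # packed backing store: direct read

def load_dictionary(elements, index):
    return elements.get(index)        # sparse backing store: keyed lookup

def make_sort_state(elements, sparse=False):
    return {"elements": elements,
            "load": load_dictionary if sparse else load_fast}

state = make_sort_state([30, 10, 20])
assert state["load"](state["elements"], 2) == 20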
@@ -219,10 +219,10 @@ module array {
Store<FastDoubleElements>(
context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
value: Object): Smi {
- const elems: FixedDoubleArray = unsafe_cast<FixedDoubleArray>(elements);
- const heap_val: HeapNumber = unsafe_cast<HeapNumber>(value);
+ const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
+ const heapVal: HeapNumber = UnsafeCast<HeapNumber>(value);
// Make sure we do not store signalling NaNs into double arrays.
- const val: float64 = Float64SilenceNaN(convert<float64>(heap_val));
+ const val: float64 = Float64SilenceNaN(Convert<float64>(heapVal));
StoreFixedDoubleArrayElementWithSmiIndex(elems, index, val);
return kSuccess;
}
@@ -230,12 +230,11 @@ module array {
Store<DictionaryElements>(
context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
value: Object): Smi {
- const dictionary: NumberDictionary =
- unsafe_cast<NumberDictionary>(elements);
- const intptr_index: intptr = convert<intptr>(index);
+ const dictionary: NumberDictionary = UnsafeCast<NumberDictionary>(elements);
+ const intptrIndex: intptr = Convert<intptr>(index);
try {
- BasicStoreNumberDictionaryElement(dictionary, intptr_index, value)
- otherwise Fail, Fail, ReadOnly;
+ BasicStoreNumberDictionaryElement(dictionary, intptrIndex, value)
+ otherwise Fail, Fail, ReadOnly;
return kSuccess;
}
label ReadOnly {
@@ -253,29 +252,29 @@ module array {
Store<TempArrayElements>(
context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
value: Object): Smi {
- const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
elems[index] = value;
return kSuccess;
}
extern macro UnsafeCastObjectToCompareBuiltinFn(Object): CompareBuiltinFn;
- unsafe_cast<CompareBuiltinFn>(o: Object): CompareBuiltinFn {
+ UnsafeCast<CompareBuiltinFn>(o: Object): CompareBuiltinFn {
return UnsafeCastObjectToCompareBuiltinFn(o);
}
extern macro UnsafeCastObjectToLoadFn(Object): LoadFn;
- unsafe_cast<LoadFn>(o: Object): LoadFn {
+ UnsafeCast<LoadFn>(o: Object): LoadFn {
return UnsafeCastObjectToLoadFn(o);
}
extern macro UnsafeCastObjectToStoreFn(Object): StoreFn;
- unsafe_cast<StoreFn>(o: Object): StoreFn {
+ UnsafeCast<StoreFn>(o: Object): StoreFn {
return UnsafeCastObjectToStoreFn(o);
}
extern macro UnsafeCastObjectToCanUseSameAccessorFn(Object):
CanUseSameAccessorFn;
- unsafe_cast<CanUseSameAccessorFn>(o: Object): CanUseSameAccessorFn {
+ UnsafeCast<CanUseSameAccessorFn>(o: Object): CanUseSameAccessorFn {
return UnsafeCastObjectToCanUseSameAccessorFn(o);
}
@@ -284,8 +283,7 @@ module array {
assert(comparefn == Undefined);
if (TaggedIsSmi(x) && TaggedIsSmi(y)) {
- // TODO(szuend): Replace with a fast CallCFunction call.
- return SmiLexicographicCompare(context, x, y);
+ return SmiLexicographicCompare(UnsafeCast<Smi>(x), UnsafeCast<Smi>(y));
}
// 5. Let xString be ? ToString(x).
@@ -311,7 +309,7 @@ module array {
builtin SortCompareUserFn(
context: Context, comparefn: Object, x: Object, y: Object): Number {
assert(comparefn != Undefined);
- const cmpfn: Callable = unsafe_cast<Callable>(comparefn);
+ const cmpfn: Callable = UnsafeCast<Callable>(comparefn);
// a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
const v: Number =
@@ -324,16 +322,16 @@ module array {
return v;
}
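When comparefn is undefined, elements are ordered by their string representations, and the patch lets two Smis short-circuit through SmiLexicographicCompare instead of a runtime call. A hedged Python sketch of that default ordering:

# Sketch of the default comparator: compare ToString(x) and ToString(y),
# which is why [10, 9, 1] sorts to [1, 10, 9] without a comparefn.
def sort_compare_default(x, y):
    xs, ys = str(x), str(y)           # spec: xString = ? ToString(x)
    if xs < ys:
        return -1
    return 1 if xs > ys else 0

assert sort_compare_default(10, 9) == -1   # '10' < '9' lexicographically
assert sorted([10, 9, 1], key=str) == [1, 10, 9]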
- builtin CanUseSameAccessor<ElementsAccessor : type>(
+ builtin CanUseSameAccessor<ElementsAccessor: type>(
context: Context, receiver: JSReceiver, initialReceiverMap: Object,
initialReceiverLength: Number): Boolean {
assert(IsJSArray(receiver));
- let a: JSArray = unsafe_cast<JSArray>(receiver);
+ let a: JSArray = UnsafeCast<JSArray>(receiver);
if (a.map != initialReceiverMap) return False;
assert(TaggedIsSmi(initialReceiverLength));
- let originalLength: Smi = unsafe_cast<Smi>(initialReceiverLength);
+ let originalLength: Smi = UnsafeCast<Smi>(initialReceiverLength);
if (a.length_fast != originalLength) return False;
return True;
@@ -349,27 +347,27 @@ module array {
CanUseSameAccessor<DictionaryElements>(
context: Context, receiver: JSReceiver, initialReceiverMap: Object,
initialReceiverLength: Number): Boolean {
- let obj: JSReceiver = unsafe_cast<JSReceiver>(receiver);
+ let obj: JSReceiver = UnsafeCast<JSReceiver>(receiver);
return SelectBooleanConstant(obj.map == initialReceiverMap);
}
macro CallCompareFn(
context: Context, sortState: FixedArray, x: Object, y: Object): Number
- labels Bailout {
+ labels Bailout {
const userCmpFn: Object = sortState[kUserCmpFnIdx];
const sortCompare: CompareBuiltinFn =
- unsafe_cast<CompareBuiltinFn>(sortState[kSortComparePtrIdx]);
+ UnsafeCast<CompareBuiltinFn>(sortState[kSortComparePtrIdx]);
const result: Number = sortCompare(context, userCmpFn, x, y);
const receiver: JSReceiver = GetReceiver(sortState);
const initialReceiverMap: Object = sortState[kInitialReceiverMapIdx];
const initialReceiverLength: Number =
- unsafe_cast<Number>(sortState[kInitialReceiverLengthIdx]);
- const CanUseSameAccessor: CanUseSameAccessorFn =
+ UnsafeCast<Number>(sortState[kInitialReceiverLengthIdx]);
+ const canUseSameAccessorFn: CanUseSameAccessorFn =
GetCanUseSameAccessorFn(sortState);
- if (!CanUseSameAccessor(
+ if (!canUseSameAccessorFn(
context, receiver, initialReceiverMap, initialReceiverLength)) {
goto Bailout;
}
@@ -384,40 +382,40 @@ module array {
const receiver: JSReceiver = GetReceiver(sortState);
if (sortState[kAccessorIdx] == kGenericElementsAccessorId) return receiver;
- const object: JSObject = unsafe_cast<JSObject>(receiver);
+ const object: JSObject = UnsafeCast<JSObject>(receiver);
return object.elements;
}
macro GetLoadFn(sortState: FixedArray): LoadFn {
- return unsafe_cast<LoadFn>(sortState[kLoadFnIdx]);
+ return UnsafeCast<LoadFn>(sortState[kLoadFnIdx]);
}
macro GetStoreFn(sortState: FixedArray): StoreFn {
- return unsafe_cast<StoreFn>(sortState[kStoreFnIdx]);
+ return UnsafeCast<StoreFn>(sortState[kStoreFnIdx]);
}
macro GetCanUseSameAccessorFn(sortState: FixedArray): CanUseSameAccessorFn {
- return unsafe_cast<CanUseSameAccessorFn>(
+ return UnsafeCast<CanUseSameAccessorFn>(
sortState[kCanUseSameAccessorFnIdx]);
}
macro GetReceiver(sortState: FixedArray): JSReceiver {
- return unsafe_cast<JSReceiver>(sortState[kReceiverIdx]);
+ return UnsafeCast<JSReceiver>(sortState[kReceiverIdx]);
}
// Returns the temporary array without changing its size.
macro GetTempArray(sortState: FixedArray): FixedArray {
- return unsafe_cast<FixedArray>(sortState[kTempArrayIdx]);
+ return UnsafeCast<FixedArray>(sortState[kTempArrayIdx]);
}
// Re-loading the stack-size is done in a few places. The small macro allows
// for easier invariant checks at all use sites.
macro GetPendingRunsSize(sortState: FixedArray): Smi {
assert(TaggedIsSmi(sortState[kPendingRunsSizeIdx]));
- const stack_size: Smi = unsafe_cast<Smi>(sortState[kPendingRunsSizeIdx]);
+ const stackSize: Smi = UnsafeCast<Smi>(sortState[kPendingRunsSizeIdx]);
- assert(stack_size >= 0);
- return stack_size;
+ assert(stackSize >= 0);
+ return stackSize;
}
macro SetPendingRunsSize(sortState: FixedArray, value: Smi) {
@@ -425,7 +423,7 @@ module array {
}
macro GetPendingRunBase(pendingRuns: FixedArray, run: Smi): Smi {
- return unsafe_cast<Smi>(pendingRuns[run << 1]);
+ return UnsafeCast<Smi>(pendingRuns[run << 1]);
}
macro SetPendingRunBase(pendingRuns: FixedArray, run: Smi, value: Smi) {
@@ -433,7 +431,7 @@ module array {
}
macro GetPendingRunLength(pendingRuns: FixedArray, run: Smi): Smi {
- return unsafe_cast<Smi>(pendingRuns[(run << 1) + 1]);
+ return UnsafeCast<Smi>(pendingRuns[(run << 1) + 1]);
}
macro SetPendingRunLength(pendingRuns: FixedArray, run: Smi, value: Smi) {
@@ -443,38 +441,37 @@ module array {
macro PushRun(sortState: FixedArray, base: Smi, length: Smi) {
assert(GetPendingRunsSize(sortState) < kMaxMergePending);
- const stack_size: Smi = GetPendingRunsSize(sortState);
- const pending_runs: FixedArray =
- unsafe_cast<FixedArray>(sortState[kPendingRunsIdx]);
+ const stackSize: Smi = GetPendingRunsSize(sortState);
+ const pendingRuns: FixedArray =
+ UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]);
- SetPendingRunBase(pending_runs, stack_size, base);
- SetPendingRunLength(pending_runs, stack_size, length);
+ SetPendingRunBase(pendingRuns, stackSize, base);
+ SetPendingRunLength(pendingRuns, stackSize, length);
- SetPendingRunsSize(sortState, stack_size + 1);
+ SetPendingRunsSize(sortState, stackSize + 1);
}
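The pending-runs stack is a flat FixedArray of (base, length) pairs, which is exactly what the `run << 1` and `(run << 1) + 1` indices above address. A small Python sketch of PushRun on that layout (the capacity of 85 is TimSort's usual bound and illustrative here):

# Sketch: pending runs stored as a flat array of (base, length) pairs.
K_MAX_MERGE_PENDING = 85
pending_runs = [0] * (2 * K_MAX_MERGE_PENDING)

def push_run(stack_size, base, length):
    pending_runs[stack_size << 1] = base              # SetPendingRunBase
    pending_runs[(stack_size << 1) + 1] = length      # SetPendingRunLength
    return stack_size + 1                             # new stack size

size = push_run(0, 0, 32)
assert pending_runs[0] == 0 and pending_runs[1] == 32 and size == 1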
// Returns the temporary array and makes sure that it is big enough.
// TODO(szuend): Implement a better re-size strategy.
macro GetTempArray(sortState: FixedArray, requestedSize: Smi): FixedArray {
- const min_size: Smi = SmiMax(kSortStateTempSize, requestedSize);
+ const minSize: Smi = SmiMax(kSortStateTempSize, requestedSize);
- const current_size: Smi = unsafe_cast<Smi>(sortState[kTempArraySizeIdx]);
- if (current_size >= min_size) {
+ const currentSize: Smi = UnsafeCast<Smi>(sortState[kTempArraySizeIdx]);
+ if (currentSize >= minSize) {
return GetTempArray(sortState);
}
- const temp_array: FixedArray =
- AllocateZeroedFixedArray(convert<intptr>(min_size));
- FillFixedArrayWithSmiZero(temp_array, min_size);
+ const tempArray: FixedArray =
+ AllocateZeroedFixedArray(Convert<intptr>(minSize));
- sortState[kTempArraySizeIdx] = min_size;
- sortState[kTempArrayIdx] = temp_array;
- return temp_array;
+ sortState[kTempArraySizeIdx] = minSize;
+ sortState[kTempArrayIdx] = tempArray;
+ return tempArray;
}
// This macro jumps to the Bailout label iff kBailoutStatus is kFailure.
macro EnsureSuccess(sortState: FixedArray) labels Bailout {
- const status: Smi = unsafe_cast<Smi>(sortState[kBailoutStatusIdx]);
+ const status: Smi = UnsafeCast<Smi>(sortState[kBailoutStatusIdx]);
if (status == kFailure) goto Bailout;
}
@@ -489,17 +486,18 @@ module array {
// or the return value.
macro CallLoad(
- context: Context, sortState: FixedArray, Load: LoadFn,
- elements: HeapObject, index: Smi): Object labels Bailout {
- const result: Object = Load(context, sortState, elements, index);
+ context: Context, sortState: FixedArray, load: LoadFn,
+ elements: HeapObject, index: Smi): Object
+ labels Bailout {
+ const result: Object = load(context, sortState, elements, index);
EnsureSuccess(sortState) otherwise Bailout;
return result;
}
macro CallStore(
- context: Context, sortState: FixedArray, Store: StoreFn,
+ context: Context, sortState: FixedArray, store: StoreFn,
elements: HeapObject, index: Smi, value: Object) labels Bailout {
- Store(context, sortState, elements, index, value);
+ store(context, sortState, elements, index, value);
EnsureSuccess(sortState) otherwise Bailout;
}
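CallLoad and CallStore wrap every access that may run user code and then re-check the bailout-status slot, because a misbehaving comparison function can invalidate the fast path at any point. A minimal sketch of that pattern, with a Python exception standing in for the Bailout label:

# Sketch: each load is followed by a status check; kFailure unwinds the
# caller, like `EnsureSuccess(sortState) otherwise Bailout` above.
K_SUCCESS, K_FAILURE = 0, 1

class Bailout(Exception):
    pass

def call_load(sort_state, load, elements, index):
    result = load(elements, index)
    if sort_state["bailout_status"] == K_FAILURE:
        raise Bailout()
    return result

state = {"bailout_status": K_SUCCESS}
assert call_load(state, lambda e, i: e[i], [7, 8], 1) == 8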
@@ -521,21 +519,21 @@ module array {
}
macro CallGallopRight(
- context: Context, sortState: FixedArray, Load: LoadFn, key: Object,
+ context: Context, sortState: FixedArray, load: LoadFn, key: Object,
base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi
- labels Bailout {
+ labels Bailout {
const result: Smi = GallopRight(
- context, sortState, Load, key, base, length, hint, useTempArray);
+ context, sortState, load, key, base, length, hint, useTempArray);
EnsureSuccess(sortState) otherwise Bailout;
return result;
}
macro CallGallopLeft(
- context: Context, sortState: FixedArray, Load: LoadFn, key: Object,
+ context: Context, sortState: FixedArray, load: LoadFn, key: Object,
base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi
- labels Bailout {
+ labels Bailout {
const result: Smi = GallopLeft(
- context, sortState, Load, key, base, length, hint, useTempArray);
+ context, sortState, load, key, base, length, hint, useTempArray);
EnsureSuccess(sortState) otherwise Bailout;
return result;
}
@@ -551,15 +549,15 @@ module array {
context: Context, sortState: FixedArray): Smi {
const receiver: JSReceiver = GetReceiver(sortState);
- if (IsJSArray(receiver)) return unsafe_cast<JSArray>(receiver).length_fast;
+ if (IsJSArray(receiver)) return UnsafeCast<JSArray>(receiver).length_fast;
const len: Number =
- ToLength_Inline(context, GetProperty(context, receiver, 'length'));
- return unsafe_cast<Smi>(len);
+ ToLength_Inline(context, GetProperty(context, receiver, kLengthString));
+ return UnsafeCast<Smi>(len);
}
macro CopyToTempArray(
- context: Context, sortState: FixedArray, Load: LoadFn,
+ context: Context, sortState: FixedArray, load: LoadFn,
srcElements: HeapObject, srcPos: Smi, tempArray: FixedArray, dstPos: Smi,
length: Smi)
labels Bailout {
@@ -568,15 +566,15 @@ module array {
assert(srcPos <= GetReceiverLengthProperty(context, sortState) - length);
assert(dstPos <= tempArray.length - length);
- let src_idx: Smi = srcPos;
- let dst_idx: Smi = dstPos;
+ let srcIdx: Smi = srcPos;
+ let dstIdx: Smi = dstPos;
let to: Smi = srcPos + length;
- while (src_idx < to) {
+ while (srcIdx < to) {
let element: Object =
- CallLoad(context, sortState, Load, srcElements, src_idx++)
- otherwise Bailout;
- tempArray[dst_idx++] = element;
+ CallLoad(context, sortState, load, srcElements, srcIdx++)
+ otherwise Bailout;
+ tempArray[dstIdx++] = element;
}
}
@@ -588,17 +586,17 @@ module array {
assert(srcPos <= tempArray.length - length);
assert(dstPos <= GetReceiverLengthProperty(context, sortState) - length);
- let Store: StoreFn = GetStoreFn(sortState);
+ let store: StoreFn = GetStoreFn(sortState);
- let src_idx: Smi = srcPos;
- let dst_idx: Smi = dstPos;
+ let srcIdx: Smi = srcPos;
+ let dstIdx: Smi = dstPos;
let to: Smi = srcPos + length;
try {
- while (src_idx < to) {
+ while (srcIdx < to) {
CallStore(
- context, sortState, Store, dstElements, dst_idx++,
- tempArray[src_idx++])
- otherwise Bailout;
+ context, sortState, store, dstElements, dstIdx++,
+ tempArray[srcIdx++])
+ otherwise Bailout;
}
return kSuccess;
}
@@ -616,25 +614,25 @@ module array {
assert(dstPos <= GetReceiverLengthProperty(context, sortState) - length);
try {
- let Load: LoadFn = GetLoadFn(sortState);
- let Store: StoreFn = GetStoreFn(sortState);
+ let load: LoadFn = GetLoadFn(sortState);
+ let store: StoreFn = GetStoreFn(sortState);
if (srcPos < dstPos) {
- let src_idx: Smi = srcPos + length - 1;
- let dst_idx: Smi = dstPos + length - 1;
- while (src_idx >= srcPos) {
+ let srcIdx: Smi = srcPos + length - 1;
+ let dstIdx: Smi = dstPos + length - 1;
+ while (srcIdx >= srcPos) {
CopyElement(
- context, sortState, Load, Store, elements, src_idx--, dst_idx--)
- otherwise Bailout;
+ context, sortState, load, store, elements, srcIdx--, dstIdx--)
+ otherwise Bailout;
}
} else {
- let src_idx: Smi = srcPos;
- let dst_idx: Smi = dstPos;
+ let srcIdx: Smi = srcPos;
+ let dstIdx: Smi = dstPos;
let to: Smi = srcPos + length;
- while (src_idx < to) {
+ while (srcIdx < to) {
CopyElement(
- context, sortState, Load, Store, elements, src_idx++, dst_idx++)
- otherwise Bailout;
+ context, sortState, load, store, elements, srcIdx++, dstIdx++)
+ otherwise Bailout;
}
}
return kSuccess;
@@ -662,8 +660,8 @@ module array {
try {
let elements: HeapObject = ReloadElements(sortState);
- const Load: LoadFn = GetLoadFn(sortState);
- const Store: StoreFn = GetStoreFn(sortState);
+ const load: LoadFn = GetLoadFn(sortState);
+ const store: StoreFn = GetStoreFn(sortState);
let start: Smi = low == startArg ? (startArg + 1) : startArg;
@@ -673,8 +671,8 @@ module array {
let right: Smi = start;
const pivot: Object =
- CallLoad(context, sortState, Load, elements, right)
- otherwise Bailout;
+ CallLoad(context, sortState, load, elements, right)
+ otherwise Bailout;
// Invariants:
// pivot >= all in [low, left).
@@ -684,12 +682,12 @@ module array {
// Find pivot insertion point.
while (left < right) {
const mid: Smi = left + ((right - left) >>> 1);
- const mid_element: Object =
- CallLoad(context, sortState, Load, elements, mid)
- otherwise Bailout;
+ const midElement: Object =
+ CallLoad(context, sortState, load, elements, mid)
+ otherwise Bailout;
const order: Number =
- CallCompareFn(context, sortState, pivot, mid_element)
- otherwise Bailout;
+ CallCompareFn(context, sortState, pivot, midElement)
+ otherwise Bailout;
elements = ReloadElements(sortState);
if (order < 0) {
@@ -709,11 +707,11 @@ module array {
// sort is stable.
// Slide over to make room.
for (let p: Smi = start; p > left; --p) {
- CopyElement(context, sortState, Load, Store, elements, p - 1, p)
- otherwise Bailout;
+ CopyElement(context, sortState, load, store, elements, p - 1, p)
+ otherwise Bailout;
}
- CallStore(context, sortState, Store, elements, left, pivot)
- otherwise Bailout;
+ CallStore(context, sortState, store, elements, left, pivot)
+ otherwise Bailout;
}
return kSuccess;
}
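BinaryInsertionSort keeps [low, start) sorted and inserts each following element by binary search, resolving ties to the right so equal elements keep their order. The same loop as a plain Python sketch (no accessor indirection or bailouts):

# Sketch of the binary insertion sort used to extend short runs.
def binary_insertion_sort(a, low, start, high):
    if start == low:
        start += 1
    for i in range(start, high):
        pivot = a[i]
        left, right = low, i
        while left < right:               # pivot >= everything in a[low:left]
            mid = left + ((right - left) >> 1)
            if pivot < a[mid]:
                right = mid
            else:
                left = mid + 1            # ties go right: stable
        a[left + 1:i + 1] = a[left:i]     # slide over to make room
        a[left] = pivot

xs = [3, 1, 2, 5, 4]
binary_insertion_sort(xs, 0, 1, len(xs))
assert xs == [1, 2, 3, 4, 5]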
@@ -741,152 +739,150 @@ module array {
// length is always an ascending sequence.
macro CountAndMakeRun(
context: Context, sortState: FixedArray, lowArg: Smi, high: Smi): Smi
- labels Bailout {
+ labels Bailout {
assert(lowArg < high);
let elements: HeapObject = ReloadElements(sortState);
- const Load: LoadFn = GetLoadFn(sortState);
- const Store: StoreFn = GetStoreFn(sortState);
+ const load: LoadFn = GetLoadFn(sortState);
+ const store: StoreFn = GetStoreFn(sortState);
let low: Smi = lowArg + 1;
if (low == high) return 1;
- let run_length: Smi = 2;
+ let runLength: Smi = 2;
- const element_low: Object =
- CallLoad(context, sortState, Load, elements, low) otherwise Bailout;
- const element_low_pred: Object =
- CallLoad(context, sortState, Load, elements, low - 1) otherwise Bailout;
+ const elementLow: Object =
+ CallLoad(context, sortState, load, elements, low) otherwise Bailout;
+ const elementLowPred: Object =
+ CallLoad(context, sortState, load, elements, low - 1) otherwise Bailout;
let order: Number =
- CallCompareFn(context, sortState, element_low, element_low_pred)
- otherwise Bailout;
+ CallCompareFn(context, sortState, elementLow, elementLowPred)
+ otherwise Bailout;
elements = ReloadElements(sortState);
// TODO(szuend): Replace with "order < 0" once Torque supports it.
// Currently the operator<(Number, Number) has return type
// 'never' and uses two labels to branch.
- const is_descending: bool = order < 0 ? true : false;
+ const isDescending: bool = order < 0 ? true : false;
- let previous_element: Object = element_low;
+ let previousElement: Object = elementLow;
for (let idx: Smi = low + 1; idx < high; ++idx) {
- const current_element: Object =
- CallLoad(context, sortState, Load, elements, idx) otherwise Bailout;
- order =
- CallCompareFn(context, sortState, current_element, previous_element)
- otherwise Bailout;
+ const currentElement: Object =
+ CallLoad(context, sortState, load, elements, idx) otherwise Bailout;
+ order = CallCompareFn(context, sortState, currentElement, previousElement)
+ otherwise Bailout;
elements = ReloadElements(sortState);
- if (is_descending) {
+ if (isDescending) {
if (order >= 0) break;
} else {
if (order < 0) break;
}
- previous_element = current_element;
- ++run_length;
+ previousElement = currentElement;
+ ++runLength;
}
- if (is_descending) {
+ if (isDescending) {
ReverseRange(
- context, sortState, Load, Store, elements, lowArg,
- lowArg + run_length)
- otherwise Bailout;
+ context, sortState, load, store, elements, lowArg, lowArg + runLength)
+ otherwise Bailout;
}
- return run_length;
+ return runLength;
}
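CountAndMakeRun scans forward from lowArg for the longest natural run; only strictly descending runs count as descending (so the reversal cannot reorder equal elements), and they are reversed in place, leaving every run ascending. A simplified sketch with `<` in place of the user comparison:

# Sketch: count the natural run at `low`, reversing a descending one.
def count_and_make_run(a, low, high):
    if low + 1 == high:
        return 1
    run_length = 2
    descending = a[low + 1] < a[low]
    for i in range(low + 2, high):
        stop = not (a[i] < a[i - 1]) if descending else a[i] < a[i - 1]
        if stop:
            break
        run_length += 1
    if descending:
        a[low:low + run_length] = a[low:low + run_length][::-1]
    return run_length

xs = [5, 4, 3, 9, 1]
assert count_and_make_run(xs, 0, len(xs)) == 3 and xs[:3] == [3, 4, 5]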
macro ReverseRange(
- context: Context, sortState: FixedArray, Load: LoadFn, Store: StoreFn,
+ context: Context, sortState: FixedArray, load: LoadFn, store: StoreFn,
elements: HeapObject, from: Smi, to: Smi)
labels Bailout {
let low: Smi = from;
let high: Smi = to - 1;
while (low < high) {
- const element_low: Object =
- CallLoad(context, sortState, Load, elements, low) otherwise Bailout;
- const element_high: Object =
- CallLoad(context, sortState, Load, elements, high) otherwise Bailout;
- CallStore(context, sortState, Store, elements, low++, element_high)
- otherwise Bailout;
- CallStore(context, sortState, Store, elements, high--, element_low)
- otherwise Bailout;
+ const elementLow: Object =
+ CallLoad(context, sortState, load, elements, low) otherwise Bailout;
+ const elementHigh: Object =
+ CallLoad(context, sortState, load, elements, high) otherwise Bailout;
+ CallStore(context, sortState, store, elements, low++, elementHigh)
+ otherwise Bailout;
+ CallStore(context, sortState, store, elements, high--, elementLow)
+ otherwise Bailout;
}
}
// Merges the two runs at stack indices i and i + 1.
// Returns kFailure if we need to bailout, kSuccess otherwise.
builtin MergeAt(context: Context, sortState: FixedArray, i: Smi): Smi {
- const stack_size: Smi = GetPendingRunsSize(sortState);
+ const stackSize: Smi = GetPendingRunsSize(sortState);
// We are only allowed to either merge the two top-most runs, or leave
    // the top-most run alone and merge the two next runs.
- assert(stack_size >= 2);
+ assert(stackSize >= 2);
assert(i >= 0);
- assert(i == stack_size - 2 || i == stack_size - 3);
+ assert(i == stackSize - 2 || i == stackSize - 3);
let elements: HeapObject = ReloadElements(sortState);
- const Load: LoadFn = GetLoadFn(sortState);
-
- const pending_runs: FixedArray =
- unsafe_cast<FixedArray>(sortState[kPendingRunsIdx]);
- let base_a: Smi = GetPendingRunBase(pending_runs, i);
- let length_a: Smi = GetPendingRunLength(pending_runs, i);
- let base_b: Smi = GetPendingRunBase(pending_runs, i + 1);
- let length_b: Smi = GetPendingRunLength(pending_runs, i + 1);
- assert(length_a > 0 && length_b > 0);
- assert(base_a + length_a == base_b);
+ const load: LoadFn = GetLoadFn(sortState);
+
+ const pendingRuns: FixedArray =
+ UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]);
+ let baseA: Smi = GetPendingRunBase(pendingRuns, i);
+ let lengthA: Smi = GetPendingRunLength(pendingRuns, i);
+ let baseB: Smi = GetPendingRunBase(pendingRuns, i + 1);
+ let lengthB: Smi = GetPendingRunLength(pendingRuns, i + 1);
+ assert(lengthA > 0 && lengthB > 0);
+ assert(baseA + lengthA == baseB);
// Record the length of the combined runs; if i is the 3rd-last run now,
// also slide over the last run (which isn't involved in this merge).
// The current run i + 1 goes away in any case.
- SetPendingRunLength(pending_runs, i, length_a + length_b);
- if (i == stack_size - 3) {
- const base: Smi = GetPendingRunBase(pending_runs, i + 2);
- const length: Smi = GetPendingRunLength(pending_runs, i + 2);
- SetPendingRunBase(pending_runs, i + 1, base);
- SetPendingRunLength(pending_runs, i + 1, length);
+ SetPendingRunLength(pendingRuns, i, lengthA + lengthB);
+ if (i == stackSize - 3) {
+ const base: Smi = GetPendingRunBase(pendingRuns, i + 2);
+ const length: Smi = GetPendingRunLength(pendingRuns, i + 2);
+ SetPendingRunBase(pendingRuns, i + 1, base);
+ SetPendingRunLength(pendingRuns, i + 1, length);
}
- SetPendingRunsSize(sortState, stack_size - 1);
+ SetPendingRunsSize(sortState, stackSize - 1);
try {
// Where does b start in a? Elements in a before that can be ignored,
// because they are already in place.
- const key_right: Object =
- CallLoad(context, sortState, Load, elements, base_b)
- otherwise Bailout;
+ const keyRight: Object =
+ CallLoad(context, sortState, load, elements, baseB)
+ otherwise Bailout;
const k: Smi = CallGallopRight(
- context, sortState, Load, key_right, base_a, length_a, 0, False)
- otherwise Bailout;
+ context, sortState, load, keyRight, baseA, lengthA, 0, False)
+ otherwise Bailout;
elements = ReloadElements(sortState);
assert(k >= 0);
- base_a = base_a + k;
- length_a = length_a - k;
- if (length_a == 0) return kSuccess;
- assert(length_a > 0);
+ baseA = baseA + k;
+ lengthA = lengthA - k;
+ if (lengthA == 0) return kSuccess;
+ assert(lengthA > 0);
// Where does a end in b? Elements in b after that can be ignored,
// because they are already in place.
- let key_left: Object =
- CallLoad(context, sortState, Load, elements, base_a + length_a - 1)
- otherwise Bailout;
- length_b = CallGallopLeft(
- context, sortState, Load, key_left, base_b, length_b, length_b - 1,
- False) otherwise Bailout;
+ let keyLeft: Object =
+ CallLoad(context, sortState, load, elements, baseA + lengthA - 1)
+ otherwise Bailout;
+ lengthB = CallGallopLeft(
+ context, sortState, load, keyLeft, baseB, lengthB, lengthB - 1, False)
+ otherwise Bailout;
elements = ReloadElements(sortState);
- assert(length_b >= 0);
- if (length_b == 0) return kSuccess;
+ assert(lengthB >= 0);
+ if (lengthB == 0) return kSuccess;
// Merge what remains of the runs, using a temp array with
- // min(length_a, length_b) elements.
- if (length_a <= length_b) {
- MergeLow(context, sortState, base_a, length_a, base_b, length_b)
- otherwise Bailout;
+ // min(lengthA, lengthB) elements.
+ if (lengthA <= lengthB) {
+ MergeLow(context, sortState, baseA, lengthA, baseB, lengthB)
+ otherwise Bailout;
} else {
- MergeHigh(context, sortState, base_a, length_a, base_b, length_b)
- otherwise Bailout;
+ MergeHigh(context, sortState, baseA, lengthA, baseB, lengthB)
+ otherwise Bailout;
}
return kSuccess;
}
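Before merging, MergeAt gallops from both ends to drop elements that are already in place: the prefix of A at or below B's first element, and the suffix of B at or above A's last element. A sketch of that trimming with bisect standing in for the gallop builtins:

# Sketch of MergeAt's trimming: gallop (here: bisect) to skip the parts
# of runs A and B that are already in their final positions.
import bisect

def trimmed_merge_bounds(a, base_a, length_a, base_b, length_b):
    # GallopRight over A with key a[base_b]: that prefix of A is in place.
    k = bisect.bisect_right(a, a[base_b], base_a, base_a + length_a) - base_a
    base_a, length_a = base_a + k, length_a - k
    if length_a == 0:
        return None                   # runs already in order
    # GallopLeft over B with A's last element: that suffix of B is in place.
    length_b = bisect.bisect_left(
        a, a[base_a + length_a - 1], base_b, base_b + length_b) - base_b
    return base_a, length_a, base_b, length_b

assert trimmed_merge_bounds([1, 2, 9, 3, 4], 0, 3, 3, 2) == (2, 1, 3, 2)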
@@ -919,111 +915,111 @@ module array {
// pretending that array[base - 1] is minus infinity and array[base + len]
// is plus infinity. In other words, key belongs at index base + k.
builtin GallopLeft(
- context: Context, sortState: FixedArray, Load: LoadFn, key: Object,
+ context: Context, sortState: FixedArray, load: LoadFn, key: Object,
base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi {
assert(length > 0 && base >= 0);
assert(0 <= hint && hint < length);
- let last_ofs: Smi = 0;
+ let lastOfs: Smi = 0;
let offset: Smi = 1;
try {
- const base_hint_element: Object = CallLoad(
- context, sortState, Load,
+ const baseHintElement: Object = CallLoad(
+ context, sortState, load,
LoadElementsOrTempArray(useTempArray, sortState), base + hint)
- otherwise Bailout;
+ otherwise Bailout;
let order: Number =
- CallCompareFn(context, sortState, base_hint_element, key)
- otherwise Bailout;
+ CallCompareFn(context, sortState, baseHintElement, key)
+ otherwise Bailout;
if (order < 0) {
// a[base + hint] < key: gallop right, until
- // a[base + hint + last_ofs] < key <= a[base + hint + offset].
+ // a[base + hint + lastOfs] < key <= a[base + hint + offset].
// a[base + length - 1] is highest.
- let max_ofs: Smi = length - hint;
- while (offset < max_ofs) {
- const offset_element: Object = CallLoad(
- context, sortState, Load,
+ let maxOfs: Smi = length - hint;
+ while (offset < maxOfs) {
+ const offsetElement: Object = CallLoad(
+ context, sortState, load,
LoadElementsOrTempArray(useTempArray, sortState),
base + hint + offset)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, offset_element, key)
- otherwise Bailout;
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, offsetElement, key)
+ otherwise Bailout;
// a[base + hint + offset] >= key? Break.
if (order >= 0) break;
- last_ofs = offset;
+ lastOfs = offset;
offset = (offset << 1) + 1;
// Integer overflow.
- if (offset <= 0) offset = max_ofs;
+ if (offset <= 0) offset = maxOfs;
}
- if (offset > max_ofs) offset = max_ofs;
+ if (offset > maxOfs) offset = maxOfs;
// Translate back to positive offsets relative to base.
- last_ofs = last_ofs + hint;
+ lastOfs = lastOfs + hint;
offset = offset + hint;
} else {
// key <= a[base + hint]: gallop left, until
- // a[base + hint - offset] < key <= a[base + hint - last_ofs].
+ // a[base + hint - offset] < key <= a[base + hint - lastOfs].
assert(order >= 0);
// a[base + hint] is lowest.
- let max_ofs: Smi = hint + 1;
- while (offset < max_ofs) {
- const offset_element: Object = CallLoad(
- context, sortState, Load,
+ let maxOfs: Smi = hint + 1;
+ while (offset < maxOfs) {
+ const offsetElement: Object = CallLoad(
+ context, sortState, load,
LoadElementsOrTempArray(useTempArray, sortState),
base + hint - offset)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, offset_element, key)
- otherwise Bailout;
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, offsetElement, key)
+ otherwise Bailout;
if (order < 0) break;
- last_ofs = offset;
+ lastOfs = offset;
offset = (offset << 1) + 1;
// Integer overflow.
- if (offset <= 0) offset = max_ofs;
+ if (offset <= 0) offset = maxOfs;
}
- if (offset > max_ofs) offset = max_ofs;
+ if (offset > maxOfs) offset = maxOfs;
// Translate back to positive offsets relative to base.
- const tmp: Smi = last_ofs;
- last_ofs = hint - offset;
+ const tmp: Smi = lastOfs;
+ lastOfs = hint - offset;
offset = hint - tmp;
}
- assert(-1 <= last_ofs && last_ofs < offset && offset <= length);
+ assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
- // Now a[base+last_ofs] < key <= a[base+offset], so key belongs somewhere
- // to the right of last_ofs but no farther right than offset. Do a binary
+ // Now a[base+lastOfs] < key <= a[base+offset], so key belongs somewhere
+ // to the right of lastOfs but no farther right than offset. Do a binary
// search, with invariant:
- // a[base + last_ofs - 1] < key <= a[base + offset].
- last_ofs++;
- while (last_ofs < offset) {
- const m: Smi = last_ofs + ((offset - last_ofs) >>> 1);
+ // a[base + lastOfs - 1] < key <= a[base + offset].
+ lastOfs++;
+ while (lastOfs < offset) {
+ const m: Smi = lastOfs + ((offset - lastOfs) >>> 1);
- const base_m_element: Object = CallLoad(
- context, sortState, Load,
+ const baseMElement: Object = CallLoad(
+ context, sortState, load,
LoadElementsOrTempArray(useTempArray, sortState), base + m)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, base_m_element, key)
- otherwise Bailout;
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, baseMElement, key)
+ otherwise Bailout;
if (order < 0) {
- last_ofs = m + 1; // a[base + m] < key.
+ lastOfs = m + 1; // a[base + m] < key.
} else {
offset = m; // key <= a[base + m].
}
}
// so a[base + offset - 1] < key <= a[base + offset].
- assert(last_ofs == offset);
+ assert(lastOfs == offset);
assert(0 <= offset && offset <= length);
return offset;
}
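The galloping search in plain Python, checked against bisect_left: probe at offsets 1, 3, 7, ... from the hint, then binary-search the final bracket. This sketches the algorithm only; the builtin above additionally threads the bailout state through every load and compare:

# Sketch of GallopLeft: exponential probing from `hint`, then binary
# search; returns the leftmost insertion point for `key`.
def gallop_left(a, key, base, length, hint):
    last_ofs, offset = 0, 1
    if a[base + hint] < key:                        # gallop right
        max_ofs = length - hint
        while offset < max_ofs and a[base + hint + offset] < key:
            last_ofs, offset = offset, (offset << 1) + 1
        offset = min(offset, max_ofs)
        last_ofs, offset = last_ofs + hint, offset + hint
    else:                                           # gallop left
        max_ofs = hint + 1
        while offset < max_ofs and not (a[base + hint - offset] < key):
            last_ofs, offset = offset, (offset << 1) + 1
        offset = min(offset, max_ofs)
        last_ofs, offset = hint - offset, hint - last_ofs
    last_ofs += 1
    while last_ofs < offset:     # a[base+last_ofs-1] < key <= a[base+offset]
        m = last_ofs + ((offset - last_ofs) >> 1)
        if a[base + m] < key:
            last_ofs = m + 1
        else:
            offset = m
    return offset

import bisect
run = [1, 2, 2, 4, 7]
for k in (0, 2, 3, 8):
    assert gallop_left(run, k, 0, len(run), 2) == bisect.bisect_left(run, k)

GallopRight below is the mirror image (rightmost insertion point), so the same sketch covers both.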
@@ -1042,109 +1038,109 @@ module array {
//
// or kFailure on error.
builtin GallopRight(
- context: Context, sortState: FixedArray, Load: LoadFn, key: Object,
+ context: Context, sortState: FixedArray, load: LoadFn, key: Object,
base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi {
assert(length > 0 && base >= 0);
assert(0 <= hint && hint < length);
- let last_ofs: Smi = 0;
+ let lastOfs: Smi = 0;
let offset: Smi = 1;
try {
- const base_hint_element: Object = CallLoad(
- context, sortState, Load,
+ const baseHintElement: Object = CallLoad(
+ context, sortState, load,
LoadElementsOrTempArray(useTempArray, sortState), base + hint)
- otherwise Bailout;
+ otherwise Bailout;
let order: Number =
- CallCompareFn(context, sortState, key, base_hint_element)
- otherwise Bailout;
+ CallCompareFn(context, sortState, key, baseHintElement)
+ otherwise Bailout;
if (order < 0) {
// key < a[base + hint]: gallop left, until
- // a[base + hint - offset] <= key < a[base + hint - last_ofs].
+ // a[base + hint - offset] <= key < a[base + hint - lastOfs].
// a[base + hint] is lowest.
- let max_ofs: Smi = hint + 1;
- while (offset < max_ofs) {
- const offset_element: Object = CallLoad(
- context, sortState, Load,
+ let maxOfs: Smi = hint + 1;
+ while (offset < maxOfs) {
+ const offsetElement: Object = CallLoad(
+ context, sortState, load,
LoadElementsOrTempArray(useTempArray, sortState),
base + hint - offset)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, key, offset_element)
- otherwise Bailout;
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, key, offsetElement)
+ otherwise Bailout;
if (order >= 0) break;
- last_ofs = offset;
+ lastOfs = offset;
offset = (offset << 1) + 1;
// Integer overflow.
- if (offset <= 0) offset = max_ofs;
+ if (offset <= 0) offset = maxOfs;
}
- if (offset > max_ofs) offset = max_ofs;
+ if (offset > maxOfs) offset = maxOfs;
// Translate back to positive offsets relative to base.
- const tmp: Smi = last_ofs;
- last_ofs = hint - offset;
+ const tmp: Smi = lastOfs;
+ lastOfs = hint - offset;
offset = hint - tmp;
} else {
// a[base + hint] <= key: gallop right, until
- // a[base + hint + last_ofs] <= key < a[base + hint + offset].
+ // a[base + hint + lastOfs] <= key < a[base + hint + offset].
// a[base + length - 1] is highest.
- let max_ofs: Smi = length - hint;
- while (offset < max_ofs) {
- const offset_element: Object = CallLoad(
- context, sortState, Load,
+ let maxOfs: Smi = length - hint;
+ while (offset < maxOfs) {
+ const offsetElement: Object = CallLoad(
+ context, sortState, load,
LoadElementsOrTempArray(useTempArray, sortState),
base + hint + offset)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, key, offset_element)
- otherwise Bailout;
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, key, offsetElement)
+ otherwise Bailout;
// a[base + hint + ofs] <= key.
if (order < 0) break;
- last_ofs = offset;
+ lastOfs = offset;
offset = (offset << 1) + 1;
// Integer overflow.
- if (offset <= 0) offset = max_ofs;
+ if (offset <= 0) offset = maxOfs;
}
- if (offset > max_ofs) offset = max_ofs;
+ if (offset > maxOfs) offset = maxOfs;
      // Translate back to positive offsets relative to base.
- last_ofs = last_ofs + hint;
+ lastOfs = lastOfs + hint;
offset = offset + hint;
}
- assert(-1 <= last_ofs && last_ofs < offset && offset <= length);
+ assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
- // Now a[base + last_ofs] <= key < a[base + ofs], so key belongs
- // somewhere to the right of last_ofs but no farther right than ofs.
+ // Now a[base + lastOfs] <= key < a[base + ofs], so key belongs
+ // somewhere to the right of lastOfs but no farther right than ofs.
// Do a binary search, with invariant
- // a[base + last_ofs - 1] < key <= a[base + ofs].
- last_ofs++;
- while (last_ofs < offset) {
- const m: Smi = last_ofs + ((offset - last_ofs) >>> 1);
+ // a[base + lastOfs - 1] < key <= a[base + ofs].
+ lastOfs++;
+ while (lastOfs < offset) {
+ const m: Smi = lastOfs + ((offset - lastOfs) >>> 1);
- const base_m_element: Object = CallLoad(
- context, sortState, Load,
+ const baseMElement: Object = CallLoad(
+ context, sortState, load,
LoadElementsOrTempArray(useTempArray, sortState), base + m)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, key, base_m_element)
- otherwise Bailout;
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, key, baseMElement)
+ otherwise Bailout;
if (order < 0) {
offset = m; // key < a[base + m].
} else {
- last_ofs = m + 1; // a[base + m] <= key.
+ lastOfs = m + 1; // a[base + m] <= key.
}
}
// so a[base + offset - 1] <= key < a[base + offset].
- assert(last_ofs == offset);
+ assert(lastOfs == offset);
assert(0 <= offset && offset <= length);
return offset;
}
@@ -1153,374 +1149,373 @@ module array {
}
}
- // Copies a single element inside the array/object (NOT the temp_array).
+ // Copies a single element inside the array/object (NOT the tempArray).
macro CopyElement(
- context: Context, sortState: FixedArray, Load: LoadFn, Store: StoreFn,
+ context: Context, sortState: FixedArray, load: LoadFn, store: StoreFn,
elements: HeapObject, from: Smi, to: Smi)
labels Bailout {
- const element: Object = CallLoad(context, sortState, Load, elements, from)
- otherwise Bailout;
- CallStore(context, sortState, Store, elements, to, element)
- otherwise Bailout;
- }
-
- // Merge the length_a elements starting at base_a with the length_b elements
- // starting at base_b in a stable way, in-place. length_a and length_b must
- // be > 0, and base_a + length_a == base_b. Must also have that
- // array[base_b] < array[base_a],
- // that array[base_a + length_a - 1] belongs at the end of the merge,
- // and should have length_a <= length_b.
+ const element: Object = CallLoad(context, sortState, load, elements, from)
+ otherwise Bailout;
+ CallStore(context, sortState, store, elements, to, element)
+ otherwise Bailout;
+ }
+
+ // Merge the lengthA elements starting at baseA with the lengthB elements
+ // starting at baseB in a stable way, in-place. lengthA and lengthB must
+ // be > 0, and baseA + lengthA == baseB. Must also have that
+ // array[baseB] < array[baseA],
+ // that array[baseA + lengthA - 1] belongs at the end of the merge,
+ // and should have lengthA <= lengthB.
macro MergeLow(
- context: Context, sortState: FixedArray, baseA: Smi, lengthA: Smi,
- baseB: Smi, lengthB: Smi)
+ context: Context, sortState: FixedArray, baseA: Smi, lengthAArg: Smi,
+ baseB: Smi, lengthBArg: Smi)
labels Bailout {
- assert(0 < lengthA && 0 < lengthB);
+ assert(0 < lengthAArg && 0 < lengthBArg);
assert(0 <= baseA && 0 < baseB);
- assert(baseA + lengthA == baseB);
+ assert(baseA + lengthAArg == baseB);
- let length_a: Smi = lengthA;
- let length_b: Smi = lengthB;
+ let lengthA: Smi = lengthAArg;
+ let lengthB: Smi = lengthBArg;
let elements: HeapObject = ReloadElements(sortState);
- const LoadF: LoadFn = GetLoadFn(sortState);
- const Store: StoreFn = GetStoreFn(sortState);
+ const load: LoadFn = GetLoadFn(sortState);
+ const store: StoreFn = GetStoreFn(sortState);
- const temp_array: FixedArray = GetTempArray(sortState, length_a);
+ const tempArray: FixedArray = GetTempArray(sortState, lengthA);
CopyToTempArray(
- context, sortState, LoadF, elements, baseA, temp_array, 0, length_a)
- otherwise Bailout;
+ context, sortState, load, elements, baseA, tempArray, 0, lengthA)
+ otherwise Bailout;
let dest: Smi = baseA;
- let cursor_temp: Smi = 0;
- let cursor_b: Smi = baseB;
+ let cursorTemp: Smi = 0;
+ let cursorB: Smi = baseB;
- CopyElement(context, sortState, LoadF, Store, elements, cursor_b++, dest++)
- otherwise Bailout;
+ CopyElement(context, sortState, load, store, elements, cursorB++, dest++)
+ otherwise Bailout;
try {
- if (--length_b == 0) goto Succeed;
- if (length_a == 1) goto CopyB;
+ if (--lengthB == 0) goto Succeed;
+ if (lengthA == 1) goto CopyB;
- let min_gallop: Smi = unsafe_cast<Smi>(sortState[kMinGallopIdx]);
+ let minGallop: Smi = UnsafeCast<Smi>(sortState[kMinGallopIdx]);
// TODO(szuend): Replace with something that does not have a runtime
      // overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
- let nof_wins_a: Smi = 0; // # of times A won in a row.
- let nof_wins_b: Smi = 0; // # of times B won in a row.
+ let nofWinsA: Smi = 0; // # of times A won in a row.
+ let nofWinsB: Smi = 0; // # of times B won in a row.
// Do the straightforward thing until (if ever) one run appears to
// win consistently.
// TODO(szuend): Replace with something that does not have a runtime
        // overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
- assert(length_a > 1 && length_b > 0);
-
- let element_b: Object =
- CallLoad(context, sortState, LoadF, elements, cursor_b)
- otherwise Bailout;
- let order: Number = CallCompareFn(
- context, sortState, element_b, temp_array[cursor_temp])
- otherwise Bailout;
+ assert(lengthA > 1 && lengthB > 0);
+
+ let elementB: Object =
+ CallLoad(context, sortState, load, elements, cursorB)
+ otherwise Bailout;
+ let order: Number =
+ CallCompareFn(context, sortState, elementB, tempArray[cursorTemp])
+ otherwise Bailout;
elements = ReloadElements(sortState);
if (order < 0) {
CopyElement(
- context, sortState, LoadF, Store, elements, cursor_b, dest)
- otherwise Bailout;
+ context, sortState, load, store, elements, cursorB, dest)
+ otherwise Bailout;
- ++cursor_b;
+ ++cursorB;
++dest;
- ++nof_wins_b;
- --length_b;
- nof_wins_a = 0;
+ ++nofWinsB;
+ --lengthB;
+ nofWinsA = 0;
- if (length_b == 0) goto Succeed;
- if (nof_wins_b >= min_gallop) break;
+ if (lengthB == 0) goto Succeed;
+ if (nofWinsB >= minGallop) break;
} else {
CallStore(
- context, sortState, Store, elements, dest,
- temp_array[cursor_temp])
- otherwise Bailout;
+ context, sortState, store, elements, dest,
+ tempArray[cursorTemp])
+ otherwise Bailout;
- ++cursor_temp;
+ ++cursorTemp;
++dest;
- ++nof_wins_a;
- --length_a;
- nof_wins_b = 0;
+ ++nofWinsA;
+ --lengthA;
+ nofWinsB = 0;
- if (length_a == 1) goto CopyB;
- if (nof_wins_a >= min_gallop) break;
+ if (lengthA == 1) goto CopyB;
+ if (nofWinsA >= minGallop) break;
}
}
// One run is winning so consistently that galloping may be a huge win.
// So try that, and continue galloping until (if ever) neither run
// appears to be winning consistently anymore.
- ++min_gallop;
- let first_iteration: bool = true;
- while (nof_wins_a >= kMinGallopWins || nof_wins_b >= kMinGallopWins ||
- first_iteration) {
- first_iteration = false;
- assert(length_a > 1 && length_b > 0);
-
- min_gallop = SmiMax(1, min_gallop - 1);
- sortState[kMinGallopIdx] = min_gallop;
-
- let key_right: Object =
- CallLoad(context, sortState, LoadF, elements, cursor_b)
- otherwise Bailout;
- nof_wins_a = CallGallopRight(
- context, sortState, Load<TempArrayElements>, key_right,
- cursor_temp, length_a, 0, True) otherwise Bailout;
+ ++minGallop;
+ let firstIteration: bool = true;
+ while (nofWinsA >= kMinGallopWins || nofWinsB >= kMinGallopWins ||
+ firstIteration) {
+ firstIteration = false;
+ assert(lengthA > 1 && lengthB > 0);
+
+ minGallop = SmiMax(1, minGallop - 1);
+ sortState[kMinGallopIdx] = minGallop;
+
+ let keyRight: Object =
+ CallLoad(context, sortState, load, elements, cursorB)
+ otherwise Bailout;
+ nofWinsA = CallGallopRight(
+ context, sortState, Load<TempArrayElements>, keyRight, cursorTemp,
+ lengthA, 0, True) otherwise Bailout;
elements = ReloadElements(sortState);
- assert(nof_wins_a >= 0);
+ assert(nofWinsA >= 0);
- if (nof_wins_a > 0) {
+ if (nofWinsA > 0) {
CallCopyFromTempArray(
- context, sortState, elements, dest, temp_array, cursor_temp,
- nof_wins_a) otherwise Bailout;
- dest = dest + nof_wins_a;
- cursor_temp = cursor_temp + nof_wins_a;
- length_a = length_a - nof_wins_a;
+ context, sortState, elements, dest, tempArray, cursorTemp,
+ nofWinsA) otherwise Bailout;
+ dest = dest + nofWinsA;
+ cursorTemp = cursorTemp + nofWinsA;
+ lengthA = lengthA - nofWinsA;
- if (length_a == 1) goto CopyB;
+ if (lengthA == 1) goto CopyB;
- // length_a == 0 is impossible now if the comparison function is
+ // lengthA == 0 is impossible now if the comparison function is
// consistent, but we can't assume that it is.
- if (length_a == 0) goto Succeed;
+ if (lengthA == 0) goto Succeed;
}
CopyElement(
- context, sortState, LoadF, Store, elements, cursor_b++, dest++)
- otherwise Bailout;
- if (--length_b == 0) goto Succeed;
-
- nof_wins_b = CallGallopLeft(
- context, sortState, LoadF, temp_array[cursor_temp], cursor_b,
- length_b, 0, False)
- otherwise Bailout;
+ context, sortState, load, store, elements, cursorB++, dest++)
+ otherwise Bailout;
+ if (--lengthB == 0) goto Succeed;
+
+ nofWinsB = CallGallopLeft(
+ context, sortState, load, tempArray[cursorTemp], cursorB, lengthB,
+ 0, False)
+ otherwise Bailout;
elements = ReloadElements(sortState);
- assert(nof_wins_b >= 0);
- if (nof_wins_b > 0) {
+ assert(nofWinsB >= 0);
+ if (nofWinsB > 0) {
CallCopyWithinSortArray(
- context, sortState, elements, cursor_b, dest, nof_wins_b)
- otherwise Bailout;
+ context, sortState, elements, cursorB, dest, nofWinsB)
+ otherwise Bailout;
- dest = dest + nof_wins_b;
- cursor_b = cursor_b + nof_wins_b;
- length_b = length_b - nof_wins_b;
+ dest = dest + nofWinsB;
+ cursorB = cursorB + nofWinsB;
+ lengthB = lengthB - nofWinsB;
- if (length_b == 0) goto Succeed;
+ if (lengthB == 0) goto Succeed;
}
CallStore(
- context, sortState, Store, elements, dest++,
- temp_array[cursor_temp++])
- otherwise Bailout;
- if (--length_a == 1) goto CopyB;
+ context, sortState, store, elements, dest++,
+ tempArray[cursorTemp++])
+ otherwise Bailout;
+ if (--lengthA == 1) goto CopyB;
}
- ++min_gallop; // Penalize it for leaving galloping mode
- sortState[kMinGallopIdx] = min_gallop;
+ ++minGallop; // Penalize it for leaving galloping mode
+ sortState[kMinGallopIdx] = minGallop;
}
}
label Succeed {
- if (length_a > 0) {
+ if (lengthA > 0) {
CallCopyFromTempArray(
- context, sortState, elements, dest, temp_array, cursor_temp,
- length_a) otherwise Bailout;
+ context, sortState, elements, dest, tempArray, cursorTemp, lengthA)
+ otherwise Bailout;
}
}
label CopyB {
- assert(length_a == 1 && length_b > 0);
+ assert(lengthA == 1 && lengthB > 0);
// The last element of run A belongs at the end of the merge.
CallCopyWithinSortArray(
- context, sortState, elements, cursor_b, dest, length_b)
- otherwise Bailout;
+ context, sortState, elements, cursorB, dest, lengthB)
+ otherwise Bailout;
CallStore(
- context, sortState, Store, elements, dest + length_b,
- temp_array[cursor_temp])
- otherwise Bailout;
+ context, sortState, store, elements, dest + lengthB,
+ tempArray[cursorTemp])
+ otherwise Bailout;
}
}
- // Merge the length_a elements starting at base_a with the length_b elements
- // starting at base_b in a stable way, in-place. length_a and length_b must
- // be > 0. Must also have that array[base_a + length_a - 1] belongs at the
- // end of the merge and should have length_a >= length_b.
+ // Merge the lengthA elements starting at baseA with the lengthB elements
+ // starting at baseB in a stable way, in-place. lengthA and lengthB must
+ // be > 0. Must also have that array[baseA + lengthA - 1] belongs at the
+ // end of the merge and should have lengthA >= lengthB.
macro MergeHigh(
- context: Context, sortState: FixedArray, baseA: Smi, lengthA: Smi,
- baseB: Smi, lengthB: Smi)
+ context: Context, sortState: FixedArray, baseA: Smi, lengthAArg: Smi,
+ baseB: Smi, lengthBArg: Smi)
labels Bailout {
- assert(0 < lengthA && 0 < lengthB);
+ assert(0 < lengthAArg && 0 < lengthBArg);
assert(0 <= baseA && 0 < baseB);
- assert(baseA + lengthA == baseB);
+ assert(baseA + lengthAArg == baseB);
- let length_a: Smi = lengthA;
- let length_b: Smi = lengthB;
+ let lengthA: Smi = lengthAArg;
+ let lengthB: Smi = lengthBArg;
let elements: HeapObject = ReloadElements(sortState);
- const LoadF: LoadFn = GetLoadFn(sortState);
- const Store: StoreFn = GetStoreFn(sortState);
+ const load: LoadFn = GetLoadFn(sortState);
+ const store: StoreFn = GetStoreFn(sortState);
- const temp_array: FixedArray = GetTempArray(sortState, length_b);
+ const tempArray: FixedArray = GetTempArray(sortState, lengthB);
CopyToTempArray(
- context, sortState, LoadF, elements, baseB, temp_array, 0, length_b)
- otherwise Bailout;
+ context, sortState, load, elements, baseB, tempArray, 0, lengthB)
+ otherwise Bailout;
// MergeHigh merges the two runs backwards.
- let dest: Smi = baseB + length_b - 1;
- let cursor_temp: Smi = length_b - 1;
- let cursor_a: Smi = baseA + length_a - 1;
+ let dest: Smi = baseB + lengthB - 1;
+ let cursorTemp: Smi = lengthB - 1;
+ let cursorA: Smi = baseA + lengthA - 1;
- CopyElement(context, sortState, LoadF, Store, elements, cursor_a--, dest--)
- otherwise Bailout;
+ CopyElement(context, sortState, load, store, elements, cursorA--, dest--)
+ otherwise Bailout;
try {
- if (--length_a == 0) goto Succeed;
- if (length_b == 1) goto CopyA;
+ if (--lengthA == 0) goto Succeed;
+ if (lengthB == 1) goto CopyA;
- let min_gallop: Smi = unsafe_cast<Smi>(sortState[kMinGallopIdx]);
+ let minGallop: Smi = UnsafeCast<Smi>(sortState[kMinGallopIdx]);
// TODO(szuend): Replace with something that does not have a runtime
      // overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
- let nof_wins_a: Smi = 0; // # of times A won in a row.
- let nof_wins_b: Smi = 0; // # of times B won in a row.
+ let nofWinsA: Smi = 0; // # of times A won in a row.
+ let nofWinsB: Smi = 0; // # of times B won in a row.
// Do the straightforward thing until (if ever) one run appears to
// win consistently.
// TODO(szuend): Replace with something that does not have a runtime
        // overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
- assert(length_a > 0 && length_b > 1);
-
- let element_a: Object =
- CallLoad(context, sortState, LoadF, elements, cursor_a)
- otherwise Bailout;
- let order: Number = CallCompareFn(
- context, sortState, temp_array[cursor_temp], element_a)
- otherwise Bailout;
+ assert(lengthA > 0 && lengthB > 1);
+
+ let elementA: Object =
+ CallLoad(context, sortState, load, elements, cursorA)
+ otherwise Bailout;
+ let order: Number =
+ CallCompareFn(context, sortState, tempArray[cursorTemp], elementA)
+ otherwise Bailout;
elements = ReloadElements(sortState);
if (order < 0) {
CopyElement(
- context, sortState, LoadF, Store, elements, cursor_a, dest)
- otherwise Bailout;
+ context, sortState, load, store, elements, cursorA, dest)
+ otherwise Bailout;
- --cursor_a;
+ --cursorA;
--dest;
- ++nof_wins_a;
- --length_a;
- nof_wins_b = 0;
+ ++nofWinsA;
+ --lengthA;
+ nofWinsB = 0;
- if (length_a == 0) goto Succeed;
- if (nof_wins_a >= min_gallop) break;
+ if (lengthA == 0) goto Succeed;
+ if (nofWinsA >= minGallop) break;
} else {
CallStore(
- context, sortState, Store, elements, dest,
- temp_array[cursor_temp])
- otherwise Bailout;
+ context, sortState, store, elements, dest,
+ tempArray[cursorTemp])
+ otherwise Bailout;
- --cursor_temp;
+ --cursorTemp;
--dest;
- ++nof_wins_b;
- --length_b;
- nof_wins_a = 0;
+ ++nofWinsB;
+ --lengthB;
+ nofWinsA = 0;
- if (length_b == 1) goto CopyA;
- if (nof_wins_b >= min_gallop) break;
+ if (lengthB == 1) goto CopyA;
+ if (nofWinsB >= minGallop) break;
}
}
// One run is winning so consistently that galloping may be a huge win.
// So try that, and continue galloping until (if ever) neither run
// appears to be winning consistently anymore.
- ++min_gallop;
- let first_iteration: bool = true;
- while (nof_wins_a >= kMinGallopWins || nof_wins_b >= kMinGallopWins ||
- first_iteration) {
- first_iteration = false;
+ ++minGallop;
+ let firstIteration: bool = true;
+ while (nofWinsA >= kMinGallopWins || nofWinsB >= kMinGallopWins ||
+ firstIteration) {
+ firstIteration = false;
- assert(length_a > 0 && length_b > 1);
+ assert(lengthA > 0 && lengthB > 1);
- min_gallop = SmiMax(1, min_gallop - 1);
- sortState[kMinGallopIdx] = min_gallop;
+ minGallop = SmiMax(1, minGallop - 1);
+ sortState[kMinGallopIdx] = minGallop;
let k: Smi = CallGallopRight(
- context, sortState, LoadF, temp_array[cursor_temp], baseA,
- length_a, length_a - 1, False)
- otherwise Bailout;
+ context, sortState, load, tempArray[cursorTemp], baseA, lengthA,
+ lengthA - 1, False)
+ otherwise Bailout;
elements = ReloadElements(sortState);
assert(k >= 0);
- nof_wins_a = length_a - k;
+ nofWinsA = lengthA - k;
- if (nof_wins_a > 0) {
- dest = dest - nof_wins_a;
- cursor_a = cursor_a - nof_wins_a;
+ if (nofWinsA > 0) {
+ dest = dest - nofWinsA;
+ cursorA = cursorA - nofWinsA;
CallCopyWithinSortArray(
- context, sortState, elements, cursor_a + 1, dest + 1,
- nof_wins_a)
- otherwise Bailout;
+ context, sortState, elements, cursorA + 1, dest + 1, nofWinsA)
+ otherwise Bailout;
- length_a = length_a - nof_wins_a;
- if (length_a == 0) goto Succeed;
+ lengthA = lengthA - nofWinsA;
+ if (lengthA == 0) goto Succeed;
}
CallStore(
- context, sortState, Store, elements, dest--,
- temp_array[cursor_temp--])
- otherwise Bailout;
- if (--length_b == 1) goto CopyA;
+ context, sortState, store, elements, dest--,
+ tempArray[cursorTemp--])
+ otherwise Bailout;
+ if (--lengthB == 1) goto CopyA;
let key: Object =
- CallLoad(context, sortState, LoadF, elements, cursor_a)
- otherwise Bailout;
+ CallLoad(context, sortState, load, elements, cursorA)
+ otherwise Bailout;
k = CallGallopLeft(
- context, sortState, Load<TempArrayElements>, key, 0, length_b,
- length_b - 1, True) otherwise Bailout;
+ context, sortState, Load<TempArrayElements>, key, 0, lengthB,
+ lengthB - 1, True) otherwise Bailout;
elements = ReloadElements(sortState);
assert(k >= 0);
- nof_wins_b = length_b - k;
+ nofWinsB = lengthB - k;
- if (nof_wins_b > 0) {
- dest = dest - nof_wins_b;
- cursor_temp = cursor_temp - nof_wins_b;
+ if (nofWinsB > 0) {
+ dest = dest - nofWinsB;
+ cursorTemp = cursorTemp - nofWinsB;
CallCopyFromTempArray(
- context, sortState, elements, dest + 1, temp_array,
- cursor_temp + 1, nof_wins_b) otherwise Bailout;
+ context, sortState, elements, dest + 1, tempArray,
+ cursorTemp + 1, nofWinsB) otherwise Bailout;
- length_b = length_b - nof_wins_b;
- if (length_b == 1) goto CopyA;
+ lengthB = lengthB - nofWinsB;
+ if (lengthB == 1) goto CopyA;
- // length_b == 0 is impossible now if the comparison function is
+ // lengthB == 0 is impossible now if the comparison function is
// consistent, but we can't assume that it is.
- if (length_b == 0) goto Succeed;
+ if (lengthB == 0) goto Succeed;
}
CopyElement(
- context, sortState, LoadF, Store, elements, cursor_a--, dest--)
- otherwise Bailout;
- if (--length_a == 0) goto Succeed;
+ context, sortState, load, store, elements, cursorA--, dest--)
+ otherwise Bailout;
+ if (--lengthA == 0) goto Succeed;
}
- ++min_gallop;
- sortState[kMinGallopIdx] = min_gallop;
+ ++minGallop;
+ sortState[kMinGallopIdx] = minGallop;
}
}
label Succeed {
- if (length_b > 0) {
- assert(length_a == 0);
+ if (lengthB > 0) {
+ assert(lengthA == 0);
CallCopyFromTempArray(
- context, sortState, elements, dest - (length_b - 1), temp_array, 0,
- length_b) otherwise Bailout;
+ context, sortState, elements, dest - (lengthB - 1), tempArray, 0,
+ lengthB) otherwise Bailout;
}
}
label CopyA {
- assert(length_b == 1 && length_a > 0);
+ assert(lengthB == 1 && lengthA > 0);
// The first element of run B belongs at the front of the merge.
- dest = dest - length_a;
- cursor_a = cursor_a - length_a;
+ dest = dest - lengthA;
+ cursorA = cursorA - lengthA;
CallCopyWithinSortArray(
- context, sortState, elements, cursor_a + 1, dest + 1, length_a)
- otherwise Bailout;
+ context, sortState, elements, cursorA + 1, dest + 1, lengthA)
+ otherwise Bailout;
CallStore(
- context, sortState, Store, elements, dest, temp_array[cursor_temp])
- otherwise Bailout;
+ context, sortState, store, elements, dest, tempArray[cursorTemp])
+ otherwise Bailout;
}
}
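MergeLow copies the shorter run A into the temp array and merges forward into the freed slots; ties prefer A, which keeps the sort stable. The sketch below shows only the non-galloping core of that loop, leaving out the min-gallop mode switching handled above:

# Sketch of MergeLow's core: A sits in `temp`, B stays in the array, and
# the merge writes forward into the gap left by A. B wins only if
# strictly smaller, so equal elements keep their original order.
def merge_low(a, base_a, length_a, base_b, length_b):
    assert base_a + length_a == base_b
    temp = a[base_a:base_a + length_a]
    cursor_t, cursor_b, dest = 0, base_b, base_a
    while cursor_t < length_a and cursor_b < base_b + length_b:
        if a[cursor_b] < temp[cursor_t]:
            a[dest] = a[cursor_b]
            cursor_b += 1
        else:
            a[dest] = temp[cursor_t]
            cursor_t += 1
        dest += 1
    a[dest:dest + (length_a - cursor_t)] = temp[cursor_t:]   # rest of A

xs = [2, 5, 9, 1, 3, 4, 8]
merge_low(xs, 0, 3, 3, 4)
assert xs == [1, 2, 3, 4, 5, 8, 9]

MergeHigh is the mirror image: it buffers the shorter run B and merges backwards from the top.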
@@ -1543,20 +1538,20 @@ module array {
n = n >>> 1;
}
- const min_run_length: Smi = n + r;
- assert(nArg < 64 || (32 <= min_run_length && min_run_length <= 64));
- return min_run_length;
+ const minRunLength: Smi = n + r;
+ assert(nArg < 64 || (32 <= minRunLength && minRunLength <= 64));
+ return minRunLength;
}
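Equivalently in Python: minrun is the six most significant bits of n, plus one if any lower bit was shifted out, so that n / minrun lands close to, but not above, a power of two:

# Sketch of ComputeMinRunLength for n >= 64; smaller n is returned as-is.
def compute_min_run_length(n):
    r = 0                     # becomes 1 if any shifted-out bit was set
    while n >= 64:
        r |= n & 1
        n >>= 1
    return n + r

assert compute_min_run_length(63) == 63
assert compute_min_run_length(64) == 32
assert compute_min_run_length(65) == 33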
// Returns true iff run_length(n - 2) > run_length(n - 1) + run_length(n).
macro RunInvariantEstablished(pendingRuns: FixedArray, n: Smi): bool {
if (n < 2) return true;
- const run_length_n: Smi = GetPendingRunLength(pendingRuns, n);
- const run_length_nm: Smi = GetPendingRunLength(pendingRuns, n - 1);
- const run_length_nmm: Smi = GetPendingRunLength(pendingRuns, n - 2);
+ const runLengthN: Smi = GetPendingRunLength(pendingRuns, n);
+ const runLengthNM: Smi = GetPendingRunLength(pendingRuns, n - 1);
+ const runLengthNMM: Smi = GetPendingRunLength(pendingRuns, n - 2);
- return run_length_nmm > run_length_nm + run_length_n;
+ return runLengthNMM > runLengthNM + runLengthN;
}
// Examines the stack of runs waiting to be merged, merging adjacent runs
@@ -1570,24 +1565,24 @@ module array {
// process. Determine if all these extra loads are ok.
macro MergeCollapse(context: Context, sortState: FixedArray)
labels Bailout {
- const pending_runs: FixedArray =
- unsafe_cast<FixedArray>(sortState[kPendingRunsIdx]);
+ const pendingRuns: FixedArray =
+ UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]);
// Reload the stack size because MergeAt might change it.
while (GetPendingRunsSize(sortState) > 1) {
let n: Smi = GetPendingRunsSize(sortState) - 2;
- if (!RunInvariantEstablished(pending_runs, n + 1) ||
- !RunInvariantEstablished(pending_runs, n)) {
- if (GetPendingRunLength(pending_runs, n - 1) <
- GetPendingRunLength(pending_runs, n + 1)) {
+ if (!RunInvariantEstablished(pendingRuns, n + 1) ||
+ !RunInvariantEstablished(pendingRuns, n)) {
+ if (GetPendingRunLength(pendingRuns, n - 1) <
+ GetPendingRunLength(pendingRuns, n + 1)) {
--n;
}
CallMergeAt(context, sortState, n) otherwise Bailout;
} else if (
- GetPendingRunLength(pending_runs, n) <=
- GetPendingRunLength(pending_runs, n + 1)) {
+ GetPendingRunLength(pendingRuns, n) <=
+ GetPendingRunLength(pendingRuns, n + 1)) {
CallMergeAt(context, sortState, n) otherwise Bailout;
} else {
break;
@@ -1599,16 +1594,16 @@ module array {
// remains. This is used at the end of the mergesort.
macro MergeForceCollapse(context: Context, sortState: FixedArray)
labels Bailout {
- let pending_runs: FixedArray =
- unsafe_cast<FixedArray>(sortState[kPendingRunsIdx]);
+ let pendingRuns: FixedArray =
+ UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]);
    // Reload the stack size because MergeAt might change it.
while (GetPendingRunsSize(sortState) > 1) {
let n: Smi = GetPendingRunsSize(sortState) - 2;
if (n > 0 &&
- GetPendingRunLength(pending_runs, n - 1) <
- GetPendingRunLength(pending_runs, n + 1)) {
+ GetPendingRunLength(pendingRuns, n - 1) <
+ GetPendingRunLength(pendingRuns, n + 1)) {
--n;
}
CallMergeAt(context, sortState, n) otherwise Bailout;
@@ -1620,13 +1615,12 @@ module array {
sortState[kTempArraySizeIdx] = SmiConstant(0);
SetPendingRunsSize(sortState, 0);
- let pending_runs: FixedArray =
- AllocateZeroedFixedArray(convert<intptr>(kMaxMergePending));
- FillFixedArrayWithSmiZero(pending_runs, kMaxMergePending);
- sortState[kPendingRunsIdx] = pending_runs;
+ let pendingRuns: FixedArray =
+ AllocateZeroedFixedArray(Convert<intptr>(kMaxMergePending));
+ sortState[kPendingRunsIdx] = pendingRuns;
}
- macro InitializeSortStateAccessor<Accessor : type>(sortState: FixedArray) {
+ macro InitializeSortStateAccessor<Accessor: type>(sortState: FixedArray) {
sortState[kAccessorIdx] = kFastElementsAccessorId;
sortState[kLoadFnIdx] = Load<Accessor>;
sortState[kStoreFnIdx] = Store<Accessor>;
@@ -1651,44 +1645,44 @@ module array {
// March over the array once, left to right, finding natural runs,
// and extending short natural runs to minrun elements.
let low: Smi = 0;
- const min_run_length: Smi = ComputeMinRunLength(remaining);
+ const minRunLength: Smi = ComputeMinRunLength(remaining);
while (remaining != 0) {
- let current_run_length: Smi =
+ let currentRunLength: Smi =
CountAndMakeRun(context, sortState, low, low + remaining)
- otherwise Bailout;
+ otherwise Bailout;
- // If the run is short, extend it to min(min_run_length, remaining).
- if (current_run_length < min_run_length) {
- const forced_run_length: Smi = SmiMin(min_run_length, remaining);
+ // If the run is short, extend it to min(minRunLength, remaining).
+ if (currentRunLength < minRunLength) {
+ const forcedRunLength: Smi = SmiMin(minRunLength, remaining);
BinaryInsertionSort(
- context, sortState, low, low + current_run_length,
- low + forced_run_length);
+ context, sortState, low, low + currentRunLength,
+ low + forcedRunLength);
EnsureSuccess(sortState) otherwise Bailout;
- current_run_length = forced_run_length;
+ currentRunLength = forcedRunLength;
}
// Push run onto pending-runs stack, and maybe merge.
- PushRun(sortState, low, current_run_length);
+ PushRun(sortState, low, currentRunLength);
MergeCollapse(context, sortState) otherwise Bailout;
// Advance to find next run.
- low = low + current_run_length;
- remaining = remaining - current_run_length;
+ low = low + currentRunLength;
+ remaining = remaining - currentRunLength;
}
MergeForceCollapse(context, sortState) otherwise Bailout;
assert(GetPendingRunsSize(sortState) == 1);
assert(
GetPendingRunLength(
- unsafe_cast<FixedArray>(sortState[kPendingRunsIdx]), 0) == length);
+ UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]), 0) == length);
}
builtin ArrayTimSort(
context: Context, sortState: FixedArray, length: Smi): Object {
try {
ArrayTimSortImpl(context, sortState, length)
- otherwise Slow;
+ otherwise Slow;
}
label Slow {
if (sortState[kAccessorIdx] == kGenericElementsAccessorId) {
@@ -1716,7 +1710,6 @@ module array {
// array and move the undefineds after that. Holes are removed.
// This happens for Array as well as non-Array objects.
extern runtime PrepareElementsForSort(Context, Object, Number): Smi;
- extern macro FillFixedArrayWithSmiZero(FixedArray, Smi);
// https://tc39.github.io/ecma262/#sec-array.prototype.sort
javascript builtin ArrayPrototypeSort(
@@ -1731,14 +1724,13 @@ module array {
// 2. Let obj be ? ToObject(this value).
const obj: JSReceiver = ToObject(context, receiver);
- const sort_state: FixedArray = AllocateZeroedFixedArray(kSortStateSize);
- FillFixedArrayWithSmiZero(sort_state, SmiTag(kSortStateSize));
+ const sortState: FixedArray = AllocateZeroedFixedArray(kSortStateSize);
- sort_state[kReceiverIdx] = obj;
- sort_state[kUserCmpFnIdx] = comparefnObj;
- sort_state[kSortComparePtrIdx] =
+ sortState[kReceiverIdx] = obj;
+ sortState[kUserCmpFnIdx] = comparefnObj;
+ sortState[kSortComparePtrIdx] =
comparefnObj != Undefined ? SortCompareUserFn : SortCompareDefault;
- sort_state[kBailoutStatusIdx] = kSuccess;
+ sortState[kBailoutStatusIdx] = kSuccess;
// 3. Let len be ? ToLength(? Get(obj, "length")).
const len: Number =
@@ -1751,33 +1743,34 @@ module array {
assert(nofNonUndefined <= len);
let map: Map = obj.map;
- sort_state[kInitialReceiverMapIdx] = map;
- sort_state[kInitialReceiverLengthIdx] = len;
+ sortState[kInitialReceiverMapIdx] = map;
+ sortState[kInitialReceiverLengthIdx] = len;
try {
- const a: JSArray = cast<JSArray>(obj) otherwise slow;
+ const a: JSArray = Cast<JSArray>(obj) otherwise Slow;
const elementsKind: ElementsKind = map.elements_kind;
- if (!IsFastElementsKind(elementsKind)) goto slow;
+ if (!IsFastElementsKind(elementsKind)) goto Slow;
if (IsDoubleElementsKind(elementsKind)) {
- InitializeSortStateAccessor<FastDoubleElements>(sort_state);
+ InitializeSortStateAccessor<FastDoubleElements>(sortState);
} else if (elementsKind == PACKED_SMI_ELEMENTS) {
- InitializeSortStateAccessor<FastPackedSmiElements>(sort_state);
+ InitializeSortStateAccessor<FastPackedSmiElements>(sortState);
} else {
- InitializeSortStateAccessor<FastSmiOrObjectElements>(sort_state);
+ InitializeSortStateAccessor<FastSmiOrObjectElements>(sortState);
}
- ArrayTimSort(context, sort_state, nofNonUndefined);
+ ArrayTimSort(context, sortState, nofNonUndefined);
}
- label slow {
+ label Slow {
if (map.elements_kind == DICTIONARY_ELEMENTS && IsExtensibleMap(map) &&
!IsCustomElementsReceiverInstanceType(map.instance_type)) {
- InitializeSortStateAccessor<DictionaryElements>(sort_state);
+ InitializeSortStateAccessor<DictionaryElements>(sortState);
} else {
- InitializeSortStateAccessor<GenericElementsAccessor>(sort_state);
+ InitializeSortStateAccessor<GenericElementsAccessor>(sortState);
}
- ArrayTimSort(context, sort_state, nofNonUndefined);
+ ArrayTimSort(context, sortState, nofNonUndefined);
}
return receiver;
}
}
+
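For reference, the renamed ComputeMinRunLength and RunInvariantEstablished macros above implement standard TimSort bookkeeping: minrun keeps every run between 32 and 64 elements once the array has at least 64 elements, and the stack invariant keeps merges balanced. A minimal Python sketch of both rules (illustrative only, not part of the patch):

    # Take the six most significant bits of n, adding one if any lower
    # bit is set; mirrors ComputeMinRunLength and its assert.
    def compute_min_run_length(n):
        assert n >= 0
        r = 0
        while n >= 64:
            r |= n & 1
            n >>= 1
        return n + r

    # Mirrors RunInvariantEstablished:
    # run_length(n - 2) > run_length(n - 1) + run_length(n).
    def run_invariant_established(run_lengths, n):
        if n < 2:
            return True
        return run_lengths[n - 2] > run_lengths[n - 1] + run_lengths[n]

    assert compute_min_run_length(63) == 63
    assert 32 <= compute_min_run_length(1000) <= 64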
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index 05c382e2f6..3ae98078f1 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -30,11 +30,11 @@ group("v8_android_test_runner_deps") {
if (is_android && !build_with_chromium) {
data_deps = [
- "../build/android:test_runner_py",
+ "//build/android:test_runner_py",
]
data = [
# This is used by android.py, but not included by test_runner_py above.
- "../third_party/catapult/devil/devil/android/perf/",
+ "//third_party/catapult/devil/devil/android/perf/",
]
}
}
diff --git a/deps/v8/tools/bigint-tester.py b/deps/v8/tools/bigint-tester.py
index d48d2546f9..2deab883fa 100755
--- a/deps/v8/tools/bigint-tester.py
+++ b/deps/v8/tools/bigint-tester.py
@@ -30,8 +30,6 @@ TEST_HEADER = """\
// found in the LICENSE file.
// Generated by %s.
-
-// Flags: --harmony-bigint
""" % sys.argv[0]
TEST_BODY = """
@@ -109,7 +107,7 @@ class TestGenerator(object):
with open(path, "w") as f:
f.write(self.EmitData(count))
f.write(self.EmitTestBody())
- return subprocess.call("%s --harmony-bigint %s" % (binary, path),
+ return subprocess.call("%s %s" % (binary, path),
shell=True)
finally:
os.close(fd)
diff --git a/deps/v8/tools/blink_tests/TestExpectations b/deps/v8/tools/blink_tests/TestExpectations
index 3655c5c93a..e6cc3d274f 100644
--- a/deps/v8/tools/blink_tests/TestExpectations
+++ b/deps/v8/tools/blink_tests/TestExpectations
@@ -3,3 +3,5 @@
# Turn off Slimming Paint tests on linux.
[ Linux ] virtual/slimmingpaint/ [ Skip ]
+# Several failures since https://crrev.com/c/1196547
+crbug.com/879604 external/wpt/cookies/samesite/ [ Skip ]
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index 2618b50b71..2afd0602d8 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -1507,6 +1507,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
this.groups = [
this.total,
Group.groups.get('ic').entry(),
+ Group.groups.get('optimize-background').entry(),
Group.groups.get('optimize').entry(),
Group.groups.get('compile-background').entry(),
Group.groups.get('compile').entry(),
@@ -1715,14 +1716,16 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
Group.add('total', new Group('Total', /.*Total.*/, '#BBB'));
Group.add('ic', new Group('IC', /.*IC_.*/, "#3366CC"));
+ Group.add('optimize-background', new Group('Optimize-Background',
+ /(.*OptimizeConcurrent.*)|RecompileConcurrent.*/, "#702000"));
Group.add('optimize', new Group('Optimize',
/StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"));
Group.add('compile-background', new Group('Compile-Background',
- /(.*CompileBackground.*)/, "#b9a720"));
+ /(.*CompileBackground.*)/, "#b08000"));
Group.add('compile', new Group('Compile',
/(^Compile.*)|(.*_Compile.*)/, "#FFAA00"));
Group.add('parse-background',
- new Group('Parse-Background', /.*ParseBackground.*/, "#af744d"));
+ new Group('Parse-Background', /.*ParseBackground.*/, "#c05000"));
Group.add('parse', new Group('Parse', /.*Parse.*/, "#FF6600"));
Group.add('callback', new Group('Blink C++', /.*Callback.*/, "#109618"));
Group.add('api', new Group('API', /.*API.*/, "#990099"));
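One detail of the callstats.html change above: the new Optimize-Background group is registered ahead of Optimize, which matters if entries are claimed by the first matching group. A quick Python check of the two regexps from the diff (the sample entry names are hypothetical):

    import re

    optimize_background = re.compile(r'(.*OptimizeConcurrent.*)|RecompileConcurrent.*')
    optimize = re.compile(r'StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*')

    # Hypothetical runtime-call-stats entry names, for illustration.
    for name in ['OptimizeConcurrentPrepare', 'RecompileConcurrent', 'OptimizeCode']:
        if optimize_background.search(name):
            print(name, '-> Optimize-Background')
        elif optimize.search(name):
            print(name, '-> Optimize')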
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
index 4ee4e83c02..91f8637acd 100755
--- a/deps/v8/tools/callstats.py
+++ b/deps/v8/tools/callstats.py
@@ -180,7 +180,7 @@ def run_site(site, domain, args, timeout=None):
user_data_dir = args.user_data_dir
else:
user_data_dir = tempfile.mkdtemp(prefix="chr_")
- js_flags = "--runtime-call-stats --noconcurrent-recompilation"
+ js_flags = "--runtime-call-stats"
if args.replay_wpr: js_flags += " --allow-natives-syntax"
if args.js_flags: js_flags += " " + args.js_flags
chrome_flags = get_chrome_flags(js_flags, user_data_dir)
@@ -218,7 +218,7 @@ def run_site(site, domain, args, timeout=None):
# Abort after too many retries, no point in ever increasing the
# timeout.
print("TOO MANY EMPTY RESULTS ABORTING RUN")
- break
+ return
timeout += 2 ** retries_since_good_run
retries_since_good_run += 1
print("EMPTY RESULT, REPEATING RUN ({})".format(
@@ -240,6 +240,8 @@ def read_sites_file(args):
if item['timeout'] > args.timeout: item['timeout'] = args.timeout
sites.append(item)
except ValueError:
+ args.error("Warning: Could not read sites file as JSON, falling back to "
+ "primitive file")
with open(args.sites_file, "rt") as f:
for line in f:
line = line.strip()
@@ -349,11 +351,22 @@ def statistics(data):
'stddev': stddev, 'min': low, 'max': high, 'ci': ci }
+def add_category_total(entries, groups, category_prefix):
+ group_data = { 'time': 0, 'count': 0 }
+ for group_name, regexp in groups:
+ if not group_name.startswith('Group-' + category_prefix): continue
+ group_data['time'] += entries[group_name]['time']
+ group_data['count'] += entries[group_name]['count']
+ entries['Group-' + category_prefix + '-Total'] = group_data
+
+
def read_stats(path, domain, args):
groups = [];
if args.aggregate:
groups = [
('Group-IC', re.compile(".*IC_.*")),
+ ('Group-OptimizeBackground',
+ re.compile(".*OptimizeConcurrent.*|RecompileConcurrent.*")),
('Group-Optimize',
re.compile("StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*")),
('Group-CompileBackground', re.compile("(.*CompileBackground.*)")),
@@ -405,20 +418,10 @@ def read_stats(path, domain, args):
group_data['time'] += entries[group_name]['time']
group_data['count'] += entries[group_name]['count']
entries['Group-Total-V8'] = group_data
- # Calculate the Parse-Total group
- group_data = { 'time': 0, 'count': 0 }
- for group_name, regexp in groups:
- if not group_name.startswith('Group-Parse'): continue
- group_data['time'] += entries[group_name]['time']
- group_data['count'] += entries[group_name]['count']
- entries['Group-Parse-Total'] = group_data
- # Calculate the Compile-Total group
- group_data = { 'time': 0, 'count': 0 }
- for group_name, regexp in groups:
- if not group_name.startswith('Group-Compile'): continue
- group_data['time'] += entries[group_name]['time']
- group_data['count'] += entries[group_name]['count']
- entries['Group-Compile-Total'] = group_data
+ # Calculate the Parse-Total, Compile-Total and Optimize-Total groups
+ add_category_total(entries, groups, 'Parse')
+ add_category_total(entries, groups, 'Compile')
+ add_category_total(entries, groups, 'Optimize')
# Append the sums as single entries to domain.
for key in entries:
if key not in domain: domain[key] = { 'time_list': [], 'count_list': [] }
@@ -651,7 +654,7 @@ def main():
"-l", "--log-stderr", type=str, metavar="<path>",
help="specify where chrome's stderr should go (default: /dev/null)")
subparser.add_argument(
- "sites", type=str, metavar="<URL>", nargs="*",
+ "--sites", type=str, metavar="<URL>", nargs="*",
help="specify benchmark website")
add_replay_args(subparsers["run"])
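The add_category_total helper introduced above folds the three copied aggregation loops into one. A small usage sketch with toy data (the entry names are illustrative):

    def add_category_total(entries, groups, category_prefix):
        group_data = {'time': 0, 'count': 0}
        for group_name, regexp in groups:
            if not group_name.startswith('Group-' + category_prefix):
                continue
            group_data['time'] += entries[group_name]['time']
            group_data['count'] += entries[group_name]['count']
        entries['Group-' + category_prefix + '-Total'] = group_data

    entries = {'Group-Compile': {'time': 10, 'count': 2},
               'Group-CompileBackground': {'time': 5, 'count': 1}}
    groups = [('Group-Compile', None), ('Group-CompileBackground', None)]
    add_category_total(entries, groups, 'Compile')
    assert entries['Group-Compile-Total'] == {'time': 15, 'count': 3}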
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 9bc1dd66d5..b98a92d266 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -193,9 +193,9 @@ consts_misc = [
'value': 'ScopeInfo::kVariablePartIndex' },
{ 'name': 'jsarray_buffer_was_neutered_mask',
- 'value': 'JSArrayBuffer::WasNeutered::kMask' },
+ 'value': 'JSArrayBuffer::WasNeuteredBit::kMask' },
{ 'name': 'jsarray_buffer_was_neutered_shift',
- 'value': 'JSArrayBuffer::WasNeutered::kShift' },
+ 'value': 'JSArrayBuffer::WasNeuteredBit::kShift' },
{ 'name': 'context_idx_scope_info',
'value': 'Context::SCOPE_INFO_INDEX' },
@@ -250,7 +250,9 @@ extras_accessors = [
'FixedArray, data, uintptr_t, kHeaderSize',
'FixedTypedArrayBase, external_pointer, Object, kExternalPointerOffset',
'JSArrayBuffer, backing_store, Object, kBackingStoreOffset',
- 'JSArrayBufferView, byte_offset, Object, kByteOffsetOffset',
+ 'JSArrayBuffer, byte_length, size_t, kByteLengthOffset',
+ 'JSArrayBufferView, byte_length, size_t, kByteLengthOffset',
+ 'JSArrayBufferView, byte_offset, size_t, kByteOffsetOffset',
'JSTypedArray, length, Object, kLengthOffset',
'Map, instance_size_in_words, char, kInstanceSizeInWordsOffset',
'Map, inobject_properties_start_or_constructor_function_index, char, kInObjectPropertiesStartOrConstructorFunctionIndexOffset',
@@ -275,6 +277,7 @@ extras_accessors = [
'SlicedString, parent, String, kParentOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
+ 'String, length, int32_t, kLengthOffset',
];
#
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index ca3b0079ca..7ff52dd740 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -34,7 +34,6 @@ AUTO_EXCLUDE = [
'src/compiler/js-context-specialization.h',
'src/compiler/raw-machine-assembler.h',
'src/dateparser-inl.h',
- 'src/heap/incremental-marking.h',
'src/ic/ic.h',
'src/lookup.h',
'src/parsing/parser.h',
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 60215857c0..34689316af 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -1715,9 +1715,9 @@ class V8Heap(object):
"EXTERNAL_SYMBOL_TYPE": ExternalString,
"EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
"EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
- "SHORT_EXTERNAL_SYMBOL_TYPE": ExternalString,
- "SHORT_EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
- "SHORT_EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
+ "UNCACHED_EXTERNAL_SYMBOL_TYPE": ExternalString,
+ "UNCACHED_EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+ "UNCACHED_EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
"STRING_TYPE": SeqString,
"ONE_BYTE_STRING_TYPE": SeqString,
"CONS_STRING_TYPE": ConsString,
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index 4ede5e9a45..63b99aae7e 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -70,10 +70,10 @@ const CATEGORIES = new Map([
'ONE_BYTE_STRING_TYPE',
'OTHER_CONTEXT_TYPE',
'PROPERTY_ARRAY_TYPE',
- 'SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE',
- 'SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
- 'SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE',
- 'SHORT_EXTERNAL_STRING_TYPE',
+ 'UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE',
+ 'UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
+ 'UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE',
+ 'UNCACHED_EXTERNAL_STRING_TYPE',
'SLICED_ONE_BYTE_STRING_TYPE',
'SLICED_STRING_TYPE',
'STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE',
diff --git a/deps/v8/tools/node/fetch_deps.py b/deps/v8/tools/node/fetch_deps.py
index 26b9d6a72f..332c6e2d7d 100755
--- a/deps/v8/tools/node/fetch_deps.py
+++ b/deps/v8/tools/node/fetch_deps.py
@@ -32,12 +32,15 @@ GCLIENT_SOLUTION = [
"v8/test/mozilla/data" : None,
"v8/test/test262/data" : None,
"v8/test/test262/harness" : None,
+ "v8/third_party/android_ndk" : None,
"v8/third_party/android_tools" : None,
"v8/third_party/catapult" : None,
"v8/third_party/colorama/src" : None,
+ "v8/third_party/fuchsia-sdk" : None,
"v8/third_party/instrumented_libraries" : None,
"v8/tools/luci-go" : None,
"v8/tools/swarming_client" : None,
+ "v8/third_party/qemu-linux-x64" : None,
},
},
]
diff --git a/deps/v8/tools/node/update_node.py b/deps/v8/tools/node/update_node.py
index 759e9d5aac..fb3c2a0aec 100755
--- a/deps/v8/tools/node/update_node.py
+++ b/deps/v8/tools/node/update_node.py
@@ -34,17 +34,12 @@ import node_common
TARGET_SUBDIR = os.path.join("deps", "v8")
SUB_REPOSITORIES = [ ["base", "trace_event", "common"],
- ["third_party", "googletest", "src"],
- ["third_party", "jinja2"],
- ["third_party", "markupsafe"] ]
+ ["third_party", "googletest", "src"] ]
DELETE_FROM_GITIGNORE = [ "/base",
- "/third_party/googletest/src",
- "/third_party/jinja2",
- "/third_party/markupsafe" ]
+ "/third_party/googletest/src" ]
# Node.js requires only a single header file from gtest to build V8.
-# Both jinja2 and markupsafe are required to generate part of the inspector.
ADD_TO_GITIGNORE = [ "/third_party/googletest/*",
"!/third_party/googletest/BUILD.gn",
"!/third_party/googletest/src",
@@ -55,9 +50,7 @@ ADD_TO_GITIGNORE = [ "/third_party/googletest/*",
"/third_party/googletest/src/googletest/include/*",
"!/third_party/googletest/src/googletest/include/gtest",
"/third_party/googletest/src/googletest/include/gtest/*",
- "!/third_party/googletest/src/googletest/include/gtest/gtest_prod.h",
- "!/third_party/jinja2",
- "!/third_party/markupsafe" ]
+ "!/third_party/googletest/src/googletest/include/gtest/gtest_prod.h" ]
# Node.js owns deps/v8/gypfiles in their downstream repository.
FILES_TO_KEEP = [ "gypfiles" ]
diff --git a/deps/v8/tools/perf-to-html.py b/deps/v8/tools/perf-to-html.py
deleted file mode 100755
index e3979360a7..0000000000
--- a/deps/v8/tools/perf-to-html.py
+++ /dev/null
@@ -1,381 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-'''
-python %prog
-
-Convert a perf trybot JSON file into a pleasing HTML page. It can read
-from standard input or via the --filename option. Examples:
-
- cat results.json | %prog --title "ia32 results"
- %prog -f results.json -t "ia32 results" -o results.html
-'''
-
-import json
-import math
-from optparse import OptionParser
-import os
-import shutil
-import sys
-import tempfile
-
-PERCENT_CONSIDERED_SIGNIFICANT = 0.5
-PROBABILITY_CONSIDERED_SIGNIFICANT = 0.02
-PROBABILITY_CONSIDERED_MEANINGLESS = 0.05
-
-
-def ComputeZ(baseline_avg, baseline_sigma, mean, n):
- if baseline_sigma == 0:
- return 1000.0;
- return abs((mean - baseline_avg) / (baseline_sigma / math.sqrt(n)))
-
-
-# Values from http://www.fourmilab.ch/rpkp/experiments/analysis/zCalc.html
-def ComputeProbability(z):
- if z > 2.575829: # p 0.005: two sided < 0.01
- return 0
- if z > 2.326348: # p 0.010
- return 0.01
- if z > 2.170091: # p 0.015
- return 0.02
- if z > 2.053749: # p 0.020
- return 0.03
- if z > 1.959964: # p 0.025: two sided < 0.05
- return 0.04
- if z > 1.880793: # p 0.030
- return 0.05
- if z > 1.811910: # p 0.035
- return 0.06
- if z > 1.750686: # p 0.040
- return 0.07
- if z > 1.695397: # p 0.045
- return 0.08
- if z > 1.644853: # p 0.050: two sided < 0.10
- return 0.09
- if z > 1.281551: # p 0.100: two sided < 0.20
- return 0.10
- return 0.20 # two sided p >= 0.20
-
-
-class Result:
- def __init__(self, test_name, count, hasScoreUnits, result, sigma,
- master_result, master_sigma):
- self.result_ = float(result)
- self.sigma_ = float(sigma)
- self.master_result_ = float(master_result)
- self.master_sigma_ = float(master_sigma)
- self.significant_ = False
- self.notable_ = 0
- self.percentage_string_ = ""
- # compute notability and significance.
- try:
- if hasScoreUnits:
- compare_num = 100*self.result_/self.master_result_ - 100
- else:
- compare_num = 100*self.master_result_/self.result_ - 100
- if abs(compare_num) > 0.1:
- self.percentage_string_ = "%3.1f" % (compare_num)
- z = ComputeZ(self.master_result_, self.master_sigma_, self.result_, count)
- p = ComputeProbability(z)
- if p < PROBABILITY_CONSIDERED_SIGNIFICANT:
- self.significant_ = True
- if compare_num >= PERCENT_CONSIDERED_SIGNIFICANT:
- self.notable_ = 1
- elif compare_num <= -PERCENT_CONSIDERED_SIGNIFICANT:
- self.notable_ = -1
- except ZeroDivisionError:
- self.percentage_string_ = "NaN"
- self.significant_ = True
-
- def result(self):
- return self.result_
-
- def sigma(self):
- return self.sigma_
-
- def master_result(self):
- return self.master_result_
-
- def master_sigma(self):
- return self.master_sigma_
-
- def percentage_string(self):
- return self.percentage_string_;
-
- def isSignificant(self):
- return self.significant_
-
- def isNotablyPositive(self):
- return self.notable_ > 0
-
- def isNotablyNegative(self):
- return self.notable_ < 0
-
-
-class Benchmark:
- def __init__(self, name, data):
- self.name_ = name
- self.tests_ = {}
- for test in data:
- # strip off "<name>/" prefix, allowing for subsequent "/"s
- test_name = test.split("/", 1)[1]
- self.appendResult(test_name, data[test])
-
- # tests is a dictionary of Results
- def tests(self):
- return self.tests_
-
- def SortedTestKeys(self):
- keys = self.tests_.keys()
- keys.sort()
- t = "Total"
- if t in keys:
- keys.remove(t)
- keys.append(t)
- return keys
-
- def name(self):
- return self.name_
-
- def appendResult(self, test_name, test_data):
- with_string = test_data["result with patch "]
- data = with_string.split()
- master_string = test_data["result without patch"]
- master_data = master_string.split()
- runs = int(test_data["runs"])
- units = test_data["units"]
- hasScoreUnits = units == "score"
- self.tests_[test_name] = Result(test_name,
- runs,
- hasScoreUnits,
- data[0], data[2],
- master_data[0], master_data[2])
-
-
-class BenchmarkRenderer:
- def __init__(self, output_file):
- self.print_output_ = []
- self.output_file_ = output_file
-
- def Print(self, str_data):
- self.print_output_.append(str_data)
-
- def FlushOutput(self):
- string_data = "\n".join(self.print_output_)
- print_output = []
- if self.output_file_:
- # create a file
- with open(self.output_file_, "w") as text_file:
- text_file.write(string_data)
- else:
- print(string_data)
-
- def RenderOneBenchmark(self, benchmark):
- self.Print("<h2>")
- self.Print("<a name=\"" + benchmark.name() + "\">")
- self.Print(benchmark.name() + "</a> <a href=\"#top\">(top)</a>")
- self.Print("</h2>");
- self.Print("<table class=\"benchmark\">")
- self.Print("<thead>")
- self.Print(" <th>Test</th>")
- self.Print(" <th>Result</th>")
- self.Print(" <th>Master</th>")
- self.Print(" <th>%</th>")
- self.Print("</thead>")
- self.Print("<tbody>")
- tests = benchmark.tests()
- for test in benchmark.SortedTestKeys():
- t = tests[test]
- self.Print(" <tr>")
- self.Print(" <td>" + test + "</td>")
- self.Print(" <td>" + str(t.result()) + "</td>")
- self.Print(" <td>" + str(t.master_result()) + "</td>")
- t = tests[test]
- res = t.percentage_string()
- if t.isSignificant():
- res = self.bold(res)
- if t.isNotablyPositive():
- res = self.green(res)
- elif t.isNotablyNegative():
- res = self.red(res)
- self.Print(" <td>" + res + "</td>")
- self.Print(" </tr>")
- self.Print("</tbody>")
- self.Print("</table>")
-
- def ProcessJSONData(self, data, title):
- self.Print("<h1>" + title + "</h1>")
- self.Print("<ul>")
- for benchmark in data:
- if benchmark != "errors":
- self.Print("<li><a href=\"#" + benchmark + "\">" + benchmark + "</a></li>")
- self.Print("</ul>")
- for benchmark in data:
- if benchmark != "errors":
- benchmark_object = Benchmark(benchmark, data[benchmark])
- self.RenderOneBenchmark(benchmark_object)
-
- def bold(self, data):
- return "<b>" + data + "</b>"
-
- def red(self, data):
- return "<font color=\"red\">" + data + "</font>"
-
-
- def green(self, data):
- return "<font color=\"green\">" + data + "</font>"
-
- def PrintHeader(self):
- data = """<html>
-<head>
-<title>Output</title>
-<style type="text/css">
-/*
-Style inspired by Andy Ferra's gist at https://gist.github.com/andyferra/2554919
-*/
-body {
- font-family: Helvetica, arial, sans-serif;
- font-size: 14px;
- line-height: 1.6;
- padding-top: 10px;
- padding-bottom: 10px;
- background-color: white;
- padding: 30px;
-}
-h1, h2, h3, h4, h5, h6 {
- margin: 20px 0 10px;
- padding: 0;
- font-weight: bold;
- -webkit-font-smoothing: antialiased;
- cursor: text;
- position: relative;
-}
-h1 {
- font-size: 28px;
- color: black;
-}
-
-h2 {
- font-size: 24px;
- border-bottom: 1px solid #cccccc;
- color: black;
-}
-
-h3 {
- font-size: 18px;
-}
-
-h4 {
- font-size: 16px;
-}
-
-h5 {
- font-size: 14px;
-}
-
-h6 {
- color: #777777;
- font-size: 14px;
-}
-
-p, blockquote, ul, ol, dl, li, table, pre {
- margin: 15px 0;
-}
-
-li p.first {
- display: inline-block;
-}
-
-ul, ol {
- padding-left: 30px;
-}
-
-ul :first-child, ol :first-child {
- margin-top: 0;
-}
-
-ul :last-child, ol :last-child {
- margin-bottom: 0;
-}
-
-table {
- padding: 0;
-}
-
-table tr {
- border-top: 1px solid #cccccc;
- background-color: white;
- margin: 0;
- padding: 0;
-}
-
-table tr:nth-child(2n) {
- background-color: #f8f8f8;
-}
-
-table tr th {
- font-weight: bold;
- border: 1px solid #cccccc;
- text-align: left;
- margin: 0;
- padding: 6px 13px;
-}
-table tr td {
- border: 1px solid #cccccc;
- text-align: left;
- margin: 0;
- padding: 6px 13px;
-}
-table tr th :first-child, table tr td :first-child {
- margin-top: 0;
-}
-table tr th :last-child, table tr td :last-child {
- margin-bottom: 0;
-}
-</style>
-</head>
-<body>
-"""
- self.Print(data)
-
- def PrintFooter(self):
- data = """</body>
-</html>
-"""
- self.Print(data)
-
-
-def Render(opts, args):
- if opts.filename:
- with open(opts.filename) as json_data:
- data = json.load(json_data)
- else:
- # load data from stdin
- data = json.load(sys.stdin)
-
- if opts.title:
- title = opts.title
- elif opts.filename:
- title = opts.filename
- else:
- title = "Benchmark results"
- renderer = BenchmarkRenderer(opts.output)
- renderer.PrintHeader()
- renderer.ProcessJSONData(data, title)
- renderer.PrintFooter()
- renderer.FlushOutput()
-
-
-if __name__ == '__main__':
- parser = OptionParser(usage=__doc__)
- parser.add_option("-f", "--filename", dest="filename",
- help="Specifies the filename for the JSON results "
- "rather than reading from stdin.")
- parser.add_option("-t", "--title", dest="title",
- help="Optional title of the web page.")
- parser.add_option("-o", "--output", dest="output",
- help="Write html output to this file rather than stdout.")
-
- (opts, args) = parser.parse_args()
- Render(opts, args)
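For reference, the significance test the deleted script applied: ComputeZ is the one-sample z statistic z = |mean - baseline_avg| / (baseline_sigma / sqrt(n)), and ComputeProbability converts it to an approximate two-sided p-value via the lookup table above; a result was flagged significant when p < PROBABILITY_CONSIDERED_SIGNIFICANT (0.02). A worked example in Python (the sample values are made up):

    import math

    def compute_z(baseline_avg, baseline_sigma, mean, n):
        if baseline_sigma == 0:
            return 1000.0
        return abs((mean - baseline_avg) / (baseline_sigma / math.sqrt(n)))

    # A 2-point score change with sigma 1.0 over 10 runs is clearly significant:
    assert round(compute_z(100.0, 1.0, 102.0, 10), 2) == 6.32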
diff --git a/deps/v8/tools/profile.js b/deps/v8/tools/profile.js
index 74b4b3bf66..ef362712fe 100644
--- a/deps/v8/tools/profile.js
+++ b/deps/v8/tools/profile.js
@@ -892,16 +892,24 @@ JsonProfile.prototype.addStaticCode = function(
JsonProfile.prototype.addCode = function(
kind, name, timestamp, start, size) {
+ let codeId = this.codeEntries_.length;
+ // Find out if we have a static code entry for the code. If yes, we will
+ // make sure it is written to the JSON file just once.
+ let staticEntry = this.codeMap_.findAddress(start);
+ if (staticEntry && staticEntry.entry.type === 'CPP') {
+ codeId = staticEntry.entry.codeId;
+ }
+
var entry = new CodeMap.CodeEntry(size, name, 'CODE');
this.codeMap_.addCode(start, entry);
- entry.codeId = this.codeEntries_.length;
- this.codeEntries_.push({
+ entry.codeId = codeId;
+ this.codeEntries_[codeId] = {
name : entry.name,
timestamp: timestamp,
type : entry.type,
kind : kind
- });
+ };
return entry;
};
@@ -975,7 +983,7 @@ JsonProfile.prototype.addSourcePositions = function(
if (!entry) return;
var codeId = entry.codeId;
- // Resolve the inlined fucntions list.
+ // Resolve the inlined functions list.
if (inlinedFunctions.length > 0) {
inlinedFunctions = inlinedFunctions.substring(1).split("S");
for (var i = 0; i < inlinedFunctions.length; i++) {
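The profile.js change above avoids emitting a duplicate record when dynamic code lands on an address that already has a static (CPP) entry: the existing codeId is reused, so the later write overwrites the same slot instead of appending. A simplified Python sketch of the idea (the data shapes are assumptions, not the tool's real structures):

    code_entries = []   # JSON-side list of code records
    code_map = {}       # start address -> entry, standing in for CodeMap

    def add_code(start, name, kind, timestamp):
        prior = code_map.get(start)
        if prior is not None and prior['type'] == 'CPP':
            code_id = prior['codeId']        # reuse the static entry's slot
        else:
            code_id = len(code_entries)
            code_entries.append(None)        # reserve a new slot
        code_map[start] = {'name': name, 'type': 'CODE', 'codeId': code_id}
        code_entries[code_id] = {'name': name, 'timestamp': timestamp,
                                 'type': 'CODE', 'kind': kind}

    # Pre-seeded static entry (as addStaticCode would have written it):
    code_map[0x100] = {'name': 'cpp-stub', 'type': 'CPP', 'codeId': 0}
    code_entries.append({'name': 'cpp-stub', 'type': 'CPP'})

    add_code(0x100, 'cpp-stub', 'BUILTIN', timestamp=1)
    assert len(code_entries) == 1        # slot overwritten, not appended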
diff --git a/deps/v8/tools/profview/index.html b/deps/v8/tools/profview/index.html
index 957b6d04d6..8695a41e3a 100644
--- a/deps/v8/tools/profview/index.html
+++ b/deps/v8/tools/profview/index.html
@@ -22,7 +22,7 @@ found in the LICENSE file. -->
Chrome V8 profiling log processor
</h3>
-<input type="file" id="fileinput" />
+<input type="file" id="fileinput" /><div id="source-status"></div>
<br>
<hr>
@@ -59,6 +59,10 @@ found in the LICENSE file. -->
</table>
<div>
Current code object: <span id="timeline-currentCode"></span>
+ <button id="source-viewer-hide-button">Hide source</button>
+ </div>
+ <div>
+ <table id="source-viewer"> </table>
</div>
</div>
diff --git a/deps/v8/tools/profview/profile-utils.js b/deps/v8/tools/profview/profile-utils.js
index f5a85bed8d..4be55893dd 100644
--- a/deps/v8/tools/profview/profile-utils.js
+++ b/deps/v8/tools/profview/profile-utils.js
@@ -93,9 +93,10 @@ function codeEquals(code1, code2, allowDifferentKinds = false) {
function createNodeFromStackEntry(code, codeId, vmState) {
let name = code ? code.name : "UNKNOWN";
-
- return { name, codeId, type : resolveCodeKindAndVmState(code, vmState),
- children : [], ownTicks : 0, ticks : 0 };
+ let node = createEmptyNode(name);
+ node.codeId = codeId;
+ node.type = resolveCodeKindAndVmState(code, vmState);
+ return node;
}
function childIdFromCode(codeId, code) {
@@ -148,29 +149,30 @@ function findNextFrame(file, stack, stackPos, step, filter) {
}
function addOrUpdateChildNode(parent, file, stackIndex, stackPos, ascending) {
- let stack = file.ticks[stackIndex].s;
- let vmState = file.ticks[stackIndex].vm;
- let codeId = stack[stackPos];
- let code = codeId >= 0 ? file.code[codeId] : undefined;
if (stackPos === -1) {
// We reached the end without finding the next step.
// If we are doing top-down call tree, update own ticks.
if (!ascending) {
parent.ownTicks++;
}
- } else {
- console.assert(stackPos >= 0 && stackPos < stack.length);
- // We found a child node.
- let childId = childIdFromCode(codeId, code);
- let child = parent.children[childId];
- if (!child) {
- child = createNodeFromStackEntry(code, codeId, vmState);
- child.delayedExpansion = { frameList : [], ascending };
- parent.children[childId] = child;
- }
- child.ticks++;
- addFrameToFrameList(child.delayedExpansion.frameList, stackIndex, stackPos);
+ return;
+ }
+
+ let stack = file.ticks[stackIndex].s;
+ console.assert(stackPos >= 0 && stackPos < stack.length);
+ let codeId = stack[stackPos];
+ let code = codeId >= 0 ? file.code[codeId] : undefined;
+ // We found a child node.
+ let childId = childIdFromCode(codeId, code);
+ let child = parent.children[childId];
+ if (!child) {
+ let vmState = file.ticks[stackIndex].vm;
+ child = createNodeFromStackEntry(code, codeId, vmState);
+ child.delayedExpansion = { frameList : [], ascending };
+ parent.children[childId] = child;
}
+ child.ticks++;
+ addFrameToFrameList(child.delayedExpansion.frameList, stackIndex, stackPos);
}
// This expands a tree node (direct children only).
@@ -314,13 +316,7 @@ class FunctionListTree {
this.tree = root;
this.categories = categories;
} else {
- this.tree = {
- name : "root",
- codeId: -1,
- children : [],
- ownTicks : 0,
- ticks : 0
- };
+ this.tree = createEmptyNode("root");
this.categories = null;
}
@@ -339,7 +335,7 @@ class FunctionListTree {
let codeId = stack[i];
if (codeId < 0 || this.codeVisited[codeId]) continue;
- let code = codeId >= 0 ? file.code[codeId] : undefined;
+ let code = file.code[codeId];
if (this.filter) {
let type = code ? code.type : undefined;
let kind = code ? code.kind : undefined;
@@ -601,3 +597,15 @@ function computeOptimizationStats(file,
softDeoptimizations,
};
}
+
+function normalizeLeadingWhitespace(lines) {
+ let regex = /^\s*/;
+ let minimumLeadingWhitespaceChars = Infinity;
+ for (let line of lines) {
+ minimumLeadingWhitespaceChars =
+ Math.min(minimumLeadingWhitespaceChars, regex.exec(line)[0].length);
+ }
+ for (let i = 0; i < lines.length; i++) {
+ lines[i] = lines[i].substring(minimumLeadingWhitespaceChars);
+ }
+}
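normalizeLeadingWhitespace above left-aligns a function body by trimming the smallest leading-whitespace run shared by all lines. A rough Python equivalent (a sketch; unlike textwrap.dedent it counts the shortest whitespace prefix, matching the JS helper):

    import re

    def normalize_leading_whitespace(lines):
        strip = min(len(re.match(r'\s*', line).group(0)) for line in lines)
        return [line[strip:] for line in lines]

    assert normalize_leading_whitespace(['    a', '      b']) == ['a', '  b']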
diff --git a/deps/v8/tools/profview/profview.css b/deps/v8/tools/profview/profview.css
index eb77ef14d8..ca39745f4b 100644
--- a/deps/v8/tools/profview/profview.css
+++ b/deps/v8/tools/profview/profview.css
@@ -19,6 +19,10 @@ body {
font-family: 'Roboto', sans-serif;
}
+#source-status {
+ display: inline-block;
+}
+
.tree-row-arrow {
margin-right: 0.2em;
text-align: right;
@@ -35,6 +39,7 @@ body {
.tree-row-name {
margin-left: 0.2em;
+ margin-right: 0.2em;
}
.codeid-link {
@@ -42,6 +47,54 @@ body {
cursor: pointer;
}
+.view-source-link {
+ text-decoration: underline;
+ cursor: pointer;
+ font-size: 10pt;
+ margin-left: 0.6em;
+ color: #555555;
+}
+
+#source-viewer {
+ border: 1px solid black;
+ padding: 0.2em;
+ font-family: 'Roboto Mono', monospace;
+ white-space: pre;
+ margin-top: 1em;
+ margin-bottom: 1em;
+}
+
+#source-viewer td.line-none {
+ background-color: white;
+}
+
+#source-viewer td.line-cold {
+ background-color: #e1f5fe;
+}
+
+#source-viewer td.line-mediumcold {
+ background-color: #b2ebf2;
+}
+
+#source-viewer td.line-mediumhot {
+ background-color: #c5e1a5;
+}
+
+#source-viewer td.line-hot {
+ background-color: #dce775;
+}
+
+#source-viewer td.line-superhot {
+ background-color: #ffee58;
+}
+
+#source-viewer .source-line-number {
+ padding-left: 0.2em;
+ padding-right: 0.2em;
+ color: #003c8f;
+ background-color: #eceff1;
+}
+
div.mode-button {
padding: 1em 3em;
display: inline-block;
diff --git a/deps/v8/tools/profview/profview.js b/deps/v8/tools/profview/profview.js
index e976b00be3..5bd64a49bd 100644
--- a/deps/v8/tools/profview/profview.js
+++ b/deps/v8/tools/profview/profview.js
@@ -8,6 +8,12 @@ function $(id) {
return document.getElementById(id);
}
+function removeAllChildren(element) {
+ while (element.firstChild) {
+ element.removeChild(element.firstChild);
+ }
+}
+
let components;
function createViews() {
components = [
@@ -16,6 +22,7 @@ function createViews() {
new HelpView(),
new SummaryView(),
new ModeBarView(),
+ new ScriptSourceView(),
];
}
@@ -24,6 +31,7 @@ function emptyState() {
file : null,
mode : null,
currentCodeId : null,
+ viewingSource: false,
start : 0,
end : Infinity,
timelineSize : {
@@ -34,7 +42,8 @@ function emptyState() {
attribution : "js-exclude-bc",
categories : "code-type",
sort : "time"
- }
+ },
+ sourceData: null
};
}
@@ -119,11 +128,27 @@ let main = {
}
},
+ updateSources(file) {
+ let statusDiv = $("source-status");
+ if (!file) {
+ statusDiv.textContent = "";
+ return;
+ }
+ if (!file.scripts || file.scripts.length === 0) {
+ statusDiv.textContent =
+ "Script source not available. Run profiler with --log-source-code.";
+ return;
+ }
+ statusDiv.textContent = "Script source is available.";
+ main.currentState.sourceData = new SourceData(file);
+ },
+
setFile(file) {
if (file !== main.currentState.file) {
let lastMode = main.currentState.mode || "summary";
main.currentState = emptyState();
main.currentState.file = file;
+ main.updateSources(file);
main.setMode(lastMode);
main.delayRender();
}
@@ -137,6 +162,14 @@ let main = {
}
},
+ setViewingSource(value) {
+ if (main.currentState.viewingSource !== value) {
+ main.currentState = Object.assign({}, main.currentState);
+ main.currentState.viewingSource = value;
+ main.delayRender();
+ }
+ },
+
onResize() {
main.delayRender();
},
@@ -328,6 +361,20 @@ function createFunctionNode(name, codeId) {
return nameElement;
}
+function createViewSourceNode(codeId) {
+ let linkElement = document.createElement("span");
+ linkElement.appendChild(document.createTextNode("View source"));
+ linkElement.classList.add("view-source-link");
+ linkElement.onclick = (event) => {
+ main.setCurrentCode(codeId);
+ main.setViewingSource(true);
+ // Prevent the click from bubbling to the row and causing it to
+ // collapse/expand.
+ event.stopPropagation();
+ };
+ return linkElement;
+}
+
const COLLAPSED_ARROW = "\u25B6";
const EXPANDED_ARROW = "\u25BC";
@@ -448,6 +495,10 @@ class CallTreeView {
nameCell.appendChild(arrow);
nameCell.appendChild(createTypeNode(node.type));
nameCell.appendChild(createFunctionNode(node.name, node.codeId));
+ if (main.currentState.sourceData &&
+ main.currentState.sourceData.hasSource(node.name)) {
+ nameCell.appendChild(createViewSourceNode(node.codeId));
+ }
// Inclusive ticks cell.
c = row.insertCell();
@@ -793,8 +844,8 @@ class TimelineView {
return;
}
- let width = Math.round(window.innerWidth - 20);
- let height = Math.round(window.innerHeight / 5);
+ let width = Math.round(document.documentElement.clientWidth - 20);
+ let height = Math.round(document.documentElement.clientHeight / 5);
if (oldState) {
if (width === oldState.timelineSize.width &&
@@ -1010,9 +1061,7 @@ class TimelineView {
cell.appendChild(document.createTextNode(" " + desc.text));
}
- while (this.currentCode.firstChild) {
- this.currentCode.removeChild(this.currentCode.firstChild);
- }
+ removeAllChildren(this.currentCode);
if (currentCodeId) {
let currentCode = file.code[currentCodeId];
this.currentCode.appendChild(document.createTextNode(currentCode.name));
@@ -1083,10 +1132,7 @@ class SummaryView {
}
this.element.style.display = "inherit";
-
- while (this.element.firstChild) {
- this.element.removeChild(this.element.firstChild);
- }
+ removeAllChildren(this.element);
let stats = computeOptimizationStats(
this.currentState.file, newState.start, newState.end);
@@ -1237,6 +1283,217 @@ class SummaryView {
}
}
+class ScriptSourceView {
+ constructor() {
+ this.table = $("source-viewer");
+ this.hideButton = $("source-viewer-hide-button");
+ this.hideButton.onclick = () => {
+ main.setViewingSource(false);
+ };
+ }
+
+ render(newState) {
+ let oldState = this.currentState;
+ if (!newState.file || !newState.viewingSource) {
+ this.table.style.display = "none";
+ this.hideButton.style.display = "none";
+ this.currentState = null;
+ return;
+ }
+ if (oldState) {
+ if (newState.file === oldState.file &&
+ newState.currentCodeId === oldState.currentCodeId &&
+ newState.viewingSource === oldState.viewingSource) {
+ // No change, nothing to do.
+ return;
+ }
+ }
+ this.currentState = newState;
+
+ this.table.style.display = "inline-block";
+ this.hideButton.style.display = "inline";
+ removeAllChildren(this.table);
+
+ let functionId =
+ this.currentState.file.code[this.currentState.currentCodeId].func;
+ let sourceView =
+ this.currentState.sourceData.generateSourceView(functionId);
+ for (let i = 0; i < sourceView.source.length; i++) {
+ let sampleCount = sourceView.lineSampleCounts[i] || 0;
+ let sampleProportion = sourceView.samplesTotal > 0 ?
+ sampleCount / sourceView.samplesTotal : 0;
+ let heatBucket;
+ if (sampleProportion === 0) {
+ heatBucket = "line-none";
+ } else if (sampleProportion < 0.2) {
+ heatBucket = "line-cold";
+ } else if (sampleProportion < 0.4) {
+ heatBucket = "line-mediumcold";
+ } else if (sampleProportion < 0.6) {
+ heatBucket = "line-mediumhot";
+ } else if (sampleProportion < 0.8) {
+ heatBucket = "line-hot";
+ } else {
+ heatBucket = "line-superhot";
+ }
+
+ let row = this.table.insertRow(-1);
+
+ let lineNumberCell = row.insertCell(-1);
+ lineNumberCell.classList.add("source-line-number");
+ lineNumberCell.textContent = i + sourceView.firstLineNumber;
+
+ let sampleCountCell = row.insertCell(-1);
+ sampleCountCell.classList.add(heatBucket);
+ sampleCountCell.textContent = sampleCount;
+
+ let sourceLineCell = row.insertCell(-1);
+ sourceLineCell.classList.add(heatBucket);
+ sourceLineCell.textContent = sourceView.source[i];
+ }
+
+ $("timeline-currentCode").scrollIntoView();
+ }
+}
+
+class SourceData {
+ constructor(file) {
+ this.scripts = new Map();
+ for (let i = 0; i < file.scripts.length; i++) {
+ const scriptBlock = file.scripts[i];
+ if (scriptBlock === null) continue; // Array may be sparse.
+ let source = scriptBlock.source.split("\n");
+ this.scripts.set(i, source);
+ }
+
+ this.functions = new Map();
+ for (let codeId = 0; codeId < file.code.length; ++codeId) {
+ let codeBlock = file.code[codeId];
+ if (codeBlock.source) {
+ let data = this.functions.get(codeBlock.func);
+ if (!data) {
+ data = new FunctionSourceData(codeBlock.source.script,
+ codeBlock.source.start,
+ codeBlock.source.end);
+ this.functions.set(codeBlock.func, data);
+ }
+ data.addSourceBlock(codeId, codeBlock.source);
+ }
+ }
+
+ for (let tick of file.ticks) {
+ let stack = tick.s;
+ for (let i = 0; i < stack.length; i += 2) {
+ let codeId = stack[i];
+ if (codeId < 0) continue;
+      let functionId = file.code[codeId].func;
+ if (this.functions.has(functionId)) {
+ let codeOffset = stack[i + 1];
+ this.functions.get(functionId).addOffsetSample(codeId, codeOffset);
+ }
+ }
+ }
+ }
+
+ getScript(scriptId) {
+ return this.scripts.get(scriptId);
+ }
+
+ getLineForScriptOffset(script, scriptOffset) {
+ let line = 0;
+ let charsConsumed = 0;
+ for (; line < script.length; ++line) {
+ charsConsumed += script[line].length + 1; // Add 1 for newline.
+ if (charsConsumed > scriptOffset) break;
+ }
+ return line;
+ }
+
+ hasSource(functionId) {
+ return this.functions.has(functionId);
+ }
+
+ generateSourceView(functionId) {
+ console.assert(this.hasSource(functionId));
+ let data = this.functions.get(functionId);
+ let scriptId = data.scriptId;
+ let script = this.getScript(scriptId);
+ let firstLineNumber =
+ this.getLineForScriptOffset(script, data.startScriptOffset);
+ let lastLineNumber =
+ this.getLineForScriptOffset(script, data.endScriptOffset);
+ let lines = script.slice(firstLineNumber, lastLineNumber + 1);
+ normalizeLeadingWhitespace(lines);
+
+ let samplesTotal = 0;
+ let lineSampleCounts = [];
+ for (let [codeId, block] of data.codes) {
+ block.offsets.forEach((sampleCount, codeOffset) => {
+ let sourceOffset = block.positionTable.getScriptOffset(codeOffset);
+ let lineNumber =
+ this.getLineForScriptOffset(script, sourceOffset) - firstLineNumber;
+ samplesTotal += sampleCount;
+ lineSampleCounts[lineNumber] =
+ (lineSampleCounts[lineNumber] || 0) + sampleCount;
+ });
+ }
+
+ return {
+ source: lines,
+ lineSampleCounts: lineSampleCounts,
+ samplesTotal: samplesTotal,
+ firstLineNumber: firstLineNumber + 1 // Source code is 1-indexed.
+ };
+ }
+}
+
+class FunctionSourceData {
+ constructor(scriptId, startScriptOffset, endScriptOffset) {
+ this.scriptId = scriptId;
+ this.startScriptOffset = startScriptOffset;
+ this.endScriptOffset = endScriptOffset;
+
+ this.codes = new Map();
+ }
+
+ addSourceBlock(codeId, source) {
+ this.codes.set(codeId, {
+ positionTable: new SourcePositionTable(source.positions),
+ offsets: []
+ });
+ }
+
+ addOffsetSample(codeId, codeOffset) {
+ let codeIdOffsets = this.codes.get(codeId).offsets;
+ codeIdOffsets[codeOffset] = (codeIdOffsets[codeOffset] || 0) + 1;
+ }
+}
+
+class SourcePositionTable {
+ constructor(encodedTable) {
+ this.offsetTable = [];
+ let offsetPairRegex = /C([0-9]+)O([0-9]+)/g;
+ while (true) {
+ let regexResult = offsetPairRegex.exec(encodedTable);
+ if (!regexResult) break;
+ let codeOffset = parseInt(regexResult[1]);
+ let scriptOffset = parseInt(regexResult[2]);
+ if (isNaN(codeOffset) || isNaN(scriptOffset)) continue;
+ this.offsetTable.push(codeOffset, scriptOffset);
+ }
+ }
+
+ getScriptOffset(codeOffset) {
+ console.assert(codeOffset >= 0);
+ for (let i = this.offsetTable.length - 2; i >= 0; i -= 2) {
+ if (this.offsetTable[i] <= codeOffset) {
+ return this.offsetTable[i + 1];
+ }
+ }
+ return this.offsetTable[1];
+ }
+}
+
class HelpView {
constructor() {
this.element = $("help");
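The SourcePositionTable added above decodes a string of 'C<codeOffset>O<scriptOffset>' pairs and, on lookup, returns the script offset of the last pair at or below the queried code offset. A minimal Python sketch of the same decode and lookup (the encoded sample is made up):

    import re

    def decode_position_table(encoded):
        # 'C0O0C10O5C20O9' -> [(0, 0), (10, 5), (20, 9)]
        return [(int(c), int(s)) for c, s in re.findall(r'C(\d+)O(\d+)', encoded)]

    def script_offset_for(table, code_offset):
        # Walk backwards, mirroring SourcePositionTable.getScriptOffset.
        for c, s in reversed(table):
            if c <= code_offset:
                return s
        return table[0][1]

    table = decode_position_table('C0O0C10O5C20O9')
    assert script_offset_for(table, 12) == 5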
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
index e5c2114b1a..ffa5c2a0ca 100755
--- a/deps/v8/tools/release/create_release.py
+++ b/deps/v8/tools/release/create_release.py
@@ -222,7 +222,7 @@ class LandBranch(Step):
self.GitUpload(author=self._options.author,
force=True,
bypass_hooks=True,
- private=True)
+ no_autocc=True)
cmd = "cl land --bypass-hooks -f"
if self._options.dry_run:
print "Dry run. Command:\ngit %s" % cmd
diff --git a/deps/v8/tools/release/filter_build_files.py b/deps/v8/tools/release/filter_build_files.py
index 7d3f22138a..9cc6607108 100755
--- a/deps/v8/tools/release/filter_build_files.py
+++ b/deps/v8/tools/release/filter_build_files.py
@@ -35,7 +35,7 @@ SUPPLEMENTARY_FILES = [
LIBRARY_FILES = {
'android': ['*.a', '*.so'],
'linux': ['*.a', '*.so'],
- 'mac': ['*.a', '*.so'],
+ 'mac': ['*.a', '*.so', '*.dylib'],
'win': ['*.lib', '*.dll'],
}
diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py
index 9deaee891b..a002f4211c 100644
--- a/deps/v8/tools/release/git_recipes.py
+++ b/deps/v8/tools/release/git_recipes.py
@@ -206,8 +206,8 @@ class GitRecipesMixin(object):
self.Git(MakeArgs(args), **kwargs)
def GitUpload(self, reviewer="", author="", force=False, cq=False,
- cq_dry_run=False, bypass_hooks=False, cc="", private=False,
- tbr_reviewer="", **kwargs):
+ cq_dry_run=False, bypass_hooks=False, cc="", tbr_reviewer="",
+ no_autocc=False, **kwargs):
args = ["cl upload --send-mail"]
if author:
args += ["--email", Quoted(author)]
@@ -223,11 +223,11 @@ class GitRecipesMixin(object):
args.append("--cq-dry-run")
if bypass_hooks:
args.append("--bypass-hooks")
+ if no_autocc:
+ args.append("--no-autocc")
if cc:
args += ["--cc", Quoted(cc)]
args += ["--gerrit"]
- if private:
- args += ["--private"]
# TODO(machenbach): Check output in forced mode. Verify that all required
# base files were uploaded, if not retry.
self.Git(MakeArgs(args), pipe=False, **kwargs)
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 565b2b7c8f..f3dc400e58 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -934,7 +934,7 @@ TBR=reviewer@chromium.org"""
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckVersionCommit),
Cmd("git cl upload --send-mail --email \"author@chromium.org\" "
- "-f --bypass-hooks --gerrit --private", ""),
+ "-f --bypass-hooks --no-autocc --gerrit", ""),
Cmd("git cl land --bypass-hooks -f", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep="
diff --git a/deps/v8/tools/run-clang-tidy.py b/deps/v8/tools/run-clang-tidy.py
new file mode 100755
index 0000000000..11826f19b1
--- /dev/null
+++ b/deps/v8/tools/run-clang-tidy.py
@@ -0,0 +1,420 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import multiprocessing
+import optparse
+import os
+import re
+import subprocess
+import sys
+
+CLANG_TIDY_WARNING = re.compile(r'(\/.*?)\ .*\[(.*)\]$')
+CLANG_TIDY_CMDLINE_OUT = re.compile(r'^clang-tidy.*\ .*|^\./\.\*')
+FILE_REGEXS = ['../src/*', '../test/*']
+HEADER_REGEX = ['\.\.\/src\/.*|\.\.\/include\/.*|\.\.\/test\/.*']
+
+THREADS = multiprocessing.cpu_count()
+
+
+class ClangTidyWarning(object):
+ """
+ Wraps up a clang-tidy warning to present aggregated information.
+ """
+
+ def __init__(self, warning_type):
+ self.warning_type = warning_type
+ self.occurrences = set()
+
+ def add_occurrence(self, file_path):
+ self.occurrences.add(file_path.lstrip())
+
+ def __hash__(self):
+ return hash(self.warning_type)
+
+ def to_string(self, file_loc):
+ s = '[%s] #%d\n' % (self.warning_type, len(self.occurrences))
+ if file_loc:
+ s += ' ' + '\n '.join(self.occurrences)
+ s += '\n'
+ return s
+
+ def __str__(self):
+ return self.to_string(False)
+
+ def __lt__(self, other):
+ return len(self.occurrences) < len(other.occurrences)
+
+
+def GenerateCompileCommands(build_folder):
+ """
+ Generate a compilation database.
+
+ Currently clang-tidy-4 does not understand all flags that are passed
+ by the build system, therefore, we remove them from the generated file.
+ """
+ ninja_ps = subprocess.Popen(
+ ['ninja', '-t', 'compdb', 'cxx', 'cc'],
+ stdout=subprocess.PIPE,
+ cwd=build_folder)
+
+ out_filepath = os.path.join(build_folder, 'compile_commands.json')
+ with open(out_filepath, 'w') as cc_file:
+ while True:
+ line = ninja_ps.stdout.readline()
+
+ if line == '':
+ break
+
+ line = line.replace('-fcomplete-member-pointers', '')
+ line = line.replace('-Wno-enum-compare-switch', '')
+ line = line.replace('-Wno-ignored-pragma-optimize', '')
+ line = line.replace('-Wno-null-pointer-arithmetic', '')
+ line = line.replace('-Wno-unused-lambda-capture', '')
+ cc_file.write(line)
+
+
+def skip_line(line):
+ """
+ Check if a clang-tidy output line should be skipped.
+ """
+ return bool(CLANG_TIDY_CMDLINE_OUT.search(line))
+
+
+def ClangTidyRunFull(build_folder, skip_output_filter, checks, auto_fix):
+ """
+ Run clang-tidy on the full codebase and print warnings.
+ """
+ extra_args = []
+ if auto_fix:
+ extra_args.append('-fix')
+
+ if checks is not None:
+ extra_args.append('-checks')
+ extra_args.append('-*, ' + checks)
+
+ with open(os.devnull, 'w') as DEVNULL:
+ ct_process = subprocess.Popen(
+ ['run-clang-tidy', '-j' + str(THREADS), '-p', '.']
+ + ['-header-filter'] + HEADER_REGEX + extra_args
+ + FILE_REGEXS,
+ cwd=build_folder,
+ stdout=subprocess.PIPE,
+ stderr=DEVNULL)
+ removing_check_header = False
+ empty_lines = 0
+
+ while True:
+ line = ct_process.stdout.readline()
+ if line == '':
+ break
+
+      # Skip all lines after 'Enabled checks' and before two consecutive
+      # newlines, i.e., skip the clang-tidy check list.
+ if line.startswith('Enabled checks'):
+ removing_check_header = True
+ if removing_check_header and not skip_output_filter:
+ if line == '\n':
+ empty_lines += 1
+ if empty_lines == 2:
+ removing_check_header = False
+ continue
+
+ # Different lines get removed to ease output reading.
+ if not skip_output_filter and skip_line(line):
+ continue
+
+ # Print line, because no filter was matched.
+ if line != '\n':
+ sys.stdout.write(line)
+
+
+def ClangTidyRunAggregate(build_folder, print_files):
+ """
+ Run clang-tidy on the full codebase and aggregate warnings into categories.
+ """
+ with open(os.devnull, 'w') as DEVNULL:
+ ct_process = subprocess.Popen(
+ ['run-clang-tidy', '-j' + str(THREADS), '-p', '.'] +
+ ['-header-filter'] + HEADER_REGEX +
+ FILE_REGEXS,
+ cwd=build_folder,
+ stdout=subprocess.PIPE,
+ stderr=DEVNULL)
+ warnings = dict()
+ while True:
+ line = ct_process.stdout.readline()
+ if line == '':
+ break
+
+ res = CLANG_TIDY_WARNING.search(line)
+ if res is not None:
+ warnings.setdefault(
+ res.group(2),
+ ClangTidyWarning(res.group(2))).add_occurrence(res.group(1))
+
+ for warning in sorted(warnings.values(), reverse=True):
+ sys.stdout.write(warning.to_string(print_files))
+
+
+def ClangTidyRunDiff(build_folder, diff_branch, auto_fix):
+ """
+ Run clang-tidy on the diff between current and the diff_branch.
+ """
+ if diff_branch is None:
+ diff_branch = subprocess.check_output(['git', 'merge-base',
+ 'HEAD', 'origin/master']).strip()
+
+ git_ps = subprocess.Popen(
+ ['git', 'diff', '-U0', diff_branch], stdout=subprocess.PIPE)
+
+ extra_args = []
+ if auto_fix:
+ extra_args.append('-fix')
+
+ with open(os.devnull, 'w') as DEVNULL:
+ """
+    The script `clang-tidy-diff` does not support adding header-filters. To
+    still analyze headers we use the build path option `-path` to inject our
+    header-filter option. This works because the script just adds the passed
+    path string to the clang-tidy command line.
+ """
+ modified_build_folder = build_folder
+ modified_build_folder += ' -header-filter='
+ modified_build_folder += '\'' + ''.join(HEADER_REGEX) + '\''
+
+ ct_ps = subprocess.Popen(
+ ['clang-tidy-diff.py', '-path', modified_build_folder, '-p1'] +
+ extra_args,
+ stdin=git_ps.stdout,
+ stdout=subprocess.PIPE,
+ stderr=DEVNULL)
+ git_ps.wait()
+ while True:
+ line = ct_ps.stdout.readline()
+ if line == '':
+ break
+
+ if skip_line(line):
+ continue
+
+ sys.stdout.write(line)
+
+
+def rm_prefix(string, prefix):
+ """
+ Removes prefix from a string until the new string
+ no longer starts with the prefix.
+ """
+ while string.startswith(prefix):
+ string = string[len(prefix):]
+ return string
+
+
+def ClangTidyRunSingleFile(build_folder, filename_to_check, auto_fix,
+ line_ranges=[]):
+ """
+ Run clang-tidy on a single file.
+ """
+ files_with_relative_path = []
+
+ compdb_filepath = os.path.join(build_folder, 'compile_commands.json')
+ with open(compdb_filepath) as raw_json_file:
+ compdb = json.load(raw_json_file)
+
+ for db_entry in compdb:
+ if db_entry['file'].endswith(filename_to_check):
+ files_with_relative_path.append(db_entry['file'])
+
+ with open(os.devnull, 'w') as DEVNULL:
+ for file_with_relative_path in files_with_relative_path:
+ line_filter = None
+ if len(line_ranges) != 0:
+ line_filter = '['
+ line_filter += '{ \"lines\":[' + ', '.join(line_ranges)
+ line_filter += '], \"name\":\"'
+ line_filter += rm_prefix(file_with_relative_path,
+ '../') + '\"}'
+ line_filter += ']'
+
+ extra_args = ['-line-filter=' + line_filter] if line_filter else []
+
+ if auto_fix:
+ extra_args.append('-fix')
+
+ subprocess.call(['clang-tidy', '-p', '.'] +
+ extra_args +
+ [file_with_relative_path],
+ cwd=build_folder,
+ stderr=DEVNULL)
+
+
+def CheckClangTidy():
+ """
+ Checks if a clang-tidy binary exists.
+ """
+ with open(os.devnull, 'w') as DEVNULL:
+ return subprocess.call(['which', 'clang-tidy'], stdout=DEVNULL) == 0
+
+
+def CheckCompDB(build_folder):
+ """
+ Checks if a compilation database exists in the build_folder.
+ """
+ return os.path.isfile(os.path.join(build_folder, 'compile_commands.json'))
+
+
+def DetectBuildFolder():
+ """
+ Tries to auto detect the last used build folder in out/
+ """
+ outdirs_folder = 'out/'
+ last_used = None
+ last_timestamp = -1
+ for outdir in [outdirs_folder + folder_name
+ for folder_name in os.listdir(outdirs_folder)
+ if os.path.isdir(outdirs_folder + folder_name)]:
+ outdir_modified_timestamp = os.path.getmtime(outdir)
+ if outdir_modified_timestamp > last_timestamp:
+ last_timestamp = outdir_modified_timestamp
+ last_used = outdir
+
+ return last_used
+
+
+def GetOptions():
+ """
+ Generate the option parser for this script.
+ """
+ result = optparse.OptionParser()
+ result.add_option(
+ '-b',
+ '--build-folder',
+ help='Set V8 build folder',
+ dest='build_folder',
+ default=None)
+ result.add_option(
+ '-j',
+      help='Set the number of threads to use',
+ dest='threads',
+ default=None)
+ result.add_option(
+ '--gen-compdb',
+ help='Generate a compilation database for clang-tidy',
+ default=False,
+ action='store_true')
+ result.add_option(
+ '--no-output-filter',
+      help='Do not use any output filtering',
+ default=False,
+ action='store_true')
+ result.add_option(
+ '--fix',
+ help='Fix auto fixable issues',
+ default=False,
+ dest='auto_fix',
+ action='store_true'
+ )
+
+ # Full clang-tidy.
+ full_run_g = optparse.OptionGroup(result, 'Clang-tidy full', '')
+ full_run_g.add_option(
+ '--full',
+ help='Run clang-tidy on the whole codebase',
+ default=False,
+ action='store_true')
+ full_run_g.add_option('--checks',
+ help='Clang-tidy checks to use.',
+ default=None)
+ result.add_option_group(full_run_g)
+
+ # Aggregate clang-tidy.
+ agg_run_g = optparse.OptionGroup(result, 'Clang-tidy aggregate', '')
+ agg_run_g.add_option('--aggregate', help='Run clang-tidy on the whole '\
+ 'codebase and aggregate the warnings',
+ default=False, action='store_true')
+ agg_run_g.add_option('--show-loc', help='Show file locations when running '\
+ 'in aggregate mode', default=False,
+ action='store_true')
+ result.add_option_group(agg_run_g)
+
+ # Diff clang-tidy.
+ diff_run_g = optparse.OptionGroup(result, 'Clang-tidy diff', '')
+ diff_run_g.add_option('--branch', help='Run clang-tidy on the diff '\
+ 'between HEAD and the merge-base between HEAD '\
+ 'and DIFF_BRANCH (origin/master by default).',
+ default=None, dest='diff_branch')
+ result.add_option_group(diff_run_g)
+
+ # Single clang-tidy.
+ single_run_g = optparse.OptionGroup(result, 'Clang-tidy single', '')
+ single_run_g.add_option(
+ '--single', help='Run clang-tidy on a single file', default=False,
+ action='store_true')
+ single_run_g.add_option(
+ '--file', help='File name to check', default=None, dest='file_name')
+ single_run_g.add_option('--lines', help='Limit checks to a line range. '\
+ 'For example: --lines="[2,4], [5,6]"',
+ default=[], dest='line_ranges')
+
+ result.add_option_group(single_run_g)
+ return result
+
+
+def main():
+ parser = GetOptions()
+ (options, _) = parser.parse_args()
+
+ if options.threads is not None:
+ global THREADS
+ THREADS = options.threads
+
+ if options.build_folder is None:
+ options.build_folder = DetectBuildFolder()
+
+ if not CheckClangTidy():
+ print 'Could not find clang-tidy'
+ elif options.build_folder is None or not os.path.isdir(options.build_folder):
+ print 'Please provide a build folder with -b'
+ elif options.gen_compdb:
+ GenerateCompileCommands(options.build_folder)
+ elif not CheckCompDB(options.build_folder):
+ print 'Could not find compilation database, ' \
+ 'please generate it with --gen-compdb'
+ else:
+ print 'Using build folder:', options.build_folder
+ if options.full:
+ print 'Running clang-tidy - full'
+ ClangTidyRunFull(options.build_folder,
+ options.no_output_filter,
+ options.checks,
+ options.auto_fix)
+ elif options.aggregate:
+ print 'Running clang-tidy - aggregating warnings'
+ if options.auto_fix:
+ print 'Auto-fix does not work in aggregate mode, running without it.'
+ ClangTidyRunAggregate(options.build_folder, options.show_loc)
+ elif options.single:
+ if options.file_name is not None:
+ print 'Running clang-tidy - single on ' + options.file_name
+ line_ranges = []
+ for match in re.findall(r'(\[.*?\])', options.line_ranges):
+ if match:
+ line_ranges.append(match)
+ ClangTidyRunSingleFile(options.build_folder,
+ options.file_name,
+ options.auto_fix,
+ line_ranges)
+ else:
+ print 'No filename provided, please specify a filename with --file'
+ else:
+ print 'Running clang-tidy'
+ ClangTidyRunDiff(options.build_folder,
+ options.diff_branch,
+ options.auto_fix)
+
+
+if __name__ == '__main__':
+ main()
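
Note: the -line-filter argument assembled by hand in ClangTidyRunSingleFile
above follows clang-tidy's JSON line-filter format; a minimal sketch of
building the same string with json.dumps, using hypothetical inputs:

    import json

    # Hypothetical inputs mirroring ClangTidyRunSingleFile above.
    line_ranges = ['[2, 4]', '[5, 6]']   # as parsed from --lines
    name = 'src/objects.cc'              # path with leading '../' stripped

    line_filter = json.dumps(
        [{'lines': [json.loads(r) for r in line_ranges], 'name': name}])
    # -> [{"lines": [[2, 4], [5, 6]], "name": "src/objects.cc"}]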
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 310bd8a008..67861db3ea 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -780,7 +780,9 @@ class AndroidPlatform(Platform): # pragma: no cover
except android.CommandFailedException as e:
logging.info(title % "Stdout" + "\n%s", e.output)
raise
- except android.TimeoutException:
+ except android.TimeoutException as e:
+ if e.output:
+ logging.info(title % "Stdout" + "\n%s", e.output)
logging.warning(">>> Test timed out after %ss.", runnable.timeout)
stdout = ""
if runnable.process_size:
diff --git a/deps/v8/tools/sanitizers/tsan_suppressions.txt b/deps/v8/tools/sanitizers/tsan_suppressions.txt
index 270340e484..839636c8ce 100644
--- a/deps/v8/tools/sanitizers/tsan_suppressions.txt
+++ b/deps/v8/tools/sanitizers/tsan_suppressions.txt
@@ -4,3 +4,7 @@
# Incorrectly detected lock cycles in test-lockers
# https://code.google.com/p/thread-sanitizer/issues/detail?id=81
deadlock:LockAndUnlockDifferentIsolatesThread::Run
+
+# Data race in a third party lib
+# https://bugs.chromium.org/p/v8/issues/detail?id=8110
+race:IndianCalendar::fgSystemDefaultCenturyStartYear
diff --git a/deps/v8/tools/test262-results-parser.js b/deps/v8/tools/test262-results-parser.js
new file mode 100644
index 0000000000..379436e3f0
--- /dev/null
+++ b/deps/v8/tools/test262-results-parser.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Run the test runner to dump a json file, then pass that json file
+// to this script to print a list of failing tests that can be copied
+// to test262.status.
+//
+// Usage:
+//
+// Run the test runner to generate the results:
+// $ tools/run-tests.py --gn test262 --json-test-results=tools/.test262-results.json
+//
+// Run this script to print the formatted results:
+// $ node tools/test262-results-parser.js .test262-results.json
+//
+// Note: The json results file generated by the test runner should be
+// in the tools/ directory, which is the same dir as this script.
+
+var fs = require('fs'),
+ path = require('path');
+
+function main() {
+ if (process.argv.length === 2) {
+ throw new Error('File name required as first arg.');
+ }
+
+ var fileName = process.argv[2],
+ fullPath = path.join(__dirname, fileName),
+ results = require(fullPath)[0].results,
+ tests = new Set();
+ for (let result of results) {
+ let [_, ...test] = result.name.split('/');
+ tests.add(` '${test.join('/')}': [FAIL],`);
+ }
+
+
+ [...tests].sort().forEach(i => console.log(i));
+}
+
+main();
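
As a sketch of the transformation performed above (hypothetical test name),
the suite prefix is dropped and the rest is formatted as a status-file line:

    # Python sketch of the name rewriting done by the script above.
    name = 'test262/built-ins/Array/length'  # result.name from the JSON
    suite, rest = name.split('/', 1)         # drop the leading suite segment
    print(" '%s': [FAIL]," % rest)           # ->  'built-ins/Array/length': [FAIL],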
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 481cc10134..fcb2202f8a 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -73,13 +73,12 @@ TEST_MAP = {
],
# This needs to stay in sync with test/d8_default.isolate.
"d8_default": [
- # TODO(machenbach): uncomment after infra side lands.
- #"debugger",
+ "debugger",
"mjsunit",
"webkit",
- #"message",
- #"preparser",
- #"intl",
+ "message",
+ "preparser",
+ "intl",
],
# This needs to stay in sync with test/optimize_for_size.isolate.
"optimize_for_size": [
@@ -312,6 +311,9 @@ class BaseTestRunner(object):
parser.add_option("--junitout", help="File name of the JUnit output")
parser.add_option("--junittestsuite", default="v8tests",
help="The testsuite name in the JUnit output file")
+ parser.add_option("--exit-after-n-failures", type="int", default=100,
+ help="Exit after the first N failures instead of "
+ "running all tests. Pass 0 to disable this feature.")
# Rerun
parser.add_option("--rerun-failures-count", default=0, type=int,
@@ -745,6 +747,9 @@ class BaseTestRunner(object):
self.mode_options.execution_mode))
return procs
+ def _create_result_tracker(self, options):
+ return progress.ResultsTracker(options.exit_after_n_failures)
+
def _create_timeout_proc(self, options):
if not options.total_timeout_sec:
return None
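
For reference, the new flag composes with the existing runner flags;
hypothetical invocations:

    $ tools/run-tests.py --exit-after-n-failures=2 --progress=verbose mjsunit
    $ tools/run-tests.py --exit-after-n-failures=0 mjsunit  # never exit early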
diff --git a/deps/v8/tools/testrunner/local/android.py b/deps/v8/tools/testrunner/local/android.py
index fb25bb5a17..707614f095 100644
--- a/deps/v8/tools/testrunner/local/android.py
+++ b/deps/v8/tools/testrunner/local/android.py
@@ -18,8 +18,9 @@ DEVICE_DIR = '/data/local/tmp/v8/'
class TimeoutException(Exception):
- def __init__(self, timeout):
+ def __init__(self, timeout, output=None):
self.timeout = timeout
+ self.output = output
class CommandFailedException(Exception):
@@ -170,8 +171,8 @@ class _Driver(object):
return '\n'.join(output)
except device_errors.AdbCommandFailedError as e:
raise CommandFailedException(e.status, e.output)
- except device_errors.CommandTimeoutError:
- raise TimeoutException(timeout)
+ except device_errors.CommandTimeoutError as e:
+ raise TimeoutException(timeout, e.output)
if logcat_file:
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index 302d568e87..d176323d02 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -211,7 +211,7 @@ class AndroidCommand(BaseCommand):
"""
self.shell_name = os.path.basename(shell)
self.shell_dir = os.path.dirname(shell)
- self.files_to_push = resources_func()
+ self.files_to_push = (resources_func or (lambda: []))()
# Make all paths in arguments relative and also prepare files from arguments
# for pushing to the device.
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index 7c9a250bc3..9735f8ea66 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -26,6 +26,9 @@ def setup_testing():
from threading import Thread as Process
# Monkeypatch threading Queue to look like multiprocessing Queue.
Queue.cancel_join_thread = lambda self: None
+ # Monkeypatch os.kill and add fake pid property on Thread.
+ os.kill = lambda *args: None
+ Process.pid = property(lambda self: None)
class NormalResult():
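
The monkeypatching above lets the single-process testing mode treat threads
like processes; a minimal sketch of the idea (hypothetical, not part of the
patch):

    import os
    from threading import Thread as Process

    os.kill = lambda *args: None               # killing by pid becomes a no-op
    Process.pid = property(lambda self: None)  # threads expose a fake pid

    worker = Process(target=lambda: None)
    worker.start()
    os.kill(worker.pid, 9)  # safe under testing: resolves to the no-op
    worker.join()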
diff --git a/deps/v8/tools/testrunner/num_fuzzer.py b/deps/v8/tools/testrunner/num_fuzzer.py
index b55dd2102d..3b76541604 100755
--- a/deps/v8/tools/testrunner/num_fuzzer.py
+++ b/deps/v8/tools/testrunner/num_fuzzer.py
@@ -138,7 +138,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
fuzzer_rng = random.Random(options.fuzzer_random_seed)
combiner = self._create_combiner(fuzzer_rng, options)
- results = ResultsTracker()
+ results = self._create_result_tracker(options)
execproc = ExecutionProc(options.j)
sigproc = self._create_signal_proc()
indicators = self._create_progress_indicators(options)
diff --git a/deps/v8/tools/testrunner/objects/predictable.py b/deps/v8/tools/testrunner/objects/predictable.py
index 48279d625c..52d14ea460 100644
--- a/deps/v8/tools/testrunner/objects/predictable.py
+++ b/deps/v8/tools/testrunner/objects/predictable.py
@@ -45,5 +45,4 @@ class OutProc(outproc_base.BaseOutProc):
class PredictableFilterProc(testproc_base.TestProcFilter):
def _filter(self, test):
- return (statusfile.FAIL in test.expected_outcomes or
- test.output_proc.negative)
+ return test.skip_predictable()
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 7416590dd0..de8bc561eb 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -37,6 +37,21 @@ from ..local import utils
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
+# Patterns for additional resource files on Android. Files that are not covered
+# by one of the other patterns below will be specified in the resources section.
+RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
+# Pattern to auto-detect files to push on Android for statements like:
+# load("path/to/file.js")
+LOAD_PATTERN = re.compile(
+ r"(?:load|readbuffer|read)\((?:'|\")([^'\"]*)(?:'|\")\)")
+# Pattern to auto-detect files to push on Android for statements like:
+# import "path/to/file.js"
+MODULE_RESOURCES_PATTERN_1 = re.compile(
+ r"(?:import|export)(?:\(| )(?:'|\")([^'\"]*)(?:'|\")")
+# Pattern to auto-detect files to push on Android for statements like:
+# import foobar from "path/to/file.js"
+MODULE_RESOURCES_PATTERN_2 = re.compile(
+ r"(?:import|export).*from (?:'|\")([^'\"]*)(?:'|\")")
class TestCase(object):
@@ -143,7 +158,10 @@ class TestCase(object):
def get_command(self):
params = self._get_cmd_params()
env = self._get_cmd_env()
- shell, shell_flags = self._get_shell_with_flags()
+ shell = self.get_shell()
+ if utils.IsWindows():
+ shell += '.exe'
+ shell_flags = self._get_shell_flags()
timeout = self._get_timeout(params)
return self._create_cmd(shell, shell_flags + params, env, timeout)
@@ -207,14 +225,8 @@ class TestCase(object):
def _get_suite_flags(self):
return []
- def _get_shell_with_flags(self):
- shell = self.get_shell()
- shell_flags = []
- if shell == 'd8':
- shell_flags.append('--test')
- if utils.IsWindows():
- shell += '.exe'
- return shell, shell_flags
+ def _get_shell_flags(self):
+ return []
def _get_timeout(self, params):
timeout = self._test_config.timeout
@@ -228,7 +240,7 @@ class TestCase(object):
return timeout
def get_shell(self):
- return 'd8'
+ raise NotImplementedError()
def _get_suffix(self):
return '.js'
@@ -269,6 +281,10 @@ class TestCase(object):
"""
return []
+ def skip_predictable(self):
+ """Returns True if the test case is not suitable for predictable testing."""
+ return True
+
@property
def output_proc(self):
if self.expected_outcomes is outproc.OUTCOMES_PASS:
@@ -285,3 +301,57 @@ class TestCase(object):
def __str__(self):
return self.suite.name + '/' + self.name
+
+
+class D8TestCase(TestCase):
+ def get_shell(self):
+ return "d8"
+
+ def _get_shell_flags(self):
+ return ['--test']
+
+ def _get_resources_for_file(self, file):
+ """Returns for a given file a list of absolute paths of files needed by the
+ given file.
+ """
+ with open(file) as f:
+ source = f.read()
+ result = []
+ def add_path(path):
+ result.append(os.path.abspath(path.replace('/', os.path.sep)))
+ for match in RESOURCES_PATTERN.finditer(source):
+ # There are several resources per line. Relative to base dir.
+ for path in match.group(1).strip().split():
+ add_path(path)
+ for match in LOAD_PATTERN.finditer(source):
+ # Files in load statements are relative to base dir.
+ add_path(match.group(1))
+ for match in MODULE_RESOURCES_PATTERN_1.finditer(source):
+ # Imported files are relative to the file importing them.
+ add_path(os.path.join(os.path.dirname(file), match.group(1)))
+ for match in MODULE_RESOURCES_PATTERN_2.finditer(source):
+ # Imported files are relative to the file importing them.
+ add_path(os.path.join(os.path.dirname(file), match.group(1)))
+ return result
+
+ def _get_resources(self):
+ """Returns the list of files needed by a test case."""
+ if not self._get_source_path():
+ return []
+ result = set()
+ to_check = [self._get_source_path()]
+ # Recurse over all files until reaching a fixpoint.
+ while to_check:
+ next_resource = to_check.pop()
+ result.add(next_resource)
+ for resource in self._get_resources_for_file(next_resource):
+ # Only add files that exist on disk. The patterns we check for give some
+ # false positives otherwise.
+ if resource not in result and os.path.exists(resource):
+ to_check.append(resource)
+ return sorted(list(result))
+
+ def skip_predictable(self):
+ """Returns True if the test case is not suitable for predictable testing."""
+ return (statusfile.FAIL in self.expected_outcomes or
+ self.output_proc.negative)
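
To make the resource auto-detection above concrete, a minimal sketch of what
two of the patterns match (hypothetical file contents):

    import re

    LOAD_PATTERN = re.compile(
        r"(?:load|readbuffer|read)\((?:'|\")([^'\"]*)(?:'|\")\)")
    MODULE_RESOURCES_PATTERN_2 = re.compile(
        r"(?:import|export).*from (?:'|\")([^'\"]*)(?:'|\")")

    source = 'load("test/mjsunit/mjsunit.js");\n' \
             'import {f} from "helpers/util.js";\n'
    print(LOAD_PATTERN.findall(source))                # ['test/mjsunit/mjsunit.js']
    print(MODULE_RESOURCES_PATTERN_2.findall(source))  # ['helpers/util.js']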
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index bd5df9b482..bf7d3f133d 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -282,7 +282,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
print '>>> Running with test processors'
loader = LoadProc()
tests_counter = TestsCounter()
- results = ResultsTracker()
+ results = self._create_result_tracker(options)
indicators = self._create_progress_indicators(options)
outproc_factory = None
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 221c64bfdd..50b7307e1c 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -32,13 +32,15 @@ class TestsCounter(base.TestProcObserver):
class ResultsTracker(base.TestProcObserver):
- def __init__(self):
+ """Tracks number of results and stops to run tests if max_failures reached."""
+ def __init__(self, max_failures):
super(ResultsTracker, self).__init__()
self._requirement = base.DROP_OUTPUT
self.failed = 0
self.remaining = 0
self.total = 0
+ self.max_failures = max_failures
def _on_next_test(self, test):
self.total += 1
@@ -48,6 +50,9 @@ class ResultsTracker(base.TestProcObserver):
self.remaining -= 1
if result.has_unexpected_output:
self.failed += 1
+ if self.max_failures and self.failed >= self.max_failures:
+ print '>>> Too many failures, exiting...'
+ self.stop()
class ProgressIndicator(base.TestProcObserver):
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index 3470d2f3c3..aac432ef41 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -11,24 +11,77 @@ import sys
import re
from subprocess import Popen, PIPE
+def preprocess(input):
+ input = re.sub(r'(if\s+)constexpr(\s*\()', r'\1/*COxp*/\2', input)
+ input = re.sub(r'(\)\s*\:\s*\S+\s+)labels\s+',
+ r'\1,\n/*_LABELS_HOLD_*/ ', input)
+ input = re.sub(r'(\s+)operator\s*(\'[^\']+\')', r'\1/*_OPE \2*/', input)
+ input = re.sub(r'(\s+)typeswitch\s*\(', r'\1/*_TYPE*/switch (', input)
+ input = re.sub(r'(\s+)case\s*\(([^\s]+)\s+\:\s*([^\:]+)\)(\s*)\:',
+ r'\1case \3: /*_TSV\2:*/', input)
+ input = re.sub(r'(\s+)case\s*\(([^\:]+)\)(\s*)\:',
+ r'\1case \2: /*_TSX*/', input)
+ input = re.sub(r'\sgenerates\s+\'([^\']+)\'\s*',
+ r' _GeNeRaTeS00_/*\1@*/', input)
+ input = re.sub(r'\sconstexpr\s+\'([^\']+)\'\s*',
+ r' _CoNsExP_/*\1@*/', input)
+ input = re.sub(r'\notherwise',
+ r'\n otherwise', input)
+ input = re.sub(r'(\n\s*\S[^\n]*\s)otherwise',
+ r'\1_OtheSaLi', input)
+ return input
+
+def postprocess(output):
+ output = re.sub(r'\/\*COxp\*\/', r'constexpr', output)
+ output = re.sub(r'(\S+)\s*: type([,>])', r'\1: type\2', output)
+ output = re.sub(r',([\n ]*)\/\*_LABELS_HOLD_\*\/', r'\1labels', output)
+ output = re.sub(r'\/\*_OPE \'([^\']+)\'\*\/', r"operator '\1'", output)
+ output = re.sub(r'\/\*_TYPE\*\/(\s*)switch', r'typeswitch', output)
+ output = re.sub(r'case ([^\:]+)\:\s*\/\*_TSX\*\/',
+ r'case (\1):', output)
+ output = re.sub(r'case ([^\:]+)\:\s*\/\*_TSV([^\:]+)\:\*\/',
+ r'case (\2: \1):', output)
+ output = re.sub(r'\n_GeNeRaTeS00_\s*\/\*([^@]+)@\*\/',
+ r"\n generates '\1'", output)
+ output = re.sub(r'_GeNeRaTeS00_\s*\/\*([^@]+)@\*\/',
+ r"generates '\1'", output)
+ output = re.sub(r'_CoNsExP_\s*\/\*([^@]+)@\*\/',
+ r"constexpr '\1'", output)
+ output = re.sub(r'\n(\s+)otherwise',
+ r"\n\1 otherwise", output)
+ output = re.sub(r'\n(\s+)_OtheSaLi',
+ r"\n\1otherwise", output)
+ output = re.sub(r'_OtheSaLi',
+ r"otherwise", output)
+ return output
+
if len(sys.argv) < 2 or len(sys.argv) > 3:
print "invalid number of arguments"
sys.exit(-1)
use_stdout = True
-if len(sys.argv) == 3 and sys.argv[1] == '-i':
- use_stdout = False
+lint = False
+if len(sys.argv) == 3:
+ if sys.argv[1] == '-i':
+ use_stdout = False
+ if sys.argv[1] == '-l':
+ lint = True
filename = sys.argv[len(sys.argv) - 1]
with open(filename, 'r') as content_file:
content = content_file.read()
+original_input = content
p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
-output, err = p.communicate(content)
+output, err = p.communicate(preprocess(content))
+output = postprocess(output)
rc = p.returncode
if (rc <> 0):
sys.exit(rc);
-if use_stdout:
+if lint:
+ if (output != original_input):
+ print >>sys.stderr, filename + ' requires formatting'
+elif use_stdout:
print output
else:
output_file = open(filename, 'w')
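
A minimal sketch of the marker round trip performed by preprocess/postprocess
above (hypothetical Torque snippet; clang-format runs in between):

    snippet = '  if constexpr (Is64()) {\n  typeswitch (x) {'
    marked = preprocess(snippet)
    # clang-format now sees plain tokens:
    #   '  if /*COxp*/ (Is64()) {\n  /*_TYPE*/switch (x) {'
    restored = postprocess(marked)
    # markers are rewritten back to the Torque keywords:
    #   '  if constexpr (Is64()) {\n  typeswitch (x) {'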
diff --git a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
index ece8527c2d..d1d43e5dcb 100644
--- a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
+++ b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
@@ -30,7 +30,7 @@
},
{
"name": "support.function.torque",
- "match": "\\b(min|max|assert|check|debug|unreachable)\\b"
+ "match": "\\b(assert|check|debug|unreachable|Cast|Convert|FromConstexpr|UnsafeCast)\\b"
},
{
"name": "support.variable.torque",
@@ -65,7 +65,7 @@
},
{
"name": "keyword.other.torque",
- "match": "\\b(constexpr|module|macro|builtin|runtime|javascript|implicit|deferred|cast|convert|label|labels|tail|isnt|is|let|generates|type|extends|extern|const)\\b"
+ "match": "\\b(constexpr|module|macro|builtin|runtime|javascript|implicit|deferred|label|labels|tail|let|generates|type|extends|extern|const|typeswitch|case)\\b"
},
{
"name": "keyword.operator.torque",
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index 58035efcc0..c6dc394389 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -18,6 +18,7 @@ BOTS = {
'--nexus7': 'v8_nexus7_perf_try',
'--nexus10': 'v8_nexus10_perf_try',
'--pixel2': 'v8_pixel2_perf_try',
+ '--nokia1': 'v8_nokia1_perf_try',
}
# This list will contain builder names that should be triggered on an internal
@@ -25,6 +26,7 @@ BOTS = {
SWARMING_BOTS = [
'v8_linux64_perf_try',
'v8_pixel2_perf_try',
+ 'v8_nokia1_perf_try',
]
DEFAULT_BOTS = [
diff --git a/deps/v8/tools/turbolizer/README.md b/deps/v8/tools/turbolizer/README.md
index 01d5099c3e..293f4a20a6 100644
--- a/deps/v8/tools/turbolizer/README.md
+++ b/deps/v8/tools/turbolizer/README.md
@@ -10,8 +10,8 @@ the '--trace-turbo' command-line flag.
 Turbolizer is built using npm:
- npm i
- npm run-script build
+ npm i
+ npm run-script build
 Afterwards, turbolizer can be hosted locally by starting a web server that serves
the contents of the turbolizer directory, e.g.:
diff --git a/deps/v8/tools/turbolizer/rollup.config.js b/deps/v8/tools/turbolizer/rollup.config.js
index 844e6e186d..bb34555a7d 100644
--- a/deps/v8/tools/turbolizer/rollup.config.js
+++ b/deps/v8/tools/turbolizer/rollup.config.js
@@ -6,8 +6,7 @@ import typescript from 'rollup-plugin-typescript2';
import node from 'rollup-plugin-node-resolve';
export default {
- entry: "src/turbo-visualizer.ts",
- format: "iife",
+ input: "src/turbo-visualizer.ts",
plugins: [node(), typescript({abortOnError:false})],
- dest: "build/turbolizer.js"
+ output: {file: "build/turbolizer.js", format: "iife", sourcemap: true}
};
diff --git a/deps/v8/tools/turbolizer/src/graphmultiview.ts b/deps/v8/tools/turbolizer/src/graphmultiview.ts
index 66d1103e38..f9e7efb58c 100644
--- a/deps/v8/tools/turbolizer/src/graphmultiview.ts
+++ b/deps/v8/tools/turbolizer/src/graphmultiview.ts
@@ -4,6 +4,7 @@
import {GraphView} from "./graph-view.js"
import {ScheduleView} from "./schedule-view.js"
+import {SequenceView} from "./sequence-view.js"
import {SourceResolver} from "./source-resolver.js"
import {SelectionBroker} from "./selection-broker.js"
import {View, PhaseView} from "./view.js"
@@ -13,6 +14,7 @@ export class GraphMultiView extends View {
selectionBroker: SelectionBroker;
graph: GraphView;
schedule: ScheduleView;
+ sequence: SequenceView;
selectMenu: HTMLSelectElement;
currentPhaseView: View & PhaseView;
@@ -36,6 +38,7 @@ export class GraphMultiView extends View {
this.graph = new GraphView(id, selectionBroker,
(phaseName) => view.displayPhaseByName(phaseName));
this.schedule = new ScheduleView(id, selectionBroker);
+ this.sequence = new SequenceView(id, selectionBroker);
this.selectMenu = (<HTMLSelectElement>document.getElementById('display-selector'));
}
@@ -69,6 +72,8 @@ export class GraphMultiView extends View {
this.displayPhaseView(this.graph, phase.data);
} else if (phase.type == 'schedule') {
this.displayPhaseView(this.schedule, phase);
+ } else if (phase.type == 'sequence') {
+ this.displayPhaseView(this.sequence, phase);
}
}
diff --git a/deps/v8/tools/turbolizer/src/sequence-view.ts b/deps/v8/tools/turbolizer/src/sequence-view.ts
new file mode 100644
index 0000000000..afddb56649
--- /dev/null
+++ b/deps/v8/tools/turbolizer/src/sequence-view.ts
@@ -0,0 +1,235 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {Sequence} from "./source-resolver.js"
+import {isIterable} from "./util.js"
+import {PhaseView} from "./view.js"
+import {TextView} from "./text-view.js"
+
+export class SequenceView extends TextView implements PhaseView {
+ sequence: Sequence;
+ search_info: Array<any>;
+
+ createViewElement() {
+ const pane = document.createElement('div');
+ pane.setAttribute('id', "sequence");
+ return pane;
+ }
+
+ constructor(parentId, broker) {
+ super(parentId, broker, null);
+ }
+
+ attachSelection(s) {
+ const view = this;
+ if (!(s instanceof Set)) return;
+ view.selectionHandler.clear();
+ view.blockSelectionHandler.clear();
+ const selected = new Array();
+ for (const key of s) selected.push(key);
+ view.selectionHandler.select(selected, true);
+ }
+
+ detachSelection() {
+ this.blockSelection.clear();
+ return this.selection.detachSelection();
+ }
+
+ initializeContent(data, rememberedSelection) {
+ this.divNode.innerHTML = '';
+ this.sequence = data.sequence;
+ this.search_info = [];
+ this.addBlocks(this.sequence.blocks);
+ this.attachSelection(rememberedSelection);
+ }
+
+ elementForBlock(block) {
+ const view = this;
+ function createElement(tag: string, cls: string | Array<string>, content?: string) {
+ const el = document.createElement(tag);
+ if (isIterable(cls)) {
+ for (const c of cls) el.classList.add(c);
+ } else {
+ el.classList.add(cls);
+ }
+ if (content != undefined) el.innerHTML = content;
+ return el;
+ }
+
+ function mkLinkHandler(id, handler) {
+ return function (e) {
+ e.stopPropagation();
+ if (!e.shiftKey) {
+ handler.clear();
+ }
+ handler.select(["" + id], true);
+ };
+ }
+
+ function mkBlockLinkHandler(blockId) {
+ return mkLinkHandler(blockId, view.blockSelectionHandler);
+ }
+
+ function mkOperandLinkHandler(text) {
+ return mkLinkHandler(text, view.selectionHandler);
+ }
+
+ function elementForOperand(operand, search_info) {
+ var text = operand.text;
+ const operandEl = createElement("div", ["parameter", "tag", "clickable", operand.type], text);
+ if (operand.tooltip) {
+ operandEl.setAttribute("title", operand.tooltip);
+ }
+ operandEl.onclick = mkOperandLinkHandler(text);
+ search_info.push(text);
+ view.addHtmlElementForNodeId(text, operandEl);
+ return operandEl;
+ }
+
+ function elementForInstruction(instruction, search_info) {
+ const instNodeEl = createElement("div", "instruction-node");
+
+ const inst_id = createElement("div", "instruction-id", instruction.id);
+ instNodeEl.appendChild(inst_id);
+
+ const instContentsEl = createElement("div", "instruction-contents");
+ instNodeEl.appendChild(instContentsEl);
+
+ // Print gap moves.
+ const gapEl = createElement("div", "gap", "gap");
+ instContentsEl.appendChild(gapEl);
+ for (const gap of instruction.gaps) {
+ const moves = createElement("div", ["comma-sep-list", "gap-move"]);
+ for (const move of gap) {
+ const moveEl = createElement("div", "move");
+ const destinationEl = elementForOperand(move[0], search_info);
+ moveEl.appendChild(destinationEl);
+ const assignEl = createElement("div", "assign", "=");
+ moveEl.appendChild(assignEl);
+ const sourceEl = elementForOperand(move[1], search_info);
+ moveEl.appendChild(sourceEl);
+ moves.appendChild(moveEl);
+ }
+ gapEl.appendChild(moves);
+ }
+
+ const instEl = createElement("div", "instruction");
+ instContentsEl.appendChild(instEl);
+
+ if (instruction.outputs.length > 0) {
+ const outputs = createElement("div", ["comma-sep-list", "input-output-list"]);
+ for (const output of instruction.outputs) {
+ const outputEl = elementForOperand(output, search_info);
+ outputs.appendChild(outputEl);
+ }
+ instEl.appendChild(outputs);
+ const assignEl = createElement("div", "assign", "=");
+ instEl.appendChild(assignEl);
+ }
+
+ var text = instruction.opcode + instruction.flags;
+ const inst_label = createElement("div", "node-label", text);
+ search_info.push(text);
+ view.addHtmlElementForNodeId(text, inst_label);
+ instEl.appendChild(inst_label);
+
+ if (instruction.inputs.length > 0) {
+ const inputs = createElement("div", ["comma-sep-list", "input-output-list"]);
+ for (const input of instruction.inputs) {
+ const inputEl = elementForOperand(input, search_info);
+ inputs.appendChild(inputEl);
+ }
+ instEl.appendChild(inputs);
+ }
+
+ if (instruction.temps.length > 0) {
+ const temps = createElement("div", ["comma-sep-list", "input-output-list", "temps"]);
+ for (const temp of instruction.temps) {
+ const tempEl = elementForOperand(temp, search_info);
+ temps.appendChild(tempEl);
+ }
+ instEl.appendChild(temps);
+ }
+
+ return instNodeEl;
+ }
+
+ const sequence_block = createElement("div", "schedule-block");
+
+ const block_id = createElement("div", ["block-id", "com", "clickable"], block.id);
+ block_id.onclick = mkBlockLinkHandler(block.id);
+ sequence_block.appendChild(block_id);
+ const block_pred = createElement("div", ["predecessor-list", "block-list", "comma-sep-list"]);
+ for (const pred of block.predecessors) {
+ const predEl = createElement("div", ["block-id", "com", "clickable"], pred);
+ predEl.onclick = mkBlockLinkHandler(pred);
+ block_pred.appendChild(predEl);
+ }
+ if (block.predecessors.length > 0) sequence_block.appendChild(block_pred);
+ const phis = createElement("div", "phis");
+ sequence_block.appendChild(phis);
+
+ const phiLabel = createElement("div", "phi-label", "phi:");
+ phis.appendChild(phiLabel);
+
+ const phiContents = createElement("div", "phi-contents");
+ phis.appendChild(phiContents);
+
+ for (const phi of block.phis) {
+ const phiEl = createElement("div", "phi");
+ phiContents.appendChild(phiEl);
+
+ const outputEl = elementForOperand(phi.output, this.search_info);
+ phiEl.appendChild(outputEl);
+
+ const assignEl = createElement("div", "assign", "=");
+ phiEl.appendChild(assignEl);
+
+ for (const input of phi.operands) {
+ const inputEl = createElement("div", ["parameter", "tag", "clickable"], input);
+ phiEl.appendChild(inputEl);
+ }
+ }
+
+ const instructions = createElement("div", "instructions");
+ for (const instruction of block.instructions) {
+ instructions.appendChild(elementForInstruction(instruction, this.search_info));
+ }
+ sequence_block.appendChild(instructions);
+ const block_succ = createElement("div", ["successor-list", "block-list", "comma-sep-list"]);
+ for (const succ of block.successors) {
+ const succEl = createElement("div", ["block-id", "com", "clickable"], succ);
+ succEl.onclick = mkBlockLinkHandler(succ);
+ block_succ.appendChild(succEl);
+ }
+ if (block.successors.length > 0) sequence_block.appendChild(block_succ);
+ this.addHtmlElementForBlockId(block.id, sequence_block);
+ return sequence_block;
+ }
+
+ addBlocks(blocks) {
+ for (const block of blocks) {
+ const blockEl = this.elementForBlock(block);
+ this.divNode.appendChild(blockEl);
+ }
+ }
+
+ searchInputAction(searchBar, e) {
+ e.stopPropagation();
+ this.selectionHandler.clear();
+ const query = searchBar.value;
+ if (query.length == 0) return;
+ const select = [];
+ window.sessionStorage.setItem("lastSearch", query);
+ const reg = new RegExp(query);
+ for (const item of this.search_info) {
+ if (reg.exec(item) != null) {
+ select.push(item);
+ }
+ }
+ this.selectionHandler.select(select, true);
+ }
+
+ onresize() { }
+}
diff --git a/deps/v8/tools/turbolizer/src/source-resolver.ts b/deps/v8/tools/turbolizer/src/source-resolver.ts
index b2412d3e31..20f1f5070a 100644
--- a/deps/v8/tools/turbolizer/src/source-resolver.ts
+++ b/deps/v8/tools/turbolizer/src/source-resolver.ts
@@ -76,6 +76,10 @@ export interface Schedule {
nodes: Array<any>;
}
+export interface Sequence {
+ blocks: Array<any>;
+}
+
export class SourceResolver {
nodePositionMap: Array<AnyPosition>;
sources: Array<Source>;
@@ -383,7 +387,10 @@ export class SourceResolver {
if (phase.type == 'disassembly') {
this.disassemblyPhase = phase;
} else if (phase.type == 'schedule') {
- this.phases.push(this.parseSchedule(phase))
+ this.phases.push(this.parseSchedule(phase));
+ this.phaseNames.set(phase.name, this.phases.length);
+ } else if (phase.type == 'sequence') {
+ this.phases.push(this.parseSequence(phase));
this.phaseNames.set(phase.name, this.phases.length);
} else if (phase.type == 'instructions') {
if (phase.nodeIdToInstructionRange) {
@@ -525,4 +532,8 @@ export class SourceResolver {
phase.schedule = state;
return phase;
}
+ parseSequence(phase) {
+ phase.sequence = { blocks: phase.blocks };
+ return phase;
+ }
}
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css
index fcc68ee371..c7d45a7ee2 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.css
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.css
@@ -485,6 +485,11 @@ text {
margin-left: -80px;
}
+#sequence {
+ font-family: monospace;
+ margin-top: 50px;
+}
+
#schedule {
font-family: monospace;
margin-top: 50px;
@@ -579,6 +584,52 @@ text {
display: inline;
}
+.instruction * {
+ padding-right: .5ex;
+}
+
+.phi-label, .instruction-id {
+ display: inline-block;
+ padding-right: .5ex;
+ padding-left: .5ex;
+ min-width: 1ex;
+ vertical-align: top;
+}
+
+.instruction-id:after {
+ content: ":";
+}
+
+.instruction-node, .gap, .instruction {
+ display: block;
+}
+
+.phi-contents, .instruction-contents, .gap *, .instruction * {
+ display: inline-block;
+}
+
+.phi * {
+ padding-right: 1ex;
+ display: inline-block;
+}
+
+.gap .gap-move {
+ padding-left: .5ex;
+ padding-right: .5ex;
+}
+
+.gap > *:before {
+ content: "(";
+}
+
+.gap > *:after {
+ content: ")";
+}
+
+.parameter.constant {
+ outline: 1px dotted red;
+}
+
.clickable:hover {
text-decoration: underline;
}
@@ -601,4 +652,13 @@ text {
.comma-sep-list > *:last-child {
padding-right: 0ex;
-}
\ No newline at end of file
+}
+
+.temps:before {
+ content: "temps: ";
+}
+
+.temps {
+ padding-left: .5ex;
+ outline: 1px dotted grey;
+}
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index 4fb6aaff13..4eb9feeac6 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -684,5 +684,28 @@ class SystemTest(unittest.TestCase):
self.assertIn('sweet/bananas', result.stdout)
self.assertEqual(1, result.returncode, result)
+ def testExitAfterNFailures(self):
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--exit-after-n-failures=2',
+ '-j1',
+ 'sweet/mangoes', # PASS
+ 'sweet/strawberries', # FAIL
+ 'sweet/blackberries', # FAIL
+ 'sweet/raspberries', # should not run
+ )
+ self.assertIn('Running 4 base tests', result.stdout, result)
+ self.assertIn('sweet/mangoes: pass', result.stdout, result)
+ self.assertIn('sweet/strawberries: FAIL', result.stdout, result)
+ self.assertIn('Too many failures, exiting...', result.stdout, result)
+ self.assertIn('sweet/blackberries: FAIL', result.stdout, result)
+ self.assertNotIn('Done running sweet/raspberries', result.stdout, result)
+ self.assertIn('2 tests failed', result.stdout, result)
+ self.assertIn('3 tests ran', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
+
if __name__ == '__main__':
unittest.main()
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
index e889ecabce..9ae985c3dc 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results1.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -4,13 +4,14 @@
"mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
+ "--test",
"strawberries",
"--random-seed=123",
"--nohard-abort"
@@ -20,18 +21,19 @@
"result": "FAIL",
"run": 1,
"stderr": "",
- "stdout": "strawberries --random-seed=123 --nohard-abort\n",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
+ "--test",
"strawberries",
"--random-seed=123",
"--nohard-abort"
@@ -41,18 +43,19 @@
"result": "FAIL",
"run": 2,
"stderr": "",
- "stdout": "strawberries --random-seed=123 --nohard-abort\n",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
+ "--test",
"strawberries",
"--random-seed=123",
"--nohard-abort"
@@ -62,16 +65,17 @@
"result": "FAIL",
"run": 3,
"stderr": "",
- "stdout": "strawberries --random-seed=123 --nohard-abort\n",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
}
],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
+ "--test",
"strawberries",
"--random-seed=123",
"--nohard-abort"
@@ -80,9 +84,10 @@
"name": "sweet/strawberries"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
+ "--test",
"strawberries",
"--random-seed=123",
"--nohard-abort"
@@ -91,9 +96,10 @@
"name": "sweet/strawberries"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
+ "--test",
"strawberries",
"--random-seed=123",
"--nohard-abort"
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
index 74214631dc..d823cfd231 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
@@ -6,6 +6,7 @@
[ALWAYS, {
'raspberries': FAIL,
'strawberries': [PASS, ['mode == release', SLOW], ['mode == debug', NO_VARIANTS]],
+ 'mangoes': [PASS, SLOW],
 # Both cherries and apples are to test how PASS and FAIL from different
# sections are merged.
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
index 1fcf2864b6..bf9c780621 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
@@ -12,15 +12,17 @@ from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite):
def ListTests(self):
return map(
- self._create_test,
- ['bananas', 'apples', 'cherries', 'strawberries', 'raspberries'],
+ self._create_test, [
+ 'bananas', 'apples', 'cherries', 'mangoes', 'strawberries',
+ 'blackberries', 'raspberries',
+ ],
)
def _test_class(self):
return TestCase
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
def get_shell(self):
return 'd8_mocked.py'
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index 13b4abd86c..f35bd9a2ee 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -100,6 +100,27 @@ def CppLintWorker(command):
' in your $PATH. Lint check skipped.')
process.kill()
+def TorqueLintWorker(command):
+ try:
+ process = subprocess.Popen(command, stderr=subprocess.PIPE)
+ process.wait()
+ out_lines = ""
+ error_count = 0
+ while True:
+ out_line = process.stderr.readline()
+ if out_line == '' and process.poll() is not None:
+ break
+ out_lines += out_line
+ error_count += 1
+ sys.stdout.write(out_lines)
+ if error_count != 0:
+ sys.stdout.write("tip: use 'tools/torque/format-torque.py -i <filename>'\n");
+ return error_count
+ except KeyboardInterrupt:
+ process.kill()
+ except Exception:
+ print('Error running format-torque.py')
+ process.kill()
class FileContentsCache(object):
@@ -244,7 +265,7 @@ class CppLintProcessor(SourceFileProcessor):
good_files_cache.Load()
files = good_files_cache.FilterUnchangedFiles(files)
if len(files) == 0:
- print 'No changes in files detected. Skipping cpplint check.'
+ print 'No changes in C/C++ files detected. Skipping cpplint check.'
return True
filters = ",".join([n for n in LINT_RULES])
@@ -270,10 +291,64 @@ class CppLintProcessor(SourceFileProcessor):
good_files_cache.RemoveFile(files[i])
total_errors = sum(results)
- print "Total errors found: %d" % total_errors
+ print "Total C/C++ files found that require formatting: %d" % total_errors
good_files_cache.Save()
return total_errors == 0
+class TorqueFormatProcessor(SourceFileProcessor):
+ """
+ Check .tq files to verify they follow the Torque style guide.
+ """
+
+ def IsRelevant(self, name):
+ return name.endswith('.tq')
+
+ def GetPathsToSearch(self):
+ dirs = ['third-party', 'src']
+ test_dirs = ['torque']
+ return dirs + [join('test', dir) for dir in test_dirs]
+
+ def GetTorquelintScript(self):
+ torque_tools = os.path.join(TOOLS_PATH, "torque")
+ torque_path = os.path.join(torque_tools, "format-torque.py")
+
+ if os.path.isfile(torque_path):
+ return torque_path
+
+ return None
+
+ def ProcessFiles(self, files):
+ good_files_cache = FileContentsCache('.torquelint-cache')
+ good_files_cache.Load()
+ files = good_files_cache.FilterUnchangedFiles(files)
+ if len(files) == 0:
+ print 'No changes in Torque files detected. Skipping Torque lint check.'
+ return True
+
+ torquelint = self.GetTorquelintScript()
+ if torquelint is None:
+ print('Could not find format-torque.')
+ sys.exit(1)
+
+ command = [sys.executable, torquelint, '-l']
+
+ commands = [command + [file] for file in files]
+ count = multiprocessing.cpu_count()
+ pool = multiprocessing.Pool(count)
+ try:
+ results = pool.map_async(TorqueLintWorker, commands).get()
+ except KeyboardInterrupt:
+ print "\nCaught KeyboardInterrupt, terminating workers."
+ sys.exit(1)
+
+ for i in range(len(files)):
+ if results[i] > 0:
+ good_files_cache.RemoveFile(files[i])
+
+ total_errors = sum(results)
+ print "Total Torque files requiring formatting: %d" % total_errors
+ good_files_cache.Save()
+ return total_errors == 0
COPYRIGHT_HEADER_PATTERN = re.compile(
r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
@@ -297,7 +372,7 @@ class SourceProcessor(SourceFileProcessor):
m = pattern.match(line)
if m:
runtime_functions.append(m.group(1))
- if len(runtime_functions) < 450:
+ if len(runtime_functions) < 250:
print ("Runtime functions list is suspiciously short. "
"Consider updating the presubmit script.")
sys.exit(1)
@@ -584,6 +659,8 @@ def Main():
if not options.no_lint:
print "Running C++ lint check..."
success &= CppLintProcessor().RunOnPath(workspace)
+ print "Running Torque formatting check..."
+ success &= TorqueFormatProcessor().RunOnPath(workspace)
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
success &= SourceProcessor().RunOnPath(workspace)
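
For reference, each TorqueLintWorker invocation above effectively runs the
new lint mode of format-torque.py; a hypothetical example:

    $ python tools/torque/format-torque.py -l src/builtins/base.tq
    src/builtins/base.tq requires formatting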
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 16927d85b3..f8c8061ff4 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -12,9 +12,9 @@ INSTANCE_TYPES = {
8: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
18: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
- 34: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE",
- 42: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
- 50: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
+ 34: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE",
+ 42: "UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
+ 50: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
64: "STRING_TYPE",
65: "CONS_STRING_TYPE",
66: "EXTERNAL_STRING_TYPE",
@@ -26,9 +26,9 @@ INSTANCE_TYPES = {
75: "SLICED_ONE_BYTE_STRING_TYPE",
77: "THIN_ONE_BYTE_STRING_TYPE",
82: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
- 98: "SHORT_EXTERNAL_STRING_TYPE",
- 106: "SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE",
- 114: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
+ 98: "UNCACHED_EXTERNAL_STRING_TYPE",
+ 106: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
+ 114: "UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
128: "SYMBOL_TYPE",
129: "HEAP_NUMBER_TYPE",
130: "BIGINT_TYPE",
@@ -82,48 +82,50 @@ INSTANCE_TYPES = {
178: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
179: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
180: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
- 181: "ALLOCATION_SITE_TYPE",
- 182: "FIXED_ARRAY_TYPE",
- 183: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 184: "HASH_TABLE_TYPE",
- 185: "ORDERED_HASH_MAP_TYPE",
- 186: "ORDERED_HASH_SET_TYPE",
- 187: "NAME_DICTIONARY_TYPE",
- 188: "GLOBAL_DICTIONARY_TYPE",
- 189: "NUMBER_DICTIONARY_TYPE",
- 190: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 191: "STRING_TABLE_TYPE",
- 192: "EPHEMERON_HASH_TABLE_TYPE",
- 193: "SCOPE_INFO_TYPE",
- 194: "SCRIPT_CONTEXT_TABLE_TYPE",
- 195: "BLOCK_CONTEXT_TYPE",
- 196: "CATCH_CONTEXT_TYPE",
- 197: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 198: "EVAL_CONTEXT_TYPE",
- 199: "FUNCTION_CONTEXT_TYPE",
- 200: "MODULE_CONTEXT_TYPE",
- 201: "NATIVE_CONTEXT_TYPE",
- 202: "SCRIPT_CONTEXT_TYPE",
- 203: "WITH_CONTEXT_TYPE",
- 204: "WEAK_FIXED_ARRAY_TYPE",
- 205: "DESCRIPTOR_ARRAY_TYPE",
- 206: "TRANSITION_ARRAY_TYPE",
- 207: "CALL_HANDLER_INFO_TYPE",
- 208: "CELL_TYPE",
- 209: "CODE_DATA_CONTAINER_TYPE",
- 210: "FEEDBACK_CELL_TYPE",
- 211: "FEEDBACK_VECTOR_TYPE",
- 212: "LOAD_HANDLER_TYPE",
- 213: "PRE_PARSED_SCOPE_DATA_TYPE",
- 214: "PROPERTY_ARRAY_TYPE",
- 215: "PROPERTY_CELL_TYPE",
- 216: "SHARED_FUNCTION_INFO_TYPE",
- 217: "SMALL_ORDERED_HASH_MAP_TYPE",
- 218: "SMALL_ORDERED_HASH_SET_TYPE",
- 219: "STORE_HANDLER_TYPE",
- 220: "UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE",
- 221: "UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE",
- 222: "WEAK_ARRAY_LIST_TYPE",
+ 181: "MICROTASK_QUEUE_TYPE",
+ 182: "ALLOCATION_SITE_TYPE",
+ 183: "FIXED_ARRAY_TYPE",
+ 184: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 185: "HASH_TABLE_TYPE",
+ 186: "ORDERED_HASH_MAP_TYPE",
+ 187: "ORDERED_HASH_SET_TYPE",
+ 188: "NAME_DICTIONARY_TYPE",
+ 189: "GLOBAL_DICTIONARY_TYPE",
+ 190: "NUMBER_DICTIONARY_TYPE",
+ 191: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 192: "STRING_TABLE_TYPE",
+ 193: "EPHEMERON_HASH_TABLE_TYPE",
+ 194: "SCOPE_INFO_TYPE",
+ 195: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 196: "AWAIT_CONTEXT_TYPE",
+ 197: "BLOCK_CONTEXT_TYPE",
+ 198: "CATCH_CONTEXT_TYPE",
+ 199: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 200: "EVAL_CONTEXT_TYPE",
+ 201: "FUNCTION_CONTEXT_TYPE",
+ 202: "MODULE_CONTEXT_TYPE",
+ 203: "NATIVE_CONTEXT_TYPE",
+ 204: "SCRIPT_CONTEXT_TYPE",
+ 205: "WITH_CONTEXT_TYPE",
+ 206: "WEAK_FIXED_ARRAY_TYPE",
+ 207: "DESCRIPTOR_ARRAY_TYPE",
+ 208: "TRANSITION_ARRAY_TYPE",
+ 209: "CALL_HANDLER_INFO_TYPE",
+ 210: "CELL_TYPE",
+ 211: "CODE_DATA_CONTAINER_TYPE",
+ 212: "FEEDBACK_CELL_TYPE",
+ 213: "FEEDBACK_VECTOR_TYPE",
+ 214: "LOAD_HANDLER_TYPE",
+ 215: "PRE_PARSED_SCOPE_DATA_TYPE",
+ 216: "PROPERTY_ARRAY_TYPE",
+ 217: "PROPERTY_CELL_TYPE",
+ 218: "SHARED_FUNCTION_INFO_TYPE",
+ 219: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 220: "SMALL_ORDERED_HASH_SET_TYPE",
+ 221: "STORE_HANDLER_TYPE",
+ 222: "UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE",
+ 223: "UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE",
+ 224: "WEAK_ARRAY_LIST_TYPE",
1024: "JS_PROXY_TYPE",
1025: "JS_GLOBAL_OBJECT_TYPE",
1026: "JS_GLOBAL_PROXY_TYPE",
@@ -158,18 +160,23 @@ INSTANCE_TYPES = {
1081: "JS_WEAK_SET_TYPE",
1082: "JS_TYPED_ARRAY_TYPE",
1083: "JS_DATA_VIEW_TYPE",
- 1084: "JS_INTL_COLLATOR_TYPE",
- 1085: "JS_INTL_LIST_FORMAT_TYPE",
- 1086: "JS_INTL_LOCALE_TYPE",
- 1087: "JS_INTL_PLURAL_RULES_TYPE",
- 1088: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
- 1089: "WASM_GLOBAL_TYPE",
- 1090: "WASM_INSTANCE_TYPE",
- 1091: "WASM_MEMORY_TYPE",
- 1092: "WASM_MODULE_TYPE",
- 1093: "WASM_TABLE_TYPE",
- 1094: "JS_BOUND_FUNCTION_TYPE",
- 1095: "JS_FUNCTION_TYPE",
+ 1084: "JS_INTL_V8_BREAK_ITERATOR_TYPE",
+ 1085: "JS_INTL_COLLATOR_TYPE",
+ 1086: "JS_INTL_DATE_TIME_FORMAT_TYPE",
+ 1087: "JS_INTL_LIST_FORMAT_TYPE",
+ 1088: "JS_INTL_LOCALE_TYPE",
+ 1089: "JS_INTL_NUMBER_FORMAT_TYPE",
+ 1090: "JS_INTL_PLURAL_RULES_TYPE",
+ 1091: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
+ 1092: "JS_INTL_SEGMENTER_TYPE",
+ 1093: "WASM_EXCEPTION_TYPE",
+ 1094: "WASM_GLOBAL_TYPE",
+ 1095: "WASM_INSTANCE_TYPE",
+ 1096: "WASM_MEMORY_TYPE",
+ 1097: "WASM_MODULE_TYPE",
+ 1098: "WASM_TABLE_TYPE",
+ 1099: "JS_BOUND_FUNCTION_TYPE",
+ 1100: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
@@ -177,139 +184,148 @@ KNOWN_MAPS = {
("RO_SPACE", 0x02201): (138, "FreeSpaceMap"),
("RO_SPACE", 0x02251): (132, "MetaMap"),
("RO_SPACE", 0x022d1): (131, "NullMap"),
- ("RO_SPACE", 0x02341): (205, "DescriptorArrayMap"),
- ("RO_SPACE", 0x023a1): (204, "WeakFixedArrayMap"),
+ ("RO_SPACE", 0x02341): (207, "DescriptorArrayMap"),
+ ("RO_SPACE", 0x023a1): (206, "WeakFixedArrayMap"),
("RO_SPACE", 0x023f1): (152, "OnePointerFillerMap"),
("RO_SPACE", 0x02441): (152, "TwoPointerFillerMap"),
("RO_SPACE", 0x024c1): (131, "UninitializedMap"),
- ("RO_SPACE", 0x02539): (8, "OneByteInternalizedStringMap"),
- ("RO_SPACE", 0x025e1): (131, "UndefinedMap"),
- ("RO_SPACE", 0x02641): (129, "HeapNumberMap"),
- ("RO_SPACE", 0x026c1): (131, "TheHoleMap"),
- ("RO_SPACE", 0x02771): (131, "BooleanMap"),
- ("RO_SPACE", 0x02869): (136, "ByteArrayMap"),
- ("RO_SPACE", 0x028b9): (182, "FixedArrayMap"),
- ("RO_SPACE", 0x02909): (182, "FixedCOWArrayMap"),
- ("RO_SPACE", 0x02959): (184, "HashTableMap"),
- ("RO_SPACE", 0x029a9): (128, "SymbolMap"),
- ("RO_SPACE", 0x029f9): (72, "OneByteStringMap"),
- ("RO_SPACE", 0x02a49): (193, "ScopeInfoMap"),
- ("RO_SPACE", 0x02a99): (216, "SharedFunctionInfoMap"),
- ("RO_SPACE", 0x02ae9): (133, "CodeMap"),
- ("RO_SPACE", 0x02b39): (199, "FunctionContextMap"),
- ("RO_SPACE", 0x02b89): (208, "CellMap"),
- ("RO_SPACE", 0x02bd9): (215, "GlobalPropertyCellMap"),
- ("RO_SPACE", 0x02c29): (135, "ForeignMap"),
- ("RO_SPACE", 0x02c79): (206, "TransitionArrayMap"),
- ("RO_SPACE", 0x02cc9): (211, "FeedbackVectorMap"),
- ("RO_SPACE", 0x02d69): (131, "ArgumentsMarkerMap"),
- ("RO_SPACE", 0x02e11): (131, "ExceptionMap"),
- ("RO_SPACE", 0x02eb9): (131, "TerminationExceptionMap"),
- ("RO_SPACE", 0x02f69): (131, "OptimizedOutMap"),
- ("RO_SPACE", 0x03011): (131, "StaleRegisterMap"),
- ("RO_SPACE", 0x03089): (201, "NativeContextMap"),
- ("RO_SPACE", 0x030d9): (200, "ModuleContextMap"),
- ("RO_SPACE", 0x03129): (198, "EvalContextMap"),
- ("RO_SPACE", 0x03179): (202, "ScriptContextMap"),
- ("RO_SPACE", 0x031c9): (195, "BlockContextMap"),
- ("RO_SPACE", 0x03219): (196, "CatchContextMap"),
- ("RO_SPACE", 0x03269): (203, "WithContextMap"),
- ("RO_SPACE", 0x032b9): (197, "DebugEvaluateContextMap"),
- ("RO_SPACE", 0x03309): (194, "ScriptContextTableMap"),
- ("RO_SPACE", 0x03359): (151, "FeedbackMetadataArrayMap"),
- ("RO_SPACE", 0x033a9): (182, "ArrayListMap"),
- ("RO_SPACE", 0x033f9): (130, "BigIntMap"),
- ("RO_SPACE", 0x03449): (183, "ObjectBoilerplateDescriptionMap"),
- ("RO_SPACE", 0x03499): (137, "BytecodeArrayMap"),
- ("RO_SPACE", 0x034e9): (209, "CodeDataContainerMap"),
- ("RO_SPACE", 0x03539): (150, "FixedDoubleArrayMap"),
- ("RO_SPACE", 0x03589): (188, "GlobalDictionaryMap"),
- ("RO_SPACE", 0x035d9): (210, "ManyClosuresCellMap"),
- ("RO_SPACE", 0x03629): (182, "ModuleInfoMap"),
- ("RO_SPACE", 0x03679): (134, "MutableHeapNumberMap"),
- ("RO_SPACE", 0x036c9): (187, "NameDictionaryMap"),
- ("RO_SPACE", 0x03719): (210, "NoClosuresCellMap"),
- ("RO_SPACE", 0x03769): (189, "NumberDictionaryMap"),
- ("RO_SPACE", 0x037b9): (210, "OneClosureCellMap"),
- ("RO_SPACE", 0x03809): (185, "OrderedHashMapMap"),
- ("RO_SPACE", 0x03859): (186, "OrderedHashSetMap"),
- ("RO_SPACE", 0x038a9): (213, "PreParsedScopeDataMap"),
- ("RO_SPACE", 0x038f9): (214, "PropertyArrayMap"),
- ("RO_SPACE", 0x03949): (207, "SideEffectCallHandlerInfoMap"),
- ("RO_SPACE", 0x03999): (207, "SideEffectFreeCallHandlerInfoMap"),
- ("RO_SPACE", 0x039e9): (207, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("RO_SPACE", 0x03a39): (190, "SimpleNumberDictionaryMap"),
- ("RO_SPACE", 0x03a89): (182, "SloppyArgumentsElementsMap"),
- ("RO_SPACE", 0x03ad9): (217, "SmallOrderedHashMapMap"),
- ("RO_SPACE", 0x03b29): (218, "SmallOrderedHashSetMap"),
- ("RO_SPACE", 0x03b79): (191, "StringTableMap"),
- ("RO_SPACE", 0x03bc9): (220, "UncompiledDataWithoutPreParsedScopeMap"),
- ("RO_SPACE", 0x03c19): (221, "UncompiledDataWithPreParsedScopeMap"),
- ("RO_SPACE", 0x03c69): (222, "WeakArrayListMap"),
- ("RO_SPACE", 0x03cb9): (192, "EphemeronHashTableMap"),
- ("RO_SPACE", 0x03d09): (106, "NativeSourceStringMap"),
- ("RO_SPACE", 0x03d59): (64, "StringMap"),
- ("RO_SPACE", 0x03da9): (73, "ConsOneByteStringMap"),
- ("RO_SPACE", 0x03df9): (65, "ConsStringMap"),
- ("RO_SPACE", 0x03e49): (77, "ThinOneByteStringMap"),
- ("RO_SPACE", 0x03e99): (69, "ThinStringMap"),
- ("RO_SPACE", 0x03ee9): (67, "SlicedStringMap"),
- ("RO_SPACE", 0x03f39): (75, "SlicedOneByteStringMap"),
- ("RO_SPACE", 0x03f89): (66, "ExternalStringMap"),
- ("RO_SPACE", 0x03fd9): (82, "ExternalStringWithOneByteDataMap"),
- ("RO_SPACE", 0x04029): (74, "ExternalOneByteStringMap"),
- ("RO_SPACE", 0x04079): (98, "ShortExternalStringMap"),
- ("RO_SPACE", 0x040c9): (114, "ShortExternalStringWithOneByteDataMap"),
- ("RO_SPACE", 0x04119): (0, "InternalizedStringMap"),
- ("RO_SPACE", 0x04169): (2, "ExternalInternalizedStringMap"),
- ("RO_SPACE", 0x041b9): (18, "ExternalInternalizedStringWithOneByteDataMap"),
- ("RO_SPACE", 0x04209): (10, "ExternalOneByteInternalizedStringMap"),
- ("RO_SPACE", 0x04259): (34, "ShortExternalInternalizedStringMap"),
- ("RO_SPACE", 0x042a9): (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
- ("RO_SPACE", 0x042f9): (42, "ShortExternalOneByteInternalizedStringMap"),
- ("RO_SPACE", 0x04349): (106, "ShortExternalOneByteStringMap"),
- ("RO_SPACE", 0x04399): (140, "FixedUint8ArrayMap"),
- ("RO_SPACE", 0x043e9): (139, "FixedInt8ArrayMap"),
- ("RO_SPACE", 0x04439): (142, "FixedUint16ArrayMap"),
- ("RO_SPACE", 0x04489): (141, "FixedInt16ArrayMap"),
- ("RO_SPACE", 0x044d9): (144, "FixedUint32ArrayMap"),
- ("RO_SPACE", 0x04529): (143, "FixedInt32ArrayMap"),
- ("RO_SPACE", 0x04579): (145, "FixedFloat32ArrayMap"),
- ("RO_SPACE", 0x045c9): (146, "FixedFloat64ArrayMap"),
- ("RO_SPACE", 0x04619): (147, "FixedUint8ClampedArrayMap"),
- ("RO_SPACE", 0x04669): (149, "FixedBigUint64ArrayMap"),
- ("RO_SPACE", 0x046b9): (148, "FixedBigInt64ArrayMap"),
- ("RO_SPACE", 0x04709): (131, "SelfReferenceMarkerMap"),
- ("RO_SPACE", 0x04771): (171, "Tuple2Map"),
- ("RO_SPACE", 0x04811): (173, "ArrayBoilerplateDescriptionMap"),
- ("RO_SPACE", 0x04b01): (161, "InterceptorInfoMap"),
- ("RO_SPACE", 0x04bf9): (169, "ScriptMap"),
- ("RO_SPACE", 0x09aa1): (154, "AccessorInfoMap"),
- ("RO_SPACE", 0x09af1): (153, "AccessCheckInfoMap"),
- ("RO_SPACE", 0x09b41): (155, "AccessorPairMap"),
- ("RO_SPACE", 0x09b91): (156, "AliasedArgumentsEntryMap"),
- ("RO_SPACE", 0x09be1): (157, "AllocationMementoMap"),
- ("RO_SPACE", 0x09c31): (158, "AsyncGeneratorRequestMap"),
- ("RO_SPACE", 0x09c81): (159, "DebugInfoMap"),
- ("RO_SPACE", 0x09cd1): (160, "FunctionTemplateInfoMap"),
- ("RO_SPACE", 0x09d21): (162, "InterpreterDataMap"),
- ("RO_SPACE", 0x09d71): (163, "ModuleInfoEntryMap"),
- ("RO_SPACE", 0x09dc1): (164, "ModuleMap"),
- ("RO_SPACE", 0x09e11): (165, "ObjectTemplateInfoMap"),
- ("RO_SPACE", 0x09e61): (166, "PromiseCapabilityMap"),
- ("RO_SPACE", 0x09eb1): (167, "PromiseReactionMap"),
- ("RO_SPACE", 0x09f01): (168, "PrototypeInfoMap"),
- ("RO_SPACE", 0x09f51): (170, "StackFrameInfoMap"),
- ("RO_SPACE", 0x09fa1): (172, "Tuple3Map"),
- ("RO_SPACE", 0x09ff1): (174, "WasmDebugInfoMap"),
- ("RO_SPACE", 0x0a041): (175, "WasmExportedFunctionDataMap"),
- ("RO_SPACE", 0x0a091): (176, "CallableTaskMap"),
- ("RO_SPACE", 0x0a0e1): (177, "CallbackTaskMap"),
- ("RO_SPACE", 0x0a131): (178, "PromiseFulfillReactionJobTaskMap"),
- ("RO_SPACE", 0x0a181): (179, "PromiseRejectReactionJobTaskMap"),
- ("RO_SPACE", 0x0a1d1): (180, "PromiseResolveThenableJobTaskMap"),
- ("RO_SPACE", 0x0a221): (181, "AllocationSiteMap"),
- ("RO_SPACE", 0x0a271): (181, "AllocationSiteMap"),
+ ("RO_SPACE", 0x02531): (8, "OneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x025d1): (131, "UndefinedMap"),
+ ("RO_SPACE", 0x02631): (129, "HeapNumberMap"),
+ ("RO_SPACE", 0x026b1): (131, "TheHoleMap"),
+ ("RO_SPACE", 0x02759): (131, "BooleanMap"),
+ ("RO_SPACE", 0x02831): (136, "ByteArrayMap"),
+ ("RO_SPACE", 0x02881): (183, "FixedArrayMap"),
+ ("RO_SPACE", 0x028d1): (183, "FixedCOWArrayMap"),
+ ("RO_SPACE", 0x02921): (185, "HashTableMap"),
+ ("RO_SPACE", 0x02971): (128, "SymbolMap"),
+ ("RO_SPACE", 0x029c1): (72, "OneByteStringMap"),
+ ("RO_SPACE", 0x02a11): (194, "ScopeInfoMap"),
+ ("RO_SPACE", 0x02a61): (218, "SharedFunctionInfoMap"),
+ ("RO_SPACE", 0x02ab1): (133, "CodeMap"),
+ ("RO_SPACE", 0x02b01): (201, "FunctionContextMap"),
+ ("RO_SPACE", 0x02b51): (210, "CellMap"),
+ ("RO_SPACE", 0x02ba1): (217, "GlobalPropertyCellMap"),
+ ("RO_SPACE", 0x02bf1): (135, "ForeignMap"),
+ ("RO_SPACE", 0x02c41): (208, "TransitionArrayMap"),
+ ("RO_SPACE", 0x02c91): (213, "FeedbackVectorMap"),
+ ("RO_SPACE", 0x02d31): (131, "ArgumentsMarkerMap"),
+ ("RO_SPACE", 0x02dd1): (131, "ExceptionMap"),
+ ("RO_SPACE", 0x02e71): (131, "TerminationExceptionMap"),
+ ("RO_SPACE", 0x02f19): (131, "OptimizedOutMap"),
+ ("RO_SPACE", 0x02fb9): (131, "StaleRegisterMap"),
+ ("RO_SPACE", 0x03029): (203, "NativeContextMap"),
+ ("RO_SPACE", 0x03079): (202, "ModuleContextMap"),
+ ("RO_SPACE", 0x030c9): (200, "EvalContextMap"),
+ ("RO_SPACE", 0x03119): (204, "ScriptContextMap"),
+ ("RO_SPACE", 0x03169): (196, "AwaitContextMap"),
+ ("RO_SPACE", 0x031b9): (197, "BlockContextMap"),
+ ("RO_SPACE", 0x03209): (198, "CatchContextMap"),
+ ("RO_SPACE", 0x03259): (205, "WithContextMap"),
+ ("RO_SPACE", 0x032a9): (199, "DebugEvaluateContextMap"),
+ ("RO_SPACE", 0x032f9): (195, "ScriptContextTableMap"),
+ ("RO_SPACE", 0x03349): (151, "FeedbackMetadataArrayMap"),
+ ("RO_SPACE", 0x03399): (183, "ArrayListMap"),
+ ("RO_SPACE", 0x033e9): (130, "BigIntMap"),
+ ("RO_SPACE", 0x03439): (184, "ObjectBoilerplateDescriptionMap"),
+ ("RO_SPACE", 0x03489): (137, "BytecodeArrayMap"),
+ ("RO_SPACE", 0x034d9): (211, "CodeDataContainerMap"),
+ ("RO_SPACE", 0x03529): (150, "FixedDoubleArrayMap"),
+ ("RO_SPACE", 0x03579): (189, "GlobalDictionaryMap"),
+ ("RO_SPACE", 0x035c9): (212, "ManyClosuresCellMap"),
+ ("RO_SPACE", 0x03619): (183, "ModuleInfoMap"),
+ ("RO_SPACE", 0x03669): (134, "MutableHeapNumberMap"),
+ ("RO_SPACE", 0x036b9): (188, "NameDictionaryMap"),
+ ("RO_SPACE", 0x03709): (212, "NoClosuresCellMap"),
+ ("RO_SPACE", 0x03759): (190, "NumberDictionaryMap"),
+ ("RO_SPACE", 0x037a9): (212, "OneClosureCellMap"),
+ ("RO_SPACE", 0x037f9): (186, "OrderedHashMapMap"),
+ ("RO_SPACE", 0x03849): (187, "OrderedHashSetMap"),
+ ("RO_SPACE", 0x03899): (215, "PreParsedScopeDataMap"),
+ ("RO_SPACE", 0x038e9): (216, "PropertyArrayMap"),
+ ("RO_SPACE", 0x03939): (209, "SideEffectCallHandlerInfoMap"),
+ ("RO_SPACE", 0x03989): (209, "SideEffectFreeCallHandlerInfoMap"),
+ ("RO_SPACE", 0x039d9): (209, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("RO_SPACE", 0x03a29): (191, "SimpleNumberDictionaryMap"),
+ ("RO_SPACE", 0x03a79): (183, "SloppyArgumentsElementsMap"),
+ ("RO_SPACE", 0x03ac9): (219, "SmallOrderedHashMapMap"),
+ ("RO_SPACE", 0x03b19): (220, "SmallOrderedHashSetMap"),
+ ("RO_SPACE", 0x03b69): (192, "StringTableMap"),
+ ("RO_SPACE", 0x03bb9): (222, "UncompiledDataWithoutPreParsedScopeMap"),
+ ("RO_SPACE", 0x03c09): (223, "UncompiledDataWithPreParsedScopeMap"),
+ ("RO_SPACE", 0x03c59): (224, "WeakArrayListMap"),
+ ("RO_SPACE", 0x03ca9): (193, "EphemeronHashTableMap"),
+ ("RO_SPACE", 0x03cf9): (106, "NativeSourceStringMap"),
+ ("RO_SPACE", 0x03d49): (64, "StringMap"),
+ ("RO_SPACE", 0x03d99): (73, "ConsOneByteStringMap"),
+ ("RO_SPACE", 0x03de9): (65, "ConsStringMap"),
+ ("RO_SPACE", 0x03e39): (77, "ThinOneByteStringMap"),
+ ("RO_SPACE", 0x03e89): (69, "ThinStringMap"),
+ ("RO_SPACE", 0x03ed9): (67, "SlicedStringMap"),
+ ("RO_SPACE", 0x03f29): (75, "SlicedOneByteStringMap"),
+ ("RO_SPACE", 0x03f79): (66, "ExternalStringMap"),
+ ("RO_SPACE", 0x03fc9): (82, "ExternalStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x04019): (74, "ExternalOneByteStringMap"),
+ ("RO_SPACE", 0x04069): (98, "UncachedExternalStringMap"),
+ ("RO_SPACE", 0x040b9): (114, "UncachedExternalStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x04109): (0, "InternalizedStringMap"),
+ ("RO_SPACE", 0x04159): (2, "ExternalInternalizedStringMap"),
+ ("RO_SPACE", 0x041a9): (18, "ExternalInternalizedStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x041f9): (10, "ExternalOneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x04249): (34, "UncachedExternalInternalizedStringMap"),
+ ("RO_SPACE", 0x04299): (50, "UncachedExternalInternalizedStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x042e9): (42, "UncachedExternalOneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x04339): (106, "UncachedExternalOneByteStringMap"),
+ ("RO_SPACE", 0x04389): (140, "FixedUint8ArrayMap"),
+ ("RO_SPACE", 0x043d9): (139, "FixedInt8ArrayMap"),
+ ("RO_SPACE", 0x04429): (142, "FixedUint16ArrayMap"),
+ ("RO_SPACE", 0x04479): (141, "FixedInt16ArrayMap"),
+ ("RO_SPACE", 0x044c9): (144, "FixedUint32ArrayMap"),
+ ("RO_SPACE", 0x04519): (143, "FixedInt32ArrayMap"),
+ ("RO_SPACE", 0x04569): (145, "FixedFloat32ArrayMap"),
+ ("RO_SPACE", 0x045b9): (146, "FixedFloat64ArrayMap"),
+ ("RO_SPACE", 0x04609): (147, "FixedUint8ClampedArrayMap"),
+ ("RO_SPACE", 0x04659): (149, "FixedBigUint64ArrayMap"),
+ ("RO_SPACE", 0x046a9): (148, "FixedBigInt64ArrayMap"),
+ ("RO_SPACE", 0x046f9): (131, "SelfReferenceMarkerMap"),
+ ("RO_SPACE", 0x04761): (171, "Tuple2Map"),
+ ("RO_SPACE", 0x04801): (173, "ArrayBoilerplateDescriptionMap"),
+ ("RO_SPACE", 0x04af1): (161, "InterceptorInfoMap"),
+ ("RO_SPACE", 0x06ea9): (153, "AccessCheckInfoMap"),
+ ("RO_SPACE", 0x06ef9): (154, "AccessorInfoMap"),
+ ("RO_SPACE", 0x06f49): (155, "AccessorPairMap"),
+ ("RO_SPACE", 0x06f99): (156, "AliasedArgumentsEntryMap"),
+ ("RO_SPACE", 0x06fe9): (157, "AllocationMementoMap"),
+ ("RO_SPACE", 0x07039): (158, "AsyncGeneratorRequestMap"),
+ ("RO_SPACE", 0x07089): (159, "DebugInfoMap"),
+ ("RO_SPACE", 0x070d9): (160, "FunctionTemplateInfoMap"),
+ ("RO_SPACE", 0x07129): (162, "InterpreterDataMap"),
+ ("RO_SPACE", 0x07179): (163, "ModuleInfoEntryMap"),
+ ("RO_SPACE", 0x071c9): (164, "ModuleMap"),
+ ("RO_SPACE", 0x07219): (165, "ObjectTemplateInfoMap"),
+ ("RO_SPACE", 0x07269): (166, "PromiseCapabilityMap"),
+ ("RO_SPACE", 0x072b9): (167, "PromiseReactionMap"),
+ ("RO_SPACE", 0x07309): (168, "PrototypeInfoMap"),
+ ("RO_SPACE", 0x07359): (169, "ScriptMap"),
+ ("RO_SPACE", 0x073a9): (170, "StackFrameInfoMap"),
+ ("RO_SPACE", 0x073f9): (172, "Tuple3Map"),
+ ("RO_SPACE", 0x07449): (174, "WasmDebugInfoMap"),
+ ("RO_SPACE", 0x07499): (175, "WasmExportedFunctionDataMap"),
+ ("RO_SPACE", 0x074e9): (176, "CallableTaskMap"),
+ ("RO_SPACE", 0x07539): (177, "CallbackTaskMap"),
+ ("RO_SPACE", 0x07589): (178, "PromiseFulfillReactionJobTaskMap"),
+ ("RO_SPACE", 0x075d9): (179, "PromiseRejectReactionJobTaskMap"),
+ ("RO_SPACE", 0x07629): (180, "PromiseResolveThenableJobTaskMap"),
+ ("RO_SPACE", 0x07679): (181, "MicrotaskQueueMap"),
+ ("RO_SPACE", 0x076c9): (182, "AllocationSiteWithWeakNextMap"),
+ ("RO_SPACE", 0x07719): (182, "AllocationSiteWithoutWeakNextMap"),
+ ("RO_SPACE", 0x07769): (214, "LoadHandler1Map"),
+ ("RO_SPACE", 0x077b9): (214, "LoadHandler2Map"),
+ ("RO_SPACE", 0x07809): (214, "LoadHandler3Map"),
+ ("RO_SPACE", 0x07859): (221, "StoreHandler0Map"),
+ ("RO_SPACE", 0x078a9): (221, "StoreHandler1Map"),
+ ("RO_SPACE", 0x078f9): (221, "StoreHandler2Map"),
+ ("RO_SPACE", 0x07949): (221, "StoreHandler3Map"),
("MAP_SPACE", 0x02201): (1057, "ExternalMap"),
("MAP_SPACE", 0x02251): (1072, "JSMessageObjectMap"),
}
@@ -319,39 +335,39 @@ KNOWN_OBJECTS = {
("RO_SPACE", 0x022a1): "NullValue",
("RO_SPACE", 0x02321): "EmptyDescriptorArray",
("RO_SPACE", 0x02491): "UninitializedValue",
- ("RO_SPACE", 0x025b1): "UndefinedValue",
- ("RO_SPACE", 0x02631): "NanValue",
- ("RO_SPACE", 0x02691): "TheHoleValue",
- ("RO_SPACE", 0x02731): "HoleNanValue",
- ("RO_SPACE", 0x02741): "TrueValue",
- ("RO_SPACE", 0x02801): "FalseValue",
- ("RO_SPACE", 0x02851): "empty_string",
- ("RO_SPACE", 0x02d19): "EmptyScopeInfo",
- ("RO_SPACE", 0x02d29): "EmptyFixedArray",
- ("RO_SPACE", 0x02d39): "ArgumentsMarker",
- ("RO_SPACE", 0x02de1): "Exception",
- ("RO_SPACE", 0x02e89): "TerminationException",
- ("RO_SPACE", 0x02f39): "OptimizedOut",
- ("RO_SPACE", 0x02fe1): "StaleRegister",
- ("RO_SPACE", 0x047d1): "EmptyByteArray",
- ("RO_SPACE", 0x04861): "EmptyFixedUint8Array",
- ("RO_SPACE", 0x04881): "EmptyFixedInt8Array",
- ("RO_SPACE", 0x048a1): "EmptyFixedUint16Array",
- ("RO_SPACE", 0x048c1): "EmptyFixedInt16Array",
- ("RO_SPACE", 0x048e1): "EmptyFixedUint32Array",
- ("RO_SPACE", 0x04901): "EmptyFixedInt32Array",
- ("RO_SPACE", 0x04921): "EmptyFixedFloat32Array",
- ("RO_SPACE", 0x04941): "EmptyFixedFloat64Array",
- ("RO_SPACE", 0x04961): "EmptyFixedUint8ClampedArray",
- ("RO_SPACE", 0x049c1): "EmptySloppyArgumentsElements",
- ("RO_SPACE", 0x049e1): "EmptySlowElementDictionary",
- ("RO_SPACE", 0x04a29): "EmptyOrderedHashMap",
- ("RO_SPACE", 0x04a51): "EmptyOrderedHashSet",
- ("RO_SPACE", 0x04a89): "EmptyPropertyCell",
- ("RO_SPACE", 0x04b69): "InfinityValue",
- ("RO_SPACE", 0x04b79): "MinusZeroValue",
- ("RO_SPACE", 0x04b89): "MinusInfinityValue",
- ("RO_SPACE", 0x04b99): "SelfReferenceMarker",
+ ("RO_SPACE", 0x025a1): "UndefinedValue",
+ ("RO_SPACE", 0x02621): "NanValue",
+ ("RO_SPACE", 0x02681): "TheHoleValue",
+ ("RO_SPACE", 0x02719): "HoleNanValue",
+ ("RO_SPACE", 0x02729): "TrueValue",
+ ("RO_SPACE", 0x027d9): "FalseValue",
+ ("RO_SPACE", 0x02821): "empty_string",
+ ("RO_SPACE", 0x02ce1): "EmptyScopeInfo",
+ ("RO_SPACE", 0x02cf1): "EmptyFixedArray",
+ ("RO_SPACE", 0x02d01): "ArgumentsMarker",
+ ("RO_SPACE", 0x02da1): "Exception",
+ ("RO_SPACE", 0x02e41): "TerminationException",
+ ("RO_SPACE", 0x02ee9): "OptimizedOut",
+ ("RO_SPACE", 0x02f89): "StaleRegister",
+ ("RO_SPACE", 0x047c1): "EmptyByteArray",
+ ("RO_SPACE", 0x04851): "EmptyFixedUint8Array",
+ ("RO_SPACE", 0x04871): "EmptyFixedInt8Array",
+ ("RO_SPACE", 0x04891): "EmptyFixedUint16Array",
+ ("RO_SPACE", 0x048b1): "EmptyFixedInt16Array",
+ ("RO_SPACE", 0x048d1): "EmptyFixedUint32Array",
+ ("RO_SPACE", 0x048f1): "EmptyFixedInt32Array",
+ ("RO_SPACE", 0x04911): "EmptyFixedFloat32Array",
+ ("RO_SPACE", 0x04931): "EmptyFixedFloat64Array",
+ ("RO_SPACE", 0x04951): "EmptyFixedUint8ClampedArray",
+ ("RO_SPACE", 0x049b1): "EmptySloppyArgumentsElements",
+ ("RO_SPACE", 0x049d1): "EmptySlowElementDictionary",
+ ("RO_SPACE", 0x04a19): "EmptyOrderedHashMap",
+ ("RO_SPACE", 0x04a41): "EmptyOrderedHashSet",
+ ("RO_SPACE", 0x04a79): "EmptyPropertyCell",
+ ("RO_SPACE", 0x04b59): "InfinityValue",
+ ("RO_SPACE", 0x04b69): "MinusZeroValue",
+ ("RO_SPACE", 0x04b79): "MinusInfinityValue",
+ ("RO_SPACE", 0x04b89): "SelfReferenceMarker",
("OLD_SPACE", 0x02211): "EmptyScript",
("OLD_SPACE", 0x02291): "ManyClosuresCell",
("OLD_SPACE", 0x022b1): "NoElementsProtector",
@@ -362,6 +378,7 @@ KNOWN_OBJECTS = {
("OLD_SPACE", 0x02361): "StringLengthProtector",
("OLD_SPACE", 0x02371): "ArrayIteratorProtector",
("OLD_SPACE", 0x02399): "ArrayBufferNeuteringProtector",
+ ("OLD_SPACE", 0x02421): "StringIteratorProtector",
}
# List of known V8 Frame Markers.
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 654e19f7e9..11f5364162 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -6,5 +6,5 @@ A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
-The autoroller bought a round of Himbeerbrause. Suddenly....
-The bartender starts to shake the bottles...
+The autoroller bought a round of Himbeerbrause. Suddenly.....
+The bartender starts to shake the bottles......